Dataset columns (with the viewer's summary statistics):

  repo_name   string    length 5–100
  path        string    length 4–231
  language    string    1 distinct value
  license     string    15 distinct values
  size        int64     6–947k
  score       float64   0–0.34
  prefix      string    length 0–8.16k
  middle      string    length 3–512
  suffix      string    length 0–8.17k
pongem/python-bot-project | appengine/standard/conftest.py | Python | apache-2.0 | size 1,150 | score 0

# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

# Import py.test hooks and fixtures for App Engine
from gcp.testing.appengine import (
    login,
    pytest_configure,
    pytest_runtest_call,
    run_tasks,
    testbed)
import six

(login)
(pytest_configure)
(pytest_runtest_call)
(run_tasks)
(testbed)


def pytest_ignore_collect(path, config):
    """Skip App Engine tests in python 3 or if no SDK is available."""
    if 'appengine/standard' in str(path):
        if six.PY3:
            return True
        if 'GAE_SDK_PATH' not in os.environ:
            return True
    return False
endlessm/chromium-browser | chrome/browser/resources/unpack_pak_test.py | Python | bsd-3-clause | size 1,132 | score 0.0053

#!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import unpack_pak
import unittest


class UnpackPakTest(unittest.TestCase):
  def testMapFileLine(self):
    self.assertTrue(unpack_pak.ParseLine('  {"path.js", IDR_PATH}'))

  def testGzippedMapFileLine(self):
    self.assertTrue(unpack_pak.ParseLine('  {"path.js", IDR_PATH, false}'))
    self.assertTrue(unpack_pak.ParseLine('  {"path.js", IDR_PATH, true}'))

  def testGetFileAndDirName(self):
    (f, d) = unpack_pak.GetFileAndDirName(
        'out/build/gen/foo/foo.unpak', 'out/build/gen/foo', 'a/b.js')
    self.assertEquals('b.js', f)
    self.assertEquals('out/build/gen/foo/foo.unpak/a', d)

  def testGetFileAndDirNameForGeneratedResource(self):
    (f, d) = unpack_pak.GetFileAndDirName(
        'out/build/gen/foo/foo.unpak', 'out/build/gen/foo',
        '@out_folder@/out/build/gen/foo/a/b.js')
    self.assertEquals('b.js', f)
    self.assertEquals('out/build/gen/foo/foo.unpak/a', d)


if __name__ == '__main__':
  unittest.main()
bstroebl/QGIS | python/plugins/GdalTools/tools/extentSelector.py | Python | gpl-2.0 | size 7,022 | score 0.026915

# -*- coding: utf-8 -*-

"""
***************************************************************************
    extentSelector.py
    ---------------------
    Date                 : December 2010
    Copyright            : (C) 2010 by Giuseppe Sucameli
    Email                : brush dot tyler at gmail dot com
***************************************************************************
*                                                                         *
*   This program is free software; you can redistribute it and/or modify  *
*   it under the terms of the GNU General Public License as published by  *
*   the Free Software Foundation; either version 2 of the License, or     *
*   (at your option) any later version.                                   *
*                                                                         *
***************************************************************************
"""

__author__ = 'Giuseppe Sucameli'
__date__ = 'December 2010'
__copyright__ = '(C) 2010, Giuseppe Sucameli'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'

from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *

from ui_extentSelector import Ui_GdalToolsExtentSelector as Ui_ExtentSelector
import GdalTools_utils as Utils


class GdalToolsExtentSelector(QWidget, Ui_ExtentSelector):

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.canvas = None
        self.tool = None
        self.previousMapTool = None
        self.isStarted = False

        self.setupUi(self)

        self.connect(self.x1CoordEdit, SIGNAL("textChanged(const QString &)"), self.coordsChanged)
        self.connect(self.x2CoordEdit, SIGNAL("textChanged(const QString &)"), self.coordsChanged)
        self.connect(self.y1CoordEdit, SIGNAL("textChanged(const QString &)"), self.coordsChanged)
        self.connect(self.y2CoordEdit, SIGNAL("textChanged(const QString &)"), self.coordsChanged)
        self.connect(self.btnEnable, SIGNAL("clicked()"), self.start)

    def setCanvas(self, canvas):
        self.canvas = canvas
        self.tool = RectangleMapTool(self.canvas)
        self.previousMapTool = self.canvas.mapTool()
        self.connect(self.tool, SIGNAL("rectangleCreated()"), self.fillCoords)
        self.connect(self.tool, SIGNAL("deactivated()"), self.pause)

    def stop(self):
        if not self.isStarted:
            return
        self.isStarted = False
        self.btnEnable.setVisible(False)
        self.tool.reset()
        self.canvas.unsetMapTool(self.tool)
        if self.previousMapTool != self.tool:
            self.canvas.setMapTool(self.previousMapTool)
        #self.coordsChanged()
        self.emit(SIGNAL("selectionStopped()"))

    def start(self):
        prevMapTool = self.canvas.mapTool()
        if prevMapTool != self.tool:
            self.previousMapTool = prevMapTool
        self.canvas.setMapTool(self.tool)
        self.isStarted = True
        self.btnEnable.setVisible(False)
        self.coordsChanged()
        self.emit(SIGNAL("selectionStarted()"))

    def pause(self):
        if not self.isStarted:
            return
        self.btnEnable.setVisible(True)
        self.emit(SIGNAL("selectionPaused()"))

    def setExtent(self, rect):
        if self.tool.setRectangle(rect):
            self.emit(SIGNAL("newExtentDefined()"))

    def getExtent(self):
        return self.tool.rectangle()

    def isCoordsValid(self):
        try:
            point1 = QgsPoint(float(self.x1CoordEdit.text()), float(self.y1CoordEdit.text()))
            point2 = QgsPoint(float(self.x2CoordEdit.text()), float(self.y2CoordEdit.text()))
        except ValueError:
            return False
        return True

    def coordsChanged(self):
        rect = None
        if self.isCoordsValid():
            point1 = QgsPoint(float(self.x1CoordEdit.text()), float(self.y1CoordEdit.text()))
            point2 = QgsPoint(float(self.x2CoordEdit.text()), float(self.y2CoordEdit.text()))
            rect = QgsRectangle(point1, point2)
        self.setExtent(rect)

    def fillCoords(self):
        rect = self.getExtent()
        self.blockSignals(True)
        if rect != None:
            self.x1CoordEdit.setText(str(rect.xMinimum()))
            self.x2CoordEdit.setText(str(rect.xMaximum()))
            self.y1CoordEdit.setText(str(rect.yMaximum()))
            self.y2CoordEdit.setText(str(rect.yMinimum()))
        else:
            self.x1CoordEdit.clear()
            self.x2CoordEdit.clear()
            self.y1CoordEdit.clear()
            self.y2CoordEdit.clear()
        self.blockSignals(False)
        self.emit(SIGNAL("newExtentDefined()"))


class RectangleMapTool(QgsMapToolEmitPoint):

    def __init__(self, canvas):
        self.canvas = canvas
        QgsMapToolEmitPoint.__init__(self, self.canvas)
        self.rubberBand = QgsRubberBand(self.canvas, True)  # true, it's a polygon
        self.rubberBand.setColor(Qt.red)
        self.rubberBand.setWidth(1)
        self.reset()

    def reset(self):
        self.startPoint = self.endPoint = None
        self.isEmittingPoint = False
        self.rubberBand.reset(True)  # true, it's a polygon

    def canvasPressEvent(self, e):
        self.startPoint = self.toMapCoordinates(e.pos())
        self.endPoint = self.startPoint
        self.isEmittingPoint = True
        self.showRect(self.startPoint, self.endPoint)

    def canvasReleaseEvent(self, e):
        self.isEmittingPoint = False
        if self.rectangle() != None:
            self.emit(SIGNAL("rectangleCreated()"))

    def canvasMoveEvent(self, e):
        if not self.isEmittingPoint:
            return
        self.endPoint = self.toMapCoordinates(e.pos())
        self.showRect(self.startPoint, self.endPoint)

    def showRect(self, startPoint, endPoint):
        self.rubberBand.reset(True)  # true, it's a polygon
        if startPoint.x() == endPoint.x() or startPoint.y() == endPoint.y():
            return

        point1 = QgsPoint(startPoint.x(), startPoint.y())
        point2 = QgsPoint(startPoint.x(), endPoint.y())
        point3 = QgsPoint(endPoint.x(), endPoint.y())
        point4 = QgsPoint(endPoint.x(), startPoint.y())

        self.rubberBand.addPoint(point1, False)
        self.rubberBand.addPoint(point2, False)
        self.rubberBand.addPoint(point3, False)
        self.rubberBand.addPoint(point4, True)  # true to update canvas
        self.rubberBand.show()

    def rectangle(self):
        if self.startPoint == None or self.endPoint == None:
            return None
        elif self.startPoint.x() == self.endPoint.x() or self.startPoint.y() == self.endPoint.y():
            return None
        return QgsRectangle(self.startPoint, self.endPoint)

    def setRectangle(self, rect):
        if rect == self.rectangle():
            return False
        if rect == None:
            self.reset()
        else:
            self.startPoint = QgsPoint(rect.xMaximum(), rect.yMaximum())
            self.endPoint = QgsPoint(rect.xMinimum(), rect.yMinimum())
            self.showRect(self.startPoint, self.endPoint)
        return True

    def deactivate(self):
        QgsMapTool.deactivate(self)
        self.emit(SIGNAL("deactivated()"))
quentinlautischer/291MiniProject2 | src/rgxHandler.py | Python | apache-2.0 | size 3,349 | score 0.01284

import re


class rgxHandler:
    # Taken from https://www.safaribooksonline.com/library/view/python-cookbook-2nd/0596007973/ch01s19.html
    linetitles = ["product/productId: ", "product/title: ", "product/price: ",
                  "review/userId: ", "review/profileName: ",
                  "review/helpfulness: ", "review/score: ", "review/time: ",
                  "review/summary: ", "review/text: "]
    getrids = dict([(linetitles[0], ''), (linetitles[1], ''), (linetitles[2], ''),
                    (linetitles[3], ''), (linetitles[4], ''), (linetitles[5], ''),
                    (linetitles[6], ''), (linetitles[7], ''), (linetitles[8], ''),
                    (linetitles[9], '')])
    replace = {"\\": "\\\\", '"': "&quot;"}
    unreplace = {"\\\\": "\\", "&quot;": '"'}

    def __init__(self):
        pass

    def multiple_replace(self, text, adict):
        rx = re.compile('|'.join(map(re.escape, adict)))

        def one_xlat(match):
            return adict[match.group(0)]
        return rx.sub(one_xlat, text)

    def line_rgx(self, text):
        text = text.strip('\n')
        text = self.multiple_replace(text, self.getrids)
        text = self.multiple_replace(text, self.replace)
        return text

    def find3OrMore(self, line):
        #line = re.sub("&quot;", ' ', line)
        line = re.sub(r'([^\s\w]|_)+', ' ', line)
        words = line.split()
        rtnwords = []
        for word in words:
            if len(word.strip()) >= 3:
                rtnwords.append(word.strip().lower())
        return rtnwords

    def putLineTitlesBack(self, review):
        rtnlines = []
        iter = re.finditer('"', review)
        quotes = [m.start(0) for m in iter]
        iter = re.finditer(',', review)
        commas = [m.start(0) for m in iter]
        q = 0
        c = 0
        i = 0
        while(True):
            #print(str(i))
            #print("c: " + str(c) + "/" + str(len(commas)) + " " + str(commas[c]))
            #print("q: " + str(q) + "/" + str(len(quotes)) + " " + str(quotes[q]))
            if commas[c] < quotes[q] and ((c+1) < len(commas)):
                if c == 0:
                    #print(review[0:commas[c]] + '\n')
                    rtnlines.append(review[0:commas[c]] + '\n')
                else:
                    if quotes[q-1] > commas[c-1]:
                        pass
                    else:
                        #print(review[commas[c-1]+1:commas[c]] + '\n')
                        rtnlines.append(review[commas[c-1]+1:commas[c]] + '\n')
                c += 1
            elif (commas[c] > quotes[q] and commas[c] < quotes[q+1]) or ((c+1) == len(commas)):
                #print(review[quotes[q]+1:quotes[q+1]] + '\n')
                rtnlines.append(review[quotes[q]+1:quotes[q+1]] + '\n')
                if q+1 == len(quotes)-1:
                    break
                while commas[c] < quotes[q+1]:
                    c += 1
                q += 2
            else:
                #print(review[quotes[q]+1:quotes[q+1]] + '\n')
                rtnlines.append(review[quotes[q]+1:quotes[q+1]] + '\n')
                q += 2
                if q == len(quotes):
                    break
            i += 1

        i = 0
        for line in rtnlines:
            line = self.multiple_replace(line.strip('"'), self.unreplace)
            rtnlines[i] = self.linetitles[i] + line + '\n'
            i += 1
        return rtnlines
abzaloid/maps | django-project/bin/django-admin.py | Python | mit | size 161 | score 0

#!/Users/patron/Desktop/maps/django-project/bin/python
from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
Cangjians/pycangjie | tests/__init__.py | Python | lgpl-3.0 | size 6,379 | score 0.000314

# Copyright (c) 2013 - The pycangjie authors
#
# This file is part of pycangjie, the Python bindings to libcangjie.
#
# pycangjie is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pycangjie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pycangjie. If not, see <http://www.gnu.org/licenses/>.

import itertools
import operator
import string
import subprocess
import unittest

import cangjie


class MetaTest(type):
    """Metaclass for our test cases

    The goal is to provide every TestCase class with methods like test_a(),
    test_b(), etc..., in other words, one method per potential Cangjie input
    code.

    Well, not quite, because that would be 12356630 methods (the number of
    strings composed of 1 to 5 lowercase ascii letters), and even though my
    laptop has 8Go of RAM, the test process gets killed by the OOM killer. :)

    So we cheat, and use libcangjie's wildcard support, so that we only
    generate 26 + 26^2 = 702 methods.
    """
    def __init__(cls, name, bases, dct):
        super(MetaTest, cls).__init__(name, bases, dct)

        def gen_codes():
            """Generate the 702 possible input codes"""
            # First, the 1-character codes
            for c in string.ascii_lowercase:
                yield c

            # Next, the 2-characters-with-wildcard codes
            for t in itertools.product(string.ascii_lowercase, repeat=2):
                yield '*'.join(t)

        def tester(code):
            def func(cls):
                return cls.run_test(code)
            return func

        # Generate the test_* methods
        for code in gen_codes():
            setattr(cls, "test_%s" % code.replace("*", ""), tester(code))


class BaseTestCase(unittest.TestCase):
    """Base test class, grouping the common stuff for all our unit tests"""
    def __init__(self, name):
        super().__init__(name)

        self.cli_cmd = ["/usr/bin/libcangjie_cli"] + self.cli_options

        self.language = (cangjie.filters.BIG5 | cangjie.filters.HKSCS |
                         cangjie.filters.PUNCTUATION |
                         cangjie.filters.CHINESE | cangjie.filters.ZHUYIN |
                         cangjie.filters.KANJI | cangjie.filters.KATAKANA |
                         cangjie.filters.HIRAGANA | cangjie.filters.SYMBOLS)

    def setUp(self):
        self.cj = cangjie.Cangjie(self.version, self.language)

    def tearDown(self):
        del self.cj

    def run_command(self, cmd):
        """Run a command, deal with errors, and return its stdout"""
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()

        try:
            cangjie.errors.handle_error_code(proc.returncode,
                                             msg="Unknown error while running"
                                                 " libcangjie_cli (%d)"
                                                 % proc.returncode)
        except cangjie.errors.CangjieNoCharsError:
            return ""

        try:
            return out.decode("utf-8")
        except UnicodeDecodeError:
            # Python's 'utf-8' codec trips over b"\xed\xa1\x9d\xed\xbc\xb2",
            # but according to [1] and [2], it is a valid sequence of 2 chars:
            #     U+D85D \xed\xa1\x9d
            #     U+DF32 \xed\xbc\xb2
            # [1] http://www.utf8-chartable.de/unicode-utf8-table.pl?start=55389&utf8=string-literal
            # [2] http://www.utf8-chartable.de/unicode-utf8-table.pl?start=57138&utf8=string-literal
            # TODO: Investigate this further, and eventually open a bug report
            out2 = []
            for line in out.split("\n".encode("utf-8")):
                try:
                    out2.append(line.decode("utf-8"))
                except UnicodeDecodeError:
                    pass
            return "\n".join(out2)

    def run_test(self, input_code):
        """Run the actual test

        This compares the output of the libcangjie_cli tool with the output
        from pycangjie.

        The idea is that if pycangjie produces the same results as a C++
        tool compiled against libcangjie, then pycangjie properly wraps
        libcangjie.

        We do not try to verify that pycangjie produces valid results here,
        validity is to be checked in libcangjie.

        Note that this whole test is based on scraping the output of
        libcangjie_cli, which is quite fragile.
        """
        # Get a list of CangjieChar from libcangjie_cli as a reference
        tmp_expected = self.run_command(self.cli_cmd + [input_code]).split("\n")
        tmp_expected = map(lambda x: x.strip(" \n"), tmp_expected)
        tmp_expected = filter(lambda x: len(x) > 0, tmp_expected)

        expected = []
        for item in tmp_expected:
            chchar, simpchar, code, frequency = item.split(", ")
            chchar = chchar.split(": ")[-1].strip("'")
            simpchar = simpchar.split(": ")[-1].strip("'")
            code = code.split(": ")[-1].strip("'")
            frequency = int(frequency.split(" ")[-1])
            expected.append(cangjie._core.CangjieChar(chchar.encode("utf-8"),
                                                      simpchar.encode("utf-8"),
                                                      code.encode("utf-8"),
                                                      frequency))
        expected = sorted(expected, key=operator.attrgetter('chchar', 'code'))

        try:
            # And compare with what pycangjie produces
            results = sorted(self.cj.get_characters(input_code),
                             key=operator.attrgetter('chchar', 'code'))
            self.assertEqual(results, expected)
        except cangjie.errors.CangjieNoCharsError:
            self.assertEqual(len(expected), 0)
TheShellLand/pies | v3/Libraries/xml/xml-parse.py | Python | mit | size 321 | score 0

#!/usr/bin/env python
# -*- coding: utf8 -*-

import xml.etree.ElementTree as ET
from pprint import pprint

filename = 'GeoLogger.gpx'


def main():
    tree = ET.parse(filename)
    root = tree.getroot()
    pprint(root.tag)
    pprint(root.attrib)
    pprint(root.findtext('.'))


if __name__ == "__main__":
    main()
pschmitt/home-assistant | tests/helpers/test_script.py | Python | apache-2.0 | size 46,540 | score 0.000645

"""The tests for the Script component."""
# pylint: disable=protected-access
import asyncio
from contextlib import contextmanager
from datetime import timedelta
import logging
from unittest import mock

import pytest
import voluptuous as vol

# Otherwise can't test just this file (import order issue)
from homeassistant import exceptions
import homeassistant.components.scene as scene
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_ON
from homeassistant.core import Context, CoreState, callback
from homeassistant.helpers import config_validation as cv, script
from homeassistant.helpers.event import async_call_later
import homeassistant.util.dt as dt_util

from tests.async_mock import patch
from tests.common import (
    async_capture_events,
    async_fire_time_changed,
    async_mock_service,
)

ENTITY_ID = "script.test"


@pytest.fixture
def mock_timeout(hass, monkeypatch):
    """Mock async_timeout.timeout."""

    class MockTimeout:
        def __init__(self, timeout):
            self._timeout = timeout
            self._loop = asyncio.get_event_loop()
            self._task = None
            self._cancelled = False
            self._unsub = None

        async def __aenter__(self):
            if self._timeout is None:
                return self
            self._task = asyncio.Task.current_task()
            if self._timeout <= 0:
                self._loop.call_soon(self._cancel_task)
                return self
            # Wait for a time_changed event instead of real time passing.
            self._unsub = async_call_later(hass, self._timeout, self._cancel_task)
            return self

        async def __aexit__(self, exc_type, exc_val, exc_tb):
            if exc_type is asyncio.CancelledError and self._cancelled:
                self._unsub = None
                self._task = None
                raise asyncio.TimeoutError
            if self._timeout is not None and self._unsub:
                self._unsub()
                self._unsub = None
            self._task = None
            return None

        @callback
        def _cancel_task(self, now=None):
            if self._task is not None:
                self._task.cancel()
                self._cancelled = True

    monkeypatch.setattr(script, "timeout", MockTimeout)


def async_watch_for_action(script_obj, message):
    """Watch for message in last_action."""
    flag = asyncio.Event()

    @callback
    def check_action():
        if script_obj.last_action and message in script_obj.last_action:
            flag.set()

    script_obj.change_listener = check_action
    return flag


async def test_firing_event_basic(hass):
    """Test the firing of events."""
    event = "test_event"
    context = Context()
    events = async_capture_events(hass, event)

    sequence = cv.SCRIPT_SCHEMA({"event": event, "event_data": {"hello": "world"}})
    script_obj = script.Script(hass, sequence)

    await script_obj.async_run(context=context)
    await hass.async_block_till_done()

    assert len(events) == 1
    assert events[0].context is context
    assert events[0].data.get("hello") == "world"


async def test_firing_event_template(hass):
    """Test the firing of events."""
    event = "test_event"
    context = Context()
    events = async_capture_events(hass, event)

    sequence = cv.SCRIPT_SCHEMA(
        {
            "event": event,
            "event_data_template": {
                "dict": {
                    1: "{{ is_world }}",
                    2: "{{ is_world }}{{ is_world }}",
                    3: "{{ is_world }}{{ is_world }}{{ is_world }}",
                },
                "list": ["{{ is_world }}", "{{ is_world }}{{ is_world }}"],
            },
        }
    )
    script_obj = script.Script(hass, sequence)

    await script_obj.async_run({"is_world": "yes"}, context=context)
    await hass.async_block_till_done()

    assert len(events) == 1
    assert events[0].context is context
    assert events[0].data == {
        "dict": {1: "yes", 2: "yesyes", 3: "yesyesyes"},
        "list": ["yes", "yesyes"],
    }


async def test_calling_service_basic(hass):
    """Test the calling of a service."""
    context = Context()
    calls = async_mock_service(hass, "test", "script")

    sequence = cv.SCRIPT_SCHEMA({"service": "test.script", "data": {"hello": "world"}})
    script_obj = script.Script(hass, sequence)

    await script_obj.async_run(context=context)
    await hass.async_block_till_done()

    assert len(calls) == 1
    assert calls[0].context is context
    assert calls[0].data.get("hello") == "world"


async def test_calling_service_template(hass):
    """Test the calling of a service."""
    context = Context()
    calls = async_mock_service(hass, "test", "script")

    sequence = cv.SCRIPT_SCHEMA(
        {
            "service_template": """
            {% if True %}
                test.script
            {% else %}
                test.not_script
            {% endif %}""",
            "data_template": {
                "hello": """
                {% if is_world == 'yes' %}
                    world
                {% else %}
                    not world
                {% endif %}
                """
            },
        }
    )
    script_obj = script.Script(hass, sequence)

    await script_obj.async_run({"is_world": "yes"}, context=context)
    await hass.async_block_till_done()

    assert len(calls) == 1
    assert calls[0].context is context
    assert calls[0].data.get("hello") == "world"


async def test_multiple_runs_no_wait(hass):
    """Test multiple runs with no wait in script."""
    logger = logging.getLogger("TEST")
    calls = []
    heard_event = asyncio.Event()

    async def async_simulate_long_service(service):
        """Simulate a service that takes a not insignificant time."""
        fire = service.data.get("fire")
        listen = service.data.get("listen")
        service_done = asyncio.Event()

        @callback
        def service_done_cb(event):
            logger.debug("simulated service (%s:%s) done", fire, listen)
            service_done.set()

        calls.append(service)
        logger.debug("simulated service (%s:%s) started", fire, listen)
        unsub = hass.bus.async_listen(listen, service_done_cb)
        hass.bus.async_fire(fire)
        await service_done.wait()
        unsub()

    hass.services.async_register("test", "script", async_simulate_long_service)

    @callback
    def heard_event_cb(event):
        logger.debug("heard: %s", event)
        heard_event.set()

    sequence = cv.SCRIPT_SCHEMA(
        [
            {
                "service": "test.script",
                "data_template": {"fire": "{{ fire1 }}", "listen": "{{ listen1 }}"},
            },
            {
                "service": "test.script",
                "data_template": {"fire": "{{ fire2 }}", "listen": "{{ listen2 }}"},
            },
        ]
    )
    script_obj = script.Script(hass, sequence, script_mode="parallel", max_runs=2)

    # Start script twice in such a way that second run will be started while first run
    # is in the middle of the first service call.
    unsub = hass.bus.async_listen("1", heard_event_cb)
    logger.debug("starting 1st script")
    hass.async_create_task(
        script_obj.async_run(
            {"fire1": "1", "listen1": "2", "fire2": "3", "listen2": "4"}
        )
    )
    await asyncio.wait_for(heard_event.wait(), 1)
    unsub()

    logger.debug("starting 2nd script")
    await script_obj.async_run(
        {"fire1": "2", "listen1": "3", "fire2": "4", "listen2": "4"}
    )
    await hass.async_block_till_done()

    assert len(calls) == 4


async def test_activating_scene(hass):
    """Test the activation of a scene."""
    context = Context()
    calls = async_mock_service(hass, scene.DOMAIN, SERVICE_TURN_ON)

    sequence = cv.SCRIPT_SCHEMA({"scene": "scene.hello"})
    script_obj = script.Script(hass, sequence)

    await script_obj.async_run(context=context)
    await hass.async_block_till_done()

    assert len(calls) == 1
    assert calls[0].context is context
    assert calls[0].data.get(ATTR_ENTITY_ID) == "scene.hello"


@pytest.mark.paramet
jainaman224/zenodo | zenodo/modules/deposit/minters.py | Python | gpl-2.0 | size 2,471 | score 0

# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.

"""Persistent identifier minters."""

from __future__ import absolute_import

from invenio_pidstore.models import PersistentIdentifier, PIDStatus, \
    RecordIdentifier


def zenodo_concept_recid_minter(record_uuid=None, data=None):
    """Mint the Concept RECID.

    Reserves the Concept RECID for the record.
    """
    parent_id = RecordIdentifier.next()
    conceptrecid = PersistentIdentifier.create(
        pid_type='recid',
        pid_value=str(parent_id),
        status=PIDStatus.RESERVED,
    )
    data['conceptrecid'] = conceptrecid.pid_value
    return conceptrecid


def zenodo_deposit_minter(record_uuid, data):
    """Mint the DEPID, and reserve the Concept RECID and RECID PIDs."""
    if 'conceptrecid' not in data:
        zenodo_concept_recid_minter(data=data)

    recid = zenodo_reserved_record_minter(data=data)

    # Create depid with same pid_value of the recid
    depid = PersistentIdentifier.create(
        'depid',
        str(recid.pid_value),
        object_type='rec',
        object_uuid=record_uuid,
        status=PIDStatus.REGISTERED,
    )

    data.update({
        '_deposit': {
            'id': depid.pid_value,
            'status': 'draft',
        },
    })

    return depid


def zenodo_reserved_record_minter(record_uuid=None, data=None):
    """Reserve a recid."""
    id_ = RecordIdentifier.next()
    recid = PersistentIdentifier.create(
        'recid', id_, status=PIDStatus.RESERVED
    )
    data['recid'] = recid.pid_value
    return recid
southpawtech/TACTIC-DEV | src/pyasm/biz/file.py | Python | epl-1.0 | size 34,859 | score 0.008721

###########################################################
#
# Copyright (c) 2005, Southpaw Technology
#                     All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#

__all__ = ["FileException", "File", "FileAccess", "IconCreator", "FileGroup", "FileRange"]

from pyasm.common import Common, Xml, TacticException, Environment, System, Config
from pyasm.search import *
from project import Project

from subprocess import Popen, PIPE

import sys, os, string, re, stat, glob

try:
    #import Image
    from PIL import Image
    # Test to see if imaging actually works
    import _imaging
    HAS_PIL = True
except:
    HAS_PIL = False

try:
    import Image
    # Test to see if imaging actually works
    import _imaging
    HAS_PIL = True
except:
    HAS_PIL = False

# check if imagemagick is installed, and find exe if possible
convert_exe = ''
HAS_IMAGE_MAGICK = False
if os.name == "nt":
    # prefer direct exe to not confuse with other convert.exe present on nt systems
    convert_exe_list = glob.glob('C:\\Program Files\\ImageMagick*')
    for exe in convert_exe_list:
        try:
            convert_process = Popen(['%s\\convert.exe'%exe,'-version'], stdout=PIPE, stderr=PIPE)
            convert_return,convert_err = convert_process.communicate()
            if 'ImageMagick' in convert_return:
                convert_exe = '%s\\convert.exe'%exe
                HAS_IMAGE_MAGICK = True
        except:
            print "Running %s failed" %exe
    if not convert_exe_list:
        # IM might not be in Program Files but may still be in PATH
        try:
            convert_process = Popen(['convert','-version'], stdout=PIPE, stderr=PIPE)
            convert_return,convert_err = convert_process.communicate()
            if 'ImageMagick' in convert_return:
                convert_exe = 'convert'
                HAS_IMAGE_MAGICK = True
        except:
            pass
else:
    # in other systems (e.g. unix) 'convert' is expected to be in PATH
    try:
        convert_process = Popen(['convert','-version'], stdout=PIPE, stderr=PIPE)
        convert_return,convert_err = convert_process.communicate()
        if 'ImageMagick' in convert_return:
            convert_exe = 'convert'
            HAS_IMAGE_MAGICK = True
    except:
        pass

if Common.which("ffprobe"):
    HAS_FFMPEG = True
else:
    HAS_FFMPEG = False

import subprocess


class FileException(TacticException):
    pass


class File(SObject):

    NORMAL_EXT = ['max','ma','xls' ,'xlsx', 'doc', 'docx','txt', 'rtf', 'odt','fla','psd', 'xsi', 'scn', 'hip',
                  'xml','eani','pdf', 'fbx', 'gz', 'zip', 'rar', 'ini', 'db', 'py', 'pyd', 'spt' ]

    VIDEO_EXT = ['mov','wmv','mpg','mpeg','m1v','m2v','mp2','mp4','mpa','mpe','mp4','wma','asf','asx','avi','wax',
                 'wm','wvx','ogg','webm','mkv','m4v','mxf','f4v','rmvb']

    IMAGE_EXT = ['jpg','png','tif','tiff','gif','dds','dcm']

    SEARCH_TYPE = "sthpw/file"

    BASE_TYPE_SEQ = "sequence"
    BASE_TYPE_DIR = "directory"
    BASE_TYPE_FILE = "file"

    def get_code(my):
        return my.get_value("code")

    def get_file_name(my):
        return my.get_value("file_name")

    def get_file_range(my):
        return my.get_value("file_range")

    def get_type(my):
        return my.get_value("type")

    def get_media_type_by_path(cls, path):
        tmp, ext = os.path.splitext(path)
        ext = ext.lstrip(".")
        ext = ext.lower()
        if ext in File.VIDEO_EXT:
            return "video"
        elif ext in File.NORMAL_EXT:
            return "document"
        else:
            return "image"
    get_media_type_by_path = classmethod(get_media_type_by_path)

    def get_sobject(my):
        '''get the sobject associated with this file'''
        search = Search(my.get_value("search_type"))
        search.add_id_filter(my.get_value("search_id"))
        sobject = search.get_sobject()
        return sobject

    def get_full_file_name(my):
        '''Gets the full file name. This is the same as get_file_name'''
        return my.get_file_name()

    def get_lib_dir(my,snapshot=None):
        '''go through the stored snapshot_code to get the actual path'''
        code = my.get_value("snapshot_code")
        from snapshot import Snapshot
        snapshot = Snapshot.get_by_code(code)
        return snapshot.get_lib_dir()

    def get_env_dir(my,snapshot=None):
        '''go through the stored snapshot_code to get the actual path'''
        code = my.get_value("snapshot_code")
        from snapshot import Snapshot
        snapshot = Snapshot.get_by_code(code)
        return snapshot.get_env_dir()

    def get_web_dir(my,snapshot=None):
        '''go through the stored snapshot_code to get the actual path'''
        code = my.get_value("snapshot_code")
        from snapshot import Snapshot
        snapshot = Snapshot.get_by_code(code)
        return snapshot.get_web_dir()

    def get_lib_path(my):
        filename = my.get_full_file_name()
        return "%s/%s" % (my.get_lib_dir(), filename)

    def get_env_path(my):
        '''path beginning with $TACTIC_ASSET_DIR'''
        filename = my.get_full_file_name()
        return "%s/%s" % (my.get_env_dir(), filename)

    def get_web_path(my):
        filename = my.get_full_file_name()
        return "%s/%s" % (my.get_web_dir(), filename)

    ##################
    # Static Methods
    ##################
    """
    # DEPRERECATED

    PADDING = 10

    # DEPRERECATED
    def add_file_code(file_path, file_code):
        ext = ".".join( File.get_extensions(file_path) )
        padded_id = str(file_code).zfill(File.PADDING)
        file_path = file_path.replace(".%s" % ext, "_%s.%s" % (padded_id, ext) )
        return file_path
    add_file_code = staticmethod(add_file_code)

    # DEPRERECATED
    def remove_file_code(file_path):
        new_path = re.compile(r'_(\w{%s})\.' % File.PADDING).sub(".", file_path)
        return new_path
    remove_file_code = staticmethod(remove_file_code)

    # DEPRERECATED
    def extract_file_code(file_path):
        p = re.compile(r'_(\w{%s})\.' % File.PADDING)
        m = p.search(file_path)
        if not m:
            return 0
        groups = m.groups()
        if not groups:
            return 0
        else:
            file_code = groups[0]
            # make sure there are only alpha/numberic characters
            if file_code.find("_") != -1:
                return 0
            # make sure the first 3 are numeric
            if not re.match('^\d{3}\w+$', file_code):
                return 0
            # strip out the leading zeros
            return file_code.lstrip("0")
    extract_file_code = staticmethod(extract_file_code)

    # DEPRERECATED
    def extract_file_path(file_path):
        '''return file path without the unique id'''
        p = re.compile(r'_(\w{%s})\.' % File.PADDING)
        m = p.search(file_path)
        if not m:
            return file_path
        groups = m.groups()
        if not groups:
            return file_path
        else:
            new_path = file_path.replace("_%s" % groups[0], "")
            return new_path
    extract_file_path = staticmethod(extract_file_path)

    # DEPRERECATED
    def has_file_code(file_path):
        file_code = File.extract_file_code(file_path)
        if file_code == 0:
            return False
        else:
            return True
    has_file_code = staticmethod(has_file_code)
    """

    def get_extension(file_path):
        '''get only the final extension'''
        parts = os.path.basename(file_path).split(".")
        ext = parts[len(parts)-1]
        return ext
    get_extension = staticmethod(get_extension)

    def get_extensions(file_path):
        '''get all of the extensions after the first .'''
        parts = os.path.basename(file_path).split(".")
        ext = parts[1:len(parts)]
        return ext
    get_extensions = staticmethod(get_extensions)

    def get_by_snaps
mozilla/telemetry-analysis-service | tests/test_stats.py | Python | mpl-2.0 | size 1,163 | score 0.00086

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
from atmo.stats.models import Metric


def test_metrics_record(now, one_hour_ago):
    Metric.record("metric-key-1")
    Metric.record("metric-key-2", 500)
    Metric.record("metric-key-3", data={"other-value": "test"})
    Metric.record("metric-key-4", created_at=one_hour_ago, data={"other-value-2": 100})

    m = Metric.objects.get(key="metric-key-1")
    assert m.value == 1
    assert m.created_at.replace(microsecond=0) == now
    assert m.data is None

    m = Metric.objects.get(key="metric-key-2")
    assert m.value == 500
    assert m.created_at.replace(microsecond=0) == now
    assert m.data is None

    m = Metric.objects.get(key="metric-key-3")
    assert m.value == 1
    assert m.created_at.replace(microsecond=0) == now
    assert m.data == {"other-value": "test"}

    m = Metric.objects.get(key="metric-key-4")
    assert m.value == 1
    assert m.created_at.replace(microsecond=0) == one_hour_ago
    assert m.data == {"other-value-2": 100}
lkostler/AME60649_project_final | moltemplate/moltemplate/src/nbody_by_type_lib.py | Python | bsd-3-clause | size 18,422 | score 0.0057

#!/usr/bin/env python

# Author: Andrew Jewett (jewett.aij at g mail)
#         http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2012, Regents of the University of California
# All rights reserved.

import sys
from nbody_graph_search import *
#from collections import namedtuple

if sys.version < '2.7':
    sys.stderr.write('--------------------------------------------------------\n'
                     '----------------- WARNING: OLD PYTHON VERSION ----------\n'
                     '  This program is untested on your python version ('+sys.version+').\n'
                     '  PLEASE LET ME KNOW IF THIS PROGRAM CRASHES (and upgrade python).\n'
                     '    -Andrew 2013-10-25\n'
                     '--------------------------------------------------------\n'
                     '--------------------------------------------------------\n')
    from ordereddict import OrderedDict
else:
    from collections import OrderedDict

from collections import defaultdict
from ttree_lex import MatchesPattern, MatchesAll, InputError
#import gc


def GenInteractions_int(G_system,
                        g_bond_pattern,
                        typepattern_to_coefftypes,
                        canonical_order,  # function to sort atoms and bonds
                        atomtypes_int2str,
                        bondtypes_int2str,
                        report_progress=False):  # print messages to sys.stderr?
    """
    GenInteractions() automatically determines a list of interactions
    present in a system of bonded atoms (argument "G_system"),
    which satisfy the bond topology present in "g_bond_pattern", and
    satisfy the atom and bond type requirements in "typepattern_to_coefftypes".

    Whenever a set of atoms in "G_system" are bonded together in a way which
    matches "g_bond_pattern", and when the atom and bond types is consistent
    with one of the entries in "typepattern_to_coefftypes", the corresponding
    list of atoms from G_system is appended to the list of results.

    These results (the list of lists of atoms participating in an interaction)
    are organized according their corresponding "coefftype", a string
    which identifies the type of interaction they obey as explained above.
    results are returned as a dictionary using "coefftype" as the lookup key.

    Arguments:

     -- typepattern_to_coefftypes is a list of 2-tuples --
    The first element of the 2-tuple is the "typepattern".
    It contains a string describing a list of atom types and bond types.
    The typepattern is associated with a "coefftype",
    which is the second element of the 2-tuple. This is a string
    which identifies the type of interaction between the atoms.
    Later on, this string can be used to lookup the force field
    parameters for this interaction elsewhere.)

     -- Arguments: G_system, g_bond_pattern, atomtypes_int2str, bondtypes_int2str --

    G_system stores a list of atoms and bonds, and their attributes in
    "Ugraph" format. In this format:
    Atom ID numbers are represented by indices into the G_system.verts[] list.
    Bond ID numbers are represented by indices into the G_system.edges[] list.
    Atom types are represented as integers in the G_system.verts[i].attr list.
    Bond types are represented as integers in the G_system.edges[i].attr list.
    They are converted into strings using atomtypes_int2str, and
    bondtypes_int2str.

    g_bond_pattern is a graph which specifies the type of bonding between
    the atoms required for a match. It is in Ugraph format (however the
    atom and bond types are left blank.)

    Atom and bond types are supplied by the user in string format. (These
    strings typically encode integers, but could be any string in principle.)

    The string-version of the ith atom type is stored in
       atomtypes_int2str[ G_system.verts[i].attr ]
    The string-version of the ith bond type is stored in
       bondtypes_int2str[ G_system.edges[i].attr ]

     -- The "canonical_order" argument: --

    The search for atoms with a given bond pattern often yields
    redundant matches. There is no difference for example between
    the angle formed between three consecutively bonded atoms (named,
    1, 2, 3, for example), and the angle between the same atoms in
    reverse order (3, 2, 1). However both triplets of atoms will be
    returned by the subgraph-matching algorithm when searching for
    ALL 3-body interactions.)

    To eliminate this redundancy, the caller must supply a
    "canonical_order" argument. This is a function which sorts the
    atoms and bonds in a way which is consistent with the type of
    N-body interaction being considered. The atoms (and bonds) in a
    candidate match are rearranged by the canonical_order(). Then the
    re-ordered list of atom and bond ids is tested against the list
    of atom/bond ids in the matches-found-so-far, before it is added.

    """

    if report_progress:
        startatomid = 0
        sys.stderr.write('  searching for matching bond patterns:\n')
        sys.stderr.write('    0%')

    # Figure out which atoms from "G_system" bond together in a way which
    # matches the "g_bond_pattern" argument. Organize these matches by
    # atom and bond types and store all of the non-redundant ones in
    # the "interactions_by_type" variable.

    gm = GraphMatcher(G_system, g_bond_pattern)

    interactions_by_type = defaultdict(list)

    for atombondids in gm.Matches():
        # "atombondids" is a tuple.
        #  atombondids[0] has atomIDs from G_system corresponding to g_bond_pattern
        #     (These atomID numbers are indices into the G_system.verts[] list.)
        #  atombondids[1] has bondIDs from G_system corresponding to g_bond_pattern
        #     (These bondID numbers are indices into the G_system.edges[] list.)

        # It's convenient to organize the list of interactions-between-
        # atoms in a dictionary indexed by atomtypes and bondtypes.
        # (Because many atoms and bonds typically share the same type,
        #  organizing the results this way makes it faster to check
        #  whether a given interaction matches a "typepattern" defined
        #  by the user. We only have to check once for the whole group.)

        atombondtypes = \
            (tuple([G_system.GetVert(Iv).attr for Iv in atombondids[0]]),
             tuple([G_system.GetEdge(Ie).attr for Ie in atombondids[1]]))

        interactions_by_type[atombondtypes].append(atombondids)

        if report_progress:
            # GraphMatcher.Matches() searches for matches in an order
            # that selects a different atomid number from G_system,
            # starting at 0, and continuing up to the number of atoms (-1)
            # in the system (G_system.nv-1), and using this as the first
            # atom in the match (ie match[0][0]). This number can be used
            # to guess much progress has been made so far.
            oldatomid = startatomid
            startatomid = atombondids[0][0]
            percent_complete = (100 * startatomid) // G_system.GetNumVerts()
            # report less often as more progress made
            if percent_complete <= 4:
                old_pc = (100 * oldatomid) // G_system.GetNumVerts()
                if percent_complete > old_pc:
                    sys.stderr.write('  '+str(percent_complete)+'%')
            elif percent_complete <= 8:
                pc_d2 = (100 * startatomid) // (2*G_system.GetNumVerts())
                oldpc_d2 = (100 * oldatomid) // (2*G_system.GetNumVerts())
                if pc_d2 > oldpc_d2:
                    sys.stderr.write('  '+str(percent_complete)+'%')
            elif percent_complete <= 20:
                pc_d4 = (100 * startatomid) // (4*G_system.GetNumVerts())
                oldpc_d4 = (100 * oldatomid) // (4*G_system.GetNumVerts())
                if pc_d4 > oldpc_d4:
                    sys.stderr.write('  '+str(percent_complete)+'%')
            else:
                pc_d10 = (100 * startatomid) // (10*G_system.GetNumV
rorychatt/GPCook | gpcook/modules/documentation.py | Python | mit | size 88 | score 0

def generate_documentation():
    print("generate_documentation Stub")
    return True
michel-cf/Propeller-WebIDE | projects/forms.py | Python | lgpl-3.0 | size 1,061 | score 0

from django import forms
from django.core.exceptions import ValidationError

from projects.models import Project


class CreateProjectForm(forms.ModelForm):
    def __init__(self, user, *args, **kwargs):
        self.user = user
        super(CreateProjectForm, self).__init__(*args, **kwargs)

    def clean_name(self):
        return self.cleaned_data['name'].strip()

    def clean_code(self):
        code = self.cleaned_data['code'].strip().lower().replace(' ', '_')
        if Project.objects.filter(user=self.user, code=code).exists():
            raise ValidationError('A project with this code already exists')
        return code

    class Meta:
        model = Project
        fields = ['name', 'code', 'public']


class CreateProjectFormBasic(forms.Form):
    name = forms.CharField(label='Name', max_length=255)
    code = forms.SlugField(label='Code', max_length=255)

    def clean_name(self):
        return self.cleaned_data['name'].strip()

    def clean_code(self):
        return self.cleaned_data['code'].strip().lower().replace(' ', '_')
interlegis/atendimento | solicitacoes/models.py | Python | gpl-3.0 | size 2,330 | score 0

# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _

from usuarios.models import Usuario


class Sistema(models.Model):
    sigla = models.CharField(verbose_name=_('Sigla'), max_length=10)
    nome = models.CharField(verbose_name=_('Nome Sistema'), max_length=100)
    descricao = models.TextField(null=True, blank=True,
                                 verbose_name=_('Descrição'))

    class Meta:
        verbose_name = _('Sistema')
        verbose_name_plural = _('Sistemas')

    def __str__(self):
        return "%s - %s" % (self.sigla, self.nome)


class Solicitacao(models.Model):
    codigo = models.PositiveIntegerField(unique=True)
    usuario = models.ForeignKey(Usuario)
    sistema = models.ForeignKey(Sistema)
    titulo = models.CharField(verbose_name=_('Título'), max_length=100)
    resumo = models.CharField(verbose_name=_('Resumo'), max_length=50)
    casa_legislativa = models.CharField(verbose_name=_('Casa Legislativa'),
                                        max_length=200)
    email_contato = models.EmailField(blank=True, null=True,
                                      verbose_name=_('Email de contato'))
    # Substituir por usuarios.models.Telefone?
    telefone_contato = models.CharField(max_length=15, null=True, blank=True,
                                        verbose_name=_('Telefone de contato'))
    data_criacao = models.DateTimeField(auto_now_add=True,
                                        verbose_name=_('Data de criação'))
    descricao = models.TextField(blank=True, null=True,
                                 verbose_name=_('Descrição'))
    osticket = models.CharField(blank=True, null=True, max_length=256,
                                verbose_name=_('Código Ticket'))

    class Meta:
        verbose_name = _('Solicitação de Novo Serviço')
        verbose_name_plural = _('Solicitações de Novos Serviços')
        ordering = ['data_criacao']

    def __str__(self):
        return "%s - %s" % (self.codigo, self.resumo)
hrayr-artunyan/shuup | shuup/admin/views/select.py | Python | agpl-3.0 | size 2,842 | score 0.001407

# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals

from django.apps import apps
from django.contrib.auth import get_user_model
from django.core.exceptions import FieldDoesNotExist
from django.db.models import Q
from django.http import JsonResponse
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from django.views.generic import TemplateView

from shuup.core.models import Contact, Product


def _field_exists(model, field):
    try:
        model._meta.get_field(field)
        return True
    except FieldDoesNotExist:
        return False


class MultiselectAjaxView(TemplateView):
    model = None
    search_fields = []
    result_limit = 20

    def init_search_fields(self, cls):
        self.search_fields = []
        key = "%sname" % ("translations__" if hasattr(cls, "translations") else "")
        self.search_fields.append(key)
        if issubclass(cls, Contact):
            self.search_fields.append("email")
        if issubclass(cls, Product):
            self.search_fields.append("sku")
            self.search_fields.append("barcode")
        user_model = get_user_model()
        if issubclass(cls, user_model):
            if _field_exists(user_model, "username"):
                self.search_fields.append("username")
            if _field_exists(user_model, "email"):
                self.search_fields.append("email")
            if not _field_exists(user_model, "name"):
                self.search_fields.remove("name")

    def get_data(self, request, *args, **kwargs):
        model_name = request.GET.get("model")
        if not model_name:
            return []

        cls = apps.get_model(model_name)
        qs = cls.objects.all()
        if hasattr(cls.objects, "all_except_deleted"):
            qs = cls.objects.all_except_deleted()

        self.init_search_fields(cls)
        if not self.search_fields:
            return [{"id": None, "name": _("Couldn't get selections for %s.") % model_name}]

        if request.GET.get("search"):
            query = Q()
            keyword = request.GET.get("search", "").strip()
            for field in self.search_fields:
                query |= Q(**{"%s__icontains" % field: keyword})
            if issubclass(cls, Contact) or issubclass(cls, get_user_model()):
                query &= Q(is_active=True)
            qs = qs.filter(query).distinct()
        return [{"id": obj.id, "name": force_text(obj)} for obj in qs[:self.result_limit]]

    def get(self, request, *args, **kwargs):
        return JsonResponse({"results": self.get_data(request, *args, **kwargs)})
evenmarbles/mlpy | mlpy/tools/log.py | Python | mit | size 9,057 | score 0.001877

from __future__ import division, print_function, absolute_import

import os
import logging
from datetime import datetime

from ..modules.patterns import Singleton


class SilenceableStreamHandler(logging.StreamHandler):
    def __init__(self, *args, **kwargs):
        super(SilenceableStreamHandler, self).__init__(*args, **kwargs)
        self.silenced = False

    def emit(self, record):
        if not self.silenced:
            super(SilenceableStreamHandler, self).emit(record)


class SilenceableFileHandler(logging.FileHandler):
    def __init__(self, *args, **kwargs):
        super(SilenceableFileHandler, self).__init__(*args, **kwargs)
        self.silenced = False

    def emit(self, record):
        if not self.silenced:
            super(SilenceableFileHandler, self).emit(record)


class LoggingMgr(object):
    """The logging manager :class:`.Singleton` class.

    The logger manager can be included as a member to any class to manager
    logging of information. Each logger is identified by the module id (`mid`),
    with which the logger settings can be changed.

    By default a logger with log level LOG_INFO that is output to the stdout
    is created.

    Attributes
    ----------
    LOG_TYPE_STREAM=0
        Log only to output stream (stdout).
    LOG_TYPE_FILE=1
        Log only to an output file.
    LOG_TYPE_ALL=2
        Log to both output stream (stdout) and file.
    LOG_DEBUG=10
        Detailed information, typically of interest only when diagnosing
        problems.
    LOG_INFO=20
        Confirmation that things are working as expected.
    LOG_WARNING=30
        An indication that something unexpected happened, or indicative of
        some problem in the near future. The software is still working as
        expected.
    LOG_ERROR=40
        Due to a more serious problem, the software has not been able to
        perform some function.
    LOG_CRITICAL=50
        A serious error, indicating that the problem itself may be unable
        to continue running.

    See Also
    --------
    :mod:`logging`

    Examples
    --------
    >>> from mlpy.tools.log import LoggingMgr
    >>> logger = LoggingMgr().get_logger('my_id')
    >>> logger.info('This is a useful information.')

    This gets a new logger. If a logger with the module id `my_id` already
    exists that logger will be returned, otherwise a logger with the default
    settings is created.

    >>> LoggingMgr().add_handler('my_id', htype=LoggingMgr.LOG_TYPE_FILE)

    This adds a new handler for the logger with module id `my_id` writing
    the logs to a file.

    >>> LoggingMgr().remove_handler('my_id', htype=LoggingMgr.LOG_TYPE_STREAM)

    This removes the stream handler from the logger with module id `my_id`.

    >>> LoggingMgr().change_level('my_id', LoggingMgr.LOG_TYPE_ALL, LoggingMgr.LOG_DEBUG)

    This changes the log level for all attached handlers of the logger
    identified by `my_id` to LOG_DEBUG.

    """
    __metaclass__ = Singleton

    LOG_TYPE_STREAM = 0
    LOG_TYPE_FILE = 1
    LOG_TYPE_ALL = 2

    LOG_DEBUG = logging.DEBUG
    LOG_INFO = logging.INFO
    LOG_WARNING = logging.WARNING
    LOG_ERROR = logging.ERROR
    LOG_CRITICAL = logging.CRITICAL

    def __init__(self):
        self._loggers = {}
        self._verbosity = {}
        self._filename = None

    def get_verbosity(self, mid):
        """ Gets the verbosity.

        The current setting of the verbosity of the logger identified
        by `mid` is returned.

        Parameters
        ----------
        mid : str
            The module id of the logger to change the verbosity of.

        Returns
        -------
        bool :
            Whether to turn the verbosity on or off.

        """
        return self._verbosity[mid]

    def set_verbosity(self, mid, value):
        """Sets the verbosity.

        Turn logging on/off for logger identified by `mid`.

        Parameters
        ----------
        mid : str
            The module id of the logger to change the verbosity of.
        value : bool
            Whether to turn the verbosity on or off.

        """
        handlers = self._loggers[mid].handlers
        for hdl in handlers:
            hdl.silenced = value

    def get_logger(self, mid, level=LOG_INFO, htype=LOG_TYPE_STREAM, fmt=None, verbose=True, filename=None):
        """Get the logger instance with the identified `mid`.

        If a logger with the `mid` does not exist, a new logger will be
        created with the given settings. By default only a stream handler
        is attached to the logger.

        Parameters
        ----------
        mid : str
            The module id of the logger.
        level : int, optional
            The top level logging level. Default is LOG_INFO.
        htype : int, optional
            The logging type of handler. Default is LOG_TYPE_STREAM.
        fmt : str, optional
            The format in which the information is presented.
            Default is "[%(levelname)-8s ] %(name)s: %(funcName)s: %(message)s"
        verbose : bool, optional
            The verbosity setting of the logger. Default is True
        filename : str, optional
            The name of the file the file handler writes the logs to.
            Default is a generated filename.

        Returns
        -------
        The logging instance.

        """
        if mid not in self._loggers:
            logger = logging.getLogger(mid)
            logger.setLevel(level)

            self._loggers[mid] = logger
            self._verbosity[mid] = verbose if verbose is not None else True

            self.add_handler(mid, htype, level, fmt, filename)

        return self._loggers[mid]

    def add_handler(self, mid, htype=LOG_TYPE_STREAM, hlevel=LOG_INFO, fmt=None, filename=None):
        """Add a handler to the logger.

        Parameters
        ----------
        mid : str
            The module id of the logger
        htype : int, optional
            The logging type to add to the handler. Default is LOG_TYPE_STREAM.
        hlevel : int, optional
            The logging level. Default is LOG_INFO.
        fmt : str, optional
            The format in which the information is presented.
            Default is "[%(levelname)-8s ] %(name)s: %(funcName)s: %(message)s"
        filename : str, optional
            The name of the file the file handler writes the logs to.
            Default is a generated filename.

        """
        if fmt is None:
            fmt = "[%(levelname)-8s ] %(name)s: %(funcName)s: %(message)s"
        formatter = logging.Formatter(fmt)

        if htype == self.LOG_TYPE_STREAM or htype == self.LOG_TYPE_ALL:
            handler = SilenceableStreamHandler()
            self._add_handler(mid, hlevel, handler, formatter)

        if htype == self.LOG_TYPE_FILE or htype == self.LOG_TYPE_ALL:
            if self._filename is None:
                if not os.path.exists("logs"):
                    os.makedirs("logs")
                dt = datetime.now().strftime("%Y-%m-%d %H-%M-%S")
                self._filename = "logs\logfile " + dt + ".log"
            filename = filename if filename is not None else self._filename

            handler = SilenceableFileHandler(filename)
            self._add_handler(mid, hlevel, handler, formatter)

    def remove_handler(self, mid, htype):
        """Remove handlers.

        Removes all handlers of the given handler type from the logger.

        Parameters
        ----------
        mid : str
            The module id of the logger
        htype : int
            The logging type to remove from the handler.

        """
        handlers = self._loggers[mid].handlers
        for hdl in handlers:
            if htype == self.LOG_TYPE_FILE and isinstance(hdl, logging.FileHandler):
                self._loggers[mid].removeHandler(hdl)
            elif htype == self.LOG_TYPE_STREAM and isinstance(hdl, logging.StreamHandler):
                self._loggers[mid].removeHandler(hdl)

    def change_level(self, mid, hlevel, htype=LOG_TYPE_ALL):
        """Set the log level for a handler.

        Parameters
        ----------
        mid : str
            The module id of the logger
talon-one/talon_one.py | talon_one/models/session.py | Python | mit | size 5,860 | score 0

# coding: utf-8

"""
    Talon.One API

    The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns.  ### Where is the API?  The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id`  [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put  # noqa: E501

    The version of the OpenAPI document: 1.0.0
    Generated by: https://openapi-generator.tech
"""


import pprint
import re  # noqa: F401

import six

from talon_one.configuration import Configuration


class Session(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'user_id': 'int',
        'token': 'str',
        'created': 'datetime'
    }

    attribute_map = {
        'user_id': 'userId',
        'token': 'token',
        'created': 'created'
    }

    def __init__(self, user_id=None, token=None, created=None, local_vars_configuration=None):  # noqa: E501
        """Session - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._user_id = None
        self._token = None
        self._created = None
        self.discriminator = None

        self.user_id = user_id
        self.token = token
        self.created = created

    @property
    def user_id(self):
        """Gets the user_id of this Session.  # noqa: E501

        The ID of the user of this session  # noqa: E501

        :return: The user_id of this Session.  # noqa: E501
        :rtype: int
        """
        return self._user_id

    @user_id.setter
    def user_id(self, user_id):
        """Sets the user_id of this Session.

        The ID of the user of this session  # noqa: E501

        :param user_id: The user_id of this Session.  # noqa: E501
        :type: int
        """
        if self.local_vars_configuration.client_side_validation and user_id is None:  # noqa: E501
            raise ValueError("Invalid value for `user_id`, must not be `None`")  # noqa: E501

        self._user_id = user_id

    @property
    def token(self):
        """Gets the token of this Session.  # noqa: E501

        An opaque session identifier  # noqa: E501

        :return: The token of this Session.  # noqa: E501
        :rtype: str
        """
        return self._token

    @token.setter
    def token(self, token):
        """Sets the token of this Session.

        An opaque session identifier  # noqa: E501

        :param token: The token of this Session.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and token is None:  # noqa: E501
            raise ValueError("Invalid value for `token`, must not be `None`")  # noqa: E501

        self._token = token

    @property
    def created(self):
        """Gets the created of this Session.  # noqa: E501

        Unix timestamp indicating when the session was first created.  # noqa: E501

        :return: The created of this Session.  # noqa: E501
        :rtype: datetime
        """
        return self._created

    @created.setter
    def created(self, created):
        """Sets the created of this Session.

        Unix timestamp indicating when the session was first created.  # noqa: E501

        :param created: The created of this Session.  # noqa: E501
        :type: datetime
        """
        if self.local_vars_configuration.client_side_validation and created is None:  # noqa: E501
            raise ValueError("Invalid value for `created`, must not be `None`")  # noqa: E501

        self._created = created

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Session):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, Session):
            return True

        return self.to_dict() != other.to_dict()
sleepers-anonymous/zscore
useful_scripts/timezones.py
Python
mit
292
0.017123
from django.utils impor
t timezone

# `Sleep` is assumed to already be in scope here (e.g. the zscore model, with
# this snippet pasted into `manage.py shell` after importing the app's models).
t = timezone.get_current_timezone()
for s in Sleep.objects.all():
    if timezone.is_aware(s.start_time):
        s.start_time = timezone.make_naive(s.start_time, t)
    if timezone.is_aware(s.end_time):
        s.end_time = timezone.make_naive(s.end_time, t)
    s.save()  # save once per sleep, outside the `if`, so rows where only start_time was aware persist too
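# For context, a self-contained round trip between aware and naive datetimes
# with django.utils.timezone (assumes configured Django settings with USE_TZ=True):
from django.utils import timezone

tz = timezone.get_current_timezone()
aware = timezone.now()                  # tz-aware under USE_TZ=True
naive = timezone.make_naive(aware, tz)  # the conversion the script applies
assert not timezone.is_aware(naive)
assert timezone.is_aware(timezone.make_aware(naive, tz))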
VirusTotal/content
Tests/Marketplace/marketplace_services.py
Python
mit
150,166
0.004761
import base64 import fnmatch import glob import json import os import re import shutil import stat import subprocess import urllib.parse import warnings from datetime import datetime, timedelta from distutils.util import strtobool from distutils.version import LooseVersion from typing import Tuple, Any, Union, List, Dict, Optional from zipfile import ZipFile, ZIP_DEFLATED import git import google.auth import sys import yaml from google.cloud import storage import Tests.Marketplace.marketplace_statistics as mp_statistics from Tests.Marketplace.marketplace_constants import PackFolders, Metadata, GCPConfig, BucketUploadFlow, PACKS_FOLDER, \ PackTags, PackIgnored, Changelog from Utils.release_notes_generator import aggregate_release_notes_for_marketplace from Tests.scripts.utils import logging_wrapper as logging class Pack(object): """ Class that manipulates and manages the upload of pack's artifact and metadata to cloud storage. Args: pack_name (str): Pack root folder name. pack_path (str): Full path to pack folder. Attributes: PACK_INITIAL_VERSION (str): pack initial version that will be used as default. CHANGELOG_JSON (str): changelog json full name, may be changed in the future. README (str): pack's readme file name. METADATA (str): pack's metadata file name, the one that will be deployed to cloud storage. USER_METADATA (str); user metadata file name, the one that located in content repo. EXCLUDE_DIRECTORIES (list): list of directories to excluded before uploading pack zip to storage. AUTHOR_IMAGE_NAME (str): author image file name. RELEASE_NOTES (str): release notes folder name. """ PACK_INITIAL_VERSION = "1.0.0" CHANGELOG_JSON = "changelog.json" README = "README.md" USER_METADATA = "pack_metadata.json" METADATA = "metadata.json" AUTHOR_IMAGE_NAME = "Author_image.png" EXCLUDE_DIRECTORIES = [PackFolders.TEST_PLAYBOOKS.value] RELEASE_NOTES = "ReleaseNotes" def __init__(self, pack_name, pack_path): self._pack
_name = pack_name self._pack_path = pack_path self._status = None self._public_storage_path = "" self._remove_files_list = [] # tracking temporary files, in order to delete in later step self._server_min_version = "99.99.99" # initialized min version self._latest_version = None # pack latest version found in changelog self._support_type = None # initialized in load_user_metadata function self._current_version = None # initialized in load_
user_metadata function
        self._hidden = False  # initialized in load_user_metadata function
        self._description = None  # initialized in load_user_metadata function
        self._display_name = None  # initialized in load_user_metadata function
        self._user_metadata = None  # initialized in load_user_metadata function
        self.eula_link = None  # initialized in load_user_metadata function
        self._is_feed = False  # a flag that specifies if pack is a feed pack
        self._downloads_count = 0  # number of pack downloads
        self._bucket_url = None  # URL of where the pack was uploaded.
        self._aggregated = False  # whether the pack's release notes were aggregated or not.
        self._aggregation_str = ""  # the aggregation string msg when the pack versions are aggregated
        self._create_date = None  # initialized in enhance_pack_attributes function
        self._update_date = None  # initialized in enhance_pack_attributes function
        self._uploaded_author_image = False  # whether the pack author image was uploaded or not
        self._uploaded_integration_images = []  # the list of all integration images that were uploaded for the pack
        self._support_details = None  # initialized in enhance_pack_attributes function
        self._author = None  # initialized in enhance_pack_attributes function
        self._certification = None  # initialized in enhance_pack_attributes function
        self._legacy = None  # initialized in enhance_pack_attributes function
        self._author_image = None  # initialized in upload_author_image function
        self._displayed_integration_images = None  # initialized in upload_integration_images function
        self._price = 0  # initialized in enhance_pack_attributes function
        self._is_private_pack = False  # initialized in enhance_pack_attributes function
        self._is_premium = False  # initialized in enhance_pack_attributes function
        self._vendor_id = None  # initialized in enhance_pack_attributes function
        self._partner_id = None  # initialized in enhance_pack_attributes function
        self._partner_name = None  # initialized in enhance_pack_attributes function
        self._content_commit_hash = None  # initialized in enhance_pack_attributes function
        self._preview_only = None  # initialized in enhance_pack_attributes function
        self._tags = None  # initialized in enhance_pack_attributes function
        self._categories = None  # initialized in enhance_pack_attributes function
        self._content_items = None  # initialized in collect_content_items function
        self._search_rank = None  # initialized in enhance_pack_attributes function
        self._related_integration_images = None  # initialized in enhance_pack_attributes function
        self._use_cases = None  # initialized in enhance_pack_attributes function
        self._keywords = None  # initialized in enhance_pack_attributes function
        self._dependencies = None  # initialized in enhance_pack_attributes function
        self._pack_statistics_handler = None  # initialized in enhance_pack_attributes function
        self._contains_transformer = False  # initialized in collect_content_items function
        self._contains_filter = False  # initialized in collect_content_items function
        self._is_missing_dependencies = False  # a flag that specifies if pack is missing dependencies

    @property
    def name(self):
        """ str: pack root folder name.
        """
        return self._pack_name

    @property
    def path(self):
        """ str: pack folder full path.
        """
        return self._pack_path

    @property
    def latest_version(self):
        """ str: pack latest version from sorted keys of changelog.json file.
""" if not self._latest_version: self._latest_version = self._get_latest_version() return self._latest_version else: return self._latest_version @latest_version.setter def latest_version(self, latest_version): self._latest_version = latest_version @property def status(self): """ str: current status of the packs. """ return self._status @property def is_feed(self): """ bool: whether the pack is a feed pack """ return self._is_feed @is_feed.setter def is_feed(self, is_feed): """ setter of is_feed """ self._is_feed = is_feed @status.setter # type: ignore[attr-defined,no-redef] def status(self, status_value): """ setter of pack current status. """ self._status = status_value @property def public_storage_path(self): """ str: public gcs path of uploaded pack. """ return self._public_storage_path @public_storage_path.setter def public_storage_path(self, path_value): """ setter of public gcs path of uploaded pack. """ self._public_storage_path = path_value @property def support_type(self): """ str: support type of the pack. """ return self._support_type @support_type.setter def support_type(self, support_value): """ setter of support type of the pack. """ self._support_type = support_value @property def current_version(self): """ str: current version of the pack (different from latest_version). """ return self._current_version @current_version.setter def current_version(self,
SCPR/firetracker
calfire_tracker/migrations/0027_auto__add_field_wildfiredisplaycontent_content_type.py
Python
gpl-2.0
11,098
0.007839
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'WildfireDisplayContent.content_type' db.add_column('calfire_tracker_wildfiredisplaycontent', 'content_type', self.gf('django.db.models.fields.CharField')(default='Display Content', max_length=1024, null=True), keep_default=False) def backwards(self, orm): # Deleting field 'WildfireDisplayContent.content_type' db.delete_column('calfire_tracker_wildfiredisplaycontent', 'content_type') models = { 'calfire_tracker.calwildfire': { 'Meta': {'object_name': 'CalWildfire'}, 'acres_burned': ('django.db.models.fields.IntegerField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}), 'administrative_unit': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}), 'air_quality_rating': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}), 'asset_host_image_id': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}), 'asset_photo_credit': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}), 'asset_url_link': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}), 'cause': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'computed_location': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'conditions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'containment_percent': ('django.db.models.fields.IntegerField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}), 'cooperating_agencies': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'county': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}), 'county_slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}), 'created_fire_id': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}), 'current_situation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'damage_assessment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'data_source': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}), 'date_time_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'evacuations': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'fire_name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}), 'fire_slug': ('django.db.models.fields.Slug
Field', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}), 'historical_narrative': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'injuries': ('dja
ngo.db.models.fields.CharField', [], {'max_length': '2024', 'null': 'True', 'blank': 'True'}), 'last_saved': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'last_scraped': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'location': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'location_geocode_error': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'location_latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'location_longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'more_info': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'perimeters_image': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}), 'phone_numbers': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'promoted_fire': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'road_closures': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'school_closures': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'structures_destroyed': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}), 'structures_threatened': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}), 'total_airtankers': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}), 'total_dozers': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}), 'total_fire_crews': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}), 'total_fire_engines': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}), 'total_fire_personnel': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}), 'total_helicopters': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}), 'total_water_tenders': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}), 'training': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'twitter_hashtag': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}), 'update_lockout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'year': ('django.db.models.fields.IntegerField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}) }, 'calfire_tracker.wildfireannualreview': { 'Meta': {'object_name': 'WildfireAnnualReview'}, 'acres_burned': ('django.db.models.fields.IntegerField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}), 'administrative_unit': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}), 'data_source': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}), 'date_range_beginning': ('django.db.models.fields.DateTimeField', [], {}), 'date_range_end': ('django.db.models.fields.DateTimeField', [], 
{}), 'dollar_damage': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'injuries': ('django.db.models.fields.CharField', [], {'max_length': '2024', 'null': 'True', 'blank': 'True'}), 'jurisdiction': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}), 'last_saved': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'number_of_fires': ('d
rspavel/spack
var/spack/repos/builtin/packages/py-distributed/package.py
Python
lgpl-2.1
1,699
0.002943
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for de
tails. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class PyDistributed(PythonPackage): """Distributed scheduler for Dask"""
homepage = "https://distributed.dask.org/" url = "https://pypi.io/packages/source/d/distributed/distributed-2.10.0.tar.gz" version('2.10.0', sha256='2f8cca741a20f776929cbad3545f2df64cf60207fb21f774ef24aad6f6589e8b') version('1.28.1', sha256='3bd83f8b7eb5938af5f2be91ccff8984630713f36f8f66097e531a63f141c48a') depends_on('python@2.7:2.8,3.5:', when='@:1', type=('build', 'run')) depends_on('python@3.6:', when='@2:', type=('build', 'run')) depends_on('py-setuptools', type=('build', 'run')) depends_on('py-click@6.6:', type=('build', 'run')) depends_on('py-cloudpickle@0.2.2:', type=('build', 'run')) depends_on('py-msgpack', type=('build', 'run')) depends_on('py-psutil@5.0:', type=('build', 'run')) depends_on('py-six', type=('build', 'run'), when='@:1') depends_on('py-sortedcontainers@:1.999,2.0.2:', type=('build', 'run')) depends_on('py-tblib', type=('build', 'run')) depends_on('py-toolz@0.7.4:', type=('build', 'run')) depends_on('py-tornado@5:', type=('build', 'run')) depends_on('py-zict@0.1.3:', type=('build', 'run')) depends_on('py-pyyaml', type=('build', 'run')) depends_on('py-futures', when='@:1 ^python@2.7:2.8', type=('build', 'run')) depends_on('py-singledispatch', when='@:1 ^python@2.7:2.8', type=('build', 'run')) def patch(self): filter_file('^dask .*', '', 'requirements.txt')
hfp/tensorflow-xsmm
tensorflow/contrib/tensorrt/test/reshape_transpose_test.py
Python
apache-2.0
6,477
0.003705
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Basic tests for TF-TensorRT integration.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.tensorrt.test import tf_trt_integration_test_base as trt_test from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test class ReshapeTest(trt_test.TfTrtIntegrationTestBase): def GetParams(self): dtype = dtypes.float32 input_name = "input" input_dims = [100, 24, 24, 2] output_name = "output" g = ops.Graph() with g.as_default(): inp = array_ops.placeholder( dtype=dtype, shape=[None] + input_dims[1:], name=input_name) outputs = [] # Here we test two types of reshapes, one changes the batch dimension and # the other does not. Note that we're not able to test reshaping to # scalar, since TRT requires input tensor to be of rank at least 2, so a # reshape with scalar input will be filtered out of the segment before # conversion. with g.device("/GPU:0"): # These reshapes happen at batch dimension, thus conversion should fail. for shape in [[2, 50, 24, 24, 2], [-1, 50, 24, 24, 2], [2, 50, -1, 24, 2]]: incompatible_reshape = array_ops.reshape(inp, shape) reshape_back = array_ops.reshape(incompatible_reshape, [-1, 24, 24, 2]) outputs.append(self.trt_incompatible_op(reshape_back)) # Add another block with many reshapes that don't change the batch # dimension. compatible_reshape = array_ops.reshape( inp, [-1, 24 * 24, 2], name="reshape-0") compatible_reshape = array_ops.reshape( compatible_reshape, [100, 24, -1], name="reshape-1") compatible_reshape = array_ops.reshape( compatible_reshape, [100, 24 * 2, 24], name="reshape-2") compatible_reshape = array_ops.reshape( compatible_reshape, [-1, 24, 24 * 2], name="reshape-3") compatible_reshape = array_ops.reshape( compatible_reshape, [-1, 6, 4, 24, 2], name="reshape-4") compatible_reshape = array_ops.reshape( compatible_reshape, [-1, 6, 4, 6, 4, 2, 1], name="reshape-5") compatible_reshape = array_ops.reshape( compatible_reshape, [-1, 24, 24, 2], name="reshape-6") outputs.append(self.trt_incompatible_op(compatible_reshape)) math_ops.add_n(outputs, name=output_name) return trt_test.TfTrtIntegrationTestParams( gdef=g.as_graph_def(), input_names=[input_name], input_dims=[input_d
ims], output_names=[output_name], expected_output_dims=[tuple(input_dims)]) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" return { "TRTEngineOp_0": ["reshape-%d" % i for i in range(7)] +
["reshape-%d/shape" % i for i in range(7)] } def ShouldRunTest(self, run_params): """Whether to run the test.""" return (not trt_test.IsQuantizationMode(run_params.precision_mode) and not run_params.dynamic_engine) class TransposeTest(trt_test.TfTrtIntegrationTestBase): def GetParams(self): """Create a graph containing single segment.""" dtype = dtypes.float32 input_name = "input" input_dims = [100, 24, 24, 2] output_name = "output" g = ops.Graph() with g.as_default(): inp = array_ops.placeholder( dtype=dtype, shape=[None] + input_dims[1:], name=input_name) with g.device("/GPU:0"): # Add a block with compatible transposes. compatible_transpose = array_ops.transpose( inp, [0, 3, 1, 2], name="transpose-1") compatible_transpose = array_ops.transpose( compatible_transpose, [0, 2, 3, 1], name="transposeback") # Add an incompatible op so the first block will not be in the same # subgraph where the following block belongs. bridge = self.trt_incompatible_op(compatible_transpose) # Add a block with incompatible transposes. # # Note: by default Grappler will run the TRT optimizer twice. At the # first time it will group the two transpose ops below to same segment # then fail the conversion due to the expected batch dimension problem. # At the second time, since the input of bridge op is TRTEngineOp_0, it # will fail to do shape inference which then cause conversion to fail. # TODO(laigd): support shape inference, make TRT optimizer run only # once, and fix this. incompatible_transpose = array_ops.transpose( bridge, [2, 1, 0, 3], name="transpose-2") excluded_transpose = array_ops.transpose( incompatible_transpose, [0, 2, 3, 1], name="transpose-3") array_ops.identity(excluded_transpose, name=output_name) return trt_test.TfTrtIntegrationTestParams( gdef=g.as_graph_def(), input_names=[input_name], input_dims=[input_dims], output_names=[output_name], expected_output_dims=[(24, 100, 2, 24)]) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" return { "TRTEngineOp_0": [ "transpose-1", "transpose-1/perm", "transposeback", "transposeback/perm" ] } def ShouldRunTest(self, run_params): """Whether to run the test.""" return (not trt_test.IsQuantizationMode(run_params.precision_mode) and not run_params.dynamic_engine) if __name__ == "__main__": test.main()
zookeepr/zookeepr
zkpylons/tests/functional/conftest.py
Python
gpl-2.0
3,543
0.007056
import pytest import sys import logging from sqlalchemy import create_engine import zk.model.meta as zkmeta import zkpylons.model.meta as pymeta from zkpylons.config.routing import make_map from paste.deploy import loadapp from webtest import TestApp from paste.fixture import Dummy_smtplib from .fixtures import ConfigFactory from ConfigParser import ConfigParser # Get settings from config file, only need it once ini = ConfigParser() ini_filename = "test.ini" ini.read(ini_filename) # Logging displayed by passing -s to pytest #logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) @pytest.yield_fixture def map(): config = { 'pylons.paths' : { 'controllers' : None }, 'debug' : True, } yield make_map(config) @pytest.yield_fi
xture def app(): wsgiapp = loadapp('config:'+ini_filename, relative_to=".") app = TestApp(wsgiapp) yield app class DoubleSession(object): # There is an issue with the zkpylons -> zk migration # Some files use zk.model, which uses zk.mo
del.meta.Session # Some files use zkpylons.model, which uses zkpylons.model.meta.Session # Some files use relative paths, which means you can kinda guess at it # The best way around this is to configure both Session objects # But then operations frequently have to be applied to both # This class wraps operations needed for testing, and applies both def __init__(self, session1, session2): self.s1 = session1 self.s2 = session2 def remove(self): self.s1.remove() self.s2.remove() def configure(self, engine): self.s1.configure(bind=engine) self.s2.configure(bind=engine) self.s1.configure(autoflush=False) self.s2.configure(autoflush=False) def commit(self): self.s1.commit() self.s2.commit() # TODO: Maybe expire_all or refresh would be better def expunge_all(self): self.s1.expunge_all() self.s2.expunge_all() def query(self, cls): return self.s1.query(cls) def execute(self, *args, **kwargs): return self.s1.execute(*args, **kwargs) base_general_config = { 'sponsors' : {"top":[],"slideshow":[]}, 'account_creation' : True, 'cfp_status' : "open", 'conference_status' : "open", } base_rego_config = { 'personal_info' : {"phone":"yes","home_address":"yes"} } @pytest.yield_fixture def db_session(): # Set up SQLAlchemy to provide DB access dsess = DoubleSession(zkmeta.Session, pymeta.Session) # Clean up old sessions if they exist dsess.remove() engine = create_engine(ini.get("app:main", "sqlalchemy.url")) # Drop all data to establish known state, mostly to prevent primary-key conflicts engine.execute("drop schema if exists public cascade") engine.execute("create schema public") zkmeta.Base.metadata.create_all(engine) dsess.configure(engine) # Create basic config values, to allow basic pages to render for key, val in base_general_config.iteritems(): ConfigFactory(key=key, value=val) for key, val in base_rego_config.iteritems(): ConfigFactory(category='rego', key=key, value=val) dsess.commit() # Run the actual test yield dsess # No rollback, for functional tests we have to actually commit to DB @pytest.yield_fixture def smtplib(): Dummy_smtplib.install() yield Dummy_smtplib if Dummy_smtplib.existing: Dummy_smtplib.existing.reset()
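# A hedged sketch of a test consuming the fixtures above; the URL and the
# expectation are illustrative, not taken from the suite.
def test_front_page_renders(db_session, app):
    # db_session provides a clean schema plus the base config rows;
    # app is the wired-up WebTest application.
    resp = app.get('/')
    assert resp.status_int == 200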
truevision/django_banklink
django_banklink/models.py
Python
bsd-3-clause
1,291
0.035631
from django.db import models from django.utils.translation import ugettext_lazy as _ from django.contrib.auth.models import User TRANSACTION_STATUS = ( ('P', _('pending')), ('F', _('failed')), ('C', _('complete')), ) class Transaction(models.Model): user = models.ForeignKey(User, blank = True, null = True, default = None, verbose_name = _("user"), help_text = _("user who started transaction")) description = models.CharField(_("reference description"), max_length = 255, help_text = _("reference description")) amount = models.FloatField(_("amount")) currency = models.CharField(_("currency"), max_length = 3) details = models.CharField(_("details"), max_length = 255, help_text = _("payment details")) created = models.DateTimeField(auto_now_add = True)
last_modified = models.DateTimeField(auto_now = True) status = models.CharField(_("status"), max_length = 1, default = 'P') redirect_after_success = models.Ch
arField(max_length = 255, editable = False)
    redirect_on_failure = models.CharField(max_length = 255, editable = False)

    def __unicode__(self):
        # Interpolate after translating so the msgid stays a stable literal.
        return _("transaction %s") % self.pk

    class Meta:
        verbose_name = _("transaction")
        ordering = ['-last_modified']
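# A hedged sketch of creating a pending transaction with the model above
# (inside a configured Django project):
from django_banklink.models import Transaction

txn = Transaction.objects.create(
    description="Invoice #42",
    amount=9.99,
    currency="EUR",
    details="Test payment",
    redirect_after_success="/thanks/",
    redirect_on_failure="/retry/",
)
assert txn.status == 'P'  # transactions start out pending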
ArneBachmann/tagsplorer
tagsplorer/__main__.py
Python
mpl-2.0
183
0.010929
# codi
ng=utf-8 ''' tagsPlorer package entry point (C) 2021-2021 Arne Bachmann https://github.com/ArneBachmann/tagsplorer ''' from tagsplorer import tp tp.Main().par
se_and_run()
diegojromerolopez/djanban
src/djanban/apps/journal/migrations/0002_auto_20160926_0155.py
Python
mit
819
0.002442
# -*- coding: utf-8 -*- # Generated by Django 1.10 on 2016-09-25 23:55 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('journal', '0001_initial'), ] operation
s = [ migrations.CreateModel(
name='JournalEntryTag', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=64, verbose_name='Name')), ], ), migrations.AddField( model_name='journalentry', name='tags', field=models.ManyToManyField(blank=True, to='journal.JournalEntryTag', verbose_name='Tags this entry has'), ), ]
xArm-Developer/xArm-Python-SDK
setup.py
Python
bsd-3-clause
1,795
0.000557
#!/usr/bin/env python3 # Software License Agreement (BSD License) # # Copyright (c) 2017, UFactory, Inc. # All rights reserved. # # Author: Vinman <vinman.wen@ufactory.cc> import os from distutils.util import convert_path try: from setuptools import setup, find_packages except ImportError: from distutils.core import setup def find_packages(base_path='.'): base_path = convert_path(base_path) found = [] for root, dirs, files in os.walk(base_path, followlinks=True): dirs[:] = [d for d in dirs if d[0] != '.' and d not in ('ez_setup', '__pycache__')] relpath = os.path.relpath(root, base_path) parent = relpath.replace(os.sep, '.').lstrip('.') if relpath != '.' and parent not in found: # foo.bar package but no foo package, skip continue for dir in dirs: if os.path.isfile(os.path.join(root, dir, '__init__.py')): package = '.'.join((parent, dir)) if parent else dir found.append(package) return found main_ns = {} ver_path = convert_path
('xarm/version.py') with open(os.path.join(os.getcwd(), ver_path)) as ver_file: exec(ver_file.read(), main_ns) version = main_ns['__version__'] # long_description = open('README.rst').read() long_description = 'long description for xArm-Python-SDK' with open(os.path.join(os.getcwd(), 'requirements.txt')) as f: requirements = f.read().splitlines() setup( name='xArm-Python-SDK', version=version, author='Vinman
', description='Python SDK for xArm', packages=find_packages(), author_email='vinman@ufactory.cc', install_requires=requirements, long_description=long_description, license='MIT', zip_safe=False )
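# Quick check of the fallback finder above, run from the repository root
# (the exact package list depends on the tree):
print(find_packages('.'))  # e.g. ['xarm', 'xarm.core', ...]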
frasern/ADL_LRS
lrs/tests/AgentTests.py
Python
apache-2.0
3,199
0.012504
import json import base64 from django.test import TestCase from django.core.urlresolvers import reverse from django.conf import settings from ..views import register, agents from ..models import Agent class AgentTests(TestCase): @classmethod def setUpClass(cls): print "\n%s" % __name__ def setUp(self): self.username = "tester" self.password = "test" self.email = "test@example.com" self.auth = "Basic %s" % base64.b64encode("%s:%s" % (self.username, self.password)) form = {'username':self.username,'password':self.password,'password2':self.password, 'email':self.email} self.client.post
(reverse(register),form, X_Experience_API_Version=settings.XAPI_VERSION) def test_get_no_agents(self): agent = json.dumps({"name":"me","mbox":"mailto:me@example.com"}) response = self.client.get(reverse(agents), {'agent':agent}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response.status_code, 404) self.assertEqual(
response.content, "Error with Agent. The agent partial did not match any agents on record") def test_get(self): a = json.dumps({"name":"me","mbox":"mailto:me@example.com"}) Agent.objects.retrieve_or_create(**json.loads(a)) response = self.client.get(reverse(agents), {'agent':a}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) r_data = json.loads(response.content) self.assertTrue(isinstance(r_data['mbox'], list)) self.assertTrue(isinstance(r_data['name'], list)) self.assertEqual(r_data['mbox'], ['mailto:me@example.com']) self.assertEqual(r_data['name'], ['me']) self.assertEqual(r_data['objectType'], 'Person') self.assertIn('content-length', response._headers) def test_get_no_existing_agent(self): a = json.dumps({"mbox":"mailto:fail@fail.com"}) response = self.client.get(reverse(agents), {'agent':a}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response.content, 'Error with Agent. The agent partial did not match any agents on record') self.assertEqual(response.status_code, 404) def test_head(self): a = json.dumps({"name":"me","mbox":"mailto:me@example.com"}) Agent.objects.retrieve_or_create(**json.loads(a)) response = self.client.head(reverse(agents), {'agent':a}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response.content, '') self.assertIn('content-length', response._headers) def test_get_no_agent(self): response = self.client.get(reverse(agents), Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response.status_code, 400) def test_post(self): agent = json.dumps({"name":"me","mbox":"mailto:me@example.com"}) response = self.client.post(reverse(agents), {'agent':agent},content_type='application/x-www-form-urlencoded', Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response.status_code, 405)
gecos-team/gecosws-agent
gecosfirstlogin_lib/Window.py
Python
gpl-2.0
3,073
0.000651
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*- # This file is part of Guadalinex # # This software is free software; you can redistr
ibute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of #
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this package; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

__author__ = "Antonio Hernández <ahernandez@emergya.com>"
__copyright__ = "Copyright (C) 2011, Junta de Andalucía <devmaster@guadalinex.org>"
__license__ = "GPL-2"

from gi.repository import Gtk
from gi.repository import GObject

from . helpers import get_builder, show_uri, get_help_uri


# This class is meant to be subclassed by FirstbootWindow. It provides
# common functions and some boilerplate.
class Window(Gtk.Window):
    __gtype_name__ = "Window"

    # When constructing a new instance of this class, the following notable
    # methods are called in this order:
    #     __new__(cls)
    #     _finish_initializing(self, builder)
    #     finish_initializing(self, builder)
    #     __init__(self)
    #
    # For this reason, it's recommended you leave __init__ empty and put
    # your initialization code in finish_initializing
    def __init__(self):
        GObject.GObject.__init__(self)

    def __new__(cls):
        """Special static method that's automatically called by Python
        when constructing a new instance of this class.

        Returns a fully instantiated BaseFirstbootWindow object.
        """
        builder = get_builder(cls.__gtype_name__)
        new_object = builder.get_object(cls.__gtype_name__)
        new_object._finish_initializing(builder)
        return new_object

    def _finish_initializing(self, builder):
        """Called while initializing this instance in __new__

        finish_initializing should be called after parsing the UI definition
        and creating a FirstbootWindow object with it in order to finish
        initializing the start of the new FirstbootWindow instance.
        """
        # Get a reference to the builder and set up the signals.
        self.builder = builder
        self.ui = builder.get_ui(self, True)
        self.connect("delete_event", self.on_delete_event)
        self.translate()
        self.finish_initializing(builder)

    def finish_initializing(self, builder):
        pass

    def on_destroy(self, widget, data=None):
        """Called when the FirstbootWindow is closed."""
        # Clean up code for saving application state should be added here.
        Gtk.main_quit()

    def on_delete_event(self, widget, data=None):
        return False

    def translate(self):  # fixed: needs `self`, it is invoked as self.translate()
        pass
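# A hedged sketch of the subclassing contract described in the comments above;
# the class and widget names are illustrative.
class FirstbootWindow(Window):
    __gtype_name__ = "FirstbootWindow"  # must match the name in the UI definition

    def finish_initializing(self, builder):
        # The builder has already parsed the UI file at this point, so fetch
        # widgets and wire extra state here instead of overriding __init__.
        self.next_button = builder.get_object("next_button")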
15Mpedia/15Mpedia-scripts
completa-circulos-podemos.py
Python
gpl-3.0
4,844
0.020544
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2014 emijrp # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import catlib import os import pagegenerators import re import urllib import wikipedia """ Hace algunos reemplazos básicos en páginas de círculos de Podemos y sube la imagen del círculo, si está en Twitter. """ def main(): site = wikipedia.Site('15mpedia', '15mpedia') cat = catlib.Category(site, u"Category:Círculos de Podemos") gen = pagegenerators.CategorizedPageGenerator(cat) pre = pagegenerators.PreloadingGenerator(gen, pageNumber=60) for page in pre: wtitle = page.title() wtext = page.get() if not re.search(ur"(?im)\{\{\s*Infobox Nodo", wtext): continue print '\n===', wtitle, '===' newtext = wtext suffix = ' '.join(wtitle.split(' ')[1:]) if re.search(ur"(?im)\{\{\s*nodos\s*\}\}", newtext) and not re.search(ur"(?im)\{\{\s*podemos\s*\}\}", newtext): newtext = re.sub(ur"(?im)\{\{\s*nodos\s*\}\}", ur"{{podemos}}", newtext) if re.search(ur"(?im)^'''([^\']+)''' es un \[\[nodo\]\]( de \[\[Podemos\]\])?\.", newtext): newtext = re.sub(ur"(?im)^'''([^\']+)''' es un \[\[nodo\]\](?: de \[\[Podemos\]\])?\.", ur"'''\1''' es un [[Lista de círculos de Podemos|círculo]] de [[Podemos]] de [[%s]]." % (suffix), newtext) if re.search(ur"(?im)== Enlaces externos ==\s*\*[^\r\n]+\r\n", newtext): newtext = re.sub(ur"(?im)== Enlaces externos ==\s*\*[^\r\n]+\r\n", ur"== Enlaces externos ==\n{{enlaces externos}}\n", newtext) newtext = re.sub(ur"(?im)\[\[Categoría:Podemos\]\]", ur"", newtext) newtext = re.sub(ur"(?im)\[\[Categoría:Nodos\]\]", ur"[[Categoría:Círculos de Podemos|%s]]" % (suffix), newtext) newtext = re.sub(ur"(?im)\[\[Categoría:Círculos de Podemos\]\]", ur"[[Categoría:Círculos de Podemos|%s]]" % (suffix), newtext) newtext = re.sub(ur"(?im)== Véase también ==\r\
n\* \[\[Lista de nodos de Podemos\]\]\r\n\r\n", ur"== Véase también ==\n* [[Podemos]]\n* [[Lista de círculos de Podemos]]\n\n", newtext) if wtext != newtext: wikipedia.showDiff(wtext, newtext) page.put(newtext, u"BOT - Unificando círculos") #imagen if not re.search(ur"(?im)\|\s*imagen\s*=", newtext): twitter = re.findall(ur"(?im)\|\s*twitter\s*=([^\r\n]+)\r\n", newtext) if twitter:
twitter = twitter[0].split(',')[0].strip() f = urllib.urlopen("https://twitter.com/%s" % twitter) html = unicode(f.read(), 'utf-8') imageurl = re.findall(ur"data-resolved-url-large=\"(https://pbs.twimg.com/profile_images/[^\"]+)\"", html) if imageurl: imageurl = imageurl[0] if 'default_profile' in imageurl: print 'Default twitter image, skiping' continue desc = u"{{Infobox Archivo\n|embebido id=\n|embebido usuario=\n|embebido título=\n|descripción=Logotipo de [[%s]].\n|fuente={{twitter|%s}}\n}}" % (wtitle, twitter) if imageurl.endswith('jpeg') or imageurl.endswith('jpg'): ext = 'jpg' elif imageurl.endswith('pneg') or imageurl.endswith('png'): ext = 'png' else: print 'Twitter image extension is %s, skiping' % (imageurl.split('.')[-1]) continue imagename = u"%s.%s" % (wtitle, ext) #https://www.mediawiki.org/wiki/Manual:Pywikibot/upload.py os.system('python upload.py -lang:15mpedia -family:15mpedia -keep -filename:"%s" -noverify "%s" "%s"' % (imagename.encode('utf-8'), imageurl.encode('utf-8'), desc.encode('utf-8'))) newtext = re.sub(ur"(?im)\{\{Infobox Nodo", ur"{{Infobox Nodo\n|imagen=%s" % (imagename), newtext) wikipedia.showDiff(wtext, newtext) page.put(newtext, u"BOT - Añadiendo imagen") if __name__ == '__main__': main()
BansheeMediaPlayer/bockbuild
packages/atk.py
Python
mit
68
0.073529
G
nomeXzPackage ('atk', version_major = '2.16', version_minor = '0')
zarafagroupware/zarafa-zsm
tests/tests_authorization.py
Python
agpl-3.0
11,327
0.002472
# Copyright 2012 - 2013 Zarafa B.V. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License, version 3, # as published by the Free Software Foundation with the following additional # term according to sec. 7: # # According to sec. 7 of the GNU Affero General Public License, version # 3, the terms of the AGPL are supplemented with the following terms: # # "Zarafa" is a registered trademark of Zarafa B.V. The licensing of # the Program under the AGPL does not imply a trademark license. # Therefore any rights, title and interest in our trademarks remain # entirely with us. # # However, if you propagate an unmodified version of the Program you are # allowed to use the term "Zarafa" to indicate that you distribute the # Program. Furthermore you may use our trademarks where it is necessary # to indicate the intended purpose of a product or service provided you # use it in accordance with honest practices in industrial or commercial # matters. If you want to propagate modified versions of the Program # under the name "Zarafa" or "Zarafa Server", you may only do so if you # have a written permission by Zarafa B.V. (to acquire a permission # please contact Zarafa at trademark@zarafa.com). # # The interactive user interface of the software displays an attribution # notice containing the term "Zarafa" and/or the logo of Zarafa. # Interactive user interfaces of unmodified and modified versions must # display Appropriate Legal Notices according to sec. 5 of the GNU # Affero General Public License, version 3, when you propagate # unmodified or modified versions of the Program. In accordance with # sec. 7 b) of the GNU Affero General Public License, version 3, these # Appropriate Legal Notices must retain the logo of Zarafa or display # the words "Initial Development by Zarafa" if the display of the logo # is not reasonably feasible for technical reasons. The use of the logo # of Zarafa in Legal Notices is allowed for unmodified and modified # versions of the software. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
from libzsm.rest_client.utils import get_api from libzsm.rest_client.exc import Http403 from common import ApiTestBase class AuthorizationTest(ApiTestBase): def __init__(self, *args, **kwargs): super(AuthorizationTest, self).__init__(*args, **kwargs) self.s = get_api() def setUp(self): ''' Trans [Harry (adm), Jeeves] # NOQA # NOQA | # NOQA v # NOQA # NOQA Wheels [Rob] -> Cars [Jack] # NOQA # NOQA | # NOQA v # NOQA # NOQA Bikes [Harry] # NOQA # NOQA Refer to the diagram: https://confluence.zarafa.com/pages/viewpage.action?pageId=20841313 ''' ## Hank is a tenant admin data = dict( name=u'trans', ) self.ten_trans = self.s.create_tenant(initial=data) data = dict( username=u'hank', password=u'nk', name=u'Hank', surname=u'R', tenant=self.ten_trans, userServer=self.server1, ) self.trans_hank = self.s.create_user(initial=data) data = { 'permissions': [ 'ViewContact', 'ViewGroup', 'ViewGroupPrivileges', 'ViewTenant', 'ViewTenantAcl', 'ViewUser', 'ViewUserPrivileges', 'WriteContact', 'WriteGroup', 'WriteGroupPrivileges', 'WriteTenant', 'WriteTenantAcl', 'WriteUser', 'WriteUserPrivileges', ], 'user': self.trans_hank.resourceUri, } self.s.add_tenant_ace(self.ten_trans, data) data = [ u'CreateTenant', ] self.s.put_user_privs(self.trans_hank, data) self.s_trans_hank = self.s.get_session(self.trans_hank) ## Jeeves is Hank's butler data = dict( username=u'jeeves', password=u'jv', name=u'Jeeves', surname=u'H', tenant=self.ten_trans, userServer=self.server1, ) self.trans_jeeves = self.s_trans_hank.create_user(initial=data) self.s_trans_jeeves = self.s.get_session(self.trans_jeeves) ## Trans has a customer Wheels with a user Rob data = dict( name=u'wheels', ) self.ten_wheels = self.s_trans_hank.create_tenant(initial=data) data = dict( username=u'rob', password=u'rb', name=u'Rob', surname=u'Dole', tenant=self.ten_wheels, userServer=self.server1, ) self.wheels_rob = self.s_trans_hank.create_user(initial=data) data = [ u'CreateTenant', ] self.s_trans_hank.put_user_privs(self.wheels_rob, data) self.s_wheels_rob = self.s.get_session(self.wheels_rob) ## Wheels has a customer Bikes with a user Harry data = dict( name=u'bikes', ) self.ten_bikes = self.s_wheels_rob.create_tenant(initial=data) data = dict( username=u'harry', password=u'hr', name=u'Harry', surname=u'W', tenant=self.ten_bikes, userServer=self.server1, ) self.bikes_harry = self.s_wheels_rob.create_user(initial=data) self.s_bikes_harry = self.s.get_session(self.bikes_harry) ## Wheels has a customer Cars with a user Jack data = dict( name=u'cars', ) self.ten_cars = self.s_wheels_rob.create_tenant(initial=data) data = dict( username=u'jack', password=u'jk', name=u'Jack', surname=u'Hicks', tenant=self.ten_cars, userServer=self.server1, ) self.cars_jack = self.s_wheels_rob.create_user(initial=data) self.s_cars_jack = self.s.get_session(self.cars_jack) ## Set some handy groupings self.all_tenants = [ self.ten_trans, self.ten_wheels, self.ten_bikes, self.ten_cars, ] def tearDown(self): self.s_wheels_rob.delete_tenant(self.ten_bikes) self.s_wheels_rob.delete_tenant(self.ten_cars) self.s_trans_hank.delete_tenant(self.ten_wheels) self.s.delete_tenant(self.ten_trans) def test_neg_tenant_access(self): ## Hank only sees the tenants he created tens = self.s_trans_hank.all_tenant() self.assertEqual(2, len(tens), u'Incorrect number of tenants.') self.verify_iterable(tens, [self.ten_trans, self.ten_wheels]) ## Jeeves sees no tenants tens = self.s_trans_jeeves.all_tenant() self.assertEqual(0, len(tens), u'Incorrect number of tenants.') ## Rob sees Bikes and Cars tens = 
self.s_wheels_rob.all_tenant() self.assertEqual(2, len(tens), u'Incorrect number of tenants.') self
.verify_iterable(tens, [self.ten_bikes,
self.ten_cars]) ## Harry sees no tenants tens = self.s_bikes_harry.all_tenant() self.assertEqual(0, len(tens), u'Incorrect number of tenants.') ##
Jspsun/LEETCodePractice
Python/NumberOfIslands.py
Python
mit
834
0
class Solution(object): def numIslands(self, grid): """ :type grid: List[List[str]] :rtype:
int """ count = 0 for r in range(len(grid)): for c in range(len(grid[0])): if grid[r][c] == "1": self.clearIsland(grid, r, c) count += 1 return count def clearIsland(self, grid, r, c): grid[r][c] = "0" if r > 0 and grid[r - 1][c] == "1": self.clearIsland(grid, r - 1, c) if r < len(grid) - 1 and grid[r + 1][c] == "1": self.clearIsl
and(grid, r + 1, c) if c > 0 and grid[r][c - 1] == "1": self.clearIsland(grid, r, c - 1) if c < len(grid[0]) - 1 and grid[r][c + 1] == "1": self.clearIsland(grid, r, c + 1) return
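# Quick check of the DFS flood fill above (note: the grid is mutated in place):
grid = [
    ["1", "1", "0", "0"],
    ["1", "0", "0", "1"],
    ["0", "0", "1", "1"],
]
print(Solution().numIslands(grid))  # 2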
jasonlvhit/whoops
whoops/wsgilib/wsgi_server.py
Python
mit
3,279
0.00061
import sys from io import BytesIO from whoops.httplib.http_server import HttpServer from whoops import ioloop class WSGIServer(HttpServer): def __init__(self, ioloop, address): super(WSGIServer, self).__init__(ioloop, address) self.app = None self.environ = None self.result = None self.cgi_environ = None self.http_version = "HTTP/1.1" self.wsgi_version = (1, 0) self.wsgi_multithread = True self.wsgi_multiprocess = False self.wsgi_run_once = False def set_app(self, app): self.app = app def on_connection(self, conn): self.connection = conn self.parse_request() self.setup_environ() self.result = self.app(self.environ, self.start_response) self.finish_response() def setup_cgi_environ(self): env = {} request_line = self.raw_requestline.decode("latin-1").rstrip("\r\n") method, path, version = request_line.split(" ") if "?" in path: _path, query_string = path.split("?") env["QUERY_STRING"] = query_string env["REQUEST_METHOD"] = method env["PATH_INFO"] = path env["SERVER_PROTOCOL"] = self.http_version
env["SERVER_HOST"] = self.host env["SERVER_PORT"] = self.port if "content-type" in self.header: env["CONTENT_TYPE"] = self.header.get("content-type") if "content-length" in self.header: env["CONTENT_LENGTH"] = self.header.get("content-length") for key, value in self.header.items(): env["HTTP_" + key.replace("-", "_").upper()] = value self.cgi_environ = env def se
tup_environ(self):
        self.setup_cgi_environ()
        env = self.environ = self.cgi_environ.copy()
        env["wsgi.input"] = BytesIO(self.request_body)
        env["wsgi.errors"] = sys.stdout
        env["wsgi.version"] = self.wsgi_version
        env["wsgi.run_once"] = self.wsgi_run_once
        env["wsgi.url_scheme"] = "http"
        env["wsgi.multithread"] = self.wsgi_multithread
        env["wsgi.multiprocess"] = self.wsgi_multiprocess  # PEP 3333 key name

    def start_response(self, status, headers, exc_info=None):
        code = int(status[0:3])
        message = str(status[4:])
        self.send_response(code, message)
        self.ioloop.logger.info(
            self.cgi_environ["PATH_INFO"] + " %s %d %s" % ("HTTP/1.1", code, message)
        )
        self.need_content_length = True
        for name, val in headers:
            if name == "Content-Length":
                self.need_content_length = False
            self.send_header(name, val)
        self.send_header("Date", self.date_string())
        if code == 304:
            self.need_content_length = False

    def finish_response(self):
        if self.need_content_length:
            content_length = 0
            for data in self.result:
                content_length += len(data)
            self.send_header("Content-Length", content_length)
        self.end_headers()
        for data in self.result:
            self.send(data)


def make_server(host, port, app):
    server = WSGIServer(ioloop.IOLoop.instance(num_backends=1000), (host, port))
    server.set_app(app)
    return server
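# A hedged sketch of serving a minimal WSGI app with the factory above; how the
# event loop is started is whoops-specific and only assumed here.
def hello_app(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello"]

server = make_server("127.0.0.1", 8000, hello_app)
# server.ioloop.start()  # assumed entry point for driving the loop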
takus/dd-agent
utils/service_discovery/zookeeper_config_store.py
Python
bsd-3-clause
3,661
0.001639
# (C) Datadog, Inc. 2016 # All rights reserved # Licensed under Simplified BSD License (see LICENSE) # std import logging from kazoo.client import KazooClient, NoNodeError from utils.service_discovery.abstract_config_store import AbstractConfigStore, KeyNotFound DEFAULT_ZK_HOST = '127.0.0.1' DEFAULT_ZK_PORT = 2181 DEFAULT_TIMEOUT = 5 log = logging.getLogger(__name__) class ZookeeperStore(AbstractConfigStore): """Implementation of a config store client for Zookeeper""" def _extract_settings(self, config): """Extract settings from a config object""" settings = { 'host': config.get('sd_backend_host', DEFAULT_ZK_HOST), 'port': int(config.get('sd_backend_port', DEFAULT_ZK_PORT)), } return settings def get_client(self, reset=False): if self.client is None or reset is True: self.client = KazooClient( hosts=self.settings.get('host') + ":" + str(self
.settings.get('port')), read_only=True, ) self.client.start() return self.client def client_read(self, path, **kwargs): """Retrieve a value from a Zookeeper key.""" try: if kwargs.get('watch', False): return self.recursive_mtime(path) elif kwargs.get('all', False):
# we use it in _populate_identifier_to_checks
                results = []
                self.recursive_list(path, results)
                return results
            else:
                res, stats = self.client.get(path)
                return res.decode("utf-8")
        except NoNodeError:
            raise KeyNotFound("The key %s was not found in Zookeeper" % path)

    def recursive_list(self, path, results):
        """Recursively walks the children from the given path and builds a list of key/value tuples"""
        try:
            data, stat = self.client.get(path)
            if data:
                node_as_string = data.decode("utf-8")
                if node_as_string:  # only keep nodes that actually carry a value
                    results.append((path.decode("utf-8"), node_as_string))
            children = self.client.get_children(path)
            if children is not None:
                for child in children:
                    new_path = '/'.join([path.rstrip('/'), child])
                    self.recursive_list(new_path, results)
        except NoNodeError:
            raise KeyNotFound("The key %s was not found in Zookeeper" % path)

    def recursive_mtime(self, path):
        """Recursively walks the children from the given path to find the maximum modification time"""
        try:
            data, stat = self.client.get(path)
            children = self.client.get_children(path)
            mtime = stat.mtime
            if children:
                for child in children:
                    new_path = '/'.join([path.rstrip('/'), child])
                    # take the max across *all* children, not just the first one
                    mtime = max(mtime, self.recursive_mtime(new_path))
            return mtime
        except NoNodeError:
            raise KeyNotFound("The key %s was not found in Zookeeper" % path)

    def dump_directory(self, path, **kwargs):
        """Return a dict made of all image names and their corresponding check info"""
        templates = {}
        paths = []
        self.recursive_list(path, paths)
        for pair in paths:
            splits = pair[0].split('/')
            image = splits[-2]
            param = splits[-1]
            value = pair[1]
            if image not in templates:
                templates[image] = {}
            templates[image][param] = value
        return templates
MikaelSchultz/dofiloop-sentinel
sentinel/device/admin.py
Python
mit
1,293
0.001547
from django.contrib import admin from .models import SnmpDevice, SnmpDeviceMessage, PingHistory # Register your models here. class SnmpDeviceAdmin(admin.ModelAdmin): fields = [ 'name', 'hostname', 'status', 'ping_mode', 'ping_port', 'snmp_template', 'snmp_port', 'snmp_community', 'snmp_system_contact', 'snmp_system_description', 'snmp_system_name', 'snmp_system_location', 'snmp_system_uptime','ping_last_se
en', 'ping_last_tried', 'snmp_last_tried', 'snmp_last_poll', 'snmp_logged_on_users' ] readonly_fields = ( 'ping_last_seen', 'ping_last_tried', 'snmp_last_tried', 'snmp_last_poll' ) list_display = [ 'name', 'hostname', 'snmp_logged_on_users' ] class Sn
mpDeviceMessageAdmin(admin.ModelAdmin): fields = ( 'snmp_device', 'status', 'message_choice', 'resolved', 'resolved_by' ) class PingHistoryAdmin(admin.ModelAdmin): fields = [ 'snmp_device', 'online', 'timestamp' ] readonly_fields = [ 'timestamp', ] list_display = [ 'snmp_device', 'online', 'timestamp' ] admin.site.register(SnmpDevice, SnmpDeviceAdmin) admin.site.register(PingHistory, PingHistoryAdmin) admin.site.register(SnmpDeviceMessage, SnmpDeviceMessageAdmin)
manhhomienbienthuy/scikit-learn
benchmarks/bench_plot_randomized_svd.py
Python
bsd-3-clause
17,938
0.000557
""" Benchmarks on the power iterations phase in randomized SVD. We test on various synthetic and real datasets the effect of increasing the number of power iterations in terms of quality of approximation and running time. A number greater than 0 should help with noisy matrices, which are characterized by a slow spectral decay. We test several policy for normalizing the power iterations. Normalization is crucial to avoid numerical issues. The quality of the approximation is measured by the spectral norm discrepancy between the original input matrix and the reconstructed one (by multiplying the randomized_svd's outputs). The spectral norm is always equivalent to the largest singular value of a matrix. (3) justifies this choice. However, one can notice in these experiments that Frobenius and spectral norms behave very similarly in a qualitative sense. Therefore, we suggest to run these benchmarks with `enable_spectral_norm = False`, as Frobenius' is MUCH faster to compute. The benchmarks follow. (a) plot: time vs norm, varying number of power iterations data: many datasets goal: compare normalization policies and study how the number of power iterations affect time and norm (b) plot: n_iter vs norm, varying rank of data and number of components for randomized_SVD data: low-rank matrices on which we control the rank goal: study whether the rank of the matrix and the number of components extracted by randomized SVD affect "the optimal" number of power iterations (c) plot: time vs norm, varying datasets data: many datasets goal: compare default configurations We compare the following algorithms: - randomized_svd(..., power_iteration_normalizer='none') - randomized_svd(..., power_iteration_normalizer='LU') - randomized_svd(..., power_iteration_normalizer='QR') - randomized_svd(..., power_iteration_normalizer='auto') - fbpca.pca() from https://github.com/facebook/fbpca (if installed) Conclusion ---------- - n_iter=2 appears to be a good default value - power_iteration_normalizer='none' is OK if n_iter is small, otherwise LU gives similar errors to QR but is cheaper. That's what 'auto' implements. References ---------- (1) :arxiv:`"Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions." <0909.4061>` Halko, et al., (2009) (2) A randomized algorithm for the decomposition of matrices Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert (3) An implementation of a randomized algorithm for principal component analysis A. Szlam et al. 
2014
"""

# Author: Giorgio Patrini

import numpy as np
import scipy as sp
# scipy submodules must be imported explicitly before use via the sp alias
import scipy.io
import scipy.sparse
import matplotlib.pyplot as plt

import gc
import pickle

from time import time
from collections import defaultdict
import os.path

from sklearn.utils._arpack import _init_arpack_v0
from sklearn.utils import gen_batches
from sklearn.utils.validation import check_random_state
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets import make_low_rank_matrix, make_sparse_uncorrelated
from sklearn.datasets import (
    fetch_lfw_people,
    fetch_openml,
    fetch_20newsgroups_vectorized,
    fetch_olivetti_faces,
    fetch_rcv1,
)

try:
    import fbpca

    fbpca_available = True
except ImportError:
    fbpca_available = False

# If this is enabled, tests are much slower and will crash with the large data
enable_spectral_norm = False

# TODO: compute approximate spectral norms with the power method as in
# Estimating the largest eigenvalues by the power and Lanczos methods with
# a random start, Jacek Kuczynski and Henryk Wozniakowski, SIAM Journal on
# Matrix Analysis and Applications, 13 (4): 1094-1122, 1992.
# This approximation is a very fast estimate of the spectral norm, but depends
# on starting random vectors.

# Determine when to switch to batch computation for matrix norms,
# in case the reconstructed (dense) matrix is too large
MAX_MEMORY = int(2e9)

# The following datasets can be downloaded manually from:
# CIFAR 10: https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
# SVHN: http://ufldl.stanford.edu/housenumbers/train_32x32.mat
CIFAR_FOLDER = "./cifar-10-batches-py/"
SVHN_FOLDER = "./SVHN/"

datasets = [
    "low rank matrix",
    "lfw_people",
    "olivetti_faces",
    "20newsgroups",
    "mnist_784",
    "CIFAR",
    "a3a",
    "SVHN",
    "uncorrelated matrix",
]

big_sparse_datasets = ["big sparse matrix", "rcv1"]


def unpickle(file_name):
    with open(file_name, "rb") as fo:
        return pickle.load(fo, encoding="latin1")["data"]


def handle_missing_dataset(file_folder):
    if not os.path.isdir(file_folder):
        print("%s file folder not found. Test skipped." % file_folder)
        return 0


def get_data(dataset_name):
    print("Getting dataset: %s" % dataset_name)

    if dataset_name == "lfw_people":
        X = fetch_lfw_people().data
    elif dataset_name == "20newsgroups":
        X = fetch_20newsgroups_vectorized().data[:, :100000]
    elif dataset_name == "olivetti_faces":
        X = fetch_olivetti_faces().data
    elif dataset_name == "rcv1":
        X = fetch_rcv1().data
    elif dataset_name == "CIFAR":
        # handle_missing_dataset() returns 0 when the folder is missing
        if handle_missing_dataset(CIFAR_FOLDER) == 0:
            return
        X1 = [unpickle("%sdata_batch_%d" % (CIFAR_FOLDER, i + 1)) for i in range(5)]
        X = np.vstack(X1)
        del X1
    elif dataset_name == "SVHN":
        if handle_missing_dataset(SVHN_FOLDER) == 0:
            return
        X1 = sp.io.loadmat("%strain_32x32.mat" % SVHN_FOLDER)["X"]
        X2 = [X1[:, :, :, i].reshape(32 * 32 * 3) for i in range(X1.shape[3])]
        X = np.vstack(X2)
        del X1
        del X2
    elif dataset_name == "low rank matrix":
        X = make_low_rank_matrix(
            n_samples=500,
            n_features=int(1e4),
            effective_rank=100,
            tail_strength=0.5,
            random_state=random_state,
        )
    elif dataset_name == "uncorrelated matrix":
        X, _ = make_sparse_uncorrelated(
            n_samples=500, n_features=10000, random_state=random_state
        )
    elif dataset_name == "big sparse matrix":
        sparsity = int(1e6)
        size = int(1e6)
        small_size = int(1e4)
        data = np.random.normal(0, 1, int(sparsity / 10))
        data = np.repeat(data, 10)
        row = np.random.uniform(0, small_size, sparsity)
        col = np.random.uniform(0, small_size, sparsity)
        X = sp.sparse.csr_matrix((data, (row, col)), shape=(size, small_size))
        del data
        del row
        del col
    else:
        X = fetch_openml(dataset_name).data
    return X


def plot_time_vs_s(time, norm, point_labels, title):
    plt.figure()
    colors = ["g", "b", "y"]
    for i, l in enumerate(sorted(norm.keys())):
        if l != "fbpca":
            plt.plot(time[l], norm[l], label=l, marker="o", c=colors.pop())
        else:
            plt.plot(time[l], norm[l], label=l, marker="^", c="red")

        for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
            plt.annotate(
label, xy=(x, y), xytext=(0, -20), textcoords="offset points", ha="right",
va="bottom", ) plt.legend(loc="upper right") plt.suptitle(title) plt.ylabel("norm discrepancy") plt.xlabel("running time [s]") def scatter_time_vs_s(time, norm, point_labels, title): plt.figure() size = 100 for i, l in enumerate(sorted(norm.keys())): if l != "fbpca": plt.scatter(time[l], norm[l], label=l, marker="o", c="b", s=size) for label, x, y in zip(point_labels, list(time[l]), list(norm[l])): plt.annotate( label, xy=(x, y), xytext=(0, -80), textcoords="offset points", ha="right", arrowprops=dict(arrowstyle="->", connectionstyle="arc3"), va="bottom", size=11, rotation=90, ) else: plt.scatter(time[l], norm[l
NYUCCL/psiTurk
tests/test_tasks.py
Python
mit
3,640
0.003571
import pytest CODEVERSION = '0.0.1' NEW_CODEVERSION = '0.0.2' @pytest.fixture() def campaign(): from psiturk.models import Campaign parameters = { 'codeversion': CODEVERSION, 'mode': 'sandbox', 'goal': 100, 'minutes_between_rounds': 1, 'assignments_per_round': 10, 'hit_reward': 1.00, 'hit_duration_hours': 1, } new_campaign = Campaign(**parameters) from psiturk.db import db_session db_session.add(new_campaign) db_session.commit() return new_campaign def test_campaign_round_codeversion_change_cancel(patch_aws_services, campaign, mocker, caplog): from psiturk.tasks import do_campaign_round campaign_args = { 'campaign': campaign, 'job_id': campaign.campaign_job_id } from psiturk.experiment import app mocker.patch.object(app.apscheduler, 'remove_job', lambda *args, **kwargs: True) from psiturk.amt_services_wrapper import MTurkServicesWrapper aws_services_wrapper = MTurkServicesWrapper() aws_services_wrapper.config['Task Parameters']['experiment_code_version'] = NEW_CODEVERSION import psiturk.tasks mocker.patch.object(psiturk.tasks.TaskUtils, 'aws_services_wrapper', aws_services_wrapper) import psiturk.experiment remove_job_mock = mocker.patch.object(psiturk.experiment.app.apscheduler, 'remove_job') do_campaign_round(**campaign_args) remove_job_mock.assert_called() def test_campaign_goal_met_cancel(patch_aws_services, campaign, mocker, caplog, stubber): from psiturk.tasks import do_campaign_round campaign_args = { 'campaign': ca
mpaign,
'job_id': campaign.campaign_job_id } from psiturk.experiment import app mocker.patch.object(app.apscheduler, 'remove_job', lambda *args, **kwargs: True) import psiturk.tasks mocker.patch.object(psiturk.models.Participant, 'count_completed', lambda *args, **kwargs: campaign.goal) import psiturk.experiment remove_job_mock = mocker.patch.object(psiturk.experiment.app.apscheduler, 'remove_job') do_campaign_round(**campaign_args) remove_job_mock.assert_called() assert not campaign.is_active def test_campaign_posts_hits(patch_aws_services, stubber, campaign, mocker, caplog): from psiturk.amt_services_wrapper import MTurkServicesWrapper aws_services_wrapper = MTurkServicesWrapper() import psiturk.tasks mocker.patch.object(psiturk.tasks.TaskUtils, 'aws_services_wrapper', aws_services_wrapper) mocked_create_hit = mocker.patch.object(aws_services_wrapper, 'create_hit') campaign_args = { 'campaign': campaign, 'job_id': campaign.campaign_job_id } from psiturk.tasks import do_campaign_round do_campaign_round(**campaign_args) assert mocked_create_hit.call_count == 2 mocked_create_hit.assert_any_call(num_workers=9, reward=campaign.hit_reward, duration=campaign.hit_duration_hours) mocked_create_hit.assert_any_call(num_workers=1, reward=campaign.hit_reward, duration=campaign.hit_duration_hours) def test_task_approve_all(patch_aws_services, stubber, mocker, caplog): from psiturk.amt_services_wrapper import MTurkServicesWrapper aws_services_wrapper = MTurkServicesWrapper() import psiturk.tasks mocker.patch.object(psiturk.tasks.TaskUtils, 'aws_services_wrapper', aws_services_wrapper) mocked_approve_all = mocker.patch.object(aws_services_wrapper, 'approve_all_assignments') from psiturk.tasks import do_approve_all do_approve_all('sandbox') mocked_approve_all.assert_called_once()
facebookexperimental/eden
eden/scm/tests/test-treestate.py
Python
gpl-2.0
9,519
0
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This software may be used and distributed according to the terms of the # GNU General Public License version 2. from __future__ import absolute_import import itertools import os import posixpath import random import tempfile import unittest import silenttestrunner from bindings import treestate from edenscm.mercurial import pycompat from hghave import require testtmp = os.getenv("TESTTMP") or tempfile.mkdtemp("test-treestate") def randname(): length = random.randint(1, 4) return "".join(random.sample("abcdef", 1)[0] for i in range(length)) def randpath(path=""): # pop components from path for i in range(1 + random.randrange(path.count("/") + 1)): path = os.path.dirname(path) # push new components to path maxlevel = 4 for i in range(1 + random.randrange(max([1, maxlevel - path.count("/")]))): path = posixpath.join(path, randname()) if not path: path = randname() return path def genpaths(): """generate random paths""" path = "" while True: nextpath = randpath(path) yield nextpath path = nextpath def genfiles(): """generate random tuple of (path, bits, mode, size, mtime, copied)""" pathgen = genpaths() while True: path = next(pathgen) bits = 0 mode = random.randint(0, 0o777) size = random.randint(0, 1 << 31) mtime = random.randint(-1, 1 << 31) copied = None # bits (StateFlags) for bit in [ treestate.EXIST_P1, treestate.EXIST_P2, treestate.EXIST_NEXT, treestate.IGNORED, treestate.NEED_CHECK, ]: if random.randint(0, 1): bits |= bit if random.randint(0, 1): bits |= treestate.COPIED copied = next(pathgen) yield (path, bits, mode, size, mtime, copied) class testtreestate(unittest.TestCase): def testempty(self): tree = treestate.treestate(os.path.join(testtmp, "empty"), 0) self.assertEqual(len(tree), 0) self.assertEqual(tree.getmetadata(), b"") self.assertEqual(tree.walk(0, 0), []) self.assertTrue(tree.hasdir("/")) for path in ["", "a", "/", "b/c", "d/"]: self.assertFalse(path in tree) if path and path != "/": self.assertFalse(tree.hasdir(path)) if path != "/": if path.endswith("/"): self.assertIsNone(tree.getdir(path)) else: self.assertIsNone(tree.get(path, None)) def testinsert(self): tree = treestate.treestate(os.path.join(testtmp, "insert"), 0) count = 5000 files = list(itertools.islice(genfiles(), count)) expected = {} for path, bits, mode, size, mtime, copied in files: tree.insert(path, bits, mode, size, mtime, copied) expected[path] = (bits, mode, size, mtime, copied) self.assertEqual(len(tree), len(expected)) for path in tree.walk(0, 0): self.assertTrue(tree.hasdir(os.path.dirname(path) + "/")) self.assertEqual(tree.get(path, None), expected[path]) def testremove(self): tree = treestate.treestate(os.path.join(testtmp, "remove"), 0) count = 5000 files = list(itertools.islice(genfiles(), count)) expected = {} for path, bits, mode, size, mtime, copied in files: tree.insert(path, bits, mode, size, mtime, copied) if (mtime & 1) == 0: tree.remove(path) if path in expected: del expected[path] else: expected[path] = (bits, mode, size, mtime, copied) self.assertEqual(len(tree), len(expected)) for path in tree.walk(0, 0): self.assertTrue(tree.hasdir(os.path.dirname(path) + "/")) self.assertEqual(tree.get(path, None), expected[path]) def testwalk(self): treepath = os.path.join(testtmp, "walk") tree = treestate.treestate(treepath, 0) count = 5000 files = list(itertools.islice(genfiles(), count)) expected = {} for path, bits, mode, size, mtime, copied in files: tree.insert(path, bits, mode, size, mtime, copied) expected[path] = (bits, mode, size, 
mtime, copied) def walk(setbits, unsetbits): return sorted( k for k, v in pycompat.iteritems(expected) if ((v[0] & unsetbits) == 0 and (v[0] & setbits) == setbits) ) def check(setbits, unsetbits): self.assertEqual( walk(setbits, unsetbits), sorted(tree.walk(setbits, unsetbits)) ) for i in ["in-memory", "flushed"]: for bit in [treestate.IGNORED, treestate.COPIED]: check(0, bit) check(bit, 0) check(treestate.EXIST_P1, treestate.EXIST_P2) rootid = tree.flush() tree = treestate.treestate(treepath, rootid) def testdirfilter(self): treepath = os.path.join(testtmp, "walk") tree = treestate.treestate(treepath, 0) files = ["a/b", "a/b/c", "b/c", "c/d"] for path in files: tree.insert(path, 1, 2, 3, 4, None) self.assertEqual(tree.walk(1, 0, None), files) self.assertEqual( tree.walk(1, 0, lambda dir: dir in {"a/b/", "c/"}), ["a/b", "b/c"] ) self.assertEqual(tree.walk(1, 0, lambda dir: True), []) def testflush(self): treepath = os.path.join(testtmp, "flush") tree = treestate.treestate(treepath, 0) tree.insert("a", 1, 2, 3, 4, None) tree.setmetadata(b"1") rootid1 = tree.flush() tree.remove("a") tree.insert("b", 1, 2, 3, 4, None) tree.setmetadata(b"2") rootid2 = tree.flush() tree = treestate.treestate(treepath, rootid1) self.assertTrue("a" in tree) self.assertFalse("b" in tree) self.assertEqual(tree.getmetadata(), b"1") tree = treestate.treestate(treepath, rootid2) self.assertFalse("a" in tree) self.assertTrue("b" in tree) self.assertEqual(tree.getmetadata(), b"2")
def testsaveas(self): treepath = os.path.join(testtmp, "saveas") tree = treestate.treestate(treepath, 0) tree.insert("a", 1, 2, 3, 4, None)
tree.setmetadata(b"1") tree.flush() tree.insert("b", 1, 2, 3, 4, None) tree.remove("a") treepath = "%s-savedas" % treepath tree.setmetadata(b"2") rootid = tree.saveas(treepath) tree = treestate.treestate(treepath, rootid) self.assertFalse("a" in tree) self.assertTrue("b" in tree) self.assertEqual(tree.getmetadata(), b"2") def testfiltered(self): treepath = os.path.join(testtmp, "filtered") tree = treestate.treestate(treepath, 0) tree.insert("a/B/c", 1, 2, 3, 4, None) filtered = tree.getfiltered("A/B/C", lambda x: x.upper(), 1) self.assertEqual(filtered, ["a/B/c"]) filtered = tree.getfiltered("A/B/C", lambda x: x, 2) self.assertEqual(filtered, []) def testpathcomplete(self): treepath = os.path.join(testtmp, "pathcomplete") tree = treestate.treestate(treepath, 0) paths = ["a/b/c", "a/b/d", "a/c", "de"] for path in paths: tree.insert(path, 1, 2, 3, 4, None) def complete(prefix, fullpath=False): completed = [] tree.pathcomplete(prefix, 0, 0, completed.append, fullpath) return completed self.assertEqual(complete(""), ["a/", "de"]) self.assertEqual(complete("d"), ["de"]) self.assertEqual(complete("a/"), ["a/b/", "a/c"]) self.assertEqual(complete("a/b/"), ["a/b/c", "a/b/d"]) self.assertEqual(complete("a/b/c"), ["a/b/c"]) self.assertEqual(complete("", True), paths) def testgetdir(self): treepath = os.path.join(testtmp, "filter
pombredanne/invenio-old
modules/bibclassify/lib/bibclassify_microtests.py
Python
gpl-2.0
7,289
0.005076
# -*- coding: utf-8 -*- """Module for running microtests on how well the extraction works - this module is STANDALONE safe""" import ConfigParser import glob import traceback import codecs import bibclassify_config as bconfig import bibclassify_engine as engine log = bconfig.get_logger("bibclassify.microtest") def run(glob_patterns, verbose=20, plevel = 1 ): """Execute microtests""" if verbose is not None: log.setLevel(int(verbose)) results = {} for pattern in glob_patterns: log.info("Looking for microtests: %s" % pattern) for cfgfile in glob.glob(pattern): log.debug("processing: %s" % (cfgfile)) try: test_cases = load_microtest_definition(cfgfile) run_microtest_suite(test_cases, results=results, plevel=plevel) except Exception, msg: log.error('Error running microtest: %s' % cfgfile) log.error(msg) log.error(traceback.format_exc()) summarize_results(results, plevel) def run_microtest_suite(test_cases, results={}, plevel=1): """Runs all tests from the test_case @var test_cases: microtest definitions @keyword results: dict, where results are cummulated @keyword plevel: int [0..1], performance level, results below the plevel are considered unsuccessful @return: nothing """ config = {} if 'config' in test_cases: config = test_cases['config'] del(test_cases['config']) if 'taxonomy' not in config: config['taxonomy'] = ['HEP'] for test_name in sorted(test_cases.keys()): test = test_cases[test_name] try: log.debug('section: %s' % test_name) phrase = test['phrase'][0] (skw, ckw, akw, acr) = engine.get_keywords_from_text(test['phrase'], config['taxonomy'][0], output_mode="raw") details = analyze_results(test, (skw, ckw) ) if details["plevel"] < plevel: log.error("\n" + format_test_case(test)) log.error("results\n" + format_details(details)) else: log.info("Success for section: %s" % (test_name)) log.info("\n" + format_test_case(test)) if plevel != 1: log.info("results\n" + format_details(details)) results.setdefault(test_name, []) results[test_name].append(details) except Exception, msg: log.error('Operational error executing section: %s' % test_name) #log.error(msg) log.error(traceback.format_exc()) def summarize_results(results, plevel): total = 0 success = 0 for k,v in results.items(): total += len(v) success += len(filter(lambda x: x["plevel"] >= plevel, v)) log.info("Total number of micro-tests run: %s" % total) log.info("Success/failure: %d/%d" % (success, total-success)) def format_details(details): plevel = details["plevel"] details["plevel"] = [plevel] out = format_test_case(details) details["plevel"] = plevel return out def format_test_case(test_case): padding = 13 keys = ["phrase", "expected", "unwanted"] out = ["" for x in range(len(keys))] out2 = [] for key in test_case.keys(): phrase = "\n".join(map(lambda x: (" " * (padding + 1) ) + str(x), test_case[key])) if key in keys: out[keys.index(key)] = "%s=%s" % (key.rjust(padding-1), phrase[padding:]) else: out2.append("%s=%s" % (key.rjust(padding-1), phrase[padding:])) if filter(len, out) and filter(len, out2): return "%s\n%s" % ("\n".join(filter(len, out)), "\n".join(out2)) else: return "%s%s" % ("\n".join(filter(len, out)), "\n".join(out2)) def analyze_results(test_case, results): skw = results[0] ckw = results[1] details = {"correct" : [], "incorrect": [], "plevel" : 0} responses_total = len(skw) + len(ckw) expected_total = len(test_case["expected"]) correct_responses = 0 incorrect_responses = 0 for result_set in (skw, ckw): for r in result_set: try: val = r[0].output() except: val = r.output() if r in test_case["expected"]: 
                correct_responses += 1
                details["correct"].append(val)
            else:
                incorrect_responses += 1
                details["incorrect"].append(val)

    # float division, so that fractional plevel values survive on Python 2
    details["plevel"] = ((responses_total + expected_total) -
                         incorrect_responses) / float(responses_total + expected_total)

    return details


def load_microtest_definition(cfgfile, **kwargs):
    """Loads data from the microtest definition file
    {
     section-1:
       phrase: [ some-string]
       expected: [some, string]
       unwanted: [some-string]
     section-2:
       .....
    }
    """
    config = {}
    cfg = ConfigParser.ConfigParser()
    fo = codecs.open(cfgfile, 'r', 'utf-8')
    cfg.readfp(fo, filename=cfgfile)
    for s in cfg.sections():
        if s in config:
            log.error('two sections with the same name')
        config[s] = {}
        for k, v in cfg.items(s):
            if "\n" in v:
                v = filter(len, v.splitlines())
            else:
                v = [v.strip()]
            if k not in config[s]:
                config[s][k] = []
            config[s][k] += v
    fo.close()
    return config


if __name__ == "__main__":
    import os, sys
    test_paths = []
    if len(sys.argv) > 1 and sys.argv[1] == "demo":
        test_paths.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "bibclassify/microtest*.cfg")))
        test_paths.append(os.path.abspath(o
s.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../etc/bibclassify/microtest*.cfg"))) run(test_paths) elif (len(sys.argv) > 1): for p in sys.argv[1:]: if p[0] == os.path.sep: # absolute path test_paths.append(p) else: # try to detect i
f we shall prepend rootdir
                first = p.split(os.path.sep)[0]
                if os.path.exists(first):
                    #probably relative path
                    test_paths.append(p)
                elif os.path.exists(os.path.join(bconfig.CFG_PREFIX, first)):
                    #relative to root
                    test_paths.append(os.path.join(bconfig.CFG_PREFIX, p))
                    log.warning('Resolving relative path %s -> %s' % (p, test_paths[-1]))
                else:
                    raise Exception('Please check the glob pattern: %s\n\
it seems to be a relative path, but not relative to the script, nor to the invenio rootdir' % p)
        run(test_paths)
    else:
        print 'Usage: %s glob_pattern [glob_pattern...]\nExample: %s %s/etc/bibclassify/microtest*.cfg' % (sys.argv[0], sys.argv[0], bconfig.CFG_PREFIX, )
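# A sketch of a microtest definition file in the shape that
# load_microtest_definition() above expects (ConfigParser syntax; indented
# continuation lines become list items). Section and keyword values here
# are invented for illustration:
#
#     [higgs-section]
#     phrase: search for the Higgs boson at the LHC
#     expected: Higgs particle
#         LHC
#     unwanted: neutrino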
ajaniv/equitymaster
equity_master/common.py
Python
gpl-2.0
3,296
0.01426
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# equity_master/common.py - equity master common classes
#
# standard copyright text
#
# Initial version: 2012-04-02
# Author: Amnon Janiv

"""
.. module:: equity_master/common
   :synopsis: miscellaneous abstract classes and other core constructs

.. moduleauthor:: Amnon Janiv
"""

__revision__ = '$Id: $'
__version__ = '0.0.1'

import sys
import logging

from equity_master import util


class UnicodeMixin(object):
    """Unicode mixin class to help in python 2 to python 3 migration
    """

    if sys.version_info[0] >= 3:  # Python 3
        def __str__(self):
            return self.__unicode__()
    else:  # Python 2
        def __str__(self):
            return self.__unicode__().encode('utf8')


class ExecutionError(UnicodeMixin, Exception):
    """Execution error class
    """

    def __init__(
            self,
            traceback=None,
            wrapped_ex=None,
            args=None,
            kwargs=None
            ):
        super(ExecutionError, self).__init__(args, kwargs)
        self.traceback = traceback
        self.wrapped_ex = wrapped_ex

    def __unicode__(self):
        return util.pretty_args(*self.args)


class EquityMasterError(ExecutionError):
    """EquityMaster package error class
    """
    pass


class ClassMixin(object):
    """Class mixin abstraction
    """
    def class_name(self):
        return util.class_name(self)


class ErrorMixin(object):
    """Error mixin class
    """
    def error(
            self,
            exc_class,
            logger,
            tmpl,
            *args,
            **kwargs
            ):
        """Log and raise an error"""
        util.log_raise(exc_class, logger, logging
.ERROR, tmpl, *args, **kwargs) def fatal( self, exc_class, logger, tmpl, *args, **kwar
gs): """Log and exit""" pass class LoggingMixin(object): """Log utilities abstraction """ def log( self, logger, severity, tmpl, *args, **kwargs ): util.log(logger, severity, tmpl, *args, **kwargs) def debug( self, logger, tmpl, *args, **kwargs ): """Log a debug message""" util.log(logger, logging.DEBUG, tmpl, *args, **kwargs) def info(self, logger, tmpl, *args, **kwargs): util.log(logger, logging.INFO, tmpl, *args, **kwargs) def warn(self, logger, tmpl, *args, **kwargs): util.log(logger, logging.WARNING, tmpl, *args, **kwargs) def log_error(self, logger, tmpl, *args, **kwargs): util.log(logger, logging.ERROR, tmpl, *args, **kwargs) class BusinessObject(UnicodeMixin, ClassMixin, ErrorMixin, LoggingMixin, object): """Base class business object Facilitates creation of complex object graphs with reduced development and maintenance costs, flexible, yet with rich functionality """ def is_valid(self): """Check if object instance is valid Demonstrates abstract method construct """ raise NotImplementedError
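

# A minimal, hypothetical subclass sketch showing how the mixins above are
# meant to be combined; "Instrument" and its field are invented for
# illustration only.

class Instrument(BusinessObject):
    """Example business object"""

    def __init__(self, symbol):
        self.symbol = symbol

    def __unicode__(self):
        # UnicodeMixin routes __str__ here on both Python 2 and Python 3
        return u'Instrument(%s)' % self.symbol

    def is_valid(self):
        # concrete override of the abstract hook defined above
        return bool(self.symbol)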
beni55/djangolint
project/lint/parsers.py
Python
isc
1,186
0
import ast
import os


class Parser(object):
    """
    Find all *.py files inside `repo_path` and parse them
    into ast nodes. If a file has syntax errors, a SyntaxError
    object is returned instead of an ast node.
    """

    def __init__(self, repo_path):
        if not os.path.isabs(repo_path):
            raise ValueError('Repository path is not absolute: %s' % repo_path)
        self.repo_path = repo_path

    def walk(self):
        """
        Yield absolut
e paths to all *.py files inside `repo_path` directory. """ for root, dirnames, filenames in os.walk(self.repo_path): for filename in filenames: if filename.endswith('.py'): yield os.path.join(root, filename) def relpath(self, path): return os.path.relpath(path, self.repo_path) def parse_file(self, path): relpath = self.relpath(path) with open(path) as f: content = f.read() try: return (relpath, ast.parse(content, relpath)) except SyntaxError, e: return (relpath, e) def parse(self): return dict(self.parse_file(filepath) for filepath in self.walk())
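

# A short usage sketch, assuming an invented repository path; parse() maps
# each relative path to an ast.Module node, or to the SyntaxError instance
# raised while parsing that file.

if __name__ == '__main__':
    parser = Parser('/abs/path/to/project')
    for relpath, node in parser.parse().items():
        status = 'syntax error' if isinstance(node, SyntaxError) else 'ok'
        print '%s: %s' % (relpath, status)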
hemebond/kapua
forms.py
Python
gpl-3.0
7,905
0.031883
# coding=UTF-8 # Copyright 2011 James O'Neill # # This file is part of Kapua. # # Kapua is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Sof
tware Foundation, either version 3 of the License, or # (at your option) any later version. # # Kapua is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # #
You should have received a copy of the GNU General Public License # along with Kapua. If not, see <http://www.gnu.org/licenses/>. from django.utils.translation import ugettext as _ from django.db import models #adapted from http://www.djangosnippets.org/snippets/494/ #using UN country and 3 char code list from http://unstats.un.org/unsd/methods/m49/m49alpha.htm #correct as of 17th October 2008 COUNTRIES = ( ('AFG', _('Afghanistan')), ('ALA', _('Aland Islands')), ('ALB', _('Albania')), ('DZA', _('Algeria')), ('ASM', _('American Samoa')), ('AND', _('Andorra')), ('AGO', _('Angola')), ('AIA', _('Anguilla')), ('ATG', _('Antigua and Barbuda')), ('ARG', _('Argentina')), ('ARM', _('Armenia')), ('ABW', _('Aruba')), ('AUS', _('Australia')), ('AUT', _('Austria')), ('AZE', _('Azerbaijan')), ('BHS', _('Bahamas')), ('BHR', _('Bahrain')), ('BGD', _('Bangladesh')), ('BRB', _('Barbados')), ('BLR', _('Belarus')), ('BEL', _('Belgium')), ('BLZ', _('Belize')), ('BEN', _('Benin')), ('BMU', _('Bermuda')), ('BTN', _('Bhutan')), ('BOL', _('Bolivia')), ('BES', _('Bonaire, Saint Eustatius and Saba')), ('BIH', _('Bosnia and Herzegovina')), ('BWA', _('Botswana')), ('BRA', _('Brazil')), ('VGB', _('British Virgin Islands')), ('BRN', _('Brunei Darussalam')), ('BGR', _('Bulgaria')), ('BFA', _('Burkina Faso')), ('BDI', _('Burundi')), ('KHM', _('Cambodia')), ('CMR', _('Cameroon')), ('CAN', _('Canada')), ('CPV', _('Cape Verde')), ('CYM', _('Cayman Islands')), ('CAF', _('Central African Republic')), ('TCD', _('Chad')), ('CIL', _('Channel Islands')), ('CHL', _('Chile')), ('CHN', _('China')), ('HKG', _('China - Hong Kong')), ('MAC', _('China - Macao')), ('COL', _('Colombia')), ('COM', _('Comoros')), ('COG', _('Congo')), ('COK', _('Cook Islands')), ('CRI', _('Costa Rica')), ('CIV', _('Cote d\'Ivoire')), ('HRV', _('Croatia')), ('CUB', _('Cuba')), ('CUW', _(u'Curaçao')), ('CYP', _('Cyprus')), ('CZE', _('Czech Republic')), ('PRK', _('Democratic People\'s Republic of Korea')), ('COD', _('Democratic Republic of the Congo')), ('DNK', _('Denmark')), ('DJI', _('Djibouti')), ('DMA', _('Dominica')), ('DOM', _('Dominican Republic')), ('ECU', _('Ecuador')), ('EGY', _('Egypt')), ('SLV', _('El Salvador')), ('GNQ', _('Equatorial Guinea')), ('ERI', _('Eritrea')), ('EST', _('Estonia')), ('ETH', _('Ethiopia')), ('FRO', _('Faeroe Islands')), ('FLK', _('Falkland Islands (Malvinas)')), ('FJI', _('Fiji')), ('FIN', _('Finland')), ('FRA', _('France')), ('GUF', _('French Guiana')), ('PYF', _('French Polynesia')), ('GAB', _('Gabon')), ('GMB', _('Gambia')), ('GEO', _('Georgia')), ('DEU', _('Germany')), ('GHA', _('Ghana')), ('GIB', _('Gibraltar')), ('GRC', _('Greece')), ('GRL', _('Greenland')), ('GRD', _('Grenada')), ('GLP', _('Guadeloupe')), ('GUM', _('Guam')), ('GTM', _('Guatemala')), ('GGY', _('Guernsey')), ('GIN', _('Guinea')), ('GNB', _('Guinea-Bissau')), ('GUY', _('Guyana')), ('HTI', _('Haiti')), ('VAT', _('Holy See (Vatican City)')), ('HND', _('Honduras')), ('HUN', _('Hungary')), ('ISL', _('Iceland')), ('IND', _('India')), ('IDN', _('Indonesia')), ('IRN', _('Iran')), ('IRQ', _('Iraq')), ('IRL', _('Ireland')), ('IMN', _('Isle of Man')), ('ISR', _('Israel')), ('ITA', _('Italy')), ('JAM', _('Jamaica')), ('JPN', _('Japan')), ('JEY', _('Jersey')), ('JOR', _('Jordan')), ('KAZ', _('Kazakhstan')), ('KEN', _('Kenya')), ('KIR', _('Kiribati')), ('KWT', _('Kuwait')), ('KGZ', _('Kyrgyzstan')), ('LAO', _('Lao People\'s Democratic Republic')), ('LVA', _('Latvia')), ('LBN', _('Lebanon')), ('LSO', _('Lesotho')), ('LBR', _('Liberia')), ('LBY', _('Libyan Arab 
Jamahiriya')), ('LIE', _('Liechtenstein')), ('LTU', _('Lithuania')), ('LUX', _('Luxembourg')), ('MKD', _('Macedonia')), ('MDG', _('Madagascar')), ('MWI', _('Malawi')), ('MYS', _('Malaysia')), ('MDV', _('Maldives')), ('MLI', _('Mali')), ('MLT', _('Malta')), ('MHL', _('Marshall Islands')), ('MTQ', _('Martinique')), ('MRT', _('Mauritania')), ('MUS', _('Mauritius')), ('MYT', _('Mayotte')), ('MEX', _('Mexico')), ('FSM', _('Micronesia, Federated States of')), ('MCO', _('Monaco')), ('MNG', _('Mongolia')), ('MNE', _('Montenegro')), ('MSR', _('Montserrat')), ('MAR', _('Morocco')), ('MOZ', _('Mozambique')), ('MMR', _('Myanmar')), ('NAM', _('Namibia')), ('NRU', _('Nauru')), ('NPL', _('Nepal')), ('NLD', _('Netherlands')), ('ANT', _('Netherlands Antilles')), ('NCL', _('New Caledonia')), ('NZL', _('New Zealand')), ('NIC', _('Nicaragua')), ('NER', _('Niger')), ('NGA', _('Nigeria')), ('NIU', _('Niue')), ('NFK', _('Norfolk Island')), ('MNP', _('Northern Mariana Islands')), ('NOR', _('Norway')), ('PSE', _('Occupied Palestinian Territory')), ('OMN', _('Oman')), ('PAK', _('Pakistan')), ('PLW', _('Palau')), ('PAN', _('Panama')), ('PNG', _('Papua New Guinea')), ('PRY', _('Paraguay')), ('PER', _('Peru')), ('PHL', _('Philippines')), ('PCN', _('Pitcairn')), ('POL', _('Poland')), ('PRT', _('Portugal')), ('PRI', _('Puerto Rico')), ('QAT', _('Qatar')), ('KOR', _('Republic of Korea')), ('MDA', _('Republic of Moldova')), ('REU', _('Reunion')), ('ROU', _('Romania')), ('RUS', _('Russian Federation')), ('RWA', _('Rwanda')), ('BLM', _('Saint-Barthelemy')), ('SHN', _('Saint Helena')), ('KNA', _('Saint Kitts and Nevis')), ('LCA', _('Saint Lucia')), ('MAF', _('Saint-Martin (French part)')), ('SPM', _('Saint Pierre and Miquelon')), ('VCT', _('Saint Vincent and the Grenadines')), ('WSM', _('Samoa')), ('SMR', _('San Marino')), ('STP', _('Sao Tome and Principe')), ('SAU', _('Saudi Arabia')), ('SEN', _('Senegal')), ('SRB', _('Serbia')), ('SYC', _('Seychelles')), ('SLE', _('Sierra Leone')), ('SGP', _('Singapore')), ('SXM', _('Sint Maarten (Dutch part)')), ('SVK', _('Slovakia')), ('SVN', _('Slovenia')), ('SLB', _('Solomon Islands')), ('SOM', _('Somalia')), ('ZAF', _('South Africa')), ('SSD', _('South Sudan')), ('ESP', _('Spain')), ('LKA', _('Sri Lanka')), ('SDN', _('Sudan')), ('SUR', _('Suriname')), ('SJM', _('Svalbard and Jan Mayen Islands')), ('SWZ', _('Swaziland')), ('SWE', _('Sweden')), ('CHE', _('Switzerland')), ('SYR', _('Syrian Arab Republic')), ('TJK', _('Tajikistan')), ('THA', _('Thailand')), ('TLS', _('Timor-Leste')), ('TGO', _('Togo')), ('TKL', _('Tokelau')), ('TON', _('Tonga')), ('TTO', _('Trinidad and Tobago')), ('TUN', _('Tunisia')), ('TUR', _('Turkey')), ('TKM', _('Turkmenistan')), ('TCA', _('Turks and Caicos Islands')), ('TUV', _('Tuvalu')), ('UGA', _('Uganda')), ('UKR', _('Ukraine')), ('ARE', _('United Arab Emirates')), ('GBR', _('United Kingdom')), ('TZA', _('United Republic of Tanzania')), ('USA', _('United States of America')), ('VIR', _('United States Virgin Islands')), ('URY', _('Uruguay')), ('UZB', _('Uzbekistan')), ('VUT', _('Vanuatu')), ('VEN', _('Venezuela (Bolivarian Republic of)')), ('VNM', _('Viet Nam')), ('WLF', _('Wallis and Futuna Islands')), ('ESH', _('Western Sahara')), ('YEM', _('Yemen')), ('ZMB', _('Zambia')), ('ZWE', _('Zimbabwe')), ) class CountryField(models.CharField): def __init__(self, *args, **kwargs): kwargs.setdefault('max_length', 3) kwargs.setdefault('choices', COUNTRIES) super(CountryField, self).__init__(*args, **kwargs) def get_internal_type(self): return "CharField"
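

# A hypothetical model sketch showing the intended use of CountryField; the
# "Profile" model and its fields are invented for illustration.

class Profile(models.Model):
    name = models.CharField(max_length=100)
    # stored as the 3-letter code; the translated names act as choices
    country = CountryField(default='NZL')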
pmneila/morphsnakes
test_morphsnakes.py
Python
bsd-3-clause
5,724
0
import numpy as np from morphsnakes import (morphological_chan_vese, morphological_geodesic_active_contour, inverse_gaussian_gradient, circle_level_set, checkerboard_level_set) from numpy.testing import assert_array_equal import pytest def gaussian_blob(): coords = np.mgrid[-5:6, -5:6] sqrdistances = (coords ** 2).sum(0) return np.exp(-sqrdistances / 10) def test_morphsnakes_incorrect_image_shape(): img = np.zeros((10, 10, 3)) ls = np.zeros((10, 9)) with pytest.raises(ValueError): morphological_chan_vese(img, iterations=1, init_level_set=ls) with pytest.raises(ValueError): morphological_geodesic_active_contour(img, iterations=1, init_level_set=ls) def test_morphsnakes_incorrect_ndim(): img = np.zeros((4, 4, 4, 4)) ls = np.zeros((4, 4, 4, 4)) with pytest.raises(ValueError): morphological_chan_vese(img, iterations=1, init_level_set=ls) with pytest.raises(ValueError): morphological_geodesic_active_contour(img, iterations=1,
init_level_set=ls) def test_morphsnakes_black(): img = np.zeros((11, 11)) ls = circle_level_set(img.shape, (5, 5), 3) ref_zeros = np.zeros(img.shape, dtype=np.int8) ref_ones = np.ones(img.shape, dtype=np.int8) acwe_ls = morphological
_chan_vese(img, iterations=6, init_level_set=ls) assert_array_equal(acwe_ls, ref_zeros) gac_ls = morphological_geodesic_active_contour(img, iterations=6, init_level_set=ls) assert_array_equal(gac_ls, ref_zeros) gac_ls2 = morphological_geodesic_active_contour(img, iterations=6, init_level_set=ls, balloon=1, threshold=-1, smoothing=0) assert_array_equal(gac_ls2, ref_ones) assert acwe_ls.dtype == gac_ls.dtype == gac_ls2.dtype == np.int8 def test_morphsnakes_simple_shape_chan_vese(): img = gaussian_blob() ls1 = circle_level_set(img.shape, (5, 5), 3) ls2 = circle_level_set(img.shape, (5, 5), 6) acwe_ls1 = morphological_chan_vese(img, iterations=10, init_level_set=ls1) acwe_ls2 = morphological_chan_vese(img, iterations=10, init_level_set=ls2) assert_array_equal(acwe_ls1, acwe_ls2) assert acwe_ls1.dtype == acwe_ls2.dtype == np.int8 def test_morphsnakes_simple_shape_geodesic_active_contour(): img = np.float_(circle_level_set((11, 11), (5, 5), 3.5)) gimg = inverse_gaussian_gradient(img, alpha=10.0, sigma=1.0) ls = circle_level_set(img.shape, (5, 5), 6) ref = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int8) gac_ls = morphological_geodesic_active_contour(gimg, iterations=10, init_level_set=ls, balloon=-1) assert_array_equal(gac_ls, ref) assert gac_ls.dtype == np.int8 def test_init_level_sets(): image = np.zeros((6, 6)) checkerboard_ls = morphological_chan_vese(image, 0, 'checkerboard') checkerboard_ref = np.array([[0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [1, 1, 1, 1, 1, 0]], dtype=np.int8) circle_ls = morphological_geodesic_active_contour(image, 0, 'circle') circle_ref = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 0]], dtype=np.int8) ellipsoid_ls = morphological_chan_vese(np.zeros((7, 9)), 0, 'ellipsoid') ellipsoid_ref = np.array( [[0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0]], dtype=np.uint8 ) assert_array_equal(checkerboard_ls, checkerboard_ref) assert_array_equal(circle_ls, circle_ref) assert_array_equal(ellipsoid_ls, ellipsoid_ref) def test_morphsnakes_3d(): image = np.zeros((7, 7, 7)) evolution = [] def callback(x): evolution.append(x.sum()) ls = morphological_chan_vese(image, 5, 'circle', iter_callback=callback) # Check that the initial circle level set is correct assert evolution[0] == 81 # Check that the final level set is correct assert ls.sum() == 0 # Check that the contour is shrinking at every iteration for v1, v2 in zip(evolution[:-1], evolution[1:]): assert v1 >= v2 if __name__ == "__main__": np.testing.run_module_suite()
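

# A small usage sketch of the call the tests above exercise; the blob image
# and iteration count are illustrative.

def example_acwe_on_blob():
    img = gaussian_blob()
    init_ls = circle_level_set(img.shape, (5, 5), 3)
    # evolve the contour for 10 steps; returns an int8 mask
    return morphological_chan_vese(img, iterations=10, init_level_set=init_ls)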
cgwalters/pykickstart
tests/commands/user.py
Python
gpl-2.0
4,536
0.005291
# # Martin Gracik <mgracik@redhat.com> # # Copyright 2009 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, modify, # copy, or redistribute it subject to the terms and conditions of the GNU # General Public License v.2. This program is distributed in the hope that it # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat # trademarks that are incorporated in the source code or documentation are not # subject to the GNU General Public License and may only be used or replicated # with the express permission of Red Hat, Inc. # import unittest from tests.baseclass import CommandTest, CommandSequenceTest from pykickstart.errors import KickstartParseError, KickstartValueError class FC6_TestCase(CommandTest): command = "user" def runTest(self): # pass self.assert_parse("user --name=user", "user --name=user\n") self.assert_parse("user --name=user --groups=grp1,grp2 --homedir=/home/user --shell=/bin/bash --uid=1000 --password=secret --iscrypted", "user --groups=grp1,grp2 --homedir=/home/user --name=user --password=secret --iscrypted --shell=/bin/bash --uid=1000\n") self.assert_parse("user --name=user --groups=grp1", "user --groups=grp1 --name=user\n") self.assert_parse("user --name=user --homedir=/home/user --shell=/bin/bash", "user --homedir=/home/user --name=user --shell=/bin/bash\n") self.assert_parse("user --name=user --password=secret", "user --name=user --password=secret\n") self.assert_parse("user --name=user --uid=1000", "user --name=user --uid=1000\n") self.assertFalse(self.assert_parse("user --name=user") == None) self.assertTrue(self.assert_parse("user --name=userA") != \ self.assert_parse("user --name=userB")) self.assertFalse(self.assert_parse("user --name=userA") == \ self.assert_parse("user --name=userB")) # fail # missing required option --name self.assert_parse_error("user", KickstartValueError) # --name requires an argument self.assert_parse_error("user --name", KickstartParseError) # --uid requires int argument self.assert_parse_error("user --name=user --uid=id", KickstartParseError) # unknown option self.assert_parse_error("user --name=user --unknown=value", KickstartParseError) # required option arguments self.assert_parse_error("user --name=user --groups", KickstartParseError) self.assert_parse_error("user --name=user --homedir", KickstartParseError) self.assert_parse_error("user --name=user --shell", KickstartParseError) self.assert_parse_error("user --name=user --uid", KickstartParseError) self.assert_parse_error("user --name=user --password", KickstartParseError) class FC6_Duplicate_TestCase(CommandSequenceTest): def runTest(self): # pass - can use the command twice, as long as they have different names self.assert_parse(""" user --name=userA user --name=userB""") # fail - can't have two users with the same name self.assert_parse_error(""" user --name=userA user --name=userA""", UserWarning) class F8_TestCase(FC6_TestCase): def runTest(self): # run FC6 test case FC6
_TestCase.runTest(self) # pass self.assert_parse("user --name=user --lock --plaintext", "user --name=user --lock\n") self.assert_parse("user --name=user --lock", "user --name=user --lock\n") self.assert_parse("user --name=user --plaintext", "user --name=user\n") # fail class F12_TestCase(F8_TestCase): def runTest(self): # run F8 test case F8_TestCase.runTest(self) # pass se
lf.assert_parse("user --name=user --gecos=\"User Name\"", "user --name=user --gecos=\"User Name\"\n") class F19_TestCase(F12_TestCase): def runTest(self): # run F12 test case F12_TestCase.runTest(self) # pass self.assert_parse("user --name=user --gid=500", "user --name=user --gid=500\n") if __name__ == "__main__": unittest.main()
calaldees/libs
python3/calaldees/loop.py
Python
gpl-3.0
2,814
0.002843
import time DEFAULT_SLEEP_FACTOR = 0.8 class Loop(object): class LoopInterruptException(Exception): pass def __ini
t__(self, fps, timeshift=0, sleep_factor=DEFAULT_SLEEP_FACTOR):
        """
        Sleep factor could be set to 1.0, as Python 3.5 respects time better
        """
        self.set_period(fps, timeshift)
        self.profile_timelog = []
        self.sleep_factor = sleep_factor

    def set_period(self, fps, timeshift=0):
        assert fps > 0, 'fps rate must be provided'
        assert timeshift >= 0, 'timeshift must be positive'
        self.fps = fps
        self.period = 1 / fps
        self.s
tart_time = time.time() - timeshift
        self.previous_time = time.time() - self.period  # Previous time is one frame ago to trigger immediately
        return self.period

    def get_frame(self, timestamp):
        return int((timestamp - self.start_time) // self.period)

    def is_running(self):
        return self.running

    def run(self):
        self.running = True
        try:
            while self.is_running() and self.period:
                self.current_time = time.time()
                current_frame = self.get_frame(self.current_time)
                previous_frame = self.get_frame(self.previous_time)
                for frame in range(previous_frame, current_frame):
                    self.render(frame + 1)
                self.previous_time = self.current_time
                sleep_time = (self.start_time + (self.period * (current_frame + 1)) - time.time()) * self.sleep_factor
                if sleep_time > 0:
                    time.sleep(sleep_time)
        except KeyboardInterrupt:
            pass
        except self.LoopInterruptException:
            pass
        self.close()

    def close(self):
        pass

    def render(self, frame):
        """
        This method is to be overridden under normal operation.
        The implementation here is useful for measuring the accuracy of the
        rendered frames. self.profile_timelog contains the time the rendered
        frame was out from its expected time. This is useful to run and average.
        """
        #print('{0} {1}'.format(frame, time.time()))
        self.profile_timelog.append(self.current_time - (self.start_time + (self.period * frame)))
        if frame > (self.fps*20):
            average_frame_inaccuracy = sum(self.profile_timelog)/len(self.profile_timelog)
            average_off_percent = average_frame_inaccuracy / self.period
            variance = max(self.profile_timelog) - min(self.profile_timelog)
            print('average_frame_inaccuracy: {0} average_off_percent: {1:.2%} variance: {2}'.format(average_frame_inaccuracy, average_off_percent, variance))
            self.running = False


if __name__ == "__main__":
    Loop(60).run()
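

# A minimal subclass sketch of the intended override point; the stop
# condition is illustrative.

class PrintLoop(Loop):
    def render(self, frame):
        # called once per frame at the requested fps
        print('frame {0}'.format(frame))
        if frame >= self.fps * 2:  # stop after roughly two seconds
            self.running = False

# PrintLoop(30).run()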
chriscz/pySorter
setup.py
Python
gpl-3.0
1,350
0.001481
import os import sys from setuptools import setup, find_packages, Command from commands import * tests_require=['pytest-cov', 'pytest', 'testfixtures'] setup( name=name, version=read_version(), description='A regex based file organizer', long_description=open(os.path.join(base_dir, 'description.txt')).read().strip(), license='MPL', url='https://github.com/chriscz/pySorter', author='Chris Coetz
ee', author_email='chriscz93@gmail.com', packages=find_packages(), setup_requires=['pytest-runner'], tests_require=tests_require, include_package_data=True, zip_safe=False, cmdclass={ 'test': PyTestCommand, 'coverage': CoverageCommand, 'bump': BumpVersionCommand, }, entry_points={ "console_scripts": ['pysorter=pysorter.commandline:main'] }, extras_require=dict( build=['twine', 'wheel', 'setuptools-git', 'sphinx'],
test=['pytest', 'testfixtures', 'pytest-cov'], ), classifiers=[ "Development Status :: 4 - Beta", "Environment :: Console", "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)", "Topic :: Utilities", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", ] )
tmtowtdi/MontyLacuna
lib/lacuna/buildings/boring/ssld.py
Python
mit
213
0.032864
from lacuna.building import MyBuilding class ssld(MyBuilding): path = 'ssld' def __init__( self, client, body_id:int = 0, building_id:int = 0 ): super()._
_init
__( client, body_id, building_id )
vstoykov/django-cms
cms/middleware/toolbar.py
Python
bsd-3-clause
5,441
0.001838
# -*- coding: utf-8 -*- """ Edit Toolbar middleware """ from cms.utils.conf import get_cms_setting from cms.toolbar.toolbar import CMSToolbar from cms.utils.i18n import force_language from django.contrib.admin.models import LogEntry, ADDITION, CHANGE from menus.menu_pool import menu_pool from django.http import HttpResponse from django.template.loader import render_to_string from cms.utils.placeholder import get_toolbar_plugin_struct def toolbar_plugin_processor(instance, placeholder, rendered_content, original_context): from cms.plugin_pool import plugin_pool original_context.push() child_plugin_classes = [] plugin_class = instance.get_plugin_class() if plugin_class.allow_children: inst, plugin = instance.get_plugin_instance() page = original_context['request'].current_page children = [plugin_pool.get_plugin(cls) for cls in plugin.get_child_classes(placeholder, page)] # Builds the list of dictionaries containing module, name and value for the plugin dropdowns child_plugin_classes = get_toolbar_plugin_struct(children, placeholder.slot, placeholder.page, parent=plugin_class) instance.placeholder = placeholder request = original_context['request'] with force_language(request.toolbar.toolbar_language): data = { 'instance': instance, 'rendered_content': rendered_content, 'child_plugin_classes': child_plugin_classes, 'edit_url': placeholder.get_edit_url(instance.pk), 'add_url': placeholder.get_ad
d_url(), 'delete_url': placeholder.get_delete_url(instance.pk), 'move_url': placeholder.get_move_url(), } original_context.update(data) plugin_class = instance.get_plugin_class() template = plugin_class.frontend_edit_template output = render_to_string(template, original_context).strip() original_context.pop() return output class ToolbarMi
ddleware(object): """ Middleware to set up CMS Toolbar. """ def process_request(self, request): """ If we should show the toolbar for this request, put it on request.toolbar. Then call the request_hook on the toolbar. """ edit_on = get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON') edit_off = get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF') build = get_cms_setting('CMS_TOOLBAR_URL__BUILD') if request.user.is_staff or request.user.is_anonymous(): if edit_on in request.GET and not request.session.get('cms_edit', False): if not request.session.get('cms_edit', False): menu_pool.clear() request.session['cms_edit'] = True if request.session.get('cms_build', False): request.session['cms_build'] = False if edit_off in request.GET and request.session.get('cms_edit', True): if request.session.get('cms_edit', True): menu_pool.clear() request.session['cms_edit'] = False if request.session.get('cms_build', False): request.session['cms_build'] = False if build in request.GET and not request.session.get('cms_build', False): request.session['cms_build'] = True else: request.session['cms_build'] = False request.session['cms_edit'] = False if request.user.is_staff: try: request.cms_latest_entry = LogEntry.objects.filter( user=request.user, action_flag__in=(ADDITION, CHANGE) ).only('pk').order_by('-pk')[0].pk except IndexError: request.cms_latest_entry = -1 request.toolbar = CMSToolbar(request) def process_view(self, request, view_func, view_args, view_kwarg): response = request.toolbar.request_hook() if isinstance(response, HttpResponse): return response def process_response(self, request, response): from django.utils.cache import add_never_cache_headers found = False if hasattr(request, 'toolbar') and request.toolbar.edit_mode: found = True for placeholder in getattr(request, 'placeholders', []): if not placeholder.cache_placeholder: found = True break if found: add_never_cache_headers(response) if hasattr(request, 'user') and request.user.is_staff and response.status_code != 500: try: pk = LogEntry.objects.filter( user=request.user, action_flag__in=(ADDITION, CHANGE) ).only('pk').order_by('-pk')[0].pk if hasattr(request, 'cms_latest_entry') and request.cms_latest_entry != pk: log = LogEntry.objects.filter(user=request.user, action_flag__in=(ADDITION, CHANGE))[0] request.session['cms_log_latest'] = log.pk # If there were no LogEntries, just don't touch the session. # Note that in the case of a user logging-in as another user, # request may have a cms_latest_entry attribute, but there are no # LogEntries for request.user. except IndexError: pass return response
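# An illustrative settings.py fragment (not part of this module) showing how
# the middleware above is typically activated; the surrounding middleware
# stack shown here is an assumption:
#
#     MIDDLEWARE_CLASSES = (
#         'django.contrib.sessions.middleware.SessionMiddleware',
#         'django.contrib.auth.middleware.AuthenticationMiddleware',
#         'cms.middleware.toolbar.ToolbarMiddleware',
#     )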
hopshadoop/hops-util-py
hops/experiment_impl/parallel/grid_search.py
Python
apache-2.0
4,463
0.005602
""" Gridsearch implementation """ from hops import hdfs, tensorboard, devices from hops.experiment_impl.util import experiment_utils from hops.experiment import Direction import threading import six import time import os def _run(sc, train_fn, run_id, args_dict, direction=Direction.MAX, local_logdir=False, name="no-name", optimization_key=None): """ Run the wrapper function with each hyperparameter combination as specified by the dictionary Args: sc: train_fn:
args_dict: direction: local_logdir: name: Returns: """ app_id = str(sc.applicationId) num_executions = 1 if direction.upper() != Direction.MAX and direction.upper() != Direction.MIN: raise ValueError('Invalid direction ' + direction + ', must be Direction.MAX or Direction.MIN') arg_lists = list(args_dict.values()) currentLen = len(arg_lists[0]) for i in range(len(arg_lists)): if currentLen != len(arg_lists[i]):
raise ValueError('Length of each function argument list must be equal') num_executions = len(arg_lists[i]) #Each TF task should be run on 1 executor nodeRDD = sc.parallelize(range(num_executions), num_executions) #Make SparkUI intuitive by grouping jobs sc.setJobGroup(os.environ['ML_ID'], "{} | Grid Search".format(name)) #Force execution on executor, since GPU is located on executor nodeRDD.foreachPartition(_prepare_func(app_id, run_id, train_fn, args_dict, local_logdir, optimization_key)) arg_count = six.get_function_code(train_fn).co_argcount arg_names = six.get_function_code(train_fn).co_varnames exp_dir = experiment_utils._get_logdir(app_id, run_id) max_val, max_hp, min_val, min_hp, avg, max_return_dict, min_return_dict = experiment_utils._get_best(args_dict, num_executions, arg_names, arg_count, exp_dir, optimization_key) param_combination = "" best_val = "" return_dict = {} if direction.upper() == Direction.MAX: param_combination = max_hp best_val = str(max_val) return_dict = max_return_dict elif direction.upper() == Direction.MIN: param_combination = min_hp best_val = str(min_val) return_dict = min_return_dict print('Finished Experiment \n') best_dir = exp_dir + '/' + param_combination return best_dir, experiment_utils._get_params_dict(best_dir), best_val, return_dict def _prepare_func(app_id, run_id, train_fn, args_dict, local_logdir, optimization_key): """ Args: app_id: run_id: train_fn: args_dict: local_logdir: Returns: """ def _wrapper_fun(iter): """ Args: iter: Returns: """ for i in iter: executor_num = i experiment_utils._set_ml_id(app_id, run_id) tb_hdfs_path = '' hdfs_exec_logdir = '' t = threading.Thread(target=devices._print_periodic_gpu_utilization) if devices.get_num_gpus() > 0: t.start() try: #Arguments if args_dict: param_string, params, args = experiment_utils.build_parameters(train_fn, executor_num, args_dict) hdfs_exec_logdir, hdfs_appid_logdir = experiment_utils._create_experiment_subdirectories(app_id, run_id, param_string, 'grid_search', params=params) logfile = experiment_utils._init_logger(hdfs_exec_logdir) tb_hdfs_path, tb_pid = tensorboard._register(hdfs_exec_logdir, hdfs_appid_logdir, executor_num, local_logdir=local_logdir) print(devices._get_gpu_info()) print('-------------------------------------------------------') print('Started running task ' + param_string) task_start = time.time() retval = train_fn(*args) task_end = time.time() experiment_utils._handle_return(retval, hdfs_exec_logdir, optimization_key, logfile) time_str = 'Finished task ' + param_string + ' - took ' + experiment_utils._time_diff(task_start, task_end) print(time_str) print('Returning metric ' + str(retval)) print('-------------------------------------------------------') except: raise finally: experiment_utils._cleanup(tensorboard, t) return _wrapper_fun
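

# A sketch of the inputs _run() expects, with invented names and values:
# every list in args_dict must have the same length, that length sets the
# number of executions, and the wrapped function's argument names must
# match the dict keys (each execution appears to take the i-th value of
# every list).

def example_train_fn(learning_rate, dropout):
    # train a model and return the metric to optimize (stub value)
    return {'metric': 0.0}

example_args_dict = {
    'learning_rate': [0.001, 0.01, 0.1],
    'dropout': [0.3, 0.4, 0.5],
}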
datamachine/twx
docs/conf.py
Python
mit
10,106
0.006333
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # TWX documentation build configuration file, created by # sphinx-quickstart on Sat Jun 27 15:07:02 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'TWX' copyright = '2015, Vince Castellano, Phillip Lopo' author = 'Vince Castellano, Phillip Lopo' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.0' # The full version, including alpha/beta/rc tags. release = '1.0b3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by d
efault. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #m
odindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} html_theme_options = { 'github_user': 'datamachine', 'github_repo': 'twx', 'description': 'Telegram Bot API and MTProto Clients', 'github_banner': True, 'github_button': True, 'show_powered_by': False, #'link': '#0088cc', #'sidebar_link': '#0088cc', #'anchor': '#0088cc', 'gray_1': '#0088cc', 'gray_2': '#ecf3f8', #'gray_3': '#0088cc', #'pre_bg': '#ecf3f8', #'font_family': "'Lucida Grande', 'Lucida Sans Unicode', Arial, Helvetica, Verdana, sans-serif", #'head_font_family': "'Lucida Grande', 'Lucida Sans Unicode', Arial, Helvetica, Verdana, sans-serif" } # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. 
#html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'TWXdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # a
xyuanmu/you-get
src/you_get/extractors/kakao.py
Python
mit
1,771
0.002823
#!/usr/bin/env python from ..common import * from .universal import
*

__all__ = ['kakao_download']


def kakao_download(url, output_dir='.', info_only=False, **kwargs):
    json_request_url = 'https://videofarm.daum.net/controller/api/closed/v1_2/IntegratedMovieData.json?vid={}'

    # Playlists are not supported in this implementation, so strip the playlist
    # parameters from the URL. To support playlists, this would need to change.
    if re.search('playlistId', url):
        url = re.search(r"(.+)\?.+?", url).group(1)

    page = get_conten
t(url) try: vid = re.search(r"<meta name=\"vid\" content=\"(.+)\">", page).group(1) title = re.search(r"<meta name=\"title\" content=\"(.+)\">", page).group(1) meta_str = get_content(json_request_url.format(vid)) meta_json = json.loads(meta_str) standard_preset = meta_json['output_list']['standard_preset'] output_videos = meta_json['output_list']['output_list'] size = '' if meta_json['svcname'] == 'smr_pip': for v in output_videos: if v['preset'] == 'mp4_PIP_SMR_480P': size = int(v['filesize']) break else: for v in output_videos: if v['preset'] == standard_preset: size = int(v['filesize']) break video_url = meta_json['location']['url'] print_info(site_info, title, 'mp4', size) if not info_only: download_urls([video_url], title, 'mp4', size, output_dir, **kwargs) except: universal_download(url, output_dir, merge=kwargs['merge'], info_only=info_only, **kwargs) site_info = "tv.kakao.com" download = kakao_download download_playlist = playlist_not_supported('kakao')
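# Illustration (a sketch added for clarity, not part of the upstream extractor):
# the preset-selection step above, isolated into a standalone function. The
# meta_json dict below is hypothetical sample data shaped like the fields the
# extractor reads; the field names come from the code above, the values are
# made up.
def pick_filesize(meta_json):
    standard_preset = meta_json['output_list']['standard_preset']
    output_videos = meta_json['output_list']['output_list']
    # 'smr_pip' responses are special-cased to a fixed preset name
    wanted = 'mp4_PIP_SMR_480P' if meta_json['svcname'] == 'smr_pip' else standard_preset
    for v in output_videos:
        if v['preset'] == wanted:
            return int(v['filesize'])
    return None  # no matching preset found

meta_json = {  # hypothetical payload
    'svcname': 'kakao_tv',
    'output_list': {
        'standard_preset': 'mp4_720p',
        'output_list': [{'preset': 'mp4_480p', 'filesize': '1000'},
                        {'preset': 'mp4_720p', 'filesize': '2000'}],
    },
}
assert pick_filesize(meta_json) == 2000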
falbassini/googleads-dfa-reporting-samples
python/v2.0/download_floodlight_tag.py
Python
apache-2.0
1,952
0.004098
#!/usr/bin/python # # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on
an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specifi
c language governing permissions and # limitations under the License. """This example downloads activity tags for a given floodlight activity.""" import argparse import sys from apiclient import sample_tools from oauth2client import client # Declare command-line flags. argparser = argparse.ArgumentParser(add_help=False) argparser.add_argument( 'profile_id', type=int, help='The ID of the profile to download tags for') argparser.add_argument( 'activity_id', type=int, help='The ID of the floodlight activity to download tags for') def main(argv): # Authenticate and construct service. service, flags = sample_tools.init( argv, 'dfareporting', 'v2.0', __doc__, __file__, parents=[argparser], scope=['https://www.googleapis.com/auth/dfareporting', 'https://www.googleapis.com/auth/dfatrafficking']) profile_id = flags.profile_id activity_id = flags.activity_id try: # Construct the request. request = service.floodlightActivities().generatetag( profileId=profile_id, floodlightActivityId=activity_id) # Execute request and print response. response = request.execute() print response['floodlightActivityTag'] except client.AccessTokenRefreshError: print ('The credentials have been revoked or expired, please re-run the ' 'application to re-authorize') if __name__ == '__main__': main(sys.argv)
zgoda/zakwasy
tests/test_sddetails.py
Python
mit
2,004
0.001996
from flask import url_for from tests import ZKWTestCase from zkw.models import Sourdough, User class SourdoughDetailsPageTests(ZKWTestCase): def setUp(self): super(SourdoughDetailsPageTests, self).setUp() self.user_1 = User.get_by_email('user_1@example.com') self.active_sd = Sourdough.query.filter_by(name='Arnulf').first() self.active_sd_url = url_for('sd.details', sourdough_id=self.active_sd.id)
self.inact
ive_sd = Sourdough.query.filter_by(name='inactive').first() self.inactive_sd_url = url_for('sd.details', sourdough_id=self.inactive_sd.id) def test_anon_inactive_page(self): """ Test what anonymous user sees when she tries to access inactive sourdough page. Expected outcome: HTTP 404 """ with self.app.test_client() as client: rv = client.get(self.inactive_sd_url) self.assert404(rv) def test_loggedin_inactive_page(self): """ Test what logged in user sees when she tries to access inactive sourdough page. Expected outcome: sourdough details page without form """ action_text = 'action="%s"' % self.inactive_sd_url with self.app.test_client() as client: self.login(client, self.user_1.email) rv = client.get(self.inactive_sd_url) self.assert200(rv) page = self.page_content(rv) self.assertNotIn(action_text, page) def test_owner_inactive_page(self): """ Test what owner sees when she tries to access inactive sourdough page. Expected outcome: sourdough details page with form """ action_text = 'action="%s"' % self.inactive_sd_url with self.app.test_client() as client: self.login(client, self.inactive_sd.user.email) rv = client.get(self.inactive_sd_url) self.assert200(rv) page = self.page_content(rv) self.assertIn(action_text, page)
uranusjr/django
tests/postgres_tests/test_json.py
Python
bsd-3-clause
13,888
0.001368
import datetime import uuid from decimal import Decimal from django.core import checks, exceptions, serializers from django.core.serializers.json import DjangoJSONEncoder from django.forms import CharField, Form, widgets from django.test.utils import isolate_apps from django.utils.html import escape from . import PostgreSQLTestCase from .models import JSONModel, PostgreSQLModel try: from django.contrib.postgres import forms from django.contrib.postgres.fields import JSONField except ImportError: pass class TestSaveLoad(PostgreSQLTestCase): def test_null(self): instance = JSONModel() instance.save() loaded = JSONModel.objects.get() self.assertIsNone(loaded.field) def test_empty_object(self): instance = JSONModel(field={}) instance.save() loaded = JSONModel.objects.get() self.assertEqual(loaded.field, {}) def test_empty_list(self): instance = JSONModel(field=[]) instance.save() loaded = JSONModel.objects.get() self.assertEqual(loaded.field, []) def test_boolean(self): instance = JSONModel(field=True) instance.save() loaded = JSONModel.objects.get() self.assertIs(loaded.field, True) def test_string(self): instance = JSONModel(field='why?') instance.save() loaded = JSONModel.objects.get() self.assertEqual(loaded.field, 'why?') def test_number(self): instance = JSONModel(field=1) instance.save() loaded = JSONModel.objects.get() self.assertEqual(loaded.field, 1) def test_realistic_object(self): obj = { 'a': 'b', 'c': 1, 'd': ['e', {'f': 'g'}], 'h': True, 'i': False, 'j': None, } instance = JSONModel(field=obj) instance.save() loaded = JSONModel.objects.get() self.assertEqual(loaded.field, obj) def test_custom_encoding(self): """ JSONModel.field_custom has a custom DjangoJSONEncoder. """ some_uuid = uuid.uuid4() obj_before = { 'date': datetime.date(2016, 8, 12), 'datetime': datetime.datetime(2016, 8, 12, 13, 44, 47, 575981), 'decimal': Decimal('10.54'), 'uuid': some_uuid, } obj_after = { 'date': '2016-08-12', 'datetime': '2016-08-12T13:44:47.575', 'decimal': '10.54', 'uuid': str(some_uuid), } JSONModel.objects.create(field_custom=obj_before) loaded = JSONModel.objects.get() self.assertEqual(loaded.field_custom, obj_after) class TestQuerying(PostgreSQLTestCase): @classmethod def setUpTestData(cls): cls.objs = [ JSONModel.objects.create(field=None), JSONModel.objects.create(field=True), JSONModel.objects.create(field=False), JSONModel.objects.create(field='yes'), JSONModel.objects.create(field=7), JSONModel.objects.create(field=[]), JSONModel.objects.create(field={}), JSONModel.objects.create(field={ 'a': 'b', 'c': 1, }), JSONModel.objects.create(field={ 'a': 'b', 'c': 1, 'd': ['e', {'f': 'g'}], 'h': True, 'i': False, 'j': None, 'k': {'l': 'm'}, }), JSONModel.objects.create(field=[1, [2]]), JSONModel.objects.create(field={ 'k': True, 'l': False, }), JSONModel.objects.create(field={'foo': 'bar'}), ] def test_exact(self): self.assertSequenceEqual( JSONModel.objects.filter(field__exact={}), [self.objs[6]] ) def test_exact_complex(self): self.assertSequenceEqual( JSONModel.objects.filter(field__exact={'a': 'b', 'c': 1}), [self.objs[7]] ) def test_isnull(self): self.assertSequenceEqual( JSONModel.objects.filter(field__isnull=True), [self.objs[0]] ) def test_isnull_key(self): # key__isnull works the same as has_key='key'. self.assertSequenceEqual( JSONModel.objects.filter(field__a__isnull=True), self.objs[:7] + self.objs[9:] ) self.assertSequenceEqual( JSONModel.objects.filter(field__a__isnul
l=False), [self.objs[7], self.objs[8]] ) def test_contains(self): self.assertSequenceEqual( JSONModel.objects.filter(field__contains={'a': 'b'}), [self.objs[7], self.objs[8]] ) def test_contained_by(self): self.assertSequenceEqual( JSONModel.objects.filter(field__contained_by={'a': 'b', 'c': 1, 'h': True}), [self.objs[
6], self.objs[7]] ) def test_has_key(self): self.assertSequenceEqual( JSONModel.objects.filter(field__has_key='a'), [self.objs[7], self.objs[8]] ) def test_has_keys(self): self.assertSequenceEqual( JSONModel.objects.filter(field__has_keys=['a', 'c', 'h']), [self.objs[8]] ) def test_has_any_keys(self): self.assertSequenceEqual( JSONModel.objects.filter(field__has_any_keys=['c', 'l']), [self.objs[7], self.objs[8], self.objs[10]] ) def test_shallow_list_lookup(self): self.assertSequenceEqual( JSONModel.objects.filter(field__0=1), [self.objs[9]] ) def test_shallow_obj_lookup(self): self.assertSequenceEqual( JSONModel.objects.filter(field__a='b'), [self.objs[7], self.objs[8]] ) def test_deep_lookup_objs(self): self.assertSequenceEqual( JSONModel.objects.filter(field__k__l='m'), [self.objs[8]] ) def test_shallow_lookup_obj_target(self): self.assertSequenceEqual( JSONModel.objects.filter(field__k={'l': 'm'}), [self.objs[8]] ) def test_deep_lookup_array(self): self.assertSequenceEqual( JSONModel.objects.filter(field__1__0=2), [self.objs[9]] ) def test_deep_lookup_mixed(self): self.assertSequenceEqual( JSONModel.objects.filter(field__d__1__f='g'), [self.objs[8]] ) def test_deep_lookup_transform(self): self.assertSequenceEqual( JSONModel.objects.filter(field__c__gt=1), [] ) self.assertSequenceEqual( JSONModel.objects.filter(field__c__lt=5), [self.objs[7], self.objs[8]] ) def test_usage_in_subquery(self): self.assertSequenceEqual( JSONModel.objects.filter(id__in=JSONModel.objects.filter(field__c=1)), self.objs[7:9] ) def test_iexact(self): self.assertTrue(JSONModel.objects.filter(field__foo__iexact='BaR').exists()) self.assertFalse(JSONModel.objects.filter(field__foo__iexact='"BaR"').exists()) def test_icontains(self): self.assertFalse(JSONModel.objects.filter(field__foo__icontains='"bar"').exists()) def test_startswith(self): self.assertTrue(JSONModel.objects.filter(field__foo__startswith='b').exists()) def test_istartswith(self): self.assertTrue(JSONModel.objects.filter(field__foo__istartswith='B').exists()) def test_endswith(self): self.assertTrue(JSONModel.objects.filter(field__foo__endswith='r').exists()) def test_iendswith(self): self.assertTrue(JSONModel.objects.filter(field__foo__iendswith='R').exists()) def test_regex(self): self.assertTrue(JSONModel.objects.filter(field__foo__regex=r'^bar$').exists()) def test_iregex(self): self.assertTrue(JSONModel.objects.filter(field__foo__iregex=r'^bAr$').exists()) @isolate_apps('postgres_tests') class TestChecks(PostgreSQLTestCase):
vacancy/TensorArtist
tartist/plugins/trainer_enhancer/progress.py
Python
mit
1,402
0.003566
# -*- coding:utf8 -*- # File : progress.py # Author : Jiayuan Mao # Email : maojiayuan@gmail.com # Date : 2/26/17 # # This file is part of TensorArtist. from tartist.core.utils.thirdparty import get_tqdm_defaults import tqdm import numpy as np def enable_epoch_progress(trainer): pbar = None def epoch_progress_on_iter_after(trainer, inp, out): nonlocal pbar if pbar is None: pbar = tqdm.tqdm(total=trainer.epoch_size, leave=False, initial=trainer.iter % trainer.epoch_size, **get_tqdm_defaults()) desc = 'Iter={}'.format(trainer.iter) if 'error' in trainer.runtime: desc += ', error={:.4f}
'.format(trainer.runtime['error']) for k in sorted(out.keys()): v = out[k] if isinstance(v, (str, int, float, np.ndarray, np.float32, np.float64, np.int32, np.int64)): try: v = float(v) desc += ', {}={:.4f}'.format(k, v) except ValueError: pass pbar.set_description(desc) pbar.update() def epoch_progress_on_epoch_after(
trainer): nonlocal pbar pbar.close() pbar = None trainer.register_event('iter:after', epoch_progress_on_iter_after, priority=25) trainer.register_event('epoch:after', epoch_progress_on_epoch_after, priority=5)
uperetz/AstroTools
fitter/_modeling.py
Python
apache-2.0
2,367
0.00338
from scipy.optimize import curve_fit from n
umpy import log, isnan class NotAModel(Exception): pass def chisq(self, result=None): if result is None: result = self.result return ((self.data.cts(row=True)-result)**2/self.data.errors(row=True)**2).sum() def cstat(self, result): if result is None: result = self.result data = self.dat
a.counts result = result*self.data.exposure C = result+data*(log(data)-log(result)-1) return 2*C[~isnan(C)].sum() def reduced_chisq(self): return self.chisq(self.result)/(len(self.data.channels)-len(self.getThawed())) def append(self, *args): for model in args: try: model._calculate model.freeze model.thaw model.calculate model.setp self.models.append(model) except AttributeError: raise self.NotAModel(model, model.__class__) if len(self.models): self.activate() def delete(self, index): # Prevent bad name access self.models[index] = None def activate(self, index=-1): self.current = self.models[index] self.currentindex = index def nameModel(self, index, name): setattr(self, name, lambda: self.activate(index)) def energies(self): return self.resp.ebinAvg def tofit(self, elist, *args): res = self.current.tofit(elist, *args) return self.resp.convolve_channels(res) def toMinimize(self, args): s = self.stat(self.tofit(self.energies(), *args)) return s def fit(self): model = self.current args = self.initArgs() bestfit, self.errs = curve_fit(self.tofit, self.energies(), self.data.cts(row=True), p0=args, sigma=self.data.errors(row=True), absolute_sigma=True, epsfcn=self.eps) self.stderr = dict(zip(model.getThawed(), [self.errs[j][j]**0.5 for j in range(len(self.errs))])) # ftol = 2.220446049250313e-09 # bestfit = minimize(self.toMinimize,args,method="L-BFGS-B",options={'ftol':ftol}) # if not bestfit.success: # raise ValueError("-E- Failed fit with: "+bestfit.message.decode('unicode-escape')) # self.stderr = dict(zip(model.getThawed(),sqrt(abs(max(1,bestfit.fun)*ftol*diag(bestfit.hess_inv.todense()))))) # self.calc(dict(zip(model.getThawed(),bestfit.x))) self.calc(dict(zip(model.getThawed(), bestfit)))
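# Illustration (a standalone sketch, not part of the upstream module): the
# chi-squared statistic computed by chisq() above, with hypothetical counts,
# model values, and per-bin errors.
import numpy as np

counts = np.array([10.0, 12.0, 9.0, 11.0])   # observed counts (made up)
model = np.array([10.5, 11.0, 10.0, 10.5])   # model prediction (made up)
errors = np.sqrt(counts)                     # assuming Poisson errors here

chi2 = (((counts - model) ** 2) / errors ** 2).sum()
print(chi2)  # lower is better; compare against the degrees of freedom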
merlin-lang/kulfi
experiments/testbed/results/plot/sort.py
Python
lgpl-3.0
989
0.005056
#!/usr/bin/env python

import sys, argparse

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', type=str, action='store',
                        dest='input', default=None,
                        help="Input file")
    args = parser.parse_args()
    stats = dict()

    if args.input is None:
        print "Error: No input file"
        sys.exit(1)

    with open(args.input) as in_file:
for line in in_file.readlines(): time = int(line.split()[0]) tx_bytes = int(line.split()[1])
stats[time] = tx_bytes stats = sorted(stats.items()) start_time = stats[0][0] prev_tx = stats[0][1] no_traffic_flag = True for time, tx_bytes in stats: if no_traffic_flag: if tx_bytes > (prev_tx+100000): no_traffic_flag = False start_time, prev_tx = time, tx_bytes else: print (time-start_time), (tx_bytes-prev_tx) prev_tx = tx_bytes if __name__ == "__main__": main()
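# Illustration (a Python 3 sketch of the core logic above, not part of the
# upstream script): given (time, cumulative tx_bytes) samples, traffic
# "starts" at the first jump of more than 100000 bytes between consecutive
# samples; after that, times are printed relative to the start and byte
# counts relative to the previous sample.
samples = [(0, 500), (1, 600), (2, 200600), (3, 400600)]  # hypothetical data

start_time, prev_tx = samples[0]
no_traffic = True
for time, tx_bytes in samples:
    if no_traffic:
        if tx_bytes > prev_tx + 100000:
            no_traffic = False
            start_time, prev_tx = time, tx_bytes
    else:
        print(time - start_time, tx_bytes - prev_tx)  # -> "1 200000"
        prev_tx = tx_bytes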
Moth-Tolias/LetterBoy
backend/jumbles nontrobo/jumbles nontrobo.py
Python
gpl-3.0
4,226
0.018462
"""return a jumbled version of a string. eg, the lazy hamster is jumping becomes the lzay hmasetr si jmunipg shuffles insides of words. """ import random import string #okay, so this will be the jmuble algorythim #variables, passed #string_to_jumble = "" #yeah #jumble_mode = true # do u switch words of two letters def string_jumble(string_to_jumble, jumble_mode = True): #variables, internal string_to_return = "" #New string string_words = [""] #array containing the words of the string current_word = [] #array containing the letters of the current word punctuation_ = [] #array containing the punctuation i = 0 j = 0 #put the words in an array for char in string_to_jumble: #each space signifies a new word if char not in string.ascii_letters: punctuation_.append(char) i += 1 ##make sure there's something to put it in! string_words.append("") else: #otherwise add to the entry string_words[i] += char #print(string_words) THIS IS WORKING #put the letters of the word into an array, and then switch 'em for word in string_words: #if the word is two long and mode is true switch 'em if (len(word) >= 0) and (len(word) <= 3) : if jumble_mode == True: # for char in word: current_word.append(str(char)) #print(current_word) random.shuffle(current_word) #pop the word and a space into the return string for char in current_word: string_to_return += char string_to_return += punctuation_[string_words.index(word)] #print(string_to_return) current_word.clear() #that's all for this word continue #ok now for the REAL real deal #take away the first letter and put it in string_to_return bc it souldn't be jumbled i = 0 for char in word: if i == 0: string_to_return += char #print(string_to_return) i = 1 #assert bluh WORKING continue #then put almost all of the word in current_word[] #current_word.append("") if (i+1) < len(word): current_word.append(str(char)) #print(current_word) i +=1 #we should be at the last char
acter now
            #print(i)

        #jumble it
        random.shuffle(current_word)

        #add to the new string
        for char in current_word:
            string_to_return += char

        #add the last letter plus a space
        #if word[i]:
        string_to_return += word[i]
        #string_to_return += punctuation_[i]
        string_to_return += punctuation_[string_words.index(word)]
        print(punctuation_[string_words.index(word)])

        #flush the string
        current_word.clear()
        #next word!

    print(string_to_return)
    print(punctuation_)
    #done
    #return the jumbled string, as the docstring promises
    return string_to_return

#string_jumble("a0boop1boop3boop4boop5hey")
string_jumble("I1think2my3dog4is5terribly6lazy;7I8-9I!mean,£he$-%is^really&quite*fat.")#looks like list.index won't work for us
#string_jumble("")#fix this too
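# Illustration (a minimal standalone sketch, not part of the upstream file):
# the core trick above, isolated -- keep the first and last letters of a word
# and shuffle everything in between. Simplified: unlike string_jumble's
# jumble_mode, words of three letters or fewer are left untouched here, and
# punctuation is not handled.
import random

def jumble_word(word):
    if len(word) <= 3:
        return word
    middle = list(word[1:-1])
    random.shuffle(middle)
    return word[0] + ''.join(middle) + word[-1]

# e.g. "the lazy hmsater is jnpmuig" -- exact output varies run to run
print(' '.join(jumble_word(w) for w in "the lazy hamster is jumping".split()))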
BedrockDev/Sunrin2017
Software/Web Programming/Project07/exam_03.py
Python
mit
91
0
ali
st = ['Michael', 'Franklin', 'Trevor']

for i in alist:
    print "Happy birthday, " + i
google/orbit
third_party/conan/recipes/grpc/conanfile.py
Python
bsd-2-clause
5,670
0.002822
from conans import ConanFile, CMake, tools from conans.errors import ConanInvalidConfiguration import os import shutil import time import platform class grpcConan(ConanFile): name = "grpc" version = "1.27.3" description = "Google's RPC library and framework." topics = ("conan", "grpc", "rpc") url = "https://github.com/inexorgame/conan-grpc" homepage = "https://github.com/grpc/grpc" license = "Apache-2.0" exports_sources = ["CMakeLists.txt", "gRPCTargets-helpers.cmake"] generators = "cmake" short_paths = True # Otherwise some folders go out of the 260 chars path length scope rapidly (on windows) settings = "os", "arch", "compiler", "build_type" options = { "fPIC": [True, False], } default_options = { "fPIC": True, } _source_subfolder = "source_subfolder" _build_subfolder = "build_subfolder" requires = ( "abseil/20211102.0", "zlib/1.2.11", "openssl/1.1.1k", "protobuf/3.9.1@bincrafters/stable", "c-ares/1.15.0" ) def build_requirements(self): self.build_requires("protoc_installer/3.9.1@bincrafters/stable") if self.user and self.channel: self.build_requires("grpc_codegen/{}@{}/{}".format(self.version, self.user, self.channel)) else: self.build_requires("grpc_codegen/{}".format(self.version)) def configure(self): if self.settings.os == "Windows" and self.settings.compiler == "Visual Studio": del self.options.fPIC compiler_version = int(str(self.settings.compiler.version)) if compiler_version < 14: raise ConanInvalidConfiguration("gRPC can only be built with Visual Studio 2015 or higher.") def source(self): tools.get(**self.conan_data["sources"][self.version]) extracted_dir = "grpc-" + self.version if platform.system() == "Windows": time.sleep(8) # Work-around, see https://github.com/conan-io/conan/issues/5205 os.rename(extracted_dir, self._source_subfolder) cmake_path = os.path.join(self._source_subfolder, "CMakeLists.txt") tools.replace_in_file(cmake_path, "absl::strings", "CONAN_PKG::abseil") tools.replace_in_file(cmake_path, "absl::optional", "CONAN_PKG::abseil") tools.replace_in_file(cmake_path, "absl::inlined_vector", "CONAN_PKG::abseil") tools.replace_in_file(cmake_path, "set(_gRPC_CPP_PLUGIN $<TARGET_FILE:grpc_cpp_plugin>)", "find_program(_gRPC_CPP_PLUGIN grpc_cpp_plugin)") tools.replace_in_file(cmake_path, "DEPENDS ${ABS_FIL} ${_gRPC_PROTOBUF_PROTOC} grpc_cpp_plugin", "DEPENDS ${ABS_FIL} ${_gRPC_PROTOBUF_PROTOC} ${_gRPC_CPP_PLUGIN}") _cmake = None def _configure_cmake(self): if self._cmake: return self._cmake cmake = CMake(self) cmake.definitions['gRPC_BUILD_CODEGEN'] = "ON" cmake.definitions['gRPC_BUILD_CSHARP_EXT'] = "OFF" cmake.definitions['gRPC_BUILD_TESTS'] = "OFF" cmake.definitions['gRPC_INSTALL'] = "ON" cmake.definitions["gRPC_BUILD_GRPC_CPP_PLUGIN"] = "OFF" cmake.definitions["gRPC_BUILD_GRPC_CSHARP_PLUGIN"] = "OFF" cmake.definitions["gRPC_BUILD_GRPC_OBJECTIVE_C_PLUGIN"] = "OFF" cmake.definitions["gRPC_BUILD_GRPC_PHP_PLUGIN"] = "OFF" cmake.definitions["gRPC_BUILD_GRPC_PYTHON_PLUGIN"] = "OFF"
cmake.definitions["gRPC_BUILD_GRPC_RUBY_PLUGIN"] = "OFF" cmake.definitions["gRPC_BUILD_GRPC_NODE_PLUGIN"] = "OFF" # tell grpc to use the find_package versions cmake.definitions['gRPC_CARES_PROVIDER'] = "package" cmake.definitions['gRPC_ZLIB_PROVIDER'] = "package" cmake.definitions['gRPC_SSL_PROVIDER'] = "package" cmake.definitions['gRPC_PROTOBUF_PROVIDER'] = "none" cmake.definitions['gRPC_ABSL_PROVIDER'] = "none" # Workaro
und for https://github.com/grpc/grpc/issues/11068 cmake.definitions['gRPC_GFLAGS_PROVIDER'] = "none" cmake.definitions['gRPC_BENCHMARK_PROVIDER'] = "none" # Compilation on minGW GCC requires to set _WIN32_WINNTT to at least 0x600 # https://github.com/grpc/grpc/blob/109c570727c3089fef655edcdd0dd02cc5958010/include/grpc/impl/codegen/port_platform.h#L44 if self.settings.os == "Windows" and self.settings.compiler == "gcc": cmake.definitions["CMAKE_CXX_FLAGS"] = "-D_WIN32_WINNT=0x600" cmake.definitions["CMAKE_C_FLAGS"] = "-D_WIN32_WINNT=0x600" cmake.configure(build_folder=self._build_subfolder) self._cmake = cmake return cmake def build(self): cmake = self._configure_cmake() cmake.build() def package(self): cmake = self._configure_cmake() cmake.install() shutil.rmtree(os.path.join(self.package_folder, "lib", "pkgconfig")) shutil.rmtree(os.path.join(self.package_folder, "lib", "cmake", "grpc", "modules")) self.copy("gRPCTargets-helpers.cmake", dst=os.path.join("lib", "cmake", "grpc")) self.copy("LICENSE*", src=self._source_subfolder, dst="licenses") def package_info(self): self.cpp_info.libs = [ "grpc++_unsecure", "grpc++_reflection", "grpc++_error_details", "grpc++", "grpc_unsecure", "grpc_plugin_support", "grpc_cronet", "grpcpp_channelz", "grpc", "gpr", "address_sorting", "upb", ] if self.settings.compiler == "Visual Studio": self.cpp_info.system_libs += ["wsock32", "ws2_32"]
TheAspiringHacker/Asparserations
bootstrap/parser_gen.py
Python
mit
13,102
0.004427
#!/usr/bin/python3 import argparse import collections import json import string import sys header_template = """ #ifndef ASPARSERATIONS_GENERATED_${class_name}_H_ #define ASPARSERATIONS_GENERATED_${class_name}_H_ #include <array> #include <map> #include <memory> #include <set> #include <utility> #include <vector> $header_front $begin_namespace enum class Token { $tokens }; enum class Nonterminal { $nonterminals }; enum class Production { $productions }; struct Lexer_State { const char* begin; const char* end; unsigned int lines; const char* last_newline; }; Lexer_State next(const Lexer_State&); /** */ class Node { public: Node(const $payload&, const Lexer_State&); Node(const $payload&, std::vector<std::unique_ptr<Node>>); const $payload& payload() const; const std::vector<std::unique_ptr<Node>>& children() const; const Lexer_State& state() const; virtual ~Node() = default; private: $payload m_payload; std::vector<std::unique_ptr<Node>> m_children; Lexer_State m_state; }; class $class_name { public: $class_name(); std::unique_ptr<Node> parse(const std::string&, $lexer&, $callback&); static std::string nonterminal_to_string(Nonterminal); static std::string production_to_string(Production); virtual ~$class_name() = default; private: struct Mangled_Production { const Nonterminal nonterminal; const Production production; unsigned int child_count; }; struct Productions { Productions(); $mangled_productions_header }; struct State { std::map<Token,std::pair<const State*,std::set<const Mangled_Production*>>> actions; std::map<Nonterminal,const State*> gotos; }; std::vector<State> m_states; std::vector<std::pair<std::unique_ptr<Node>,const State*>> m_stack; std::unique_ptr<Productions> m_productions; void m_process(const State&, const Lexer_State&, $lexer&, $callback&, std::unique_ptr<Node>&); void m_reduce(const Mangled_Production&, $callback&, std::unique_ptr<Node>&); }; $end_namespace #endif """ src_template = """ #include <algorithm> #include <stdexcept> #include <utility> #include "../include/$class_name.hpp" $src_front
$namespace::Lexer_State $namespace::next(const $namespace::Lexer_State& ls) { $namespace::Lexer_State ls_prime = { ls.end, ls.end, ls.lines, ls.last_newline }; return ls_prime; } $namespace::Node::Node(const $payload& payload, const $namespace::Lexer_State& state) : m_payload(payload), m_state(state) {} $namespace::Node::Node(const $payload
& payload, std::vector<std::unique_ptr<Node>> children) { if(children.empty()) throw std::runtime_error("Zero children," "call Node(const char*, const char*) instead"); m_payload = payload; m_children = std::move(children); m_state = $namespace::Lexer_State { m_children.front()->state().begin, m_children.back()->state().end, m_children.back()->state().lines, m_children.back()->state().last_newline }; } const $payload& $namespace::Node::payload() const { return m_payload; } const std::vector<std::unique_ptr<$namespace::Node>>& $namespace::Node::children() const { return m_children; } const $namespace::Lexer_State& $namespace::Node::state() const { return m_state; } $namespace::$class_name::Productions::Productions() : $mangled_productions_src { } $namespace::$class_name::$class_name() : m_productions(new Productions()), m_states($state_count) { $states } std::unique_ptr<$namespace::Node> $namespace::$class_name::parse(const std::string& input, $lexer& lexer, $callback& callback) { std::unique_ptr<Node> root; m_process(m_states.front(), $namespace::Lexer_State{input.data(), input.data(), 1, input.data() - 1}, lexer, callback, root); while(!m_stack.empty()) { m_process(*m_stack.back().second, $namespace::next(m_stack.back().first->state()), lexer, callback, root); } return root; } std::string $namespace::$class_name::nonterminal_to_string($namespace::Nonterminal nt) { switch(nt) { $nonterminals_to_strings } throw std::runtime_error("Unknown nonterminal"); } std::string $namespace::$class_name::production_to_string($namespace::Production p) { switch(p) { $productions_to_strings } throw std::runtime_error("Unknown production"); } void $namespace::$class_name::m_process( const $namespace::$class_name::State& state, const $namespace::Lexer_State& lex_state, $lexer& lexer, $callback& callback, std::unique_ptr<$namespace::Node>& root) { $namespace::Lexer_State err; for(auto& action : state.actions) { auto result = lexer.expect(action.first, lex_state); err = result.first; if(result.second) { if(action.second.first != nullptr) { try { m_stack.emplace_back( std::unique_ptr<$namespace::Node>(new Node(callback.call(action.first, std::string(result.first.begin, result.first.end)), result.first)), action.second.first ); } catch(std::runtime_error& e) { throw std::runtime_error(std::to_string(err.lines) + ":" + std::to_string(err.end - 1 - err.last_newline) + ": " + e.what()); } return; } if(!action.second.second.empty()) { m_reduce(**action.second.second.begin(), callback, root); return; } } } throw std::runtime_error("Failed parse: " + std::to_string(err.lines) + ":" + std::to_string(err.end - err.last_newline)); } void $namespace::$class_name::m_reduce( const $namespace::$class_name::Mangled_Production& production, $callback& callback, std::unique_ptr<$namespace::Node>& root) { if(m_stack.empty()) throw std::runtime_error("Can't reduce empty stack"); std::unique_ptr<$namespace::Node> node = nullptr; if(production.child_count == 0) { node = std::unique_ptr<$namespace::Node>(new Node(callback.call(production.nonterminal, production.production, {}), $namespace::next(m_stack.back().first->state()))); } else { std::vector<std::unique_ptr<Node>> popped; for(int i = 0; i < production.child_count; ++i) { if(m_stack.empty()) throw std::runtime_error("Stack underflow"); popped.push_back(std::move(m_stack.back().first)); m_stack.pop_back(); } std::reverse(popped.begin(), popped.end()); try { auto temp = callback.call(production.nonterminal, production.production, popped); node = 
std::unique_ptr<$namespace::Node>(new Node(temp, std::move(popped))); } catch(std::runtime_error& e) { throw std::runtime_error(std::string("Error: ") + e.what()); } } if(production.nonterminal == Nonterminal::accept_) { root = std::move(node); return; } const State* state; if(m_stack.empty()) { state = &m_states[0]; } else { state = m_stack.back().second; } auto iter = state->gotos.find(production.nonterminal); if(iter == m_stack.back().second->gotos.end()) { throw std::runtime_error("Unknown nonterminal"); } m_stack.emplace_back(std::move(node), iter->second); } """ def gen_namespace_decls(namespaces): begin = "" end = "" for namespace in namespaces: begin += "namespace " + namespace + " {\n" end = "} // " + namespace + "\n" + end return {"begin_namespace" : begin, "end_namespace" : end} def gen_production_list(grammar): names = set() for name,productions in grammar["nonterminals"].items(): for prodname,wildcard in productions.items(): names.add(prodname) lines = ",\n ".join(names) return lines def gen_mangled_production_list_header(grammar): lines = "" for name,productions in grammar["nonterminals"].items(): for prodname,symbols in productions.items(): lines += "Mangle
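# Illustration (a sketch, not part of the upstream generator): what
# gen_production_list() above computes, applied to a toy grammar dict. The
# dict shape -- a "nonterminals" map of nonterminal -> {production name ->
# symbols} -- is inferred from the code; the toy grammar itself is
# hypothetical, and sorted() is used here only to make the output stable.
grammar = {
    "nonterminals": {
        "Expr": {"add": ["Expr", "plus", "Term"], "term": ["Term"]},
        "Term": {"num": ["number"]},
    }
}

names = set()
for name, productions in grammar["nonterminals"].items():
    for prodname, symbols in productions.items():
        names.add(prodname)
print(",\n    ".join(sorted(names)))  # e.g. the body of the generated enum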
indictranstech/omnitech-frappe
frappe/desk/doctype/note/test_note.py
Python
mit
225
0.013333
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt

import frappe
import unittest

test_records = frappe.get_test_records('Note')

class TestNo
te(unittest.TestCase): pa
ss
koreiklein/fantasia
lib/common_formulas.py
Python
gpl-2.0
3,977
0.023887
# Copyright (C) 2013 Korei Klein <korei.klein1@gmail.com> from calculus.enriched import formula, constructors, endofunctor from calculus.basic import formula as basicFormula from lib.common_symbols import leftSymbol, rightSymbol, relationSymbol, domainSymbol, inputSymbol, outputSymbol, functionPairsSymbol from lib import common_vars from calculus import variable def IsEquivalence(e): return constructors.Always(constructors.Holds(e, common_vars.equivalence)) def Maps(a, b, f): return constructors.Always(constructors.Holds( variable.ProductVariable([ (inputSymbol, a) , (outputSymbol, b)]), variable.ApplySymbolVariable(f, functionPairsSymbol
))) def IsFunction(f): return constructors.Always(constructors.Holds(f, common_vars.function)) InDomain = formula.InDomain Equal = formula.Equal Identical = formula.Identical def InductionBase(var, claim): return claim.substituteVariable(var, common_vars.zero) def InductionStep(var, claim): newVar = var.relatedVariable() return constructors.Forall([constructors.BoundedVariableBinding(ne
wVar, common_vars.natural)], constructors.Implies([claim.substituteVariable(var, newVar)], claim.updateVariables().substituteVariable(var, variable.ApplySymbolVariable(newVar, common_vars.S)))) def InductionHypotheses(var, claim): return constructors.And([InductionBase(var, claim), InductionStep(var, claim)]) def InductionConclusion(var, claim): newVar = var.relatedVariable() return constructors.Forall([constructors.BoundedVariableBinding(newVar, common_vars.natural)], claim.substituteVariable(var, newVar)) def Induction(var, claim): return constructors.Implies([InductionBase(var, claim), InductionStep(var, claim)], InductionConclusion(var, claim)) def _forwardImportInduction(x, var, claim): hypotheses = InductionHypotheses(var, claim) conclusion = InductionConclusion(var, claim) return constructors.assume(x, hypotheses).forwardFollow(lambda x: formula.Arrow(src = x, tgt = constructors.Not( constructors.And([hypotheses, constructors.Not(constructors.And([conclusion, x]))])), basicArrow = x.translate().forwardOnNotFollow(lambda x: x.backwardOnRightFollow(lambda x: x.backwardOnNotFollow(lambda x: x.forwardOnLeftFollow(lambda x: basicFormula.Induction(src = x, tgt = conclusion.translate()))))))) def forwardImportInductionAndContradict(x, var, claim): assert(x.__class__ == formula.Exists) hypotheses = InductionHypotheses(var, claim) conclusion = InductionConclusion(var, claim) return constructors.assume(x.updateVariables(), hypotheses).forwardFollow(lambda x: formula.Arrow(src = x.updateVariables(), tgt = constructors.Not(hypotheses), basicArrow = x.translate().forwardOnNotFollow(lambda x: x.backwardOnRightFollow(lambda x: x.backwardOnNotFollow(lambda x: x.forwardOnLeftFollow(lambda x: basicFormula.Induction(src = x, tgt = conclusion.translate())).forwardFollow(lambda x: x.forwardCommute().forwardFollow(lambda x: x.forwardOnLeftFollow(lambda x: x.forwardOnBodyFollow(lambda x: x.forwardOnRightFollow(lambda x: x.forwardDoubleDual())))).forwardFollow(lambda x: x.forwardContradict())))).backwardFollow(lambda x: x.backwardOnRightFollow(lambda x: basicFormula.trueIsNotFalse).backwardFollow(lambda x: x.backwardIntroUnitLeft()))))) def forwardInductionOnIExists(x, i): var = x.bindings[i].variable claim = constructors.Not(x.value) a = x.forwardPushAndSplit(i) a = a.forwardFollow(lambda x: endofunctor.Exists(x.bindings).onArrow(forwardImportInductionAndContradict(x.value, var, claim))) return a
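# Note (added for clarity, not part of the upstream module): an informal
# sketch of the formula the Induction constructor above builds, i.e. the
# usual induction scheme over the naturals:
#
#   ( claim(0)  AND  forall n in N. (claim(n) -> claim(S(n))) )
#       ->  forall n in N. claim(n)
#
# InductionBase substitutes the bound variable by zero, InductionStep
# quantifies a fresh variable bounded over the naturals, and
# InductionConclusion is the universally quantified claim.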
jesseengel/magenta
magenta/common/__init__.py
Python
apache-2.0
1,001
0
# Copyright 2019 The Magenta Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under t
he License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Imports objects into the top-level common namespace.""" from __future__ import absolute_import from .beam_search import beam_search from .nade im
port Nade from .sequence_example_lib import count_records from .sequence_example_lib import flatten_maybe_padded_sequences from .sequence_example_lib import get_padded_batch from .sequence_example_lib import make_sequence_example from .tf_utils import merge_hparams
pradyunsg/Py2C
py2c/tree/tests/test_visitors.py
Python
bsd-3-clause
5,659
0.00053
"""Unit-tests for `tree.visitors` """ from py2c import tree from py2c.tree import visitors from py2c.tests import Test, data_driven_test from nose.tools import assert_equal # TEST:: Add non-node fields # ============================================================================= # Helper classes # ============================================================================= class BasicNode(tree.Node): _fields = [] class
BasicNodeReplacement(tree.Node): _fields = [] class BasicNodeWithListReplacement(tree.Node): _fields = [] class BasicNodeDeletable(tree.Node): _fields = [] class ParentNode(tree.Node): _fields = [ ('child', tree.Node, 'OPTIONAL'), ] class ParentNodeWithChildrenList(tree.Node): """Node with list of nodes as field """ _fields = [ ('child', tree.Node, 'ZERO_OR_MORE'), ] # ----------------------------------
------------------------------------------- # Concrete Visitors used for testing # ----------------------------------------------------------------------------- class VisitOrderCheckingVisitor(visitors.RecursiveNodeVisitor): def __init__(self): super().__init__() self.visited = [] def generic_visit(self, node): self.visited.append(node.__class__.__name__) super().generic_visit(node) def visit_BasicNodeReplacement(self, node): self.visited.append("visited Copy!") class AccessPathCheckingVisitor(visitors.RecursiveNodeVisitor): def __init__(self): super().__init__() self.recorded_access_path = None def visit_BasicNode(self, node): self.recorded_access_path = self.access_path[:] class EmptyTransformer(visitors.RecursiveNodeTransformer): pass class VisitOrderCheckingTransformer(visitors.RecursiveNodeTransformer): def __init__(self): super().__init__() self.visited = [] def generic_visit(self, node): self.visited.append(node.__class__.__name__) return super().generic_visit(node) def visit_BasicNodeReplacement(self, node): self.visited.append("visited Copy!") return node class AccessPathCheckingTransformer(visitors.RecursiveNodeTransformer): def __init__(self): super().__init__() self.recorded_access_path = None def visit_BasicNode(self, node): self.recorded_access_path = self.access_path[:] return node class TransformationCheckingTransformer(visitors.RecursiveNodeTransformer): def visit_BasicNode(self, node): return BasicNodeReplacement() def visit_BasicNodeDeletable(self, node): return None # Delete this node def visit_BasicNodeReplacement(self, node): return self.NONE_DEPUTY # Replace this node with None def visit_BasicNodeWithListReplacement(self, node): return [BasicNode(), BasicNodeReplacement()] # ----------------------------------------------------------------------------- # Tests # ----------------------------------------------------------------------------- class TestRecursiveASTVisitor(Test): """py2c.tree.visitors.RecursiveNodeVisitor """ context = globals() @data_driven_test("visitors-visitor_order.yaml", prefix="visit order of ") def test_visit_order(self, node, order): to_visit = self.load(node) # The main stuff visitor = VisitOrderCheckingVisitor() retval = visitor.visit(to_visit) assert_equal(retval, None) assert_equal(visitor.visited, order) @data_driven_test("visitors-access_path.yaml", prefix="access path on visit of ") def test_access_path(self, node, access): to_visit = self.load(node) access_path = self.load(access) # The main stuff visitor = AccessPathCheckingVisitor() retval = visitor.visit(to_visit) assert_equal(retval, None) assert_equal(visitor.recorded_access_path, access_path) class TestRecursiveASTTransformer(Test): """py2c.tree.visitors.RecursiveNodeTransformer """ context = globals() @data_driven_test("visitors-visitor_order.yaml", prefix="empty transformer does not transform ") def test_empty_transformer(self, node, order): to_visit = self.load(node) # The main stuff visitor = EmptyTransformer() retval = visitor.visit(to_visit) assert_equal(to_visit, retval) @data_driven_test("visitors-visitor_order.yaml", prefix="visit order of ") def test_visit_order(self, node, order): to_visit = self.load(node) # The main stuff visitor = VisitOrderCheckingTransformer() retval = visitor.visit(to_visit) assert_equal(to_visit, retval) assert_equal(visitor.visited, order) @data_driven_test("visitors-access_path.yaml", prefix="access path on visit of ") def test_access_path(self, node, access): to_visit = self.load(node) access_path = self.load(access) # The main stuff visitor = 
AccessPathCheckingTransformer() retval = visitor.visit(to_visit) assert_equal(retval, to_visit) assert_equal(visitor.recorded_access_path, access_path) @data_driven_test("visitors-transform.yaml", prefix="transformation of ") def test_transformation(self, node, expected): to_visit = self.load(node) expected_node = self.load(expected) # The main stuff visitor = TransformationCheckingTransformer() retval = visitor.visit(to_visit) assert_equal(retval, expected_node) if __name__ == '__main__': from py2c.tests import runmodule runmodule()
TwilioDevEd/api-snippets
twiml/voice/sms/sms-2/sms-2.6.x.py
Python
mit
207
0
from twilio.twiml.voice_response import Vo
iceResponse, Say, Sms response = VoiceResponse() response.say('Our store is located at 123 Easy St.') response.sms('Store Location: 123 Easy St.
') print(response)
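# Note (added for clarity, not part of the upstream sample): running the
# snippet prints TwiML roughly like the following -- a sketch only; the exact
# XML declaration and whitespace may differ by library version:
#
# <?xml version="1.0" encoding="UTF-8"?>
# <Response>
#   <Say>Our store is located at 123 Easy St.</Say>
#   <Sms>Store Location: 123 Easy St.</Sms>
# </Response>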
markrages/ble
profiles/hrm_service.py
Python
mit
1,438
0.013908
#!/usr/bin/python import ble import uuids OPCODE_RESET_EXPENDED=1 class HeartRateService(ble.Service): uuid=uuids.heart_rate class HeartRateControlPoint(ble.Characteristic): uuid=uuids.heart_rate_control_point def reset_expended(self): opcode = OPCODE_RESET_EXPENDED self.value = [opcode] class HeartRateMeasurement(ble.Characteristic): uuid=uuids.heart_rate_measurement @property def value(self): return self.interpret_raw_hrm_measurement(self.raw) def interpret_raw_hrm_measurement(self, raw_value): value = [ord(c) for c in raw_value] flags = value.pop(0) hr_format = (flags>>0) & 1; contact_status = (flags>>1) & 3; expended_present = (flags>>3) & 1; rr_present = (flags>>4) & 1; meas={} meas['hr'] = value.pop(0) if (hr_format): meas['hr'] += 256*value.pop(0) if (contact_status & 2): meas['sensor_contact'] = bool(contact_status & 1) if expended_present:
e = value.pop(0) e += 256*value.pop(0) meas['energy_expended'] = e if rr_present: rr = [] while value: rr_val = value.pop(0) rr_val += 256*value.pop(0) rr_val /= 102
4. rr.append(rr_val) meas['rr'] = rr return meas
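# Illustration (a standalone sketch, not part of the upstream profile; it
# does not call the class above, which expects py2-style byte strings):
# a worked decoding of the flag bits parsed above for one hypothetical raw
# measurement. flags = 0x16 = 0b10110:
#   bit 0  (hr_format)  = 0 -> heart rate fits in a single byte
#   bits 1-2 (contact)  = 3 -> sensor contact supported and detected
#   bit 3  (expended)   = 0 -> no energy-expended field present
#   bit 4  (rr_present) = 1 -> RR intervals follow, in 1/1024-second units
raw = [0x16, 72, 0x00, 0x04]           # HR = 72 bpm, one RR interval
rr = (raw[2] + 256 * raw[3]) / 1024.   # little-endian 16 bits -> seconds
print(raw[1], rr)                      # -> 72 1.0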
recognai/spaCy
spacy/tests/lang/de/test_exceptions.py
Python
mit
1,230
0.000814
# coding: utf-8 """Test that tokenizer exceptions and emoticons are handles correctly.""" from __future__ import unicode_literals import pytest @pytest.mark.parametrize('text', ["auf'm", "du's", "über'm", "wir's"]) def test_de_tokenizer_splits_contractions(de_tokenizer, text): tokens = de_tokenizer(text) assert len(tokens) == 2 @pytest.mark.parametrize('text', ["z.B.",
"d.h.", "Jan.", "Dez.", "Chr."]) def test_de_tokenizer_handles_abbr(de_tokenizer, text): tokens = de_tokenizer(text) assert len(tokens) == 1 def test_de_tokenizer_handles_exc_in_text(de_tokenizer): text = "Ich bin z.Zt. im Ur
laub." tokens = de_tokenizer(text) assert len(tokens) == 6 assert tokens[2].text == "z.Zt." assert tokens[2].lemma_ == "zur Zeit" @pytest.mark.parametrize('text,norms', [("vor'm", ["vor", "dem"]), ("du's", ["du", "es"])]) def test_de_tokenizer_norm_exceptions(de_tokenizer, text, norms): tokens = de_tokenizer(text) assert [token.norm_ for token in tokens] == norms @pytest.mark.xfail @pytest.mark.parametrize('text,norm', [("daß", "dass")]) def test_de_lex_attrs_norm_exceptions(de_tokenizer, text, norm): tokens = de_tokenizer(text) assert tokens[0].norm_ == norm
msfrank/mandelbrot
test/mock_transport.py
Python
gpl-3.0
1,546
0.000647
import asyncio from mandelbrot.transport import * class MockTransport(Transport): def mock_create_item(self, path, item): raise NotImplementedError() @asyncio.coroutine def create_item(self, path, item): return self.mock_create_item(path, item) def mock_replace_item(self, path, item): raise NotImplementedError() @asyncio.coroutine def replace_item(self, path, item): return self.mock_replace_item(path, item) def mock_delete_item(self, path): raise NotImplementedError() @asyncio.coroutine def delete_item(self, path): return self.mock_delete_item(path) def mock_get_item(self, path, filters): raise NotImplementedError() @asyncio.coroutine def get_item(self, path, filters): return self.mock_get_item(path, filters) def mock_patch_item(self, path, fields, constraints): raise NotImplementedError() @asyncio.coroutine def patch_item(self, path, fields, constraints): return self.mock_patch_item(path, fields, constraints) def mock_get_collection(self, path, matchers, count, last): raise NotImplementedError() @asyncio.coroutine def get_collection(se
lf, path, matchers, count, last): return self.mock_get_collection(path, matchers, count, last) def mock_delete_collection(self, path, params): raise NotImplementedError() @asyncio.coroutine def delete_co
llection(self, path, params): return self.mock_delete_collection(path, params)
kerel-fs/skylines
tests/api/schemas/user_test.py
Python
agpl-3.0
2,586
0.000773
from collections import OrderedDict from datetime import datetime from skylines.api import schemas def test_user_schema(test_user): """:type test_user: skylines.model.User""" data, errors = schemas.user_schema.dump(test_user) assert not errors assert isinstance(data, OrderedDict) assert data.keys() == [ 'id', 'name', 'first_name', 'last_name', 'club', 'tracking_delay', 'tracking_call_sign', 'created_at' ] assert data['id'] == test_user.id assert data['name'] == test_user.name assert data['first_name'] == test_user.first_name assert data['last_name'] == test_user.last_name assert data['tracking_delay'] == test_user.tracking_delay assert data['tracking_call_sign'] == test_user.tracking_callsign created_at = datetime.strptime(data['created_at'], '%Y-%m-%dT%H:%M:%S.%f+00:00') assert isinstance(created_at, datetime) assert created_at == test_user.created def test_user_list_schema(test_user): """:type test_user: skylines.model.User""" data, errors = schemas.user_list_schema.dump(test_user) assert not errors assert isinstance(data, OrderedDict) assert data.keys() == [ 'id', 'name', 'first_name', 'last_name', ] assert data['id'] == test_user.id assert data['name'] == test_user.name assert data['first_name'] == test_user.first_name assert data['last_name'] == test_user.last_name def test_current_user_schema(test_user): """:type test_user: skylines.model.User""" data, errors = schemas.current_user_schema.dump(test_user) assert not errors assert isinstance(data, OrderedDict) assert data.keys() == [ 'id', 'name', 'first_name', 'last_name', 'club', 'tracking_delay', 'tracking_call_sign', 'created_at', 'email', 'tracking_key', 'admin', ] assert data['id'] ==
test_user.id asse
rt data['name'] == test_user.name assert data['first_name'] == test_user.first_name assert data['last_name'] == test_user.last_name assert data['email'] == test_user.email_address assert data['tracking_key'] == ('%X' % test_user.tracking_key) assert data['tracking_delay'] == test_user.tracking_delay assert data['tracking_call_sign'] == test_user.tracking_callsign created_at = datetime.strptime(data['created_at'], '%Y-%m-%dT%H:%M:%S.%f+00:00') assert isinstance(created_at, datetime) assert created_at == test_user.created
msultan/msmbuilder
msmbuilder/project_templates/msm/timescales-plot.py
Python
lgpl-2.1
1,908
0.004193
"
""Plot implied timescales vs lagtime {{header}} """ # ? include "plot_header.template" # ? from "plot_macros.template" import xdg_open with context import numpy as np import pandas as pd import seaborn as sns from matplotlib import pyplot as plt sns.set_style('ticks') colors = sns.color_palette() ## Load timescales = pd.read_pickle('timescales.pandas.pickl') n_timescales =
len([x for x in timescales.columns if x.startswith('timescale_')]) ## Implied timescales vs lagtime def plot_timescales(ax): for i in range(n_timescales): ax.scatter(timescales['lag_time'], timescales['timescale_{}'.format(i)], s=50, c=colors[0], label=None, # pandas be interfering ) xmin, xmax = ax.get_xlim() xx = np.linspace(xmin, xmax) ax.plot(xx, xx, color=colors[2], label='$y=x$') ax.legend(loc='best', fontsize=14) ax.set_xlabel('Lag Time / todo:units', fontsize=18) ax.set_ylabel('Implied Timescales / todo:units', fontsize=18) ax.set_xscale('log') ax.set_yscale('log') ## Percent trimmed vs lagtime def plot_trimmed(ax): ax.plot(timescales['lag_time'], timescales['percent_retained'], 'o-', label=None, # pandas be interfering ) ax.axhline(100, color='k', ls='--', label='100%') ax.legend(loc='best', fontsize=14) ax.set_xlabel('Lag Time / todo:units', fontsize=18) ax.set_ylabel('Retained / %', fontsize=18) ax.set_xscale('log') ax.set_ylim((0, 110)) ## Plot timescales fig, ax = plt.subplots(figsize=(7, 5)) plot_timescales(ax) fig.tight_layout() fig.savefig('implied-timescales.pdf') # {{xdg_open('implied-timescales.pdf')}} ## Plot trimmed fig, ax = plt.subplots(figsize=(7,5)) plot_trimmed(ax) fig.tight_layout() fig.savefig('percent-trimmed.pdf') # {{xdg_open('percent-trimmed.pdf')}}
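# Note (added for clarity, not part of the upstream template): the reason the
# y=x reference line is drawn above is that implied timescales are only
# meaningful where they sit well above the chosen lag time, so points at or
# below the y=x line should be disregarded when picking a lag time -- a common
# rule of thumb when validating Markov state models.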
creasyw/IMTAphy
documentation/doctools/tags/0.2/sphinx/directives.py
Python
gpl-2.0
31,058
0.002222
# -*- coding: utf-8 -*- """ sphinx.directives ~~~~~~~~~~~~~~~~~ Handlers for additional ReST directives. :copyright: 2007-2008 by Georg Brandl. :license: BSD. """ import re import sys import string import posixpath from os import path from docutils import nodes from docutils.parsers.rst import directives from sphinx import addnodes from sphinx.roles import caption_ref_re from sphinx.util.compat import make_admonition ws_re = re.compile(r'\s+') # ------ index markup -------------------------------------------------------------- entrytypes = [ 'single', 'pair', 'triple', 'module', 'keyword', 'operator', 'object', 'exception', 'statement', 'builtin', ] def index_directive(name, arguments, options, content, lineno, content_offset, block_text, state, state_machine): arguments = arguments[0].split('\n') env = state.document.settings.env
targetid = 'index-%s' % env.index_num env.index_num += 1 targetnode = nodes.target('', '', ids=[targ
etid]) state.document.note_explicit_target(targetnode) indexnode = addnodes.index() indexnode['entries'] = ne = [] for entry in arguments: entry = entry.strip() for type in entrytypes: if entry.startswith(type+':'): value = entry[len(type)+1:].strip() env.note_index_entry(type, value, targetid, value) ne.append((type, value, targetid, value)) break # shorthand notation for single entries else: for value in entry.split(','): env.note_index_entry('single', value.strip(), targetid, value.strip()) ne.append(('single', value.strip(), targetid, value.strip())) return [indexnode, targetnode] index_directive.arguments = (1, 0, 1) directives.register_directive('index', index_directive) # ------ information units --------------------------------------------------------- def desc_index_text(desctype, currmodule, name): if desctype == 'function': if not currmodule: return '%s() (built-in function)' % name return '%s() (in module %s)' % (name, currmodule) elif desctype == 'data': if not currmodule: return '%s (built-in variable)' % name return '%s (in module %s)' % (name, currmodule) elif desctype == 'class': return '%s (class in %s)' % (name, currmodule) elif desctype == 'exception': return name elif desctype == 'method': try: clsname, methname = name.rsplit('.', 1) except: if currmodule: return '%s() (in module %s)' % (name, currmodule) else: return '%s()' % name if currmodule: return '%s() (%s.%s method)' % (methname, currmodule, clsname) else: return '%s() (%s method)' % (methname, clsname) elif desctype == 'attribute': try: clsname, attrname = name.rsplit('.', 1) except: if currmodule: return '%s (in module %s)' % (name, currmodule) else: return name if currmodule: return '%s (%s.%s attribute)' % (attrname, currmodule, clsname) else: return '%s (%s attribute)' % (attrname, clsname) elif desctype == 'opcode': return '%s (opcode)' % name elif desctype == 'cfunction': return '%s (C function)' % name elif desctype == 'cmember': return '%s (C member)' % name elif desctype == 'cmacro': return '%s (C macro)' % name elif desctype == 'ctype': return '%s (C type)' % name elif desctype == 'cvar': return '%s (C variable)' % name else: raise ValueError("unhandled descenv: %s" % desctype) # ------ functions to parse a Python or C signature and create desc_* nodes. py_sig_re = re.compile(r'''^([\w.]*\.)? # class names (\w+) \s* # thing name (?: \((.*)\) )? $ # optionally arguments ''', re.VERBOSE) py_paramlist_re = re.compile(r'([\[\],])') # split at '[', ']' and ',' def parse_py_signature(signode, sig, desctype, env): """ Transform a python signature into RST nodes. Return (fully qualified name of the thing, classname if any). If inside a class, the current class name is handled intelligently: * it is stripped from the displayed name if present * it is added to the full name (return value) if not present """ m = py_sig_re.match(sig) if m is None: raise ValueError classname, name, arglist = m.groups() add_module = True if env.currclass: if classname and classname.startswith(env.currclass): fullname = classname + name # class name is given again in the signature classname = classname[len(env.currclass):].lstrip('.') add_module = False elif classname: # class name is given in the signature, but different fullname = env.currclass + '.' + classname + name else: # class name is not given in the signature fullname = env.currclass + '.' 
+ name add_module = False else: fullname = classname and classname + name or name if classname: signode += addnodes.desc_classname(classname, classname) # exceptions are a special case, since they are documented in the # 'exceptions' module. elif add_module and env.config.add_module_names and \ env.currmodule and env.currmodule != 'exceptions': nodetext = env.currmodule + '.' signode += addnodes.desc_classname(nodetext, nodetext) signode += addnodes.desc_name(name, name) if not arglist: if desctype in ('function', 'method'): # for callables, add an empty parameter list signode += addnodes.desc_parameterlist() return fullname, classname signode += addnodes.desc_parameterlist() stack = [signode[-1]] for token in py_paramlist_re.split(arglist): if token == '[': opt = addnodes.desc_optional() stack[-1] += opt stack.append(opt) elif token == ']': try: stack.pop() except IndexError: raise ValueError elif not token or token == ',' or token.isspace(): pass else: token = token.strip() stack[-1] += addnodes.desc_parameter(token, token) if len(stack) != 1: raise ValueError return fullname, classname c_sig_re = re.compile( r'''^([^(]*?) # return type ([\w:]+) \s* # thing name (colon allowed for C++ class names) (?: \((.*)\) )? $ # optionally arguments ''', re.VERBOSE) c_funcptr_sig_re = re.compile( r'''^([^(]+?) # return type (\( [^()]+ \)) \s* # name in parentheses \( (.*) \) $ # arguments ''', re.VERBOSE) c_funcptr_name_re = re.compile(r'^\(\s*\*\s*(.*?)\s*\)$') # RE to split at word boundaries wsplit_re = re.compile(r'(\W+)') # These C types aren't described in the reference, so don't try to create # a cross-reference to them stopwords = set(('const', 'void', 'char', 'int', 'long', 'FILE', 'struct')) def parse_c_type(node, ctype): # add cross-ref nodes for all words for part in filter(None, wsplit_re.split(ctype)): tnode = nodes.Text(part, part) if part[0] in string.letters+'_' and part not in stopwords: pnode = addnodes.pending_xref( '', reftype='ctype', reftarget=part, modname=None, classname=None) pnode += tnode node += pnode else: node += tnode def parse_c_signature(signode, sig, desctype): """Transform a C (or C++) signature into RST nodes.""" # first try the function pointer signature regex, it's more specific m = c_funcptr_sig_re.match(sig) if m is None: m = c_sig_re.match(sig) if m is None: raise ValueError('no match') rettype, name, arglist = m.groups() signode += addnodes
OTL/jps
test/test_serialize.py
Python
apache-2.0
697
0
import json import time import jps class MessageHolder(object): def __init__(self): self.saved_msg = [] def __call__(self, msg): self.saved_msg.append(msg) def test_pubsub_with_serialize_json(): holder = MessageHolder() sub = jps.Subscriber('/serialize_hoge1', holder, deserializer=json.loads) pub = jps.Publisher('/serialize_hoge1',
serializer=json.dumps) time.sleep(0.1) obj = {'da1': 1, 'name': 'hoge'} pub.publish(obj) time.sleep(0.1) sub.spin_once() assert len(holder.sa
ved_msg) == 1 assert holder.saved_msg[0]['da1'] == 1 assert holder.saved_msg[0]['name'] == 'hoge'
wooey/django-djangui
wooey/conf/project_template/urls/user_urls.py
Python
bsd-3-clause
353
0.005666
from os import environ from django.conf.urls import include, url from django.conf import settings from django.conf.urls
.static import static from .wooey_urls import * if settings.DEBUG: urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) urlpatterns += static(settings.STA
TIC_URL, document_root=settings.STATIC_ROOT)
jorik041/Veil-Evasion
modules/payloads/ruby/meterpreter/rev_http.py
Python
gpl-3.0
2,371
0.012231
""" Custom-written pure ruby meterpreter/reverse_http stager. TODO: better randomization Module built by @harmj0y """ from modules.common import helpers class Payload: def __init__(self): # required options self.description = "pure windows/meterpreter/reverse_http stager, no shellcode" self.language = "ruby" self.extension = "rb" self.rating = "Normal" # options we require user ineraction for- format is {Option : [Value, Description]]} self.required_options = { "compile_to_exe" : ["Y", "Compile to an executable"], "LHOST" : ["", "IP of the metasploit handler"], "LPORT" : ["", "Port of the metasploit handler"]} def generate(self): payloadCode = "require 'rubygems';require 'win32/api';require 'net/http';include Win32\n" payloadCode += "exit if Object.const_defined?(:Ocra)\n" payloadCode += "$v = API.new('VirtualAlloc', 'IIII', 'I');$r = API.new('RtlMoveMemory', 'IPI', 'V');$c = API.new('CreateThread', 'IIIIIP', 'I');$w = API.new('WaitForSingleObject', 'II', 'I')\n" payloadCode += "def ch()\n" #payloadCode += "\tchk = (\"a\"..\"z\").to_a + (\"A\"..\"Z\").to_a + (\"0\"..\"9\").to_a\n" #payloadCode += "\t32.times do\n" #payloadCode += "\t\turi = chk.sample().join()\n" #payloadCode += "\t\tchk.sort_by {rand}.each do |x|\n" #payloadCode += "\t\t\treturn(uri + x) if (uri + x).unpack(\"C*\").inject(:+) % 0x100 == 92\n" #payloadCode += "\t\tend\n" #payloadCode += "\tend\n" payloadCode += "\treturn \"WEZf\"\n" payloadCode += "end\n" payloadCode += "def ij(sc)\n" payloadCode += "\tif sc.length > 1
000\n" payloadCode += "\t\tpt = $v.call(0,(sc.length > 0x1000 ? sc.length : 0x1000), 0x1000, 0x40)\n" payloadCode += "\t\tx = $r.call(pt,sc,sc.length)\n" payloadCode += "\t\tx = $w.c
all($c.call(0,0,pt,0,0,0),0xFFFFFFF)\n" payloadCode += "\tend\nend\n" payloadCode += "uri = URI.encode(\"http://%s:%s/#{ch()}\")\n" % (self.required_options["LHOST"][0], self.required_options["LPORT"][0]) payloadCode += "uri = URI(uri)\n" payloadCode += "ij(Net::HTTP.get(uri))" return payloadCode
NicovincX2/Python-3.5
Algorithmique/Algorithme/Algorithme numérique/Algorithme d'Euclide étendu/extended_euclidean_algorithm.py
Python
gpl-3.0
1,258
0.00159
# -*- coding: utf-8 -*-

import os

# Author: Sam Erickson
# Date: 2/23/2016
#
# Program Description: This program gives the integer coefficients x,y to the
# equation ax+by=gcd(a,b) given by the extended Euclidean Algorithm.


def extendedEuclid(a, b):
    """
    Preconditions - a and b are both positive integers.
    Postconditions - The equation for ax+by=gcd(a,b) has been returned where
    x and y are solved.
    Input - a : int, b : int
    Output - ax+by=gcd(a,b) : string
    """
    b, a = max(a, b), min(a, b)
    # Format of euclidList is for back-substitution
    euclidList = [[b % a, 1, b, -1 * (b // a), a]]
    while b % a > 0:
        b, a = a, b % a
        euclidList.append([b % a, 1, b, -1 * (b // a), a])
    if len(euclidList) > 1:
        euclidList.pop()
    euclidList = euclidList[::-1]
    for i in range(1, len(euclidList)):
        euclidList[i][1] *= euclidList[i - 1][3]
        euclidList[i][3] *= euclidList[i - 1][3]
        euclidList[i][3] += euclidList[i - 1][1]
    expr = euclidList[len(euclidList) - 1]
    strExpr = str(expr[1]) + "*" + str(expr[2]) + " + " + str(expr[3]) + "*" + str(expr[4]) \
        + " = " + str(euclidList[0][0])
    return strExpr

os.system("pause")
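A quick sanity check of the Bezout identity the record above computes; this sketch is not part of the repository, and egcd is a hypothetical helper name:

# Hedged sketch: verify a*x + b*y == gcd(a, b) with the textbook recursive
# extended Euclid; `egcd` is a hypothetical helper, not from the file above.
def egcd(a, b):
    # returns (g, x, y) such that a*x + b*y == g == gcd(a, b)
    if b == 0:
        return a, 1, 0
    g, x, y = egcd(b, a % b)
    return g, y, x - (a // b) * y

g, x, y = egcd(15, 35)
assert 15 * x + 35 * y == g == 5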
MTgeophysics/mtpy
mtpy/gui/SmartMT/visualization/strike.py
Python
gpl-3.0
3,575
0.003077
# -*- coding: utf-8 -*-
"""
    Description:

    Usage:

    Author: YingzhiGou
    Date: 21/08/2017
"""
from mtpy.gui.SmartMT.Components.FigureSetting import Font
from mtpy.gui.SmartMT.Components.PlotParameter import FrequencyTolerance, Rotation
from mtpy.gui.SmartMT.gui.plot_control_guis import PlotControlStrike
from mtpy.gui.SmartMT.visualization import VisualizationBase
from mtpy.imaging.plotstrike import PlotStrike
from mtpy.utils.matplotlib_utils import get_next_fig_num


class Strike(VisualizationBase):
    def __init__(self, parent):
        VisualizationBase.__init__(self, parent)
        # setup ui
        self._plot_control_ui = PlotControlStrike(self._parameter_ui)
        self._parameter_ui.add_parameter_groupbox(self._plot_control_ui)
        self._rotation_ui = Rotation(self._parameter_ui)
        self._parameter_ui.add_parameter_groupbox(self._rotation_ui)
        self._tolerance_ui = FrequencyTolerance(self._parameter_ui)
        self._tolerance_ui.ui.doubleSpinBox.setValue(0.05)  # set default value
        self._parameter_ui.add_parameter_groupbox(self._tolerance_ui)
        self._font_ui = Font(self._parameter_ui)
        self._font_ui.hide_weight()
        self._font_ui.hide_color()
        self._parameter_ui.add_figure_groupbox(self._font_ui)
        self._parameter_ui.end_of_parameter_components()
        self.update_ui()
        self._params = None

    def plot(self):
        # set up params
        self._params = {
            'fn_list': [mt_obj.fn for mt_obj in self._mt_objs],
            'rot_z': self._rotation_ui.get_rotation_in_degree(),
            'period_tolerance': self._tolerance_ui.get_tolerance_in_float(),
            'plot_range': self._plot_control_ui.get_plot_range(),
            'plot_type': self._plot_control_ui.get_plot_type(),
            'plot_tipper': self._plot_control_ui.get_plot_tipper(),
            'pt_error_floor': self._plot_control_ui.get_error_floor(),
            'fold': self._plot_control_ui.get_fold(),
            'fig_size': (8, 6),
            'fig_dpi': 100,
            "plot_yn": 'n',
            "fig_num": get_next_fig_num()
        }
        param = self._font_ui.get_size()
        if param is not None:
            self._params['font_size'] = param
        self._plotting_object = PlotStrike(**self._params)
        self._plotting_object.plot(show=False)
        self._fig = self._plotting_object.fig

    def update_ui(self):
        pass

    @staticmethod
    def plot_name():
        return "Strike"

    @staticmethod
    def plot_description():
        return """
<p>This plots the strike estimated from the invariants, phase tensor and the tipper in either a rose diagram or xy plot</p>
<p>plots the strike angle as determined by phase tensor azimuth (Caldwell et al. [2004]) and invariants of the impedance tensor (Weaver et al. [2003]).</p>
<p>The data is split into decades where the histogram for each is plotted in the form of a rose diagram with a range of 0 to 180 degrees. Where 0 is North and 90 is East. The median angle of the period band is set in polar diagram. The top row is the strike estimated from the invariants of the impedance tensor. The bottom row is the azimuth estimated from the phase tensor. If tipper is plotted then the 3rd row is the strike determined from the tipper, which is orthogonal to the induction arrow direction.</p>
"""

    def get_plot_tooltip(self):
        pass
tstenner/bleachbit
windows/setup_py2exe.py
Python
gpl-3.0
20,767
0.000626
""" BleachBit Copyright (C) 2008-2020 Andrew Ziem https://www.bleachbit.org This program is free s
oftware: you can redistribute it and/or modi
fy it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ from __future__ import absolute_import, print_function import fnmatch import glob import imp import logging import os import shutil import subprocess import sys import time import win_unicode_console setup_encoding = sys.stdout.encoding win_unicode_console.enable() logger = logging.getLogger('setup_py2exe') logger.setLevel(logging.INFO) ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) formatter = logging.Formatter( "%(asctime)s - %(levelname)s - %(message)s", "%Y-%m-%d %H:%M:%S") ch.setFormatter(formatter) logger.addHandler(ch) fast = False if len(sys.argv) > 1 and sys.argv[1] == 'fast': logger.info('Fast build') fast = True ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) logger.info('ROOT_DIR ' + ROOT_DIR) sys.path.append(ROOT_DIR) BB_VER = None GTK_DIR = sys.exec_prefix + '\\Lib\\site-packages\\gnome\\' NSIS_EXE = 'C:\\Program Files (x86)\\NSIS\\makensis.exe' NSIS_ALT_EXE = 'C:\\Program Files\\NSIS\\makensis.exe' if not os.path.exists(NSIS_EXE) and os.path.exists(NSIS_ALT_EXE): logger.info('NSIS found in alternate location: ' + NSIS_ALT_EXE) NSIS_EXE = NSIS_ALT_EXE SZ_EXE = 'C:\\Program Files\\7-Zip\\7z.exe' # maximum compression with maximum compatibility # mm=deflate method because deflate64 not widely supported # mpass=passes for deflate encoder # mfb=number of fast bytes # bso0 bsp0 quiet output # 7-Zip Command Line Reverence Wizard: https://axelstudios.github.io/7z/#!/ SZ_OPTS = '-tzip -mm=Deflate -mfb=258 -mpass=7 -bso0 -bsp0' # best compression if fast: # fast compression SZ_OPTS = '-tzip -mx=1 -bso0 -bsp0' UPX_EXE = ROOT_DIR + '\\upx\\upx.exe' UPX_OPTS = '--best --crp-ms=999999 --nrv2e' def archive(infile, outfile): assert_exist(infile) if os.path.exists(outfile): logger.warning( 'Deleting output archive that already exists: ' + outfile) os.remove(outfile) cmd = '{} a {} {} {}'.format(SZ_EXE, SZ_OPTS, outfile, infile) run_cmd(cmd) assert_exist(outfile) def recursive_glob(rootdir, patterns): return [os.path.join(looproot, filename) for looproot, _, filenames in os.walk(rootdir) for filename in filenames if any(fnmatch.fnmatch(filename, pattern) for pattern in patterns)] def assert_exist(path, msg=None): if not os.path.exists(path): logger.error(path + ' not found') if msg: logger.error(msg) sys.exit(1) def check_exist(path, msg=None): if not os.path.exists(path): logger.warning(path + ' not found') if msg: logger.warning(msg) time.sleep(5) def assert_module(module): try: imp.find_module(module) except ImportError: logger.error('Failed to import ' + module) logger.error('Process aborted because of error!') sys.exit(1) def assert_execute(args, expected_output): """Run a command and check it returns the expected output""" actual_output = subprocess.check_output(args).decode(setup_encoding) if -1 == actual_output.find(expected_output): raise RuntimeError('When running command {} expected output {} but got {}'.format( args, expected_output, actual_output)) def assert_execute_console(): """Check the application starts""" 
logger.info('Checking bleachbit_console.exe starts') assert_execute([r'dist\bleachbit_console.exe', '--gui', '--exit', '--no-uac'], 'Success') def run_cmd(cmd): logger.info(cmd) p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() logger.info(stdout.decode(setup_encoding)) if stderr: logger.error(stderr.decode(setup_encoding)) def sign_code(filename): if os.path.exists('CodeSign.bat'): logger.info('Signing code: %s' % filename) cmd = 'CodeSign.bat %s' % filename run_cmd(cmd) else: logger.warning('CodeSign.bat not available for %s' % filename) def get_dir_size(start_path='.'): # http://stackoverflow.com/questions/1392413/calculating-a-directory-size-using-python total_size = 0 for dirpath, dirnames, filenames in os.walk(start_path): for f in filenames: fp = os.path.join(dirpath, f) total_size += os.path.getsize(fp) return total_size def copytree(src, dst): # Microsoft xcopy is about twice as fast as shutil.copytree logger.info('copying {} to {}'.format(src, dst)) cmd = 'xcopy {} {} /i /s /q'.format(src, dst) os.system(cmd) def count_size_improvement(func): def wrapper(): import time t0 = time.time() size0 = get_dir_size('dist') func() size1 = get_dir_size('dist') t1 = time.time() logger.info('Reduced size of the dist directory by {:,} B from {:,} B to {:,} B in {:.1f} s'.format( size0 - size1, size0, size1, t1 - t0)) return wrapper def environment_check(): """Check the build environment""" logger.info('Checking for translations') assert_exist('locale', 'run "make -C po local" to build translations') logger.info('Checking for GTK') assert_exist(GTK_DIR) logger.info('Checking PyGI library') assert_module('gi') logger.info('Checking Python win32 library') assert_module('win32file') logger.info('Checking for CodeSign.bat') check_exist('CodeSign.bat', 'Code signing is not available') logger.info('Checking for NSIS') check_exist( NSIS_EXE, 'NSIS executable not found: will try to build portable BleachBit') def build(): """Build the application""" logger.info('Deleting directories build and dist') shutil.rmtree('build', ignore_errors=True) shutil.rmtree('dist', ignore_errors=True) shutil.rmtree('BleachBit-Portable', ignore_errors=True) logger.info('Running py2exe') shutil.copyfile('bleachbit.py', 'bleachbit_console.py') cmd = sys.executable + ' -OO setup.py py2exe' run_cmd(cmd) assert_exist('dist\\bleachbit.exe') assert_exist('dist\\bleachbit_console.exe') os.remove('bleachbit_console.py') if not os.path.exists('dist'): os.makedirs('dist') logger.info('Copying GTK files and icon') copytree(GTK_DIR + '\\etc', 'dist\\etc') copytree(GTK_DIR + '\\lib', 'dist\\lib') for subpath in ['fontconfig', 'fonts', 'icons', 'themes']: copytree(os.path.join(GTK_DIR, 'share', subpath), 'dist\\share\\' + subpath) SCHEMAS_DIR = 'share\\glib-2.0\\schemas' os.makedirs(os.path.join('dist', SCHEMAS_DIR)) shutil.copyfile(os.path.join(GTK_DIR, SCHEMAS_DIR, 'gschemas.compiled'), os.path.join('dist', SCHEMAS_DIR, 'gschemas.compiled')) shutil.copyfile('bleachbit.png', 'dist\\share\\bleachbit.png') # for pop-up notification shutil.copyfile('windows\\bleachbit.ico', 'dist\\share\\bleachbit.ico') for dll in glob.glob1(GTK_DIR, '*.dll'): shutil.copyfile(os.path.join(GTK_DIR, dll), 'dist\\'+dll) os.mkdir('dist\\data') shutil.copyfile('data\\app-menu.ui', 'dist\\data\\app-menu.ui') logger.info('Copying themes') copytree('themes', 'dist\\themes') logger.info('Copying CA bundle') import requests shutil.copyfile(requests.utils.DEFAULT_CA_BUNDLE_PATH, 
os.path.join('dist', 'cacert.pem')) dist_locale_dir = r'dist\share\locale' logger.
InakiZabala/odoomrp-wip
stock_packaging_info/__openerp__.py
Python
agpl-3.0
1,532
0
# -*- encoding: utf-8 -*-
##############################################################################
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as published
#    by the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
    "name": "Stock - Packaging information",
    "version": "1.0",
    "depends": [
        "stock",
        "product_packaging_through_attributes",
    ],
    "author": "OdooMRP team,"
              "AvanzOSC,"
              "Serv. Tecnol. Avanzados - Pedro M. Baeza",
    "website": "http://www.odoomrp.com",
    "contributors": [
        "Oihane Crucelaegui <oihanecrucelaegi@avanzosc.es>",
        "Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>",
        "Ana Juaristi <ajuaristio@gmail.com>"
    ],
    "category": "Custom Module",
    "summary": "",
    "data": [
        "views/stock_view.xml",
    ],
    "installable": True,
    "auto_install": False,
}
studybuffalo/studybuffalo
study_buffalo/hc_dpd/migrations/0012_align_with_hc_20220301_2.py
Python
gpl-3.0
620
0
# pylint: disable=missing-module-docstring, missing-class-docstring
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('hc_dpd', '0011_align_with_hc_20220301'),
    ]

    operations = [
        migrations.AlterField(
            model_name='company',
            name='company_name',
            field=models.CharField(blank=True, max_length=80, null=True),
        ),
        migrations.AlterField(
            model_name='company',
            name='street_name',
            field=models.CharField(blank=True, max_length=80, null=True),
        ),
    ]
smeerten/ssnake
src/safeEval.py
Python
gpl-3.0
2,736
0.00402
#!/usr/bin/env python3

# Copyright 2016 - 2021 Bas van Meerten and Wouter Franssen

# This file is part of ssNake.
#
# ssNake is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ssNake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ssNake. If not, see <http://www.gnu.org/licenses/>.

import re
import numpy as np
import scipy.special
import scipy.integrate  # needed explicitly: vars(scipy.integrate) is used below
import hypercomplex as hc


def safeEval(inp, length=None, Type='All', x=None):
    """
    Creates a more restricted eval environment.
    Note that this method is still not acceptable to process strings from untrusted sources.

    Parameters
    ----------
    inp : str
        String to evaluate.
    length : int or float, optional
        The variable length will be set to this value.
        By default the variable length is not set.
    Type : {'All', 'FI', 'C'}, optional
        Type of expected output. 'All' will return all types, 'FI' will return a float or int,
        and 'C' will return a complex number.
        By default Type is set to 'All'
    x : array_like, optional
        The variable x is set to this variable,
        By default the variable x is not used.

    Returns
    -------
    Object
        The result of the evaluated string.
    """
    env = vars(np).copy()
    env.update(vars(hc).copy())
    env.update(vars(scipy.special).copy())
    env.update(vars(scipy.integrate).copy())
    env["locals"] = None
    env["globals"] = None
    env["__name__"] = None
    env["__file__"] = None
    env["__builtins__"] = {'None': None, 'False': False, 'True': True}  # None
    env["slice"] = slice
    if length is not None:
        env["length"] = length
    if x is not None:
        env["x"] = x
    inp = re.sub('([0-9]+)[kK]', '\g<1>*1024', str(inp))
    try:
        val = eval(inp, env)
        if isinstance(val, str):
            return None
        if Type == 'All':
            return val
        if Type == 'FI':  # single float/int type
            if isinstance(val, (float, int)) and not np.isnan(val) and not np.isinf(val):
                return val
            return None
        if Type == 'C':  # single complex number
            if isinstance(val, (float, int, complex)) and not np.isnan(val) and not np.isinf(val):
                return val
            return None
    except Exception:
        return None
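A few illustrative calls for the record above; the expected results are inferred from the code, so treat them as a sketch rather than guaranteed behaviour:

# Illustrative usage of safeEval(), inferred from the docstring and code above.
safeEval("2*pi")                 # numpy names are in scope -> 6.2831...
safeEval("4k")                   # '4k' is rewritten to 4*1024 -> 4096
safeEval("sqrt(-1)", Type='FI')  # nan result is rejected for 'FI' -> None
safeEval("__import__('os')")     # builtins are stripped, eval fails -> None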
andialbrecht/sqlparse
sqlparse/filters/aligned_indent.py
Python
bsd-3-clause
5,110
0
#
# Copyright (C) 2009-2020 the sqlparse authors and contributors
# <see AUTHORS file>
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause

from sqlparse import sql, tokens as T
from sqlparse.utils import offset, indent


class AlignedIndentFilter:
    join_words = (r'((LEFT\s+|RIGHT\s+|FULL\s+)?'
                  r'(INNER\s+|OUTER\s+|STRAIGHT\s+)?|'
                  r'(CROSS\s+|NATURAL\s+)?)?JOIN\b')
    by_words = r'(GROUP|ORDER)\s+BY\b'
    split_words = ('FROM', join_words, 'ON', by_words,
                   'WHERE', 'AND', 'OR', 'HAVING', 'LIMIT',
                   'UNION', 'VALUES', 'SET', 'BETWEEN', 'EXCEPT')

    def __init__(self, char=' ', n='\n'):
        self.n = n
        self.offset = 0
        self.indent = 0
        self.char = char
        self._max_kwd_len = len('select')

    def nl(self, offset=1):
        # offset = 1 represent a single space after SELECT
        offset = -len(offset) if not isinstance(offset, int) else offset
        # add two for the space and parenthesis
        indent = self.indent * (2 + self._max_kwd_len)

        return sql.Token(T.Whitespace, self.n + self.char * (
            self._max_kwd_len + offset + indent + self.offset))

    def _process_statement(self, tlist):
        if len(tlist.tokens) > 0 and tlist.tokens[0].is_whitespace \
                and self.indent == 0:
            tlist.tokens.pop(0)

        # process the main query body
        self._process(sql.TokenList(tlist.tokens))

    def _process_parenthesis(self, tlist):
        # if this isn't a subquery, don't re-indent
        _, token = tlist.token_next_by(m=(T.DML, 'SELECT'))
        if token is not None:
            with indent(self):
                tlist.insert_after(tlist[0], self.nl('SELECT'))
                # process the inside of the parenthesis
                self._process_default(tlist)

            # de-indent last parenthesis
            tlist.insert_before(tlist[-1], self.nl())

    def _process_identifierlist(self, tlist):
        # columns being selected
        identifiers = list(tlist.get_identifiers())
        identifiers.pop(0)
        [tlist.insert_before(token, self.nl()) for token in identifiers]
        self._process_default(tlist)

    def _process_case(self, tlist):
        offset_ = len('case ') + len('when ')
        cases = tlist.get_cases(skip_ws=True)
        # align the end as well
        end_token = tlist.token_next_by(m=(T.Keyword, 'END'))[1]
        cases.append((None, [end_token]))

        condition_width = [len(' '.join(map(str, cond))) if cond else 0
                           for cond, _ in cases]
        max_cond_width = max(condition_width)

        for i, (cond, value) in enumerate(cases):
            # cond is None when 'else or end'
            stmt = cond[0] if cond else value[0]

            if i > 0:
                tlist.insert_before(stmt, self.nl(offset_ - len(str(stmt))))
            if cond:
                ws = sql.Token(T.Whitespace, self.char * (
                    max_cond_width - condition_width[i]))
                tlist.insert_after(cond[-1], ws)

    def _next_token(self, tlist, idx=-1):
        split_words = T.Keyword, self.split_words, True
        tidx, token = tlist.token_next_by(m=split_words, idx=idx)
        # treat "BETWEEN x and y" as a single statement
        if token and token.normalized == 'BETWEEN':
            tidx, token = self._next_token(tlist, tidx)
            if token and token.normalized == 'AND':
                tidx, token = self._next_token(tlist, tidx)
        return tidx, token

    def _split_kwds(self, tlist):
        tidx, token = self._next_token(tlist)
        while token:
            # joins, group/order by are special case. only consider the first
            # word as aligner
            if (
                token.match(T.Keyword, self.join_words, regex=True)
                or token.match(T.Keyword, self.by_words, regex=True)
            ):
                token_indent = token.value.split()[0]
            else:
                token_indent = str(token)
            tlist.insert_before(token, self.nl(token_indent))
            tidx += 1
            tidx, token = self._next_token(tlist, tidx)

    def _process_default(self, tlist):
        self._split_kwds(tlist)
        # process any sub-sub statements
        for sgroup in tlist.get_sublists():
            idx = tlist.token_index(sgroup)
            pidx, prev_ = tlist.token_prev(idx)
            # HACK: make "group/order by" work. Longer than max_len.
            offset_ = 3 if (
                prev_ and prev_.match(T.Keyword, self.by_words, regex=True)
            ) else 0
            with offset(self, offset_):
                self._process(sgroup)

    def _process(self, tlist):
        func_name = '_process_{cls}'.format(cls=type(tlist).__name__)
        func = getattr(self, func_name.lower(), self._process_default)
        func(tlist)

    def process(self, stmt):
        self._process(stmt)
        return stmt
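For context on the record above: in released sqlparse versions this filter is reached through the public formatting API; to the best of my recollection the option is named reindent_aligned, so usage looks roughly like this (a sketch, not taken from the file above):

# Hedged usage sketch: AlignedIndentFilter is normally wired in via
# sqlparse.format(); `reindent_aligned` is believed to be the option name.
import sqlparse

sql = 'select a, b from t where a = 1 and b = 2'
print(sqlparse.format(sql, reindent_aligned=True, keyword_case='upper'))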
danmergens/mi-instrument
mi/dataset/parser/pco2a_a_dcl.py
Python
bsd-2-clause
10,120
0.003063
#!/usr/bin/env python

"""
@package mi.dataset.parser.pco2a_a_dcl
@file marine-integrations/mi/dataset/parser/pco2a_a_dcl.py
@author Sung Ahn
@brief Parser for the pco2a_a_dcl dataset driver

This file contains code for the pco2a_a_dcl parser and code to produce data particles.

For instrument telemetered data, there is one driver which produces two(air/water) types of data particle.
For instrument recover data, there is one driver which produces two(air/water) types of data particle.
The input files and the content of the data particles are the same for both
instrument telemetered and instrument recovered.
Only the names of the output particle streams are different.

The input file is ASCII and contains 2 types of records.
Records are separated by a newline.
All records start with a timestamp.
Metadata records: timestamp [text] more text newline.
Sensor Data records: timestamp sensor_data newline.
Only sensor data records produce particles if properly formed.
Mal-formed sensor data records and all metadata records produce no particles.

Release notes:
Initial Release
"""

__author__ = 'Sung Ahn'
__license__ = 'Apache 2.0'

import re

from mi.core.log import get_logger

log = get_logger()

from mi.core.common import BaseEnum
from mi.dataset.parser.dcl_file_common import DclInstrumentDataParticle, \
    DclFileCommonParser, TIMESTAMP, \
    START_METADATA, END_METADATA, START_GROUP, END_GROUP
from mi.dataset.parser.common_regexes import END_OF_LINE_REGEX, SPACE_REGEX, \
    FLOAT_REGEX, UNSIGNED_INT_REGEX, TIME_HR_MIN_SEC_REGEX, ANY_CHARS_REGEX
from mi.dataset.parser.utilities import timestamp_yyyy_mm_dd_hh_mm_ss_to_ntp
from mi.core.instrument.data_particle import DataParticleKey

# Basic patterns
UINT = '(' + UNSIGNED_INT_REGEX + ')'  # unsigned integer as a group
FLOAT = '(' + FLOAT_REGEX + ')'  # floating point as a captured group
W_CHAR = r'(W)'
A_CHAR = r'(A)'
COMMA = ','
SHARP = '#'
CHAR_M = ' *M'
EXTRA_CR = '\s*?'  # account for random <CR> found in some live files.

# Timestamp at the start of each record: YYYY/MM/DD HH:MM:SS.mmm
# Metadata fields:  [text] more text
# Sensor data has tab-delimited fields (date, time, integers)
# All records end with one of the newlines.
SENSOR_DATE = r'(\d{4}/\d{2}/\d{2})'  # Sensor Date: MM/DD/YY

# Metadata record:
#   Timestamp [Text]MoreText newline
METADATA_PATTERN = TIMESTAMP + SPACE_REGEX  # dcl controller timestamp
METADATA_PATTERN += START_METADATA  # Metadata record starts with '['
METADATA_PATTERN += ANY_CHARS_REGEX  # followed by text
METADATA_PATTERN += END_METADATA  # followed by ']'
METADATA_PATTERN += ANY_CHARS_REGEX  # followed by more text
METADATA_PATTERN += END_OF_LINE_REGEX  # metadata record ends with LF
METADATA_MATCHER = re.compile(METADATA_PATTERN)

# Sensor data record:
#   Timestamp Date<space>Time<space>SensorData
#   where SensorData are comma-separated unsigned integer numbers
SENSOR_DATA_PATTERN = TIMESTAMP + SPACE_REGEX  # dcl controller timestamp
SENSOR_DATA_PATTERN += SHARP + START_GROUP + SENSOR_DATE + SPACE_REGEX  # sensor date
SENSOR_DATA_PATTERN += TIME_HR_MIN_SEC_REGEX + END_GROUP + COMMA + CHAR_M + COMMA  # sensor time
SENSOR_DATA_PATTERN += UINT + COMMA  # measurement wavelength beta
SENSOR_DATA_PATTERN += UINT + COMMA  # raw signal beta
SENSOR_DATA_PATTERN += FLOAT + COMMA  # measurement wavelength chl
SENSOR_DATA_PATTERN += FLOAT + COMMA  # raw signal chl
SENSOR_DATA_PATTERN += FLOAT + COMMA  # measurement wavelength cdom
SENSOR_DATA_PATTERN += FLOAT + COMMA  # raw signal cdom
SENSOR_DATA_PATTERN += UINT + COMMA  # raw signal beta
SENSOR_DATA_PATTERN += FLOAT + COMMA  # raw signal cdom
SENSOR_DATA_PATTERN += FLOAT + COMMA  # raw signal cdom

SENSOR_DATA_PATTERN_AIR = SENSOR_DATA_PATTERN + A_CHAR + EXTRA_CR + END_OF_LINE_REGEX
SENSOR_DATA_MATCHER_AIR = re.compile(SENSOR_DATA_PATTERN_AIR)
SENSOR_DATA_PATTERN_WATER = SENSOR_DATA_PATTERN + W_CHAR + EXTRA_CR + END_OF_LINE_REGEX
SENSOR_DATA_MATCHER_WATER = re.compile(SENSOR_DATA_PATTERN_WATER)

# Manual test is below
# >>me = re.match(r"((\d{4})/(\d{2})/(\d{2}) (\d{2}):(\d{2}):(\d{2})\.(\d{3})) #((\d{4}/\d{2}/\d{2})
# (\d{2}):(\d{2}):(\d{2})), *M,(\d*),(\d*),(\d+.\d+),(\d+.\d+),(\d+.\d+),(\d+.\d+),(\d*),
# (\d+.\d+),(\d+.\d+),(\D)",
# "2014/08/10 00:20:24.274 #3765/07/27 01:00:11, M,43032,40423,397.04,40.1,21.221,
# 28.480,1026,39.9,40.4,W")
# >>> me.group()
# '2014/08/10 00:20:24.274 #3765/07/27 01:00:11, M,43032,40423,397.04,40.1,21.221,28.480,1026,39.9,40.4,W'

# SENSOR_DATA_MATCHER produces the following groups.
# The following are indices into groups() produced by SENSOR_DATA_MATCHER.
# i.e, match.groups()[INDEX]
SENSOR_GROUP_SENSOR_DATE_TIME = 8
SENSOR_GROUP_SENSOR_DATE = 9
SENSOR_GROUP_SENSOR_HOUR = 10
SENSOR_GROUP_SENSOR_MINUTE = 11
SENSOR_GROUP_SENSOR_SECOND = 12
SENSOR_GROUP_ZERO_A2D = 13
SENSOR_GROUP_CURRENT_A2D = 14
SENSOR_GROUP_CO2 = 15
SENSOR_GROUP_AVG_IRGA_TEMP = 16
SENSOR_GROUP_HUMIDITY = 17
SENSOR_GROUP_HUMIDITY_TEMP = 18
SENSOR_GROUP_STREAM_PRESSURE = 19
SENSOR_GROUP_DETECTOR_TEMP = 20
SENSOR_GROUP_SOURCE_TEMP = 21
SENSOR_GROUP_SAMPLE_TYPE = 22

INSTRUMENT_PARTICLE_AIR_MAP = [
    ('zero_a2d', SENSOR_GROUP_ZERO_A2D, int),
    ('current_a2d', SENSOR_GROUP_CURRENT_A2D, int),
    ('measured_air_co2', SENSOR_GROUP_CO2, float),
    ('avg_irga_temperature', SENSOR_GROUP_AVG_IRGA_TEMP, float),
    ('humidity', SENSOR_GROUP_HUMIDITY, float),
    ('humidity_temperature', SENSOR_GROUP_HUMIDITY_TEMP, float),
    ('gas_stream_pressure', SENSOR_GROUP_STREAM_PRESSURE, int),
    ('irga_detector_temperature', SENSOR_GROUP_DETECTOR_TEMP, float),
    ('irga_source_temperature', SENSOR_GROUP_SOURCE_TEMP, float)
]

INSTRUMENT_PARTICLE_WATER_MAP = [
    ('zero_a2d', SENSOR_GROUP_ZERO_A2D, int),
    ('current_a2d', SENSOR_GROUP_CURRENT_A2D, int),
    ('measured_water_co2', SENSOR_GROUP_CO2, float),
    ('avg_irga_temperature', SENSOR_GROUP_AVG_IRGA_TEMP, float),
    ('humidity', SENSOR_GROUP_HUMIDITY, float),
    ('humidity_temperature', SENSOR_GROUP_HUMIDITY_TEMP, float),
    ('gas_stream_pressure', SENSOR_GROUP_STREAM_PRESSURE, int),
    ('irga_detector_temperature', SENSOR_GROUP_DETECTOR_TEMP, float),
    ('irga_source_temperature', SENSOR_GROUP_SOURCE_TEMP, float)
]


class DataParticleType(BaseEnum):
    PCO2A_INSTRUMENT_AIR_PARTICLE = 'pco2a_a_dcl_instrument_air'
    PCO2A_INSTRUMENT_WATER_PARTICLE = 'pco2a_a_dcl_instrument_water'
    PCO2A_INSTRUMENT_AIR_RECOVERED_PARTICLE = 'pco2a_a_dcl_instrument_air_recovered'
    PCO2A_INSTRUMENT_WATER_RECOVERED_PARTICLE = 'pco2a_a_dcl_instrument_water_recovered'


class Pco2aADclParticleClassKey(BaseEnum):
    """
    An enum for the keys application to the pco2a_a_dcl particle classes
    """
    AIR_PARTICLE_CLASS = 'air_particle_class'
    WATER_PARTICLE_CLASS = 'water_particle_class'


class Pco2aADclInstrumentDataParticleAir(DclInstrumentDataParticle):
    """
    Class for generating the Pco2a_a_dcl instrument particles.
    """
    data_matcher = SENSOR_DATA_MATCHER_AIR

    def __init__(self, raw_data, *args, **kwargs):
        super(Pco2aADclInstrumentDataParticleAir, self).__init__(
            raw_data, INSTRUMENT_PARTICLE_AIR_MAP, *args, **kwargs)

        # instrument_timestamp is the internal_timestamp
        instrument_timestamp = self.raw_data[SENSOR_GROUP_SENSOR_DATE_TIME]
        elapsed_seconds_useconds = timestamp_yyyy_mm_dd_hh_mm_ss_to_ntp(instrument_timestamp)
        self.set_internal_timestamp(elapsed_seconds_useconds)

        # instrument clock is not accurate so, use port_timestamp as the preferred_ts
        self.contents[DataParticleKey.PREFERRED_TIMESTAMP] = DataParticleKey.PORT_TIMESTAMP


class Pco2aADclInstrumentDataParticleWater(DclInstrumentDataParticle):
    """
    Class for generating the Pco2a_a_dcl instrument particles.
    """
    data_matcher = SENSOR_DATA_MATCHER_WATER

    def __init__(self, raw_data, *args, **kwargs):
        super(Pco2aADclInstrumentDataParticleWater, self).__init__(
            raw_data, INSTRUMENT_PARTICLE_WATER_MAP, *args, **kwargs)

        # Instrument timestamp is the internal timestamp
        instrument_timestamp = self.r
reschly/cryptopals
prob1.py
Python
apache-2.0
3,439
0.018028
#!/usr/bin/env python
# Written against python 3.3.1
# Matasano Problem 1
# Convert hex to base64
# Example hex: 49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d
# Example base64: SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t

import base64
import binascii

rawToHexLUT = ['00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '0a', '0b', '0c', '0d', '0e', '0f',
               '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '1a', '1b', '1c', '1d', '1e', '1f',
               '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '2a', '2b', '2c', '2d', '2e', '2f',
               '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '3a', '3b', '3c', '3d', '3e', '3f',
               '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '4a', '4b', '4c', '4d', '4e', '4f',
               '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '5a', '5b', '5c', '5d', '5e', '5f',
               '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '6a', '6b', '6c', '6d', '6e', '6f',
               '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '7a', '7b', '7c', '7d', '7e', '7f',
               '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '8a', '8b', '8c', '8d', '8e', '8f',
               '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '9a', '9b', '9c', '9d', '9e', '9f',
               'a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'aa', 'ab', 'ac', 'ad', 'ae', 'af',
               'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'ba', 'bb', 'bc', 'bd', 'be', 'bf',
               'c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9', 'ca', 'cb', 'cc', 'cd', 'ce', 'cf',
               'd0', 'd1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9', 'da', 'db', 'dc', 'dd', 'de', 'df',
               'e0', 'e1', 'e2', 'e3', 'e4', 'e5', 'e6', 'e7', 'e8', 'e9', 'ea', 'eb', 'ec', 'ed', 'ee', 'ef',
               'f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'fa', 'fb', 'fc', 'fd', 'fe', 'ff',]

def base64toRaw(b64):
    raw = base64.b64decode(b64);
    return raw;

def rawToBase64(raw):
    b64 = base64.b64encode(raw);
    return b64;

def hexToRaw(hx):
    raw = binascii.unhexlify(hx);
    return raw;

def rawToHex(raw):
    #hx = binascii.hexlify(raw);
    hx = '';
    for r in raw:
        if type(r) != int:
            r = ord(r);
        hx += rawToHexLUT[r];
    return bytes(hx, 'UTF-8');

def base64toHex(b64):
    '''Convert Base64 string to hex string'''
    return rawToHex(base64toRaw(b64));

def hexToBase64(hx):
    '''Convert hex string to Base64'''
    return rawToBase64(hexToRaw(hx));

def test1():
    hx = b'49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d';
    b64 = b'SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t';
    if (base64toHex(b64) != hx):
        print( "hex expected: " , hx);
        print( "hex result: " , base64toHex(b64));
        return False;
    if (hexToBase64(hx) != b64):
        print( "b64 expected: ", b64);
        print( "b64 result: " , hexToBase64(hx));
        return False;
    return True;

if __name__ == "__main__":
    if (test1()):
        print("Program 1 success");
    else:
        print("Failure");
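The conversion the record above implements can be cross-checked with the standard library alone; the expected strings are the ones given in the file's own comments:

# Standard-library cross-check of the hex -> base64 conversion above.
import base64, binascii

hx = b'49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d'
b64 = base64.b64encode(binascii.unhexlify(hx))
assert b64 == b'SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t'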
biggihs/python-pptx
tests/test_presentation.py
Python
mit
8,885
0
# encoding: utf-8

"""
Test suite for pptx.presentation module.
"""

from __future__ import (
    absolute_import, division, print_function, unicode_literals
)

import pytest

from pptx.parts.coreprops import CorePropertiesPart
from pptx.parts.presentation import PresentationPart
from pptx.parts.slide import NotesMasterPart
from pptx.presentation import Presentation
from pptx.slide import SlideLayouts, SlideMaster, SlideMasters, Slides

from .unitutil.cxml import element, xml
from .unitutil.mock import class_mock, instance_mock, property_mock


class DescribePresentation(object):

    def it_knows_the_height_of_its_slides(self, sld_height_get_fixture):
        prs, expected_value = sld_height_get_fixture
        assert prs.slide_height == expected_value

    def it_can_change_the_height_of_its_slides(self, sld_height_set_fixture):
        prs, slide_height, expected_xml = sld_height_set_fixture
        prs.slide_height = slide_height
        assert prs._element.xml == expected_xml

    def it_knows_the_width_of_its_slides(self, sld_width_get_fixture):
        prs, expected_value = sld_width_get_fixture
        assert prs.slide_width == expected_value

    def it_can_change_the_width_of_its_slides(self, sld_width_set_fixture):
        prs, slide_width, expected_xml = sld_width_set_fixture
        prs.slide_width = slide_width
        assert prs._element.xml == expected_xml

    def it_knows_its_part(self, part_fixture):
        prs, prs_part_ = part_fixture
        assert prs.part is prs_part_

    def it_provides_access_to_its_core_properties(self, core_props_fixture):
        prs, core_properties_ = core_props_fixture
        assert prs.core_properties is core_properties_

    def it_provides_access_to_its_notes_master(self, notes_master_fixture):
        prs, notes_master_ = notes_master_fixture
        assert prs.notes_master is notes_master_

    def it_provides_access_to_its_slides(self, slides_fixture):
        prs, rename_slide_parts_, rIds = slides_fixture[:3]
        Slides_, slides_, expected_xml = slides_fixture[3:]
        slides = prs.slides
        rename_slide_parts_.assert_called_once_with(rIds)
        Slides_.assert_called_once_with(
            prs._element.xpath('p:sldIdLst')[0], prs
        )
        assert prs._element.xml == expected_xml
        assert slides is slides_

    def it_provides_access_to_its_slide_layouts(self, layouts_fixture):
        prs, slide_layouts_ = layouts_fixture
        assert prs.slide_layouts is slide_layouts_

    def it_provides_access_to_its_slide_master(self, master_fixture):
        prs, getitem_, slide_master_ = master_fixture
        slide_master = prs.slide_master
        getitem_.assert_called_once_with(0)
        assert slide_master is slide_master_

    def it_provides_access_to_its_slide_masters(self, masters_fixture):
        prs, SlideMasters_, slide_masters_, expected_xml = masters_fixture
        slide_masters = prs.slide_masters
        SlideMasters_.assert_called_once_with(
            prs._element.xpath('p:sldMasterIdLst')[0], prs
        )
        assert slide_masters is slide_masters_
        assert prs._element.xml == expected_xml

    def it_can_save_the_presentation_to_a_file(self, save_fixture):
        prs, file_, prs_part_ = save_fixture
        prs.save(file_)
        prs_part_.save.assert_called_once_with(file_)

    # fixtures -------------------------------------------------------

    @pytest.fixture
    def core_props_fixture(self, prs_part_, core_properties_):
        prs = Presentation(None, prs_part_)
        prs_part_.core_properties = core_properties_
        return prs, core_properties_

    @pytest.fixture
    def layouts_fixture(self, masters_prop_, slide_layouts_):
        prs = Presentation(None, None)
        masters_prop_.return_value.__getitem__.return_value.slide_layouts = (
            slide_layouts_
        )
        return prs, slide_layouts_

    @pytest.fixture
    def master_fixture(self, masters_prop_, slide_master_):
        prs = Presentation(None, None)
        getitem_ = masters_prop_.return_value.__getitem__
        getitem_.return_value = slide_master_
        return prs, getitem_, slide_master_

    @pytest.fixture(params=[
        ('p:presentation', 'p:presentation/p:sldMasterIdLst'),
        ('p:presentation/p:sldMasterIdLst',
         'p:presentation/p:sldMasterIdLst'),
    ])
    def masters_fixture(self, request, SlideMasters_, slide_masters_):
        prs_cxml, expected_cxml = request.param
        prs = Presentation(element(prs_cxml), None)
        expected_xml = xml(expected_cxml)
        return prs, SlideMasters_, slide_masters_, expected_xml

    @pytest.fixture
    def notes_master_fixture(self, prs_part_, notes_master_):
        prs = Presentation(None, prs_part_)
        prs_part_.notes_master = notes_master_
        return prs, notes_master_

    @pytest.fixture
    def part_fixture(self, prs_part_):
        prs = Presentation(None, prs_part_)
        return prs, prs_part_

    @pytest.fixture
    def save_fixture(self, prs_part_):
        prs = Presentation(None, prs_part_)
        file_ = 'foobar.docx'
        return prs, file_, prs_part_

    @pytest.fixture(params=[
        ('p:presentation', None),
        ('p:presentation/p:sldSz{cy=42}', 42),
    ])
    def sld_height_get_fixture(self, request):
        prs_cxml, expected_value = request.param
        prs = Presentation(element(prs_cxml), None)
        return prs, expected_value

    @pytest.fixture(params=[
        ('p:presentation', 'p:presentation/p:sldSz{cy=914400}'),
        ('p:presentation/p:sldSz{cy=424242}',
         'p:presentation/p:sldSz{cy=914400}'),
    ])
    def sld_height_set_fixture(self, request):
        prs_cxml, expected_cxml = request.param
        prs = Presentation(element(prs_cxml), None)
        expected_xml = xml(expected_cxml)
        return prs, 914400, expected_xml

    @pytest.fixture(params=[
        ('p:presentation', None),
        ('p:presentation/p:sldSz{cx=42}', 42),
    ])
    def sld_width_get_fixture(self, request):
        prs_cxml, expected_value = request.param
        prs = Presentation(element(prs_cxml), None)
        return prs, expected_value

    @pytest.fixture(params=[
        ('p:presentation', 'p:presentation/p:sldSz{cx=914400}'),
        ('p:presentation/p:sldSz{cx=424242}',
         'p:presentation/p:sldSz{cx=914400}'),
    ])
    def sld_width_set_fixture(self, request):
        prs_cxml, expected_cxml = request.param
        prs = Presentation(element(prs_cxml), None)
        expected_xml = xml(expected_cxml)
        return prs, 914400, expected_xml

    @pytest.fixture(params=[
        ('p:presentation', [], 'p:presentation/p:sldIdLst'),
        ('p:presentation/p:sldIdLst/p:sldId{r:id=a}', ['a'],
         'p:presentation/p:sldIdLst/p:sldId{r:id=a}'),
        ('p:presentation/p:sldIdLst/(p:sldId{r:id=a},p:sldId{r:id=b})',
         ['a', 'b'],
         'p:presentation/p:sldIdLst/(p:sldId{r:id=a},p:sldId{r:id=b})'),
    ])
    def slides_fixture(self, request, part_prop_, Slides_, slides_):
        prs_cxml, rIds, expected_cxml = request.param
        prs = Presentation(element(prs_cxml), None)
        rename_slide_parts_ = part_prop_.return_value.rename_slide_parts
        expected_xml = xml(expected_cxml)
        return prs, rename_slide_parts_, rIds, Slides_, slides_, expected_xml

    # fixture components ---------------------------------------------

    @pytest.fixture
    def core_properties_(self, request):
        return instance_mock(request, CorePropertiesPart)

    @pytest.fixture
    def masters_prop_(self, request):
        return property_mock(request, Presentation, 'slide_masters')

    @pytest.fixture
    def notes_master_(self, request):
        return instance_mock(request, NotesMasterPart)

    @pytest.fixture
    def part_prop_(self, request):
        return property_mock(request, Presentation, 'part')

    @pytest.fixture
    def prs_part_(self, request):
        return instance_mock(request, PresentationPart)

    @pytest.fixture
    def slide_layouts_(self, request):
        return instance_mock(request, SlideL
quickresolve/accel.ai
flask-aws/lib/python2.7/site-packages/ebcli/lib/elbv2.py
Python
mit
2,545
0.005108
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

from cement.utils.misc import minimal_logger

from ..lib import aws
from ..objects.exceptions import ServiceError, NotFoundError
from ..resources.strings import responses

LOG = minimal_logger(__name__)


def _make_api_call(operation_name, **operation_options):
    return aws.make_api_call('elbv2', operation_name, **operation_options)


def get_instance_healths_from_target_groups(target_group_arns):
    results = []
    instance_healths = {}
    for arn in target_group_arns:
        try:
            results.append(
                {
                    'TargetGroupArn': arn,
                    'Result': _make_api_call('describe_target_health',
                                             TargetGroupArn=arn)
                }
            )
        except ServiceError as e:
            if e.message == responses['loadbalancer.targetgroup.notfound'].replace('{tgarn}', arn):
                raise NotFoundError(e)

    for result in results:
        for description in result['Result']['TargetHealthDescriptions']:
            instance_id = description['Target']['Id']
            if instance_id not in instance_healths:
                instance_healths[instance_id] = []
            instance_healths[instance_id].append({
                'TargetGroupArn': result['TargetGroupArn'],
                'State': description['TargetHealth'].get('State', ''),
                'Description': description['TargetHealth'].get('Description', ''),
                'Reason': description['TargetHealth'].get('Reason', '')
            })

    return instance_healths  # map of instance_id => [target group health descriptions]


def get_target_group_healths(target_group_arns):
    results = {}
    for arn in target_group_arns:
        try:
            results[arn] = _make_api_call('describe_target_health',
                                          TargetGroupArn=arn)
        except ServiceError as e:
            if e.code == 'TargetGroupNotFound':
                raise NotFoundError(e)
            else:
                raise e
    return results  # map of target_group_arn => [target group health descriptions]
gusevfe/snakemake
tests/test14/qsub.py
Python
mit
319
0
#!/usr/bin/env python3
import sys
import os
import random

from snakemake.utils import read_job_properties

jobscript = sys.argv[1]
job_properties = read_job_properties(jobscript)

with open("qsub.log", "w") as log:
    print(job_properties, file=log)

print(random.randint(1, 100))
os.system("sh {}".format(jobscript))
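For context, a script like this stands in for a cluster submit command in Snakemake's test suite; the wiring below is a hedged sketch of the usual invocation, not taken from the repository:

# Hedged sketch: Snakemake appends the generated jobscript path as the last
# argument of the submit command, so a test run would look roughly like
#   snakemake --cluster ./qsub.py
# The script must emit a job id on stdout (here: a random int) and execute
# the jobscript itself.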
HWDexperte/ts3observer
ts3observer.py
Python
mit
2,869
0.004183
#!/usr/bin/env python

import os, argparse, logging

from ts3observer.cli import CommandLineInterface as Cli
from ts3observer.gui import GraphicalUserInterface as Gui
from ts3observer.utils import path
from ts3observer.exc import CriticalException, ShutDownException, print_traceback, print_buginfo


class Dispatcher(object):
    ''' Dispatch the task to the right module '''

    def __init__(self):
        self._parse_arguments()

    def _parse_arguments(self):
        ''' Parse the arguments from commandline '''
        parser = argparse.ArgumentParser()
        sub_parser = parser.add_subparsers(dest='task')

        parser.add_argument('-v', '--verbose', action='store_true', help='Increase verbosity for debugging purpose')
        parser.add_argument('-q', '--quiet', action='store_true', help='Only show messaged if there is an critical Exception')
        parser.add_argument('-g', '--graphical', action='store_true', help='Run the ts3observer as Gui')
        parser.add_argument('-d', '--dev', action='store_true', help='Run in developer modus')

        utils_parser = sub_parser.add_parser('utils', help='Need some help?')
        utils_sub_parser = utils_parser.add_subparsers(dest='utils')

        utils_grouplist = utils_sub_parser.add_parser('servergrouplist', help='List all servergroups')
        utils_channellist = utils_sub_parser.add_parser('channellist', help='List all channels')
        utils_clientlist = utils_sub_parser.add_parser('clientlist', help='List all connected clients')
        utils_clientlist.add_argument('-a', '--advanced', action='store_true', help='Get more information about the connected clients')

        run_parser = sub_parser.add_parser('run', help='Run the ts3observer')
        version_parser = sub_parser.add_parser('version', help='Shows the ts3observer version')

        ts3o.args = parser.parse_args()

    def dispatch(self):
        ''' Dispatch the task to the right module '''
        if ts3o.args.graphical:
            getattr(Gui(), ts3o.args.task)()
        else:
            getattr(Cli(), ts3o.args.task)()


class Ts3o(object):
    ''' Define a holder class '''
    pass


def _setup():
    ''' Define some globals for ts3observer '''
    __builtins__.ts3o = Ts3o()
    ts3o.base_path = os.path.abspath(os.path.dirname(__file__))


def _run():
    try:
        _setup()
        Dispatcher().dispatch()
    except ShutDownException as e:
        logging.info('Good Bye!')
    except CriticalException as e:
        if ts3o.args.verbose:
            print_traceback()
        logging.critical('{}: {}'.format(e.__class__.__name__, str(e)))
    except Exception as e:
        print_traceback()
        logging.critical('{}: {}'.format(e.__class__.__name__, str(e)))
        print_buginfo()


if __name__ == '__main__':
    _run()
else:
    raise Exception('Please, run this script directly!')
billingstack/python-fakturo
fakturo/core/client.py
Python
apache-2.0
2,138
0.000468
import logging

import requests

from fakturo.core import exceptions, utils

LOG = logging.getLogger(__name__)


class BaseClient(object):
    def __init__(self, url=None):
        # str.rstrip returns a new string, so the result must be kept
        url = url.rstrip('/')
        self.url = url
        self.requests = self.get_requests()

    def get_requests(self, headers={}, args_hooks=[], pre_request_hooks=[]):
        if not 'Content-Type' in headers:
            headers['Content-Type'] = 'application/json'

        pre_request_hooks = pre_request_hooks + [utils.log_request]

        session = requests.Session()
        session.hooks = dict(
            args=args_hooks,
            pre_request=pre_request_hooks)
        session.headers.update(headers)
        return session

    def wrap_api_call(self, function, path, status_code=200, *args, **kw):
        path = path.lstrip('/') if path else ''
        url = self.url + '/' + path

        LOG.debug('Wrapping request to %s' % url)

        wrapper = kw.get('wrapper', None)

        # NOTE: If we're passed a wrapper function by the caller, pass the
        # requests function to it along with path and other args...
        if wrapper and hasattr(wrapper, '__call__'):
            return wrapper(function, url, *args, **kw)

        response = function(url, *args, **kw)

        # NOTE: Make a function that can extract errors based on content type?
        if response.status_code != status_code:
            error = None
            if response.json:
                error = response.json.get('error', None)
            if not error:
                error = 'Remote error occured. Response Body:\n%s' % \
                    response.content
            raise exceptions.RemoteError(response.status_code, error)
        return response

    def get(self, *args, **kw):
        return self.wrap_api_call(self.requests.get, *args, **kw)

    def post(self, *args, **kw):
        return self.wrap_api_call(self.requests.post, *args, **kw)

    def put(self, *args, **kw):
        return self.wrap_api_call(self.requests.put, *args, **kw)

    def delete(self, *args, **kw):
        return self.wrap_api_call(self.requests.delete, *args, **kw)
caktus/rapidsms
rapidsms/contrib/locations/utils.py
Python
bsd-3-clause
1,402
0
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4

from rapidsms.utils.modules import try_import
from .forms import LocationForm
from .models import Location


def get_model(name):
    """
    """
    for type in Location.subclasses():
        if type._meta.module_name == name:
            return type

    raise StandardError("There is no Location subclass named '%s'" % name)


def form_for_model(model):
    """
    Return the Form which should be used to add/edit ``model`` in the
    WebUI, by importing the class named ``"%sForm" % model.__name__``
    from the sibling ``forms`` module. For example::

        app1.models.Alpha     -> myapp.forms.SchoolForm
        app2.models.beta.Beta -> app2.forms.beta.BetaForm

    If no such form is defined, an appropriately-patched copy of the
    rapidsms.contrib.locations.forms.LocationForm form is returned.
    """

    parts = model.__module__.split(".")
    parts[parts.index("models")] = "forms"
    module_name = ".".join(parts)
    form_name = model.__name__ + "Form"

    module = try_import(module_name)
    if module is not None:
        form = getattr(module, form_name, None)
        if form is not None:
            return form

    meta_dict = LocationForm.Meta.__dict__
    meta_dict["model"] = model
    return type(
        form_name, (LocationForm,), {
            "Meta": type("Meta", (), meta_dict)
        }
    )
SEL-Columbia/commcare-hq
corehq/apps/dashboard/views.py
Python
bsd-3-clause
1,502
0.001997
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_noop
from corehq import toggles
from corehq.apps.domain.views import DomainViewMixin, LoginAndDomainMixin
from corehq.apps.hqwebapp.views import BasePageView
from corehq.apps.style.decorators import preview_boostrap3


@toggles.DASHBOARD_PREVIEW.required_decorator()
def dashboard_default(request, domain):
    return HttpResponseRedirect(reverse(NewUserDashboardView.urlname,
                                        args=[domain]))


class BaseDashboardView(LoginAndDomainMixin, BasePageView, DomainViewMixin):

    @method_decorator(preview_boostrap3())
    @method_decorator(toggles.DASHBOARD_PREVIEW.required_decorator())
    def dispatch(self, request, *args, **kwargs):
        return super(BaseDashboardView, self).dispatch(request, *args, **kwargs)

    @property
    def main_context(self):
        context = super(BaseDashboardView, self).main_context
        context.update({
            'domain': self.domain,
        })
        return context

    @property
    def page_url(self):
        return reverse(self.urlname, args=[self.domain])


class NewUserDashboardView(BaseDashboardView):
    urlname = 'dashboard_new_user'
    page_title = ugettext_noop("HQ Dashboard")
    template_name = 'dashboard/dashboard_new_user.html'

    @property
    def page_context(self):
        return {
        }
activityworkshop/Murmeli
test/test_torclient.py
Python
gpl-2.0
3,296
0.002124
'''Module for testing the tor client'''

import unittest
import os
import time
import socks
from murmeli import system
from murmeli.torclient import TorClient
from murmeli.message import ContactRequestMessage


class FakeMessageHandler(system.Component):
    '''Handler for receiving messages from Tor'''
    def __init__(self, sys):
        system.Component.__init__(self, sys, system.System.COMPNAME_MSG_HANDLER)
        self.messages = []

    def receive(self, msg):
        '''Receive an incoming message'''
        if msg:
            self.messages.append(msg)


class TorTest(unittest.TestCase):
    '''Tests for the tor communication'''

    def test_sending(self):
        '''Test sending non-valid and valid data to the listener'''
        sys = system.System()
        tordir = os.path.join("test", "outputdata", "tor")
        os.makedirs(tordir, exist_ok=True)
        tor_client = TorClient(sys, tordir)
        sys.add_component(tor_client)
        self.assertTrue(tor_client.started, "Tor started")
        time.sleep(5)
        # invalid data
        torid = tor_client.get_own_torid()
        print("Torid:", torid)
        self.assertTrue(torid, "Tor id obtained")
        # Send a message
        success = self.send_message(torid, "abcdef".encode("utf-8"))
        self.assertTrue(self.send_message(torid, "murmeli".encode("utf-8")), "Magic sent")
        time.sleep(5)
        # Add receiver to handle the messages
        receiver = FakeMessageHandler(sys)
        sys.add_component(receiver)
        self.assertFalse(receiver.messages, "no messages received yet")
        # contact request
        req = ContactRequestMessage()
        sender_name = "Worzel Gummidge"
        sender_msg = "Watch out for the volcano, it's radioactive!"
        req.set_field(req.FIELD_SENDER_NAME, sender_name)
        req.set_field(req.FIELD_MESSAGE, sender_msg)
        unenc_output = req.create_output(encrypter=None)
        torid = tor_client.get_own_torid()
        self.assertTrue(self.send_message(torid, unenc_output), "Real message sent")
        time.sleep(5)
        # Now check it has been received
        self.assertEqual(len(receiver.messages), 1, "1 message received")
        received = receiver.messages.pop()
        print("Got message:", received)
        self.assertEqual(received.get_field(req.FIELD_SENDER_NAME), sender_name, "name match")
        self.assertEqual(received.get_field(req.FIELD_MESSAGE), sender_msg, "msg match")
        # Finished
        sys.stop()
        self.assertFalse(tor_client.started, "Tor stopped")
        time.sleep(5)

    def send_message(self, recipient, message):
        '''Send a message to the given recipient'''
        # Try a few times because the service might take a few seconds to become available
        for _ in range(10):
            try:
                socket = socks.socksocket()
                socket.setproxy(socks.PROXY_TYPE_SOCKS4, "localhost", 11109)
                socket.connect((recipient + ".onion", 11009))
                numsent = socket.send(message)
                socket.close()
                return numsent == len(message)
            except Exception as e:
                print("Woah, that threw something:", e)
                time.sleep(8)


if __name__ == "__main__":
    unittest.main()
epeios-q37/epeios
tools/xdhq/wrappers/PYH/XDHqXML.py
Python
agpl-3.0
1,921
0.023425
""" MIT License Copyright (c) 2018 Claude SIMON (https://q37.info/s/rmnmqd49) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ class XML: def _write(self,value): self._xml += str(value) + "\0" def __init__(self,rootTag): self._xml = "" self._write("dummy") self._write(rootTag) def push_tag(self,tag): self._xml += ">" self._write(tag)
pushTag = push_tag def pop_tag(self): self._xml += "<" popTag = pop_tag def put_attribute(self,name,value): self._xml += "A" self._write(name)
self._write(str(value)) putAttribute = put_attribute def put_value(self,value): self._xml += "V" self._write(str(value)) putValue = put_value def put_tag_and_value(self,tag,value): self.pushTag(tag) self.putValue(value) self.popTag() putTagAndValue = put_tag_and_value def to_string(self): return self._xml toString = to_string
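A short illustration of the builder API in the record above; the method names come from the class itself, and the serialized result is the class's NUL-separated opcode stream rather than textual XML:

# Call-pattern sketch for the XML builder above.
xml = XML('Root')
xml.push_tag('Item')
xml.put_attribute('id', 1)
xml.put_value('hello')
xml.pop_tag()
xml.put_tag_and_value('Count', 2)
serialized = xml.to_string()  # NUL-separated opcode stream consumed by XDHq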
chronossc/notes-app
notes/migrations/0001_initial.py
Python
mit
993
0.002014
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-23 02:11
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Note',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('slug', models.SlugField(unique=True)),
                ('note', models.TextField()),
                ('favorited', models.BooleanField(default=False)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notes', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
jakobluettgau/feo
tapesim/components/FileManager.py
Python
gpl-3.0
2,010
0.004478
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Jakob Luettgau
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.


class FileManager(object):
    """Manages the files that exist in the system."""

    def __init__(self, simulation=None):
        print('FileManager instance.')
        self.simulation = simulation
        self.files = {}
        pass

    def lookup(self, name):
        """Checks if file exists"""
        if name in self.files:
            return self.files[name]
        else:
            return False

    def scan(self, entry):
        """Scan the data structure for a entry"""
        pass

    def update(self, name, tape=None, size=None, pos=0):
        # create entry if not existent
        if not (name in self.files):
            self.files[name] = {}

        # set fields individually
        if tape != None:
            self.files[name]['tape'] = tape
        if size != None:
            self.files[name]['size'] = size
        self.files[name]['pos'] = pos

        return self.files[name]

    def dump(self):
        """Make snapshot of the file system state."""
        print("")
        self.simulation.log("Dump " + str(self) + " state.")
        for i, item in enumerate(self.files):
            self.simulation.log("%05d" % i, str(item), str(self.files[item]))
        self.simulation.log(self.simulation.persistency.path)
thormuller/yescoin2
share/qt/extract_strings_qt.py
Python
mit
1,873
0.005873
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys

OUT_CPP="qt/yescoinstrings.cpp"
EMPTY=['""']

def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid,msgstr) tuples.
    """
    messages = []
    msgid = []
    msgstr = []
    in_msgid = False
    in_msgstr = False

    for line in text.split('\n'):
        line = line.rstrip('\r')
        if line.startswith('msgid '):
            if in_msgstr:
                messages.append((msgid, msgstr))
                in_msgstr = False
            # message start
            in_msgid = True

            msgid = [line[6:]]
        elif line.startswith('msgstr '):
            in_msgid = False
            in_msgstr = True
            msgstr = [line[7:]]
        elif line.startswith('"'):
            if in_msgid:
                msgid.append(line)
            if in_msgstr:
                msgstr.append(line)

    if in_msgstr:
        messages.append((msgid, msgstr))

    return messages

files = sys.argv[1:]

# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()

messages = parse_po(out)

f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *yescoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("yescoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
cuttlefishh/papers
vibrio-fischeri-transcriptomics/code/python/totalannotation_v1-1.py
Python
mit
12,403
0.048859
#!/usr/bin/env python

import urllib
import sys
import os

##### totalannotation.py by DJ Barshis
##### This script takes an input fasta file of sequence names and sequences, and blast results files of blasts against
##### nr (parsed .txt with 1 hit per line) and swissprot and trembl (in -outfmt 7) uniprot databases
##### and downloads the corresponding uniprot flat files from the www.uniprot.org web server,
##### extracts particular annotation information from the nr blast and each uniprot flat file and combines it into a meta-annotation table.
##### you will need to create a 2-line .txt file that has the names of the particular columns you would like to extract from the
##### nr parsed blast file separated by tabs (these files can be large so I suggest extracting the header using head or less in terminal)
##### the second line consists of the "bad words" you want to skip over in your nr results separated by tabs.
##### I usually use "predicted PREDICTED hypothetical unknown" or some combination thereof.

# usage is totalannotation.py YOUR_contigs.fasta BLASTx2nr.txt nrcolumnheadersandbadwords.txt BLASTx2Sprot.txt BLASTx2TrEMBL.txt evaluethreshold directoryforflatfiles(no slashes) outtablename.txt

#this is for setting how the script sorts your contigs into order
#change the word to 'text' for a text-based sorting or 'coral' for a
#palumbi-lab coral-specific numerical sorting
textorcoralsort = 'text'

#innames, inseqs read_fasta_lists(sys.argv[1])
#sys.argv[2] = BLASTx2nr.txt
#sys.argv[3] = thingsfornr.txt
#uniprotIDs read_uniprot(sys.argv[4], sys.argv[5])
evalue=float(sys.argv[6])
directory=sys.argv[7] #name only, no /'s
#o=open(str(sys.argv[8]), 'w') # New data table file name

#####This reads in a fasta file and extracts the sequence names into a dictionary as the keys
def read_fasta_dict(file):
	fin = open(file, 'r')
	filelines=fin.readlines()
	filelines.append('EOF')
	count=0
	names={}
	seqs=[]
	numseqs=0
	for line in filelines:
		if line=='EOF':
			names[cols[0]]='%i' %(len(seq))
		line=line.strip()
		if line and line[0] == '>':		#indicates the name of the sequence
			if count>=1:
				names[cols[0]]='%i' %(len(seq))
			count+=1
			line=line[1:]
			cols=line.split(' ')
			seq=''
		else:
			seq +=line
	fin.close()
	return names

innames=read_fasta_dict(sys.argv[1])
print 'Read in fasta of %i sequences: ...' %(len(innames.keys()))

####This function reads in a parsed (every hit on one line) nr blast file and extracts certain columns and returns a dictionary
def nr_dict(file, colstoextract):
	fin = open(file, 'r') # open input file
	cols2extract = open(colstoextract, 'r')
	d={}
	headers=[]
	contig=''
	linenum=0
	goodnrhits=0
	for line in fin:
		linenum+=1
		line=line.rstrip()
		cols=line.split('\t')
		if linenum == 1:
			headers=line		#Used to copy header to new files
			# this loop is for extracting the column indexes for the column names specified on the first line of the stufffornr.txt file
			extractlinecount=0
			for aline in cols2extract:
				extractlinecount+=1
				if extractlinecount==1:
					aline=aline.rstrip()
					words=aline.split('\t')
					hitdescription=cols.index(words[0])
					nrEval=cols.index(words[1])
		if linenum >1:
			cols[0]=cols[0].split(' ')[0]
			if cols[0] == contig:
#				print line
				d[cols[0]].append('%s\t%s' %(cols[hitdescription],cols[nrEval]))
			else:
				if float(cols[nrEval]) <= evalue:
					goodnrhits+=1
					contig = cols[0]
					numhit = 1
					d[cols[0]]=d.get(cols[0],[])
					d[cols[0]].append('%s\t%s' %(cols[hitdescription],cols[nrEval]))
	fin.close()
	cols2extract.close()
	return headers, d, goodnrhits

headers, d, goodnrhits=nr_dict(sys.argv[2], sys.argv[3])
print "Read in nr blast..."
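# --- Editor's sketch (not part of the original script): an example of the
# 2-line column/badword file expected as sys.argv[3]. Line 1 names the two
# columns to pull from the parsed nr blast file, line 2 lists the bad words;
# both are tab-separated. The column names here are illustrative and must
# exactly match headers in your parsed nr blast file:
#
#     hit_description	evalue
#     predicted	PREDICTED	hypothetical	unknown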
print '%s%i' %('Number of good nr matches: ',goodnrhits)
print '%s%i' %('Number not matched in nr: ',len(innames.keys())-goodnrhits)
print "Searching for badwords..."

######This function parses the nr dictionary for hits that do not contain badwords (e.g. 'Predicted', 'hypothetical', etc.)
def parse_badwords(value, badwords):
	onlybad=0
	madegood=0
	badhits=[]
	goodhits=[]
	tophit=value[0]
	for each in value:
		numbadhits=0
		for item in badwords:
			if item in each:
				numbadhits+=1
		if numbadhits >=1:
			badhits.append(each)
		if numbadhits == 0:
			goodhits.append(each)
	if len(goodhits)==0:
		onlybad +=1
	if len(goodhits)>=1:
		madegood +=1
	goodhits+=badhits
	return tophit, goodhits, onlybad, madegood

badwordlist=[]
#reading in a list of badwords from stufffornr.txt
badwordfile=open(sys.argv[3],'r')
badwordline=0
for line in badwordfile:
	badwordline+=1
	if badwordline==2:
		line=line.rstrip()
		badwordlist=line.split('\t')

onlybadnrs=0
madegoodnrs=0
####this step loops through the entries in your contig dictionary
####and calls the badword parser for each entry that has a match in the nr dictionary and returns the top hit and the top non-badword hit (if there is one)
for key,value in innames.items():
	if d.has_key(key):
		tophit, goodhits, onlybad, madegood= parse_badwords(d[key], badwordlist)
		innames[key]='%s\t%s\t%s' %(innames[key],tophit, goodhits[0])
		onlybadnrs+=onlybad
		madegoodnrs+=madegood
	else:
		innames[key]+='\t%s\t%s\t%s\t%s' %('No_sig_nr_hit','No_sig_nr_hit','No_sig_nr_hit','No_sig_nr_hit')
print '%s%i' %('Number of nr hits with only a bad word hit: ', onlybadnrs)
print '%s%i' %('Number of nr hits with a good word hit: ', madegoodnrs)

#######This function reads in the swissprot and trembl outputs and returns
#######a dictionary that contains the top uniprot ID from swissprot (if available) or trembl (if no swissprot match was found)
def read_uniprot(sprotfile,tremblfile):
	queryname=''
	uniprotIDs={}
	uniqueprotIDs={}
	sprotmatch=0
	tremblpeats=0
	tremblmatch=0
	sprot = open(sprotfile,'r')
	trembl = open(tremblfile,'r')
	for line in sprot:
		line=line.rstrip()
		if line[0] == '#':
			continue
		else:
			cols=line.split('\t')
			if cols[0] == queryname:
				continue
			else:
#				if float(cols[10]) <= evalue and cols[1].split('|')[2].split('_')[1] != 'NEMVE': #for parsing based on threshold value and excluding hits to Nematostella
				if float(cols[10]) <= evalue: #for parsing based on threshold value only
					ID=cols[1].split('|')
					uniprotIDs[cols[0]]=uniprotIDs.get(cols[0],[])
					uniprotIDs[cols[0]].append(ID[1])
					if innames.has_key(cols[0]):
						sprotmatch+=1
						innames[cols[0]]+='\t%s\t%s\t%s' %(ID[1],cols[2],cols[10])
						queryname=cols[0]
					if uniqueprotIDs.has_key(ID[1]):
						continue
					else:
						uniqueprotIDs[uniprotIDs[cols[0]][0]]=''
	print 'Read in swissprot blast ...'
print '%s%i' %('Number of good swissprot matches: ', sprotmatch) for line in trembl: line=line.rstrip() if line[0] == '#': continue else: cols=line.split('\t') if cols[0] == queryname: continue else: # if float(cols[10]) <= evalue and cols[1].split('|')[2].split('_')[1] != 'NEMVE': #for parsing based on threshold value if float(cols[10]) <= evalue: #for parsing based on threshold value ID=cols[1].split('|') if uniprotIDs.has_key(cols[0]): uniprotIDs[cols[0]].append(ID[1]) queryname=cols[0] tremblpeats+=1 else: uniprotIDs[cols[0]]=uniprotIDs.get(cols[0],[]) uniprotIDs[cols[0]].append(ID[1]) if innames.has_key(cols[0]): innames[cols[0]]+='\t%s\t%s\t%s' %(ID[1],cols[2],cols[10]) queryname=cols[0] tremblmatch+=1 if uniqueprotIDs.has_key(uniprotIDs[cols[0]][0]): continue else: uniqueprotIDs[uniprotIDs[cols[0]][0]]='' print 'Read in TrEMBL blast ...' print '%s%i'%('Number of repeat matches from TrEMBL: ', tremblpeats) print '%s%i'%('Number of additional good matches from TrEMBL: ', tremblmatch) print '%s%i' %('flatfilesneeded: ',len(uniqueprotIDs.keys())) return uniprotIDs, u
niqueprotIDs #this line calls the uniprot reading function uniprotIDs, uniquesforflats=read_uniprot(sys.argv[4], sys.argv[5]) print 'downloading flat files ...' #this loop downloads all the uniprot flat files for
the list of unique uniprotIDs that was parsed from the blast results for key, value in uniquesforflats.items(): if os.pa
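# --- Editor's note: the record above is truncated mid-statement ("if os.pa")
# in the source dump and is left as-is. For orientation only, a hedged sketch
# of what a flat-file fetch loop of this vintage typically looks like; the
# URL pattern and the skip-if-cached check are assumptions, not the author's code:
#
#     for key in uniquesforflats:
#         target = os.path.join(directory, key + '.txt')
#         if not os.path.exists(target):  # skip flat files already on disk
#             urllib.urlretrieve('http://www.uniprot.org/uniprot/%s.txt' % key, target)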
Ircam-Web/mezzanine-organization
organization/projects/migrations/0088_auto_20190703_1035.py
Python
agpl-3.0
1,401
0.001428
# -*- coding: utf-8 -*- # Generated by Django 1.10.8 on 2019-07-03 08:35 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('organization_projects', '0087_auto_20190619_2052'), ] operations = [ migrations.AlterModelOptions( name='project', options={'ordering': ['title'], 'permissions': (('user_add', 'Mezzo - User - User can add its own content'), ('user_edit', 'Mezzo - User - User can edit its own content'), ('user_delete', 'Mezzo - User - User can delete its own content'), ('team_add', 'Mezzo - User - Team can add its own content'), ('team_edit', "Mezzo - Team - User can edit his team's content"), ('team_delete', "Mezzo - Team - User can delete his team's content")), 'verbose_name': 'project', 'verbose_name_plural': 'project
s'}, ), migrations.AlterModelOptions( name='projectpage', options={'permissions': (('user_add', 'Mezzo - User - User can add its own content'),
('user_edit', 'Mezzo - User - User can edit its own content'), ('user_delete', 'Mezzo - User - User can delete its own content'), ('team_add', 'Mezzo - User - Team can add its own content'), ('team_edit', "Mezzo - Team - User can edit his team's content"), ('team_delete', "Mezzo - Team - User can delete his team's content"))}, ), ]