text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34)
---|---|---|---|---|---|---
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
# Generated on 2013-09-03 13:59:53.459117 by rmistry using
# create_page_set.py.
_TOP_10000_ALEXA_FILE = os.path.join(__location__, 'alexa1-10000-urls.json')
class Alexa1To10000Page(page_module.Page):
def __init__(self, url, page_set):
super(Alexa1To10000Page, self).__init__(
url=url, page_set=page_set,
shared_page_state_class=shared_page_state.SharedDesktopPageState)
def RunPageInteractions(self, action_runner):
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage()
class Alexa1To10000PageSet(story.StorySet):
""" Top 1-10000 Alexa global.
Generated on 2013-09-03 13:59:53.459117 by rmistry using
create_page_set.py.
"""
def __init__(self):
super(Alexa1To10000PageSet, self).__init__()
with open(_TOP_10000_ALEXA_FILE) as f:
urls_list = json.load(f)
for url in urls_list:
self.AddStory(Alexa1To10000Page(url, self))
| Chilledheart/chromium | tools/perf/page_sets/alexa1-10000.py | Python | bsd-3-clause | 1,340 | 0.005224 |
from django.core.cache import cache as cache_impl
from django.utils.encoding import smart_str
import hashlib
class Hashcache(object):
"""
Wrapper for django.core.cache.cache that hashes the keys to avoid key
length errors. Maybe eventually it will do other cool things.
You can optionally pass your own cache module to the initializer as long
as it conforms to the get/set interface of the django cache module.
>>> from yolango.util.hashcache import Hashcache
>>> cache = Hashcache()
>>> cache.set('my_key', 'hello, world!', 30)
>>> cache.get('my_key')
'hello, world!'
"""
def __init__(self, cache = cache_impl):
assert cache
self.cache = cache
def get(self, key):
"""Hash the key and retrieve it from the cache"""
return self.cache.get(self._hashed(key))
def set(self, key, *args):
"""Hash the key and set it in the cache"""
return self.cache.set(self._hashed(key), *args)
def _hashed(self, key):
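        # md5 of the byte-encoded key yields a fixed 32-character hex string,
        # well under typical backend key-length limits (e.g. memcached's 250 chars)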
return hashlib.new("md5", smart_str(key)).hexdigest()
| yola/hashcache | hashcache/hashcache.py | Python | mit | 1,100 | 0.008182 |
default_app_config = '%s.apps.AppConfig' % __name__
| ic-labs/glamkit-sponsors | sponsors/__init__.py | Python | mit | 52 | 0 |
attr = 'parent child three'
| Orav/kbengine | kbe/src/lib/python/Lib/test/test_importlib/namespace_pkgs/project3/parent/child/three.py | Python | lgpl-3.0 | 29 | 0 |
"""
Two diode model equations.
"""
import numpy as np
from pvmismatch.contrib.gen_coeffs import diode
def fdidv(isat1, isat2, rs, rsh, ic, vc, vt):
"""
Derivative of IV curve and its derivatives w.r.t. Isat1, Isat2, Rs, Rsh, Ic,
Vc and Vt.
:param isat1: diode 1 saturation current [A]
:param isat2: diode 2 saturation current [A]
:param rs: series resistance [ohms]
:param rsh: shunt resistance [ohms]
:param ic: cell current [A]
:param vc: cell voltage [V]
:param vt: thermal voltage (kB * Tc / qe = 26[mV] at Tc=298K) [V]
:return: derivative of IV curve and its derivatives
"""
vd, _ = diode.fvd(vc, ic, rs) # vd = vc + ic * rs
vstar = vd / vt
rstar = rsh / rs
exp_vstar, exp_vstar_2 = np.exp(vstar), np.exp(0.5 * vstar)
v_sat1_sh, v_sat2_sh = isat1 * rsh, isat2 * rsh
v_sat1_sh_exp_vstar = v_sat1_sh * exp_vstar
v_sat2_sh_exp_vstar_2 = 0.5 * v_sat2_sh * exp_vstar_2
vsum = v_sat1_sh_exp_vstar + v_sat2_sh_exp_vstar_2 + vt
vsum_rstar = vsum + vt * rstar
combiterm1 = v_sat1_sh_exp_vstar + 0.5*v_sat2_sh_exp_vstar_2
combiterm2 = isat1*exp_vstar + 0.5*isat2*exp_vstar_2
combiterm3 = vsum / vsum_rstar - 1.0
combiterm4 = vsum_rstar * rs
combiterm5 = rstar * combiterm3 / vsum_rstar
combiterm6 = combiterm1 * combiterm3 / vt
combiterm7 = 1.0 / combiterm4
# dI/dV = derivative of IV curve
didv = -vsum / combiterm4
# jacobian
didv_isat1 = exp_vstar * combiterm5
didv_isat2 = 0.5 * exp_vstar_2 * combiterm5
didv__r_s = combiterm7 * (combiterm6 * ic + vsum**2.0 / combiterm4)
didv_rsh = combiterm7 * (combiterm2 * combiterm3 + vt * vsum / combiterm4)
didv_ic = combiterm6 / vsum_rstar
didv_vc = (didv + 1.0 / rs) * didv_ic
jac = np.array([
didv_isat1, didv_isat2, didv__r_s, didv_rsh, didv_ic, didv_vc
])
return didv, jac
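# Illustrative only: a minimal sketch of calling fdidv at an assumed operating
# point. The numeric parameter values below are hypothetical placeholders, not
# PVMismatch defaults.
#
#     didv, jac = fdidv(isat1=2.3e-11, isat2=1.1e-7, rs=0.004, rsh=10.0,
#                       ic=6.3, vc=0.5, vt=0.026)
#     # didv is the slope dI/dV at (vc, ic); jac stacks its sensitivities with
#     # respect to [isat1, isat2, rs, rsh, ic, vc], in that order.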
def fdpdv(isat1, isat2, rs, rsh, ic, vc, vt):
"""
Derivative of PV curve and its derivatives w.r.t. Isat1, Isat2, Rs, Rsh, Ic,
Vc and Vt.
:param isat1: diode 1 saturation current [A]
:param isat2: diode 2 saturation current [A]
:param rs: series resistance [ohms]
:param rsh: shunt resistance [ohms]
:param ic: cell current [A]
:param vc: cell voltage [V]
:param vt: thermal voltage (kB * Tc / qe = 26[mV] at Tc=298K) [V]
:return: derivative of PV curve and its derivatives
"""
didv, _ = fdidv(isat1, isat2, rs, rsh, ic, vc, vt)
vd, _ = diode.fvd(vc, ic, rs) # vd = vc + ic * rs
dpdv = didv * vc + ic
dpdv_isat1 = 2.0*rs*rsh*vc*(
2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt
)*np.exp(vd/vt)/(
2.0*isat1*rs*rsh*np.exp(vd/vt)
+ isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
)**2 - 2.0*rsh*vc*np.exp(vd/vt)/(
2.0*isat1*rs*rsh*np.exp(vd/vt)
+ isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
)
dpdv_isat2 = rs*rsh*vc*(
2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt
)*np.exp(0.5*vd/vt)/(
2.0*isat1*rs*rsh*np.exp(vd/vt)
+ isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
)**2 - rsh*vc*np.exp(0.5*vd/vt)/(
2.0*isat1*rs*rsh*np.exp(vd/vt)
+ isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
)
dpdv_rs = -vc*(
2.0*isat1*rsh*ic*np.exp(vd/vt)/vt
+ 0.5*isat2*rsh*ic*np.exp(0.5*vd/vt)/vt
)/(
2.0*isat1*rs*rsh*np.exp(vd/vt)
+ isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
) - vc*(
2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt
)*(
-2.0*isat1*rs*rsh*ic*np.exp(vd/vt)/vt
- 2.0*isat1*rsh*np.exp(vd/vt)
- 0.5*isat2*rs*rsh*ic*np.exp(0.5*vd/vt)/vt
- isat2*rsh*np.exp(0.5*vd/vt) - 2.0*vt
)/(
2.0*isat1*rs*rsh*np.exp(vd/vt)
+ isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
)**2
dpdv_rsh = -vc*(
2.0*isat1*np.exp(vd/vt) + isat2*np.exp(0.5*vd/vt)
)/(
2.0*isat1*rs*rsh*np.exp(vd/vt)
+ isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
) - vc*(
-2.0*isat1*rs*np.exp(vd/vt) - isat2*rs*np.exp(0.5*vd/vt) - 2.0*vt
)*(
2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt
)/(
2.0*isat1*rs*rsh*np.exp(vd/vt)
+ isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
)**2
dpdv_ic = -vc*(
2.0*isat1*rs*rsh*np.exp(vd/vt)/vt
+ 0.5*isat2*rs*rsh*np.exp(0.5*vd/vt)/vt
)/(
2.0*isat1*rs*rsh*np.exp(vd/vt)
+ isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
) - vc*(
-2.0*isat1*rs**2*rsh*np.exp(vd/vt)/vt
- 0.5*isat2*rs**2*rsh*np.exp(0.5*vd/vt)/vt
)*(
2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt
)/(
2.0*isat1*rs*rsh*np.exp(vd/vt)
+ isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
)**2 + 1.0
dpdv_vc = -vc*(
2.0*isat1*rsh*(rs*didv + 1)*np.exp(vd/vt)/vt
+ 0.5*isat2*rsh*(rs*didv + 1)*np.exp(0.5*vd/vt)/vt
)/(
2.0*isat1*rs*rsh*np.exp(vd/vt)
+ isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
) - vc*(
-2.0*isat1*rs*rsh*(rs*didv + 1)*np.exp(vd/vt)/vt
- 0.5*isat2*rs*rsh*(rs*didv + 1)*np.exp(0.5*vd/vt)/vt
)*(
2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt
)/(
2.0*isat1*rs*rsh*np.exp(vd/vt)
+ isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
)**2 - (
2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt
)/(
2.0*isat1*rs*rsh*np.exp(vd/vt)
+ isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
) + didv
jac = np.array([
dpdv_isat1, dpdv_isat2, dpdv_rs, dpdv_rsh, dpdv_ic, dpdv_vc
])
return dpdv, jac
def fjrsh(isat1, isat2, rs, rsh, vt, isc):
"""
Shunt resistance residual and its derivatives w.r.t. Isat1, Isat2, Rs and
Rsh.
:param isat1: diode 1 saturation current [A]
:param isat2: diode 2 saturation current [A]
:param rs: series resistance [ohms]
:param rsh: shunt resistance [ohms]
:param vt: thermal voltage (kB * Tc / qe = 26[mV] at Tc=298K) [V]
:param isc: short circuit current [A]
:return: Rsh residual and its derivatives
    Shunt resistance is assumed to be equal to the negative inverse of the
    slope of the IV curve at short circuit.
.. math::
Rsh = \\frac{ -1 }{ \\left. \\frac{dI}{dV} \\right|_{V=0} }
This assumption is valid when [put condition here].
"""
didv, _ = fdidv(isat1, isat2, rs, rsh, ic=isc, vc=0, vt=vt)
vd, _ = diode.fvd(0.0, isc, rs) # vd = vc + ic * rs = 0.0 + isc * rs
# frsh = rsh + 1/didv
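    # implemented below in the equivalent scaled form vd * (1/rsh + didv),
    # which likewise vanishes when didv == -1/rsh (i.e. Rsh == -1/(dI/dV) at V=0)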
frsh = vd * (1.0/rsh + didv)
dfrsh_isat1 = vd*(
2.0*rs*rsh*(
2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt
)*np.exp(vd/vt)/(
2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt)
+ 2.0*rs*vt + 2.0*rsh*vt
)**2 - 2.0*rsh*np.exp(vd/vt)/(
2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt)
+ 2.0*rs*vt + 2.0*rsh*vt
)
)
dfrsh_isat2 = vd*(
rs*rsh*(
2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt
)*np.exp(0.5*vd/vt)/(
2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt)
+ 2.0*rs*vt + 2.0*rsh*vt
)**2 - rsh*np.exp(0.5*vd/vt)/(
2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt)
+ 2.0*rs*vt + 2.0*rsh*vt
)
)
dfrsh_rs = (
vd*(
-(
2.0*isat1*rsh*isc*np.exp(vd/vt)/vt + 0.5*isat2*rsh*isc*np.exp(0.5*vd/vt)/vt
)/(
2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
) - (
2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt
)*(
-2.0*isat1*rs*rsh*isc*np.exp(vd/vt)/vt
- 2.0*isat1*rsh*np.exp(vd/vt)
- 0.5*isat2*rs*rsh*isc*np.exp(0.5*vd/vt)/vt
- isat2*rsh*np.exp(0.5*vd/vt) - 2.0*vt
)/(
2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
)**2
) + (
-(2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt)/(
2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
) + 1.0/rsh
)*isc
)
dfrsh_rsh = (
vd*(
-(2.0*isat1*np.exp(vd/vt) + isat2*np.exp(0.5*vd/vt))/(
2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
) - (
-2.0*isat1*rs*np.exp(vd/vt) - isat2*rs*np.exp(0.5*vd/vt) - 2.0*vt
)*(2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt)/(
2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
)**2 - 1.0/rsh**2
)
)
dfrsh_ic = (
rs*(
-(2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt)/(
2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
) + 1.0/rsh
) + vd*(
-(
2.0*isat1*rs*rsh*np.exp(vd/vt)/vt + 0.5*isat2*rs*rsh*np.exp(0.5*vd/vt)/vt
)/(
2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
) - (
-2.0*isat1*rs**2*rsh*np.exp(vd/vt)/vt - 0.5*isat2*rs**2*rsh*np.exp(0.5*vd/vt)/vt
)*(
2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt
)/(
2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
)**2
)
)
dfrsh_vc = (
vd*(-(
2.0*isat1*rsh*(rs*didv + 1)*np.exp(vd/vt)/vt
+ 0.5*isat2*rsh*(rs*didv + 1)*np.exp(0.5*vd/vt)/vt
)/(
2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
) - (
-2.0*isat1*rs*rsh*(rs*didv + 1)*np.exp(vd/vt)/vt
- 0.5*isat2*rs*rsh*(rs*didv + 1)*np.exp(0.5*vd/vt)/vt
)*(2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt)/(
2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
)**2) + (rs*didv + 1)*(
-(2.0*isat1*rsh*np.exp(vd/vt) + isat2*rsh*np.exp(0.5*vd/vt) + 2.0*vt)/(
2.0*isat1*rs*rsh*np.exp(vd/vt) + isat2*rs*rsh*np.exp(0.5*vd/vt) + 2.0*rs*vt + 2.0*rsh*vt
) + 1.0/rsh
)
)
jac = np.array([
dfrsh_isat1, dfrsh_isat2, dfrsh_rs, dfrsh_rsh, dfrsh_ic, dfrsh_vc
])
return frsh, jac
| SunPower/PVMismatch | pvmismatch/contrib/gen_coeffs/two_diode.py | Python | bsd-3-clause | 11,016 | 0.002179 |
# encoding: utf-8
from yast import import_module
import_module('UI')
from yast import *
class CheckBoxFrame1Client:
def main(self):
UI.OpenDialog(
VBox(
MarginBox(
1,
0.5,
CheckBoxFrame(
"E&xpert Settings",
True,
VBox(
HBox(
InputField("&Server"),
ComboBox("&Mode", ["Automatic", "Manual", "Debug"])
),
Left(CheckBox("&Logging")),
InputField("&Connections")
)
)
),
PushButton("&OK")
)
)
UI.UserInput()
UI.CloseDialog()
CheckBoxFrame1Client().main()
| yast/yast-python-bindings | examples/CheckBoxFrame1.py | Python | gpl-2.0 | 729 | 0.00823 |
# -*- coding: utf-8 -*-
from copy import deepcopy
from openprocurement.auctions.dgf.tests.base import (
test_auction_data,
test_features_auction_data,
test_financial_organization,
test_financial_auction_data,
test_bids,
test_financial_bids,
test_organization
)
from openprocurement.api.tests.base import JSON_RENDERER_ERROR
# AuctionBidderResourceTest
def create_auction_bidder_invalid(self):
response = self.app.post_json('/auctions/some_id/bids', {
'data': {'tenderers': [self.initial_organization], "value": {"amount": 500}, 'qualified': True}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'auction_id'}
])
request_path = '/auctions/{}/bids'.format(self.auction_id)
response = self.app.post(request_path, 'data', status=415)
self.assertEqual(response.status, '415 Unsupported Media Type')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description':
u"Content-Type header should be one of ['application/json']", u'location': u'header',
u'name': u'Content-Type'}
])
response = self.app.post(
request_path, 'data', content_type='application/json', status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
JSON_RENDERER_ERROR
])
response = self.app.post_json(request_path, 'data', status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Data not available',
u'location': u'body', u'name': u'data'}
])
response = self.app.post_json(
request_path, {'not_data': {}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Data not available',
u'location': u'body', u'name': u'data'}
])
response = self.app.post_json(request_path, {'data': {
'invalid_field': 'invalid_value'}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Rogue field', u'location':
u'body', u'name': u'invalid_field'}
])
response = self.app.post_json(request_path, {
'data': {'tenderers': [{'identifier': 'invalid_value'}]}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': {u'identifier': [
u'Please use a mapping for this field or Identifier instance instead of unicode.']}, u'location': u'body',
u'name': u'tenderers'}
])
response = self.app.post_json(request_path, {
'data': {'tenderers': [{'identifier': {}}]}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertIn({u"location": u"body", u"name": u"qualified", u"description": [u"This field is required."]},
response.json['errors'])
if self.initial_organization == test_financial_organization:
self.assertIn({u'description': [u'This field is required.'], u'location': u'body', u'name': u'eligible'},
response.json['errors'])
self.assertIn({u'description': [
{u'additionalIdentifiers': [u'This field is required.'], u'contactPoint': [u'This field is required.'],
u'identifier': {u'scheme': [u'This field is required.'], u'id': [u'This field is required.']},
u'name': [u'This field is required.'], u'address': [u'This field is required.']}], u'location': u'body',
u'name': u'tenderers'}, response.json['errors'])
else:
self.assertIn({u'description': [{u'contactPoint': [u'This field is required.'],
u'identifier': {u'scheme': [u'This field is required.'],
u'id': [u'This field is required.']},
u'name': [u'This field is required.'],
u'address': [u'This field is required.']}], u'location': u'body',
u'name': u'tenderers'}, response.json['errors'])
response = self.app.post_json(request_path, {'data': {'tenderers': [{
'name': 'name', 'identifier': {'uri': 'invalid_value'}}]}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertIn({u"location": u"body", u"name": u"qualified", u"description": [u"This field is required."]},
response.json['errors'])
if self.initial_organization == test_financial_organization:
self.assertIn({u'description': [u'This field is required.'], u'location': u'body', u'name': u'eligible'},
response.json['errors'])
self.assertIn({u'description': [
{u'additionalIdentifiers': [u'This field is required.'], u'contactPoint': [u'This field is required.'],
u'identifier': {u'scheme': [u'This field is required.'], u'id': [u'This field is required.'],
u'uri': [u'Not a well formed URL.']}, u'address': [u'This field is required.']}],
u'location': u'body', u'name': u'tenderers'}, response.json['errors'])
else:
self.assertIn({u'description': [{u'contactPoint': [u'This field is required.'],
u'identifier': {u'scheme': [u'This field is required.'],
u'id': [u'This field is required.'],
u'uri': [u'Not a well formed URL.']},
u'address': [u'This field is required.']}], u'location': u'body',
u'name': u'tenderers'}, response.json['errors'])
if self.initial_organization == test_financial_organization:
response = self.app.post_json(request_path, {
'data': {'tenderers': [self.initial_organization], 'qualified': True, 'eligible': True}}, status=422)
else:
response = self.app.post_json(request_path,
{'data': {'tenderers': [self.initial_organization], 'qualified': True}},
status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertIn({u'description': [u'This field is required.'], u'location': u'body', u'name': u'value'},
response.json['errors'])
if self.initial_organization == test_financial_organization:
response = self.app.post_json(request_path, {
'data': {'tenderers': [self.initial_organization], "value": {"amount": 500, 'valueAddedTaxIncluded': False},
'qualified': True, 'eligible': True}}, status=422)
else:
response = self.app.post_json(request_path, {
'data': {'tenderers': [self.initial_organization], "value": {"amount": 500, 'valueAddedTaxIncluded': False},
'qualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertIn({u'description': [
u'valueAddedTaxIncluded of bid should be identical to valueAddedTaxIncluded of value of auction'],
u'location': u'body', u'name': u'value'}, response.json['errors'])
if self.initial_organization == test_financial_organization:
response = self.app.post_json(request_path, {
'data': {'tenderers': [self.initial_organization], "value": {"amount": 500, 'currency': "USD"},
'qualified': True, 'eligible': True}}, status=422)
else:
response = self.app.post_json(request_path, {
'data': {'tenderers': [self.initial_organization], "value": {"amount": 500, 'currency': "USD"},
'qualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertIn(
{u'description': [u'currency of bid should be identical to currency of value of auction'], u'location': u'body',
u'name': u'value'}, response.json['errors'])
if self.initial_organization == test_financial_organization:
response = self.app.post_json(request_path, {
'data': {'tenderers': self.initial_organization, "value": {"amount": 500}, 'qualified': True,
'eligible': True}}, status=422)
else:
response = self.app.post_json(request_path, {
'data': {'tenderers': self.initial_organization, "value": {"amount": 500}, 'qualified': True}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
if self.initial_organization == test_financial_organization:
self.assertIn(
{u'description': u"invalid literal for int() with base 10: 'additionalIdentifiers'", u'location': u'body',
u'name': u'data'}, response.json['errors'])
else:
self.assertIn({u'description': u"invalid literal for int() with base 10: 'contactPoint'", u'location': u'body',
u'name': u'data'}, response.json['errors'])
if self.initial_organization == test_financial_organization:
response = self.app.post_json('/auctions/{}/bids'.format(
self.auction_id), {'data': {'tenderers': [self.initial_organization], "value": {"amount": 500}}},
status=422)
else:
response = self.app.post_json('/auctions/{}/bids'.format(
self.auction_id), {'data': {'tenderers': [self.initial_organization], "value": {"amount": 500}}},
status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertIn({u'description': [u'This field is required.'], u'location': u'body', u'name': u'qualified'},
response.json['errors'])
def patch_auction_bidder(self):
if self.initial_organization == test_financial_organization:
response = self.app.post_json('/auctions/{}/bids'.format(
self.auction_id), {'data': {'tenderers': [self.initial_organization], "status": "draft", "value": {"amount": 500}, 'qualified': True, 'eligible': True}})
else:
response = self.app.post_json('/auctions/{}/bids'.format(
self.auction_id), {'data': {'tenderers': [self.initial_organization], "status": "draft", "value": {"amount": 500}, 'qualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bidder = response.json['data']
response = self.app.patch_json('/auctions/{}/bids/{}'.format(self.auction_id, bidder['id']), {"data": {"status": "active", "value": {"amount": 60}}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': [u'value of bid should be greater than value of auction'], u'location': u'body', u'name': u'value'}
])
response = self.app.patch_json('/auctions/{}/bids/{}'.format(self.auction_id, bidder['id']), {"data": {'tenderers': [{"name": u"Державне управління управлінням справами"}]}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['date'], bidder['date'])
self.assertNotEqual(response.json['data']['tenderers'][0]['name'], bidder['tenderers'][0]['name'])
response = self.app.patch_json('/auctions/{}/bids/{}'.format(self.auction_id, bidder['id']), {"data": {"value": {"amount": 500}, 'tenderers': [self.initial_organization]}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']['date'], bidder['date'])
self.assertEqual(response.json['data']['tenderers'][0]['name'], bidder['tenderers'][0]['name'])
response = self.app.patch_json('/auctions/{}/bids/{}'.format(self.auction_id, bidder['id']), {"data": {"value": {"amount": 400}}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["value"]["amount"], 400)
self.assertNotEqual(response.json['data']['date'], bidder['date'])
response = self.app.patch_json('/auctions/{}/bids/{}'.format(self.auction_id, bidder['id']), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "active")
self.assertNotEqual(response.json['data']['date'], bidder['date'])
response = self.app.patch_json('/auctions/{}/bids/{}'.format(self.auction_id, bidder['id']), {"data": {"status": "draft"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can\'t update bid to (draft) status")
response = self.app.patch_json('/auctions/{}/bids/some_id'.format(self.auction_id), {"data": {"value": {"amount": 400}}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.patch_json('/auctions/some_id/bids/some_id', {"data": {"value": {"amount": 400}}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'auction_id'}
])
self.set_status('complete')
response = self.app.get('/auctions/{}/bids/{}'.format(self.auction_id, bidder['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["value"]["amount"], 400)
response = self.app.patch_json('/auctions/{}/bids/{}'.format(self.auction_id, bidder['id']), {"data": {"value": {"amount": 400}}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update bid in current (complete) auction status")
def get_auction_bidder(self):
if self.initial_organization == test_financial_organization:
response = self.app.post_json('/auctions/{}/bids'.format(
self.auction_id), {'data': {'tenderers': [self.initial_organization], "value": {"amount": 500}, 'qualified': True, 'eligible': True}})
else:
response = self.app.post_json('/auctions/{}/bids'.format(
self.auction_id), {'data': {'tenderers': [self.initial_organization], "value": {"amount": 500}, 'qualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bidder = response.json['data']
bid_token = response.json['access']['token']
response = self.app.get('/auctions/{}/bids/{}'.format(self.auction_id, bidder['id']), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't view bid in current (active.tendering) auction status")
response = self.app.get('/auctions/{}/bids/{}?acc_token={}'.format(self.auction_id, bidder['id'], bid_token))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], bidder)
self.set_status('active.qualification')
response = self.app.get('/auctions/{}/bids/{}'.format(self.auction_id, bidder['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
bidder_data = response.json['data']
#self.assertIn(u'participationUrl', bidder_data)
#bidder_data.pop(u'participationUrl')
self.assertEqual(bidder_data, bidder)
response = self.app.get('/auctions/{}/bids/some_id'.format(self.auction_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.get('/auctions/some_id/bids/some_id', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'auction_id'}
])
response = self.app.delete('/auctions/{}/bids/{}'.format(self.auction_id, bidder['id']), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't delete bid in current (active.qualification) auction status")
def delete_auction_bidder(self):
if self.initial_organization == test_financial_organization:
response = self.app.post_json('/auctions/{}/bids'.format(
self.auction_id), {'data': {'tenderers': [self.initial_organization], "value": {"amount": 500}, 'qualified': True, 'eligible': True}})
else:
response = self.app.post_json('/auctions/{}/bids'.format(
self.auction_id), {'data': {'tenderers': [self.initial_organization], "value": {"amount": 500}, 'qualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bidder = response.json['data']
response = self.app.delete('/auctions/{}/bids/{}'.format(self.auction_id, bidder['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'], bidder)
revisions = self.db.get(self.auction_id).get('revisions')
self.assertTrue(any([i for i in revisions[-2][u'changes'] if i['op'] == u'remove' and i['path'] == u'/bids']))
self.assertTrue(any([i for i in revisions[-1][u'changes'] if i['op'] == u'add' and i['path'] == u'/bids']))
response = self.app.delete('/auctions/{}/bids/some_id'.format(self.auction_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'bid_id'}
])
response = self.app.delete('/auctions/some_id/bids/some_id', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'auction_id'}
])
def get_auction_auctioners(self):
if self.initial_organization == test_financial_organization:
response = self.app.post_json('/auctions/{}/bids'.format(
self.auction_id), {'data': {'tenderers': [self.initial_organization], "value": {"amount": 500}, 'qualified': True, 'eligible': True}})
else:
response = self.app.post_json('/auctions/{}/bids'.format(
self.auction_id), {'data': {'tenderers': [self.initial_organization], "value": {"amount": 500}, 'qualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bidder = response.json['data']
response = self.app.get('/auctions/{}/bids'.format(self.auction_id), status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't view bids in current (active.tendering) auction status")
self.set_status('active.qualification')
response = self.app.get('/auctions/{}/bids'.format(self.auction_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data'][0], bidder)
response = self.app.get('/auctions/some_id/bids', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'auction_id'}
])
def bid_Administrator_change(self):
if self.initial_organization == test_financial_organization:
response = self.app.post_json('/auctions/{}/bids'.format(
self.auction_id), {'data': {'tenderers': [self.initial_organization], "value": {"amount": 500}, 'qualified': True, 'eligible': True}})
else:
response = self.app.post_json('/auctions/{}/bids'.format(
self.auction_id), {'data': {'tenderers': [self.initial_organization], "value": {"amount": 500}, 'qualified': True}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bidder = response.json['data']
self.app.authorization = ('Basic', ('administrator', ''))
response = self.app.patch_json('/auctions/{}/bids/{}'.format(self.auction_id, bidder['id']), {"data": {
'tenderers': [{"identifier": {"id": "00000000"}}],
"value": {"amount": 400}
}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertNotEqual(response.json['data']["value"]["amount"], 400)
self.assertEqual(response.json['data']["tenderers"][0]["identifier"]["id"], "00000000")
# AuctionBidInvalidationAuctionResourceTest
def post_auction_all_invalid_bids(self):
self.app.authorization = ('Basic', ('auction', ''))
response = self.app.post_json('/auctions/{}/auction'.format(self.auction_id),
{'data': {'bids': self.initial_bids}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
auction = response.json['data']
self.assertEqual(auction["bids"][0]['value']['amount'], self.initial_bids[0]['value']['amount'])
self.assertEqual(auction["bids"][1]['value']['amount'], self.initial_bids[1]['value']['amount'])
self.assertEqual(auction["bids"][2]['value']['amount'], self.initial_bids[2]['value']['amount'])
value_threshold = auction['value']['amount'] + auction['minimalStep']['amount']
self.assertLess(auction["bids"][0]['value']['amount'], value_threshold)
self.assertLess(auction["bids"][1]['value']['amount'], value_threshold)
self.assertLess(auction["bids"][2]['value']['amount'], value_threshold)
self.assertEqual(auction["bids"][0]['status'], 'invalid')
self.assertEqual(auction["bids"][1]['status'], 'invalid')
self.assertEqual(auction["bids"][2]['status'], 'invalid')
self.assertEqual('unsuccessful', auction["status"])
def post_auction_one_invalid_bid(self):
self.app.authorization = ('Basic', ('auction', ''))
bids = deepcopy(self.initial_bids)
bids[0]['value']['amount'] = bids[0]['value']['amount'] * 3
bids[1]['value']['amount'] = bids[1]['value']['amount'] * 2
response = self.app.post_json('/auctions/{}/auction'.format(self.auction_id), {'data': {'bids': bids}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
auction = response.json['data']
self.assertEqual(auction["bids"][0]['value']['amount'], bids[0]['value']['amount'])
self.assertEqual(auction["bids"][1]['value']['amount'], bids[1]['value']['amount'])
self.assertEqual(auction["bids"][2]['value']['amount'], bids[2]['value']['amount'])
value_threshold = auction['value']['amount'] + auction['minimalStep']['amount']
self.assertGreater(auction["bids"][0]['value']['amount'], value_threshold)
self.assertGreater(auction["bids"][1]['value']['amount'], value_threshold)
self.assertLess(auction["bids"][2]['value']['amount'], value_threshold)
self.assertEqual(auction["bids"][0]['status'], 'active')
self.assertEqual(auction["bids"][1]['status'], 'active')
self.assertEqual(auction["bids"][2]['status'], 'invalid')
self.assertEqual('active.qualification', auction["status"])
for i, status in enumerate(['pending.verification', 'pending.waiting']):
self.assertIn("tenderers", auction["bids"][i])
self.assertIn("name", auction["bids"][i]["tenderers"][0])
# self.assertIn(auction["awards"][0]["id"], response.headers['Location'])
self.assertEqual(auction["awards"][i]['bid_id'], bids[i]['id'])
self.assertEqual(auction["awards"][i]['value']['amount'], bids[i]['value']['amount'])
self.assertEqual(auction["awards"][i]['suppliers'], bids[i]['tenderers'])
self.assertEqual(auction["awards"][i]['status'], status)
if status == 'pending.verification':
self.assertIn("verificationPeriod", auction["awards"][i])
def post_auction_one_valid_bid(self):
self.app.authorization = ('Basic', ('auction', ''))
bids = deepcopy(self.initial_bids)
bids[0]['value']['amount'] = bids[0]['value']['amount'] * 2
response = self.app.post_json('/auctions/{}/auction'.format(self.auction_id), {'data': {'bids': bids}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
auction = response.json['data']
self.assertEqual(auction["bids"][0]['value']['amount'], bids[0]['value']['amount'])
self.assertEqual(auction["bids"][1]['value']['amount'], bids[1]['value']['amount'])
self.assertEqual(auction["bids"][2]['value']['amount'], bids[2]['value']['amount'])
value_threshold = auction['value']['amount'] + auction['minimalStep']['amount']
self.assertGreater(auction["bids"][0]['value']['amount'], value_threshold)
self.assertLess(auction["bids"][1]['value']['amount'], value_threshold)
self.assertLess(auction["bids"][2]['value']['amount'], value_threshold)
self.assertEqual(auction["bids"][0]['status'], 'active')
self.assertEqual(auction["bids"][1]['status'], 'invalid')
self.assertEqual(auction["bids"][2]['status'], 'invalid')
self.assertEqual('active.qualification', auction["status"])
for i, status in enumerate(['pending.verification', 'unsuccessful']):
self.assertIn("tenderers", auction["bids"][i])
self.assertIn("name", auction["bids"][i]["tenderers"][0])
# self.assertIn(auction["awards"][0]["id"], response.headers['Location'])
self.assertEqual(auction["awards"][i]['bid_id'], bids[i]['id'])
self.assertEqual(auction["awards"][i]['value']['amount'], bids[i]['value']['amount'])
self.assertEqual(auction["awards"][i]['suppliers'], bids[i]['tenderers'])
self.assertEqual(auction["awards"][i]['status'], status)
if status == 'pending.verification':
self.assertIn("verificationPeriod", auction["awards"][i])
# AuctionBidderProcessTest
def reactivate_invalidated_bids(self):
bid1_id = self.initial_bids[0]['id']
bid2_id = self.initial_bids[1]['id']
bid1_token = self.initial_bids_tokens[self.initial_bids[0]['id']]
bid2_token = self.initial_bids_tokens[self.initial_bids[1]['id']]
# patch
response = self.app.patch_json('/auctions/{}?acc_token={}'.format(self.auction_id, self.auction_token), {'data': {'value': {'amount': 540}}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.get('/auctions/{}/bids/{}?acc_token={}'.format(self.auction_id, bid1_id, bid1_token))
self.assertEqual(response.json['data']["status"], "invalid")
response = self.app.get('/auctions/{}/bids/{}?acc_token={}'.format(self.auction_id, bid2_id, bid2_token))
self.assertEqual(response.json['data']["status"], "invalid")
# reactivate bids invalid bid value.amount
response = self.app.patch_json('/auctions/{}/bids/{}?acc_token={}'.format(self.auction_id, bid1_id, bid1_token),
{'data': {"status": "active"}}, status=422)
self.assertEqual(response.json['errors'], [
{u'description': [u'value of bid should be greater than value of auction'], u'location': u'body', u'name': u'value'}
])
response = self.app.patch_json('/auctions/{}/bids/{}?acc_token={}'.format(self.auction_id, bid2_id, bid2_token),
{'data': {"status": "active"}}, status=422)
self.assertEqual(response.json['errors'], [
{u'description': [u'value of bid should be greater than value of auction'], u'location': u'body', u'name': u'value'}
])
# set bid value.amount above auction value.amount
response = self.app.patch_json('/auctions/{}/bids/{}?acc_token={}'.format(self.auction_id, bid1_id, bid1_token),
{"data": {"value": {"amount": 800}}})
response = self.app.patch_json('/auctions/{}/bids/{}?acc_token={}'.format(self.auction_id, bid2_id, bid2_token),
{"data": {"value": {"amount": 900}}})
# reactivate bids
response = self.app.patch_json('/auctions/{}/bids/{}?acc_token={}'.format(self.auction_id, bid1_id, bid1_token),
{'data': {"status": "active"}})
self.assertEqual(response.json['data']["status"], "active")
response = self.app.patch_json('/auctions/{}/bids/{}?acc_token={}'.format(self.auction_id, bid2_id, bid2_token),
{'data': {"status": "active"}})
self.assertEqual(response.json['data']["status"], "active")
# AuctionBidderFeaturesResourceTest
def features_bidder(self):
test_features_bids = [
{
"parameters": [
{
"code": i["code"],
"value": 0.1,
}
for i in self.initial_data['features']
],
"status": "active",
"tenderers": [
self.initial_organization
],
"value": {
"amount": 469,
"currency": "UAH",
"valueAddedTaxIncluded": True
}
},
{
"parameters": [
{
"code": i["code"],
"value": 0.15,
}
for i in self.initial_data['features']
],
"tenderers": [
self.initial_organization
],
"status": "draft",
"value": {
"amount": 479,
"currency": "UAH",
"valueAddedTaxIncluded": True
}
}
]
for i in test_features_bids:
response = self.app.post_json('/auctions/{}/bids'.format(self.auction_id), {'data': i})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
bid = response.json['data']
bid.pop(u'date')
bid.pop(u'id')
self.assertEqual(bid, i)
# AuctionBidderDocumentResourceTest
def create_auction_bidder_document_nopending(self):
if self.initial_organization == test_financial_organization:
response = self.app.post_json('/auctions/{}/bids'.format(
self.auction_id), {'data': {'tenderers': [self.initial_organization], "value": {"amount": 500}, 'qualified': True, 'eligible': True}})
else:
response = self.app.post_json('/auctions/{}/bids'.format(
self.auction_id), {'data': {'tenderers': [self.initial_organization], "value": {"amount": 500}, 'qualified': True}})
bid = response.json['data']
bid_id = bid['id']
response = self.app.post('/auctions/{}/bids/{}/documents'.format(
self.auction_id, bid_id), upload_files=[('file', 'name.doc', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
self.set_status('active.qualification')
response = self.app.patch_json('/auctions/{}/bids/{}/documents/{}'.format(
self.auction_id, bid_id, doc_id), {"data": {"description": "document description"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update document because award of bid is not in pending state")
response = self.app.put('/auctions/{}/bids/{}/documents/{}'.format(
self.auction_id, bid_id, doc_id), 'content3', content_type='application/msword', status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't update document because award of bid is not in pending state")
response = self.app.post('/auctions/{}/bids/{}/documents'.format(
self.auction_id, bid_id), upload_files=[('file', 'name.doc', 'content')], status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't add document because award of bid is not in pending state")
| prozorro-sale/openprocurement.auctions.dgf | openprocurement/auctions/dgf/tests/blanks/bidder_blanks.py | Python | apache-2.0 | 36,337 | 0.003444 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyAzureMgmtAppconfiguration(PythonPackage):
"""Microsoft Azure App Configuration Management Client Library for Python.
"""
homepage = "https://github.com/Azure/azure-sdk-for-python"
pypi = "azure-mgmt-appconfiguration/azure-mgmt-appconfiguration-0.5.0.zip"
version('0.5.0', sha256='211527511d7616a383cc196956eaf2b7ee016f2367d367924b3715f2a41106da')
version('0.4.0', sha256='85f6202ba235fde6be274f3dec1578b90235cf31979abea3fcfa476d0b2ac5b6')
depends_on('py-setuptools', type='build')
depends_on('py-msrest@0.5.0:', type=('build', 'run'))
depends_on('py-msrestazure@0.4.32:1', type=('build', 'run'))
depends_on('py-azure-common@1.1:1', type=('build', 'run'))
depends_on('py-azure-mgmt-nspkg', when='^python@:2', type=('build', 'run'))
| LLNL/spack | var/spack/repos/builtin/packages/py-azure-mgmt-appconfiguration/package.py | Python | lgpl-2.1 | 985 | 0.00203 |
import os
import sys
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import unittest
sys.path.insert(0, os.path.abspath("../../"))
from specdal.spectrum import Spectrum
from specdal.collection import Collection
class GroupByTests(unittest.TestCase):
def setUp(self):
# total 36 spectra
self.c = Collection(name='For Groups')
for a in ('A', 'B', 'C'):
for b in ('a', 'b', 'c'):
for c in ('0', '1'):
for d in ('0001', '0002', '0003', '0004'):
self.c.append(Spectrum('_'.join([a, b, c, d])))
# print([s.name for s in self.c.spectra])
def test_groups(self):
groups = self.c.groupby(separator='_', indices=[0, 2])
for s in groups['A_0'].spectra:
print(s.name)
'''
def test_num_groups(self):
groups = self.c.groupby(separator='_', indices=[0])
self.assertEqual(len(groups), 3)
groups = self.c.groupby(separator='_', indices=[1])
self.assertEqual(len(groups), 3)
groups = self.c.groupby(separator='_', indices=[2])
self.assertEqual(len(groups), 4)
groups = self.c.groupby(separator='_', indices=[0, 1])
self.assertEqual(len(groups), 9)
groups = self.c.groupby(separator='_', indices=[0, 2])
self.assertEqual(len(groups), 12)
groups = self.c.groupby(separator='_', indices=[1, 2])
self.assertEqual(len(groups), 12)
groups = self.c.groupby(separator='_', indices=[0, 1, 2])
self.assertEqual(len(groups), 36)
'''
def main():
unittest.main()
if __name__ == '__main__':
main()
| EnSpec/SpecDAL | specdal/tests/test_groupby.py | Python | mit | 1,667 | 0.0024 |
#
# Copyright 2015 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
class VirtDeployException(Exception):
def __init__(self, message="Unknown error"):
self.message = message
def __str__(self):
return self.message
class InstanceNotFound(VirtDeployException):
def __init__(self, name):
super(InstanceNotFound, self).__init__(
'No such instance: {0}'.format(name))
| simon3z/virt-deploy | virtdeploy/errors.py | Python | gpl-2.0 | 1,201 | 0 |
# This file is part of pybliographer
#
# Copyright (C) 1998-2004 Frederic GOBRY
# Email : gobry@pybliographer.org
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#
# TODO: get rid of all of this, and use standard iterators / generators
class Iterator:
base = None
title = "Some Selection"
def iterator (self):
        ''' Loop helper: returns self, so a method can be passed either a
        database or a database iterator interchangeably.
'''
return self
def __iter__ (self):
retval = self.first ()
while retval != None:
yield retval
retval = self.next()
raise StopIteration
def set_position (self, pos=0):
self._position = 0
def get_position (self):
return self._position
def first (self):
self.set_position (0)
return self.next ()
class DBIterator (Iterator):
''' This class defines a database iterator '''
def __init__ (self, database):
self.keys = database.keys ()
self.base = database
self.database = database
self.count = 0
return
def __iter__ (self):
self._position = 0
for k in self.keys:
yield self.database [k]
self._position += 1
def first (self):
self.count = 0
return self.next ()
def next (self):
try:
entry = self.database [self.keys [self.count]]
except IndexError:
entry = None
self.count = self.count + 1
return entry
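# Illustrative only (hypothetical `db` object and `process` function): because
# Iterator.iterator() returns self and DBIterator wraps a database, the same
# loop works whether it is handed a database iterator or a plain Iterator.
#
#     for entry in DBIterator(db).iterator():
#         process(entry)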
| zkota/pyblio-1.3 | Legacy/Iterator.py | Python | gpl-2.0 | 2,270 | 0.01674 |
from build.management.commands.build_drugs import Command as BuildDrugs
class Command(BuildDrugs):
pass | cmunk/protwis | build_gpcr/management/commands/build_drugs.py | Python | apache-2.0 | 109 | 0.009174 |
import logging
from datetime import timedelta
from pajbot.managers.handler import HandlerManager
from pajbot.modules import BaseModule
from pajbot.modules import ModuleSetting
log = logging.getLogger(__name__)
class MaxMsgLengthModule(BaseModule):
ID = __name__.split(".")[-1]
NAME = "Maximum Message Length"
DESCRIPTION = "Times out users who post messages that contain too many characters."
CATEGORY = "Moderation"
SETTINGS = [
ModuleSetting(
key="max_msg_length",
label="Max message length (Online chat)",
type="number",
required=True,
placeholder="",
default=400,
constraints={"min_value": 1, "max_value": 500},
),
ModuleSetting(
key="max_msg_length_offline",
label="Max message length (Offline chat)",
type="number",
required=True,
placeholder="",
default=400,
constraints={"min_value": 1, "max_value": 500},
),
ModuleSetting(
key="timeout_length",
label="Timeout length",
type="number",
required=True,
placeholder="Timeout length in seconds",
default=120,
constraints={"min_value": 1, "max_value": 1209600},
),
ModuleSetting(
key="bypass_level",
label="Level to bypass module",
type="number",
required=True,
placeholder="",
default=500,
constraints={"min_value": 100, "max_value": 1000},
),
ModuleSetting(
key="timeout_reason",
label="Timeout Reason",
type="text",
required=False,
placeholder="",
default="Message too long",
constraints={},
),
ModuleSetting(
key="whisper_timeout_reason",
label="Whisper Timeout Reason | Available arguments: {punishment}",
type="text",
required=False,
placeholder="",
default="You have been {punishment} because your message was too long.",
constraints={},
),
ModuleSetting(
key="disable_warnings",
label="Disable warning timeouts",
type="boolean",
required=True,
default=False,
),
]
def on_message(self, source, message, whisper, **rest):
if whisper:
return
if source.level >= self.settings["bypass_level"] or source.moderator:
return
if self.bot.is_online:
if len(message) > self.settings["max_msg_length"]:
if self.settings["disable_warnings"] is True:
self.bot.timeout(source, self.settings["timeout_length"], reason=self.settings["timeout_reason"])
else:
duration, punishment = self.bot.timeout_warn(
source, self.settings["timeout_length"], reason=self.settings["timeout_reason"]
)
""" We only send a notification to the user if he has spent more than
one hour watching the stream. """
if duration > 0 and source.time_in_chat_online >= timedelta(hours=1):
self.bot.whisper(source, self.settings["whisper_timeout_reason"].format(punishment=punishment))
return False
else:
if len(message) > self.settings["max_msg_length_offline"]:
if self.settings["disable_warnings"] is True:
self.bot.timeout(source, self.settings["timeout_length"], reason=self.settings["timeout_reason"])
else:
duration, punishment = self.bot.timeout_warn(
source, self.settings["timeout_length"], reason=self.settings["timeout_reason"]
)
""" We only send a notification to the user if he has spent more than
one hour watching the stream. """
if duration > 0 and source.time_in_chat_online >= timedelta(hours=1):
self.bot.whisper(source, self.settings["whisper_timeout_reason"].format(punishment=punishment))
return False
def enable(self, bot):
HandlerManager.add_handler("on_message", self.on_message, priority=150, run_if_propagation_stopped=True)
def disable(self, bot):
HandlerManager.remove_handler("on_message", self.on_message)
| pajlada/pajbot | pajbot/modules/maxmsglength.py | Python | mit | 4,608 | 0.002821 |
# -*- coding: utf-8 -*-
# Copyright 2010-2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Copy Qt frameworks to the target application's frameworks directory.
Typical usage:
% python copy_qt_frameworks.py --qtdir=/path/to/qtdir/ \
--target=/path/to/target.app/Contents/Frameworks/
"""
__author__ = "horo"
import optparse
import os
from copy_file import CopyFiles
from util import PrintErrorAndExit
from util import RunOrDie
def ParseOption():
"""Parse command line options."""
parser = optparse.OptionParser()
parser.add_option('--qtdir', dest='qtdir')
parser.add_option('--target', dest='target')
(opts, _) = parser.parse_args()
return opts
def main():
opt = ParseOption()
if not opt.qtdir:
PrintErrorAndExit('--qtdir option is mandatory.')
if not opt.target:
PrintErrorAndExit('--target option is mandatory.')
qtdir = os.path.abspath(opt.qtdir)
target = os.path.abspath(opt.target)
# Copies QtCore. For codesign, Info.plist should be copied to Resources/.
CopyFiles(['%s/lib/QtCore.framework/Versions/4/QtCore' % qtdir],
'%s/QtCore.framework/Versions/4/QtCore' % target)
CopyFiles(['%s/lib/QtCore.framework/Contents/Info.plist' % qtdir],
'%s/QtCore.framework/Resources/' % target)
# Copies QtGui. For codesign, Info.plist should be copied to Resources/.
CopyFiles(['%s/lib/QtGui.framework/Versions/4/QtGui' % qtdir],
'%s/QtGui.framework/Versions/4/QtGui' % target)
CopyFiles(['%s/lib/QtGui.framework/Contents/Info.plist' % qtdir],
'%s/QtGui.framework/Resources/' % target)
# Copies Resources of QtGui
CopyFiles(['%s/lib/QtGui.framework/Versions/4/Resources' % qtdir],
'%s/QtGui.framework/Resources' % target,
recursive=True)
# Changes QtGui id
cmd = ["install_name_tool", "-id",
"@executable_path/../Frameworks/QtGui.framework/Versions/4/QtGui",
"%s/QtGui.framework/Versions/4/QtGui" % target]
RunOrDie(cmd)
# Changes QtCore id
cmd = ["install_name_tool", "-id",
"@executable_path/../Frameworks/QtCore.framework/Versions/4/QtCore",
'%s/QtCore.framework/Versions/4/QtCore' % target]
RunOrDie(cmd)
# Changes the reference to QtCore framework from QtGui
cmd = ["install_name_tool", "-change",
"%s/lib/QtCore.framework/Versions/4/QtCore" % qtdir,
"@executable_path/../Frameworks/QtCore.framework/Versions/4/QtCore",
"%s/QtGui.framework/Versions/4/QtGui" % target]
RunOrDie(cmd)
if __name__ == '__main__':
main()
| takahashikenichi/mozc | src/build_tools/copy_qt_frameworks_mac.py | Python | bsd-3-clause | 4,010 | 0.008229 |
#!/usr/bin/python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
# Read every byte of the input file and write it out as a comma-separated
# decimal literal (one per line), ready to be embedded in a C/C++ char array.
src = open(sys.argv[1])
dst = open(sys.argv[2], 'w')
for ch in src.read():
  dst.write('%d,\n' % ord(ch))
src.close()
dst.close()
| ibc/MediaSoup | worker/deps/gyp/test/external-cross-compile/src/tochar.py | Python | isc | 317 | 0.003155 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateQuestion
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dataqna
# [START dataqna_generated_dataqna_v1alpha_QuestionService_CreateQuestion_sync]
from google.cloud import dataqna_v1alpha
def sample_create_question():
# Create a client
client = dataqna_v1alpha.QuestionServiceClient()
# Initialize request argument(s)
question = dataqna_v1alpha.Question()
question.scopes = ['scopes_value_1', 'scopes_value_2']
question.query = "query_value"
request = dataqna_v1alpha.CreateQuestionRequest(
parent="parent_value",
question=question,
)
# Make the request
response = client.create_question(request=request)
# Handle the response
print(response)
# [END dataqna_generated_dataqna_v1alpha_QuestionService_CreateQuestion_sync]
| googleapis/python-data-qna | samples/generated_samples/dataqna_generated_dataqna_v1alpha_question_service_create_question_sync.py | Python | apache-2.0 | 1,661 | 0.000602 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stochastic graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor
from tensorflow.contrib.bayesflow.python.ops import stochastic_variables
from tensorflow.contrib.bayesflow.python.ops import variational_inference
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
sv = stochastic_variables
st = stochastic_tensor
vi = variational_inference
dist = distributions
class StochasticVariablesTest(test.TestCase):
def testStochasticVariables(self):
shape = (10, 20)
with variable_scope.variable_scope(
"stochastic_variables",
custom_getter=sv.make_stochastic_variable_getter(
dist_cls=dist.NormalWithSoftplusScale)):
v = variable_scope.get_variable("sv", shape)
self.assertTrue(isinstance(v, st.StochasticTensor))
self.assertTrue(isinstance(v.distribution, dist.NormalWithSoftplusScale))
self.assertEqual(
{"stochastic_variables/sv_loc", "stochastic_variables/sv_scale"},
set([v.op.name for v in variables.global_variables()]))
self.assertEqual(
set(variables.trainable_variables()), set(variables.global_variables()))
v = ops.convert_to_tensor(v)
self.assertEqual(list(shape), v.get_shape().as_list())
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertEqual(shape, sess.run(v).shape)
def testStochasticVariablesWithConstantInitializer(self):
shape = (10, 20)
with variable_scope.variable_scope(
"stochastic_variables",
custom_getter=sv.make_stochastic_variable_getter(
dist_cls=dist.NormalWithSoftplusScale,
dist_kwargs={"validate_args": True},
param_initializers={
"loc": np.ones(shape) * 4.,
"scale": np.ones(shape) * 2.
})):
v = variable_scope.get_variable("sv")
for var in variables.global_variables():
if "loc" in var.name:
mu_var = var
if "scale" in var.name:
sigma_var = var
v = ops.convert_to_tensor(v)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertAllEqual(np.ones(shape) * 4., sess.run(mu_var))
self.assertAllEqual(np.ones(shape) * 2., sess.run(sigma_var))
self.assertEqual(shape, sess.run(v).shape)
def testStochasticVariablesWithCallableInitializer(self):
shape = (10, 20)
def sigma_init(shape, dtype, partition_info):
_ = partition_info
return array_ops.ones(shape, dtype=dtype) * 2.
with variable_scope.variable_scope(
"stochastic_variables",
custom_getter=sv.make_stochastic_variable_getter(
dist_cls=dist.NormalWithSoftplusScale,
dist_kwargs={"validate_args": True},
param_initializers={
"loc": np.ones(
shape, dtype=np.float32) * 4.,
"scale": sigma_init
})):
v = variable_scope.get_variable("sv", shape)
for var in variables.global_variables():
if "loc" in var.name:
mu_var = var
if "scale" in var.name:
sigma_var = var
v = ops.convert_to_tensor(v)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertAllEqual(np.ones(shape) * 4., sess.run(mu_var))
self.assertAllEqual(np.ones(shape) * 2., sess.run(sigma_var))
self.assertEqual(shape, sess.run(v).shape)
def testStochasticVariablesWithPrior(self):
shape = (10, 20)
prior = dist.Normal(0., 1.)
with variable_scope.variable_scope(
"stochastic_variables",
custom_getter=sv.make_stochastic_variable_getter(
dist_cls=dist.NormalWithSoftplusScale, prior=prior)):
w = variable_scope.get_variable("weights", shape)
x = random_ops.random_uniform((8, 10))
y = math_ops.matmul(x, w)
prior_map = vi._find_variational_and_priors(y, None)
self.assertEqual(prior_map[w], prior)
elbo = vi.elbo(y, keep_batch_dim=False)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
sess.run(elbo)
def testStochasticVariablesWithCallablePriorInitializer(self):
def prior_init(shape, dtype):
return dist.Normal(
array_ops.zeros(shape, dtype), array_ops.ones(shape, dtype))
with variable_scope.variable_scope(
"stochastic_variables",
custom_getter=sv.make_stochastic_variable_getter(
dist_cls=dist.NormalWithSoftplusScale, prior=prior_init)):
w = variable_scope.get_variable("weights", (10, 20))
x = random_ops.random_uniform((8, 10))
y = math_ops.matmul(x, w)
prior_map = vi._find_variational_and_priors(y, None)
self.assertTrue(isinstance(prior_map[w], dist.Normal))
elbo = vi.elbo(y, keep_batch_dim=False)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
sess.run(elbo)
if __name__ == "__main__":
test.main()
| kchodorow/tensorflow | tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_variables_test.py | Python | apache-2.0 | 6,122 | 0.00539 |
from django.dispatch import receiver
from signals import project_created  # matches the @receiver decorator below
@receiver(project_created)
def project_networking(sender, **kwargs):
return True
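
# Illustrative only -- how the `project_created` signal might be sent by the
# project-creation code. The sender and keyword arguments are assumptions made
# for the sake of the example, not part of this app:
#
#   from signals import project_created
#   project_created.send(sender=None, project_name="demo", tenant_id="abc123")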
| kanellov/openstack_project_create | openstack_project_create/receivers.py | Python | mit | 162 | 0.006173 |
# -*- coding: utf-8 -*-
import os,math
from qgis.core import NULL
from mole import oeq_global
from mole.project import config
from mole.extensions import OeQExtension
from mole.stat_corr import rb_present_wall_uvalue_AVG_by_building_age_lookup, nrb_present_wall_uvalue_by_building_age_lookup, rb_contemporary_wall_uvalue_by_building_age_lookup, nrb_contemporary_wall_uvalue_by_building_age_lookup
def calculation(self=None, parameters={},feature = None):
from math import floor, ceil
from PyQt4.QtCore import QVariant
wl_uph = NULL
    # Differentiate between RB and NRB buildings. For now, contemporary U-values
    # treat RB = NRB; once NRB data for the contemporary case is available, this
    # code must be adapted.
if parameters['BLD_USAGE'] == "RB":
if parameters['HERIT_STAT'] == "0":
if not oeq_global.isnull(parameters['YOC']):
wl_uph = rb_present_wall_uvalue_AVG_by_building_age_lookup.get(parameters['YOC'])
else:
if not oeq_global.isnull(parameters['YOC']):
wl_uph = rb_contemporary_wall_uvalue_by_building_age_lookup.get(parameters['YOC'])
elif parameters['BLD_USAGE'] == "NRB":
if parameters['HERIT_STAT'] == "0":
if not oeq_global.isnull(parameters['YOC']):
wl_uph = nrb_present_wall_uvalue_by_building_age_lookup.get(parameters['YOC'])
else:
if not oeq_global.isnull(parameters['YOC']):
wl_uph = nrb_contemporary_wall_uvalue_by_building_age_lookup.get(parameters['YOC'])
else:
if parameters['HERIT_STAT'] == "0":
if not oeq_global.isnull(parameters['YOC']):
wl_uph = (((rb_present_wall_uvalue_AVG_by_building_age_lookup.get(parameters['YOC'])) + (
nrb_present_wall_uvalue_by_building_age_lookup.get(parameters['YOC']))) / 2)
else:
if not oeq_global.isnull(parameters['YOC']):
wl_uph = (((rb_contemporary_wall_uvalue_by_building_age_lookup.get(parameters['YOC'])) + (
nrb_contemporary_wall_uvalue_by_building_age_lookup.get(parameters['YOC']))) / 2)
return {'WL_UPH': {'type': QVariant.Double, 'value': wl_uph}}
extension = OeQExtension(
extension_id=__name__,
category='Evaluation',
subcategory='U-Values Present Heritage',
extension_name='Wall Quality (U_Value, Present Heritage)',
layer_name= 'U Wall Present Heritage',
extension_filepath=os.path.join(__file__),
colortable = os.path.join(os.path.splitext(__file__)[0] + '.qml'),
field_id='WL_UPH',
source_type='none',
par_in=['YOC','BLD_USAGE','HERIT_STAT'],
sourcelayer_name=config.data_layer_name,
targetlayer_name=config.data_layer_name,
active=True,
show_results=['WL_UPH'],
description=u"Calculate the present heritage U-Value of the Building's wall",
evaluation_method=calculation)
extension.registerExtension(default=True)
| UdK-VPT/Open_eQuarter | mole/extensions/eval_present_heritage/oeq_UPH_Wall.py | Python | gpl-2.0 | 2,921 | 0.008216 |
#!/usr/bin/env python
import Tkinter as Tk
root=Tk.Tk()
l=Tk.Label(root,
text="Python\nPerl\nC",
justify="right",
)
l.pack()
root.mainloop()
| teppchan/tkintertips | py/label/align.py | Python | mit | 166 | 0.024096 |
from logging.handlers import BaseRotatingHandler
import time
import datetime
import os
class TimePatternRotatingHandler(BaseRotatingHandler):
def __init__(self, filename, when, encoding=None, delay=0):
        self.when = when.upper()
self.fname_pat = filename
self.mock_dt = None
self.computeNextRollover()
BaseRotatingHandler.__init__(self, self.filename, 'a', encoding, delay)
def get_now_dt(self):
if self.mock_dt is not None:
return self.mock_dt
return datetime.datetime.now()
def computeNextRollover(self):
now = self.get_now_dt()
if self.when == 'MONTH':
dtfmt = '%Y-%m'
dt = (now.replace(day=1) + datetime.timedelta(days=40)).replace(day=1, hour=0, minute=0, second=0)
rolloverAt = time.mktime(dt.timetuple())
        elif self.when == 'DAY':
            dtfmt = '%Y-%m-%d'
            dt = (now + datetime.timedelta(days=1)).replace(hour=0, minute=0, second=0)
            rolloverAt = time.mktime(dt.timetuple())
        else:
            raise ValueError("Unsupported rotation interval %r; expected 'MONTH' or 'DAY'" % self.when)
        self.rolloverAt = rolloverAt
self.dtfmt = dtfmt
self.filename = os.path.abspath(self.fname_pat % (now.strftime(self.dtfmt)))
#print now, self.filename
def shouldRollover(self, record):
now = self.get_now_dt()
t = time.mktime(now.timetuple())
#print t, self.rolloverAt
if t >= self.rolloverAt:
return 1
return 0
def doRollover(self):
if self.stream:
self.stream.close()
self.computeNextRollover()
self.baseFilename = self.filename
self.stream = self._open()
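

# Illustrative usage sketch (not part of the original module). The filename
# pattern is assumed to contain a single '%s', which gets replaced with the
# formatted date of the current rotation period:
#
#   import logging
#   logger = logging.getLogger("app")
#   handler = TimePatternRotatingHandler("app.%s.log", when="day")
#   handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
#   logger.addHandler(handler)
#   logger.warning("rotates into a new file every day")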
| vls/python_utils | log_handlers.py | Python | unlicense | 1,677 | 0.003578 |
#-- GAUDI jobOptions generated on Wed Jun 10 17:31:51 2015
#-- Contains event types :
#-- 11104041 - 117 files - 2010995 events - 432.61 GBytes
#-- Extra information about the data processing phases:
#-- Processing Pass Step-124834
#-- StepId : 124834
#-- StepName : Reco14a for MC
#-- ApplicationName : Brunel
#-- ApplicationVersion : v43r2p7
#-- OptionFiles : $APPCONFIGOPTS/Brunel/DataType-2012.py;$APPCONFIGOPTS/Brunel/MC-WithTruth.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124620
#-- StepId : 124620
#-- StepName : Digi13 with G4 dE/dx
#-- ApplicationName : Boole
#-- ApplicationVersion : v26r3
#-- OptionFiles : $APPCONFIGOPTS/Boole/Default.py;$APPCONFIGOPTS/Boole/DataType-2012.py;$APPCONFIGOPTS/Boole/Boole-SiG4EnergyDeposit.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124632
#-- StepId : 124632
#-- StepName : TCK-0x409f0045 Flagged for Sim08 2012
#-- ApplicationName : Moore
#-- ApplicationVersion : v14r8p1
#-- OptionFiles : $APPCONFIGOPTS/Moore/MooreSimProductionWithL0Emulation.py;$APPCONFIGOPTS/Conditions/TCK-0x409f0045.py;$APPCONFIGOPTS/Moore/DataType-2012.py;$APPCONFIGOPTS/L0/L0TCK-0x0045.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-124630
#-- StepId : 124630
#-- StepName : Stripping20-NoPrescalingFlagged for Sim08
#-- ApplicationName : DaVinci
#-- ApplicationVersion : v32r2p1
#-- OptionFiles : $APPCONFIGOPTS/DaVinci/DV-Stripping20-Stripping-MC-NoPrescaling.py;$APPCONFIGOPTS/DaVinci/DataType-2012.py;$APPCONFIGOPTS/DaVinci/InputType-DST.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : fromPreviousStep
#-- CONDDB : fromPreviousStep
#-- ExtraPackages : AppConfig.v3r164
#-- Visible : Y
#-- Processing Pass Step-125577
#-- StepId : 125577
#-- StepName : Sim08a - 2012 - MD - Pythia8
#-- ApplicationName : Gauss
#-- ApplicationVersion : v45r3
#-- OptionFiles : $APPCONFIGOPTS/Gauss/Sim08-Beam4000GeV-md100-2012-nu2.5.py;$DECFILESROOT/options/@{eventType}.py;$LBPYTHIA8ROOT/options/Pythia8.py;$APPCONFIGOPTS/Gauss/G4PL_FTFP_BERT_EmNoCuts.py;$APPCONFIGOPTS/Persistency/Compression-ZLIB-1.py
#-- DDDB : Sim08-20130503-1
#-- CONDDB : Sim08-20130503-1-vc-md100
#-- ExtraPackages : AppConfig.v3r171;DecFiles.v27r11
#-- Visible : Y
from Gaudi.Configuration import *
from GaudiConf import IOHelper
IOHelper('ROOT').inputFiles(['LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000001_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000002_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000003_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000004_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000005_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000006_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000007_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000008_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000009_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000010_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000011_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000012_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000013_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000014_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000015_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000016_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000017_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000018_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000019_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000020_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000021_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000022_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000023_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000024_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000025_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000026_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000027_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000028_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000029_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000030_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000031_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000032_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000033_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000034_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000035_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000036_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000037_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000038_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000039_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000040_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000041_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000042_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000043_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000044_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000045_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000046_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000047_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000048_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000049_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000050_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000051_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000052_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000053_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000054_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000055_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000056_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000057_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000058_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000059_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000060_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000061_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000062_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000063_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000064_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000065_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000066_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000067_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000068_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000069_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000070_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000071_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000072_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000073_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000074_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000075_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000076_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000077_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000078_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000079_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000080_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000081_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000082_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000083_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000084_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000085_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000086_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000087_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000088_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000089_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000090_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000091_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000092_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000093_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000094_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000095_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000096_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000097_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000098_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000099_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000100_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000101_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000102_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000103_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000104_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000105_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000106_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000108_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000109_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000110_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000111_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000112_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000113_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000114_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000115_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000116_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000117_1.allstreams.dst',
'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00030282/0000/00030282_00000118_1.allstreams.dst'
], clear=True)
| Williams224/davinci-scripts | kstaretappipig/MC_12_MagDown_kstar_rho_kpipipi.py | Python | mit | 12,721 | 0.027121 |
"""
Brewery Example - merge multiple CSV files
Input: Multiple CSV files with different fields, but with common subset of
fields.
Output: Single CSV file with all fields from all files and with additional
column with origin file name
Run:
$ python merge_multiple_files.py
Afterwards display the CSV file:
$ cat merged.csv | brewery pipe pretty_printer
And see the field completeness (data quality dimension):
$ cat merged.csv | brewery pipe audit pretty_printer
"""
import brewery
from brewery import ds
import sys
# List of sources - you might want to keep this list in a json file
sources = [
{"file": "grants_2008.csv",
"fields": ["receiver", "amount", "date"]},
{"file": "grants_2009.csv",
"fields": ["id", "receiver", "amount", "contract_number", "date"]},
{"file": "grants_2010.csv",
"fields": ["receiver", "subject", "requested_amount", "amount", "date"]}
]
# Create list of all fields and add filename to store information
# about origin of data records
all_fields = brewery.FieldList(["file"])
# Go through source definitions and collect the fields
for source in sources:
for field in source["fields"]:
if field not in all_fields:
all_fields.append(field)
# Create and initialize a data target
out = ds.CSVDataTarget("merged.csv")
out.fields = brewery.FieldList(all_fields)
out.initialize()
# Append all sources
for source in sources:
path = source["file"]
# Initialize data source: skip reading of headers - we are preparing them ourselves
# use XLSDataSource for XLS files
# We ignore the fields in the header, because we have set-up fields
# previously. We need to skip the header row.
src = ds.CSVDataSource(path,read_header=False,skip_rows=1)
src.fields = ds.FieldList(source["fields"])
src.initialize()
for record in src.records():
# Add file reference into ouput - to know where the row comes from
record["file"] = path
out.append(record)
    # Close the source stream
    src.finalize()

# Close the output stream once all sources have been appended
out.finalize()
| Stiivi/brewery | examples/merge_multiple_files/merge_multiple_files.py | Python | mit | 2,107 | 0.005695 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
un_numero = 75
otro_numero = -134
| psicobyte/ejemplos-python | cap5/p54.py | Python | gpl-3.0 | 77 | 0 |
from .stopper import EarlyStopper
from .progbar import ProgressBar
from .utils import split_arr
from .data_iterator import SequentialIterator
from tensorflow.python.framework import ops
import tensorflow as tf
import logging
logging.basicConfig(format='%(module)s.%(funcName)s %(lineno)d:%(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def train(session, feed_dict, train_cost_sb, valid_cost_sb, optimizer, epoch_look_back=5,
max_epoch=100, percent_decrease=0, train_valid_ratio=[5,1], batchsize=64,
randomize_split=False):
"""
    Generic training loop: each array in `feed_dict` is split into train/valid
    portions (according to `train_valid_ratio`), `train_cost_sb` is minimized
    with `optimizer`, the average `valid_cost_sb` is reported per epoch, and
    training stops early based on `EarlyStopper`.
"""
train_arrs = []
valid_arrs = []
phs = []
for ph, arr in feed_dict.items():
train_arr, valid_arr = split_arr(arr, train_valid_ratio, randomize=randomize_split)
phs.append(ph)
train_arrs.append(train_arr)
valid_arrs.append(valid_arr)
iter_train = SequentialIterator(*train_arrs, batchsize=batchsize)
iter_valid = SequentialIterator(*valid_arrs, batchsize=batchsize)
es = EarlyStopper(max_epoch, epoch_look_back, percent_decrease)
# required for BatchNormalization layer
update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
with ops.control_dependencies(update_ops):
train_op = optimizer.minimize(train_cost_sb)
init = tf.global_variables_initializer()
session.run(init)
epoch = 0
while True:
epoch += 1
##############################[ Training ]##############################
print('\n')
logger.info('<<<<<[ epoch: {} ]>>>>>'.format(epoch))
logger.info('..training')
pbar = ProgressBar(len(iter_train))
ttl_exp = 0
mean_train_cost = 0
for batches in iter_train:
fd = dict(zip(phs, batches))
train_cost, _ = session.run([train_cost_sb, train_op], feed_dict=fd)
mean_train_cost += train_cost * len(batches[0])
ttl_exp += len(batches[0])
pbar.update(ttl_exp)
print('')
mean_train_cost /= ttl_exp
logger.info('..average train cost: {}'.format(mean_train_cost))
##############################[ Validating ]############################
logger.info('..validating')
pbar = ProgressBar(len(iter_valid))
ttl_exp = 0
mean_valid_cost = 0
for batches in iter_valid:
fd = dict(zip(phs, batches))
valid_cost = session.run(valid_cost_sb, feed_dict=fd)
mean_valid_cost += valid_cost * len(batches[0])
ttl_exp += len(batches[0])
pbar.update(ttl_exp)
print('')
mean_valid_cost /= ttl_exp
logger.info('..average valid cost: {}'.format(mean_valid_cost))
if es.continue_learning(mean_valid_cost, epoch=epoch):
logger.info('best epoch last update: {}'.format(es.best_epoch_last_update))
logger.info('best valid last update: {}'.format(es.best_valid_last_update))
else:
logger.info('training done!')
break
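

# Illustrative usage sketch (not part of the original module), assuming a tiny
# linear-regression graph; the placeholder shapes, the Adam optimizer and the
# import path are assumptions made only for illustration:
#
#   import numpy as np
#   import tensorflow as tf
#   from tensorgraph.trainobject import train
#
#   X_ph = tf.placeholder('float32', [None, 10])
#   y_ph = tf.placeholder('float32', [None, 1])
#   y_pred = tf.layers.dense(X_ph, 1)
#   cost = tf.reduce_mean(tf.square(y_pred - y_ph))
#
#   X = np.random.rand(1000, 10).astype('float32')
#   y = np.random.rand(1000, 1).astype('float32')
#   with tf.Session() as sess:
#       train(sess, {X_ph: X, y_ph: y}, cost, cost, tf.train.AdamOptimizer(1e-3),
#             max_epoch=10, batchsize=32)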
| hycis/TensorGraph | tensorgraph/trainobject.py | Python | apache-2.0 | 3,080 | 0.003896 |
# system configuration generated and used by the sysconfig module
build_time_vars = {'ABIFLAGS': 'm',
'AC_APPLE_UNIVERSAL_BUILD': 0,
'AIX_GENUINE_CPLUSPLUS': 0,
'AR': 'x86_64-linux-gnu-gcc-ar',
'ARFLAGS': 'rc',
'ASDLGEN': 'python3.5 ../Parser/asdl_c.py',
'ASDLGEN_FILES': '../Parser/asdl.py ../Parser/asdl_c.py',
'AST_ASDL': '../Parser/Python.asdl',
'AST_C': 'Python/Python-ast.c',
'AST_C_DIR': 'Python',
'AST_H': 'Include/Python-ast.h',
'AST_H_DIR': 'Include',
'BASECFLAGS': '-Wno-unused-result -Wsign-compare',
'BASECPPFLAGS': '',
'BASEMODLIBS': '',
'BINDIR': '/usr/bin',
'BINLIBDEST': '/usr/lib/python3.5',
'BLDLIBRARY': '-lpython3.5m',
'BLDSHARED': 'x86_64-linux-gnu-gcc -pthread -shared -Wl,-O1 '
'-Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro',
'BUILDEXE': '',
'BUILDPYTHON': 'python',
'BUILD_GNU_TYPE': 'x86_64-unknown-linux-gnu',
'BYTESTR_DEPS': '\\',
'CC': 'x86_64-linux-gnu-gcc -pthread',
'CCSHARED': '-fPIC',
'CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O2 -Wall '
'-Wstrict-prototypes -g -fstack-protector-strong -Wformat '
'-Werror=format-security -g -flto -fuse-linker-plugin '
'-ffat-lto-objects',
'CFLAGSFORSHARED': '-fPIC',
'CFLAGS_NODIST': '',
'CONFIGFILES': 'configure configure.ac acconfig.h pyconfig.h.in '
'Makefile.pre.in',
'CONFIGURE_CFLAGS': '-g -fstack-protector-strong -Wformat '
'-Werror=format-security',
'CONFIGURE_CFLAGS_NODIST': '-Werror=declaration-after-statement',
'CONFIGURE_CPPFLAGS': '-Wdate-time -D_FORTIFY_SOURCE=2',
'CONFIGURE_LDFLAGS': '-Wl,-Bsymbolic-functions -Wl,-z,relro',
'CONFIG_ARGS': "'--enable-shared' '--prefix=/usr' '--enable-ipv6' "
"'--enable-loadable-sqlite-extensions' "
"'--with-dbmliborder=bdb:gdbm' '--with-computed-gotos' "
"'--without-ensurepip' '--with-system-expat' "
"'--with-system-libmpdec' '--with-system-ffi' '--with-fpectl' "
"'CC=x86_64-linux-gnu-gcc' 'CFLAGS=-g -fstack-protector-strong "
"-Wformat -Werror=format-security ' "
"'LDFLAGS=-Wl,-Bsymbolic-functions -Wl,-z,relro' "
"'CPPFLAGS=-Wdate-time -D_FORTIFY_SOURCE=2'",
'CONFINCLUDEDIR': '/usr/include',
'CONFINCLUDEPY': '/usr/include/python3.5m',
'COREPYTHONPATH': ':plat-x86_64-linux-gnu',
'COVERAGE_INFO': '/build/python3.5-moRWPp/python3.5-3.5.2/build-shared/coverage.info',
'COVERAGE_REPORT': '/build/python3.5-moRWPp/python3.5-3.5.2/build-shared/lcov-report',
'COVERAGE_REPORT_OPTIONS': '--no-branch-coverage --title "CPython lcov '
'report"',
'CPPFLAGS': '-I. -IInclude -I../Include -Wdate-time -D_FORTIFY_SOURCE=2',
'CXX': 'x86_64-linux-gnu-g++ -pthread',
'DESTDIRS': '/usr /usr/lib /usr/lib/python3.5 /usr/lib/python3.5/lib-dynload',
'DESTLIB': '/usr/lib/python3.5',
'DESTPATH': '',
'DESTSHARED': '/usr/lib/python3.5/lib-dynload',
'DIRMODE': 755,
'DIST': 'README ChangeLog configure configure.ac acconfig.h pyconfig.h.in '
'Makefile.pre.in Include Lib Misc Ext-dummy',
'DISTDIRS': 'Include Lib Misc Ext-dummy',
'DISTFILES': 'README ChangeLog configure configure.ac acconfig.h '
'pyconfig.h.in Makefile.pre.in',
'DLINCLDIR': '.',
'DLLLIBRARY': '',
'DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754': 0,
'DOUBLE_IS_BIG_ENDIAN_IEEE754': 0,
'DOUBLE_IS_LITTLE_ENDIAN_IEEE754': 1,
'DYNLOADFILE': 'dynload_shlib.o',
'ENABLE_IPV6': 1,
'ENSUREPIP': 'no',
'EXE': '',
'EXEMODE': 755,
'EXTRAMACHDEPPATH': '',
'EXTRATESTOPTS': '',
'EXT_SUFFIX': '.cpython-35m-x86_64-linux-gnu.so',
'FILEMODE': 644,
'FLOCK_NEEDS_LIBBSD': 0,
'GETPGRP_HAVE_ARG': 0,
'GETTIMEOFDAY_NO_TZ': 0,
'GNULD': 'yes',
'GRAMMAR_C': 'Python/graminit.c',
'GRAMMAR_H': 'Include/graminit.h',
'GRAMMAR_INPUT': '../Grammar/Grammar',
'HAVE_ACCEPT4': 1,
'HAVE_ACOSH': 1,
'HAVE_ADDRINFO': 1,
'HAVE_ALARM': 1,
'HAVE_ALIGNED_REQUIRED': 0,
'HAVE_ALLOCA_H': 1,
'HAVE_ALTZONE': 0,
'HAVE_ASINH': 1,
'HAVE_ASM_TYPES_H': 1,
'HAVE_ATANH': 1,
'HAVE_BIND_TEXTDOMAIN_CODESET': 1,
'HAVE_BLUETOOTH_BLUETOOTH_H': 1,
'HAVE_BLUETOOTH_H': 0,
'HAVE_BROKEN_MBSTOWCS': 0,
'HAVE_BROKEN_NICE': 0,
'HAVE_BROKEN_PIPE_BUF': 0,
'HAVE_BROKEN_POLL': 0,
'HAVE_BROKEN_POSIX_SEMAPHORES': 0,
'HAVE_BROKEN_PTHREAD_SIGMASK': 0,
'HAVE_BROKEN_SEM_GETVALUE': 0,
'HAVE_BROKEN_UNSETENV': 0,
'HAVE_BUILTIN_ATOMIC': 1,
'HAVE_C99_BOOL': 1,
'HAVE_CHFLAGS': 0,
'HAVE_CHOWN': 1,
'HAVE_CHROOT': 1,
'HAVE_CLOCK': 1,
'HAVE_CLOCK_GETRES': 1,
'HAVE_CLOCK_GETTIME': 1,
'HAVE_COMPUTED_GOTOS': 1,
'HAVE_CONFSTR': 1,
'HAVE_CONIO_H': 0,
'HAVE_COPYSIGN': 1,
'HAVE_CTERMID': 1,
'HAVE_CTERMID_R': 0,
'HAVE_CURSES_H': 1,
'HAVE_CURSES_IS_TERM_RESIZED': 1,
'HAVE_CURSES_RESIZETERM': 1,
'HAVE_CURSES_RESIZE_TERM': 1,
'HAVE_DECL_ISFINITE': 1,
'HAVE_DECL_ISINF': 1,
'HAVE_DECL_ISNAN': 1,
'HAVE_DECL_TZNAME': 0,
'HAVE_DEVICE_MACROS': 1,
'HAVE_DEV_PTC': 0,
'HAVE_DEV_PTMX': 1,
'HAVE_DIRECT_H': 0,
'HAVE_DIRENT_D_TYPE': 1,
'HAVE_DIRENT_H': 1,
'HAVE_DIRFD': 1,
'HAVE_DLFCN_H': 1,
'HAVE_DLOPEN': 1,
'HAVE_DUP2': 1,
'HAVE_DUP3': 1,
'HAVE_DYNAMIC_LOADING': 1,
'HAVE_ENDIAN_H': 1,
'HAVE_EPOLL': 1,
'HAVE_EPOLL_CREATE1': 1,
'HAVE_ERF': 1,
'HAVE_ERFC': 1,
'HAVE_ERRNO_H': 1,
'HAVE_EXECV': 1,
'HAVE_EXPM1': 1,
'HAVE_FACCESSAT': 1,
'HAVE_FCHDIR': 1,
'HAVE_FCHMOD': 1,
'HAVE_FCHMODAT': 1,
'HAVE_FCHOWN': 1,
'HAVE_FCHOWNAT': 1,
'HAVE_FCNTL_H': 1,
'HAVE_FDATASYNC': 1,
'HAVE_FDOPENDIR': 1,
'HAVE_FEXECVE': 1,
'HAVE_FINITE': 1,
'HAVE_FLOCK': 1,
'HAVE_FORK': 1,
'HAVE_FORKPTY': 1,
'HAVE_FPATHCONF': 1,
'HAVE_FSEEK64': 0,
'HAVE_FSEEKO': 1,
'HAVE_FSTATAT': 1,
'HAVE_FSTATVFS': 1,
'HAVE_FSYNC': 1,
'HAVE_FTELL64': 0,
'HAVE_FTELLO': 1,
'HAVE_FTIME': 1,
'HAVE_FTRUNCATE': 1,
'HAVE_FUTIMENS': 1,
'HAVE_FUTIMES': 1,
'HAVE_FUTIMESAT': 1,
'HAVE_GAI_STRERROR': 1,
'HAVE_GAMMA': 1,
'HAVE_GCC_ASM_FOR_MC68881': 0,
'HAVE_GCC_ASM_FOR_X64': 1,
'HAVE_GCC_ASM_FOR_X87': 1,
'HAVE_GCC_UINT128_T': 1,
'HAVE_GETADDRINFO': 1,
'HAVE_GETC_UNLOCKED': 1,
'HAVE_GETENTROPY': 0,
'HAVE_GETGROUPLIST': 1,
'HAVE_GETGROUPS': 1,
'HAVE_GETHOSTBYNAME': 0,
'HAVE_GETHOSTBYNAME_R': 1,
'HAVE_GETHOSTBYNAME_R_3_ARG': 0,
'HAVE_GETHOSTBYNAME_R_5_ARG': 0,
'HAVE_GETHOSTBYNAME_R_6_ARG': 1,
'HAVE_GETITIMER': 1,
'HAVE_GETLOADAVG': 1,
'HAVE_GETLOGIN': 1,
'HAVE_GETNAMEINFO': 1,
'HAVE_GETPAGESIZE': 1,
'HAVE_GETPEERNAME': 1,
'HAVE_GETPGID': 1,
'HAVE_GETPGRP': 1,
'HAVE_GETPID': 1,
'HAVE_GETPRIORITY': 1,
'HAVE_GETPWENT': 1,
'HAVE_GETRANDOM': 0,
'HAVE_GETRANDOM_SYSCALL': 1,
'HAVE_GETRESGID': 1,
'HAVE_GETRESUID': 1,
'HAVE_GETSID': 1,
'HAVE_GETSPENT': 1,
'HAVE_GETSPNAM': 1,
'HAVE_GETTIMEOFDAY': 1,
'HAVE_GETWD': 1,
'HAVE_GLIBC_MEMMOVE_BUG': 0,
'HAVE_GRP_H': 1,
'HAVE_HSTRERROR': 1,
'HAVE_HTOLE64': 1,
'HAVE_HYPOT': 1,
'HAVE_IEEEFP_H': 0,
'HAVE_IF_NAMEINDEX': 1,
'HAVE_INET_ATON': 1,
'HAVE_INET_PTON': 1,
'HAVE_INITGROUPS': 1,
'HAVE_INT32_T': 1,
'HAVE_INT64_T': 1,
'HAVE_INTTYPES_H': 1,
'HAVE_IO_H': 0,
'HAVE_IPA_PURE_CONST_BUG': 0,
'HAVE_KILL': 1,
'HAVE_KILLPG': 1,
'HAVE_KQUEUE': 0,
'HAVE_LANGINFO_H': 1,
'HAVE_LARGEFILE_SUPPORT': 0,
'HAVE_LCHFLAGS': 0,
'HAVE_LCHMOD': 0,
'HAVE_LCHOWN': 1,
'HAVE_LGAMMA': 1,
'HAVE_LIBDL': 1,
'HAVE_LIBDLD': 0,
'HAVE_LIBIEEE': 0,
'HAVE_LIBINTL_H': 1,
'HAVE_LIBREADLINE': 1,
'HAVE_LIBRESOLV': 0,
'HAVE_LIBSENDFILE': 0,
'HAVE_LIBUTIL_H': 0,
'HAVE_LINK': 1,
'HAVE_LINKAT': 1,
'HAVE_LINUX_CAN_BCM_H': 1,
'HAVE_LINUX_CAN_H': 1,
'HAVE_LINUX_CAN_RAW_FD_FRAMES': 1,
'HAVE_LINUX_CAN_RAW_H': 1,
'HAVE_LINUX_NETLINK_H': 1,
'HAVE_LINUX_RANDOM_H': 1,
'HAVE_LINUX_TIPC_H': 1,
'HAVE_LOCKF': 1,
'HAVE_LOG1P': 1,
'HAVE_LOG2': 1,
'HAVE_LONG_DOUBLE': 1,
'HAVE_LONG_LONG': 1,
'HAVE_LSTAT': 1,
'HAVE_LUTIMES': 1,
'HAVE_MAKEDEV': 1,
'HAVE_MBRTOWC': 1,
'HAVE_MEMMOVE': 1,
'HAVE_MEMORY_H': 1,
'HAVE_MEMRCHR': 1,
'HAVE_MKDIRAT': 1,
'HAVE_MKFIFO': 1,
'HAVE_MKFIFOAT': 1,
'HAVE_MKNOD': 1,
'HAVE_MKNODAT': 1,
'HAVE_MKTIME': 1,
'HAVE_MMAP': 1,
'HAVE_MREMAP': 1,
'HAVE_NCURSES_H': 1,
'HAVE_NDIR_H': 0,
'HAVE_NETPACKET_PACKET_H': 1,
'HAVE_NET_IF_H': 1,
'HAVE_NICE': 1,
'HAVE_OPENAT': 1,
'HAVE_OPENPTY': 1,
'HAVE_OSX105_SDK': 0,
'HAVE_PATHCONF': 1,
'HAVE_PAUSE': 1,
'HAVE_PIPE2': 1,
'HAVE_PLOCK': 0,
'HAVE_POLL': 1,
'HAVE_POLL_H': 1,
'HAVE_POSIX_FADVISE': 1,
'HAVE_POSIX_FALLOCATE': 1,
'HAVE_PREAD': 1,
'HAVE_PRLIMIT': 1,
'HAVE_PROCESS_H': 0,
'HAVE_PROTOTYPES': 1,
'HAVE_PTHREAD_ATFORK': 1,
'HAVE_PTHREAD_DESTRUCTOR': 0,
'HAVE_PTHREAD_H': 1,
'HAVE_PTHREAD_INIT': 0,
'HAVE_PTHREAD_KILL': 1,
'HAVE_PTHREAD_SIGMASK': 1,
'HAVE_PTY_H': 1,
'HAVE_PUTENV': 1,
'HAVE_PWRITE': 1,
'HAVE_RAND_EGD': 1,
'HAVE_READLINK': 1,
'HAVE_READLINKAT': 1,
'HAVE_READV': 1,
'HAVE_REALPATH': 1,
'HAVE_RENAMEAT': 1,
'HAVE_RL_APPEND_HISTORY': 1,
'HAVE_RL_CALLBACK': 1,
'HAVE_RL_CATCH_SIGNAL': 1,
'HAVE_RL_COMPLETION_APPEND_CHARACTER': 1,
'HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK': 1,
'HAVE_RL_COMPLETION_MATCHES': 1,
'HAVE_RL_COMPLETION_SUPPRESS_APPEND': 1,
'HAVE_RL_PRE_INPUT_HOOK': 1,
'HAVE_RL_RESIZE_TERMINAL': 1,
'HAVE_ROUND': 1,
'HAVE_SCHED_GET_PRIORITY_MAX': 1,
'HAVE_SCHED_H': 1,
'HAVE_SCHED_RR_GET_INTERVAL': 1,
'HAVE_SCHED_SETAFFINITY': 1,
'HAVE_SCHED_SETPARAM': 1,
'HAVE_SCHED_SETSCHEDULER': 1,
'HAVE_SELECT': 1,
'HAVE_SEM_GETVALUE': 1,
'HAVE_SEM_OPEN': 1,
'HAVE_SEM_TIMEDWAIT': 1,
'HAVE_SEM_UNLINK': 1,
'HAVE_SENDFILE': 1,
'HAVE_SETEGID': 1,
'HAVE_SETEUID': 1,
'HAVE_SETGID': 1,
'HAVE_SETGROUPS': 1,
'HAVE_SETHOSTNAME': 1,
'HAVE_SETITIMER': 1,
'HAVE_SETLOCALE': 1,
'HAVE_SETPGID': 1,
'HAVE_SETPGRP': 1,
'HAVE_SETPRIORITY': 1,
'HAVE_SETREGID': 1,
'HAVE_SETRESGID': 1,
'HAVE_SETRESUID': 1,
'HAVE_SETREUID': 1,
'HAVE_SETSID': 1,
'HAVE_SETUID': 1,
'HAVE_SETVBUF': 1,
'HAVE_SHADOW_H': 1,
'HAVE_SIGACTION': 1,
'HAVE_SIGALTSTACK': 1,
'HAVE_SIGINTERRUPT': 1,
'HAVE_SIGNAL_H': 1,
'HAVE_SIGPENDING': 1,
'HAVE_SIGRELSE': 1,
'HAVE_SIGTIMEDWAIT': 1,
'HAVE_SIGWAIT': 1,
'HAVE_SIGWAITINFO': 1,
'HAVE_SNPRINTF': 1,
'HAVE_SOCKADDR_SA_LEN': 0,
'HAVE_SOCKADDR_STORAGE': 1,
'HAVE_SOCKETPAIR': 1,
'HAVE_SPAWN_H': 1,
'HAVE_SSIZE_T': 1,
'HAVE_STATVFS': 1,
'HAVE_STAT_TV_NSEC': 1,
'HAVE_STAT_TV_NSEC2': 0,
'HAVE_STDARG_PROTOTYPES': 1,
'HAVE_STDINT_H': 1,
'HAVE_STDLIB_H': 1,
'HAVE_STD_ATOMIC': 1,
'HAVE_STRDUP': 1,
'HAVE_STRFTIME': 1,
'HAVE_STRINGS_H': 1,
'HAVE_STRING_H': 1,
'HAVE_STRLCPY': 0,
'HAVE_STROPTS_H': 1,
'HAVE_STRUCT_STAT_ST_BIRTHTIME': 0,
'HAVE_STRUCT_STAT_ST_BLKSIZE': 1,
'HAVE_STRUCT_STAT_ST_BLOCKS': 1,
'HAVE_STRUCT_STAT_ST_FLAGS': 0,
'HAVE_STRUCT_STAT_ST_GEN': 0,
'HAVE_STRUCT_STAT_ST_RDEV': 1,
'HAVE_STRUCT_TM_TM_ZONE': 1,
'HAVE_SYMLINK': 1,
'HAVE_SYMLINKAT': 1,
'HAVE_SYNC': 1,
'HAVE_SYSCONF': 1,
'HAVE_SYSEXITS_H': 1,
'HAVE_SYS_AUDIOIO_H': 0,
'HAVE_SYS_BSDTTY_H': 0,
'HAVE_SYS_DEVPOLL_H': 0,
'HAVE_SYS_DIR_H': 0,
'HAVE_SYS_ENDIAN_H': 0,
'HAVE_SYS_EPOLL_H': 1,
'HAVE_SYS_EVENT_H': 0,
'HAVE_SYS_FILE_H': 1,
'HAVE_SYS_IOCTL_H': 1,
'HAVE_SYS_KERN_CONTROL_H': 0,
'HAVE_SYS_LOADAVG_H': 0,
'HAVE_SYS_LOCK_H': 0,
'HAVE_SYS_MKDEV_H': 0,
'HAVE_SYS_MODEM_H': 0,
'HAVE_SYS_NDIR_H': 0,
'HAVE_SYS_PARAM_H': 1,
'HAVE_SYS_POLL_H': 1,
'HAVE_SYS_RESOURCE_H': 1,
'HAVE_SYS_SELECT_H': 1,
'HAVE_SYS_SENDFILE_H': 1,
'HAVE_SYS_SOCKET_H': 1,
'HAVE_SYS_STATVFS_H': 1,
'HAVE_SYS_STAT_H': 1,
'HAVE_SYS_SYSCALL_H': 1,
'HAVE_SYS_SYS_DOMAIN_H': 0,
'HAVE_SYS_TERMIO_H': 0,
'HAVE_SYS_TIMES_H': 1,
'HAVE_SYS_TIME_H': 1,
'HAVE_SYS_TYPES_H': 1,
'HAVE_SYS_UIO_H': 1,
'HAVE_SYS_UN_H': 1,
'HAVE_SYS_UTSNAME_H': 1,
'HAVE_SYS_WAIT_H': 1,
'HAVE_SYS_XATTR_H': 1,
'HAVE_TCGETPGRP': 1,
'HAVE_TCSETPGRP': 1,
'HAVE_TEMPNAM': 1,
'HAVE_TERMIOS_H': 1,
'HAVE_TERM_H': 1,
'HAVE_TGAMMA': 1,
'HAVE_TIMEGM': 1,
'HAVE_TIMES': 1,
'HAVE_TMPFILE': 1,
'HAVE_TMPNAM': 1,
'HAVE_TMPNAM_R': 1,
'HAVE_TM_ZONE': 1,
'HAVE_TRUNCATE': 1,
'HAVE_TZNAME': 0,
'HAVE_UCS4_TCL': 0,
'HAVE_UINT32_T': 1,
'HAVE_UINT64_T': 1,
'HAVE_UINTPTR_T': 1,
'HAVE_UNAME': 1,
'HAVE_UNISTD_H': 1,
'HAVE_UNLINKAT': 1,
'HAVE_UNSETENV': 1,
'HAVE_USABLE_WCHAR_T': 0,
'HAVE_UTIL_H': 0,
'HAVE_UTIMENSAT': 1,
'HAVE_UTIMES': 1,
'HAVE_UTIME_H': 1,
'HAVE_WAIT3': 1,
'HAVE_WAIT4': 1,
'HAVE_WAITID': 1,
'HAVE_WAITPID': 1,
'HAVE_WCHAR_H': 1,
'HAVE_WCSCOLL': 1,
'HAVE_WCSFTIME': 1,
'HAVE_WCSXFRM': 1,
'HAVE_WMEMCMP': 1,
'HAVE_WORKING_TZSET': 1,
'HAVE_WRITEV': 1,
'HAVE_ZLIB_COPY': 1,
'HAVE__GETPTY': 0,
'HGBRANCH': '',
'HGTAG': '',
'HGVERSION': '',
'HOST_GNU_TYPE': 'x86_64-unknown-linux-gnu',
'INCLDIRSTOMAKE': '/usr/include /usr/include /usr/include/python3.5m '
'/usr/include/python3.5m',
'INCLUDEDIR': '/usr/include',
'INCLUDEPY': '/usr/include/python3.5m',
'INSTALL': '/usr/bin/install -c',
'INSTALL_DATA': '/usr/bin/install -c -m 644',
'INSTALL_PROGRAM': '/usr/bin/install -c',
'INSTALL_SCRIPT': '/usr/bin/install -c',
'INSTALL_SHARED': '/usr/bin/install -c -m 555',
'INSTSONAME': 'libpython3.5m.so.1.0',
'IO_H': 'Modules/_io/_iomodule.h',
'IO_OBJS': '\\',
'LDCXXSHARED': 'x86_64-linux-gnu-g++ -pthread -shared -Wl,-O1 '
'-Wl,-Bsymbolic-functions',
'LDFLAGS': '-Wl,-Bsymbolic-functions -Wl,-z,relro',
'LDLAST': '',
'LDLIBRARY': 'libpython3.5m.so',
'LDLIBRARYDIR': '',
'LDSHARED': 'x86_64-linux-gnu-gcc -pthread -shared -Wl,-O1 '
'-Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro',
'LDVERSION': '3.5m',
'LIBC': '',
'LIBDEST': '/usr/lib/python3.5',
'LIBDIR': '/usr/lib',
'LIBFFI_INCLUDEDIR': '',
'LIBM': '-lm',
'LIBOBJDIR': 'Python/',
'LIBOBJS': '',
'LIBPC': '/usr/lib/x86_64-linux-gnu/pkgconfig',
'LIBPL': '/usr/lib/python3.5/config-3.5m-x86_64-linux-gnu',
'LIBRARY': 'libpython3.5m.a',
'LIBRARY_OBJS': '\\',
'LIBRARY_OBJS_OMIT_FROZEN': '\\',
'LIBS': '-lpthread -ldl -lutil',
'LIBSUBDIRS': 'tkinter tkinter/test tkinter/test/test_tkinter \\',
'LINKCC': 'x86_64-linux-gnu-gcc -pthread',
'LINKFORSHARED': '-Xlinker -export-dynamic -Wl,-O1 -Wl,-Bsymbolic-functions',
'LIPO_32BIT_FLAGS': '',
'LLVM_PROF_ERR': 'no',
'LLVM_PROF_FILE': '',
'LLVM_PROF_MERGER': 'true',
'LN': 'ln',
'LOCALMODLIBS': '-lexpat -L/usr/lib -lz -lexpat',
'LOG1P_DROPS_ZERO_SIGN': 0,
'MACHDEP': 'linux',
'MACHDEPPATH': ':plat-x86_64-linux-gnu',
'MACHDEPS': 'plat-x86_64-linux-gnu',
'MACHDEP_OBJS': '',
'MACHDESTLIB': '/usr/lib/python3.5',
'MACOSX_DEPLOYMENT_TARGET': '',
'MAINCC': 'x86_64-linux-gnu-gcc -pthread',
'MAJOR_IN_MKDEV': 0,
'MAJOR_IN_SYSMACROS': 0,
'MAKESETUP': '../Modules/makesetup',
'MANDIR': '/usr/share/man',
'MKDIR_P': '/bin/mkdir -p',
'MODLIBS': '-lexpat -L/usr/lib -lz -lexpat',
'MODOBJS': 'Modules/_threadmodule.o Modules/signalmodule.o '
'Modules/arraymodule.o Modules/mathmodule.o Modules/_math.o '
'Modules/_struct.o Modules/_randommodule.o '
'Modules/_elementtree.o Modules/_pickle.o '
'Modules/_datetimemodule.o Modules/_bisectmodule.o '
'Modules/_heapqmodule.o Modules/unicodedata.o '
'Modules/fcntlmodule.o Modules/spwdmodule.o Modules/grpmodule.o '
'Modules/selectmodule.o Modules/socketmodule.o '
'Modules/_posixsubprocess.o Modules/md5module.o '
'Modules/sha1module.o Modules/sha256module.o '
'Modules/sha512module.o Modules/syslogmodule.o '
'Modules/binascii.o Modules/zlibmodule.o Modules/pyexpat.o '
'Modules/posixmodule.o Modules/errnomodule.o '
'Modules/pwdmodule.o Modules/_sre.o Modules/_codecsmodule.o '
'Modules/_weakref.o Modules/_functoolsmodule.o '
'Modules/_operator.o Modules/_collectionsmodule.o '
'Modules/itertoolsmodule.o Modules/atexitmodule.o '
'Modules/_stat.o Modules/timemodule.o Modules/_localemodule.o '
'Modules/_iomodule.o Modules/iobase.o Modules/fileio.o '
'Modules/bytesio.o Modules/bufferedio.o Modules/textio.o '
'Modules/stringio.o Modules/zipimport.o Modules/faulthandler.o '
'Modules/_tracemalloc.o Modules/hashtable.o '
'Modules/symtablemodule.o Modules/xxsubtype.o',
'MODULE_OBJS': '\\',
'MULTIARCH': 'x86_64-linux-gnu',
'MVWDELCH_IS_EXPRESSION': 1,
'NO_AS_NEEDED': '-Wl,--no-as-needed',
'OBJECT_OBJS': '\\',
'OPCODETARGETGEN': '\\',
'OPCODETARGETGEN_FILES': '\\',
'OPCODETARGETS_H': '\\',
'OPCODE_H': '../Include/opcode.h',
'OPCODE_H_DIR': '../Include',
'OPCODE_H_GEN': 'python3.5 ../Tools/scripts/generate_opcode_h.py '
'../Lib/opcode.py ../Include/opcode.h',
'OPCODE_H_SCRIPT': '../Tools/scripts/generate_opcode_h.py',
'OPT': '-DNDEBUG -g -fwrapv -O2 -Wall -Wstrict-prototypes',
'OTHER_LIBTOOL_OPT': '',
'PACKAGE_BUGREPORT': 0,
'PACKAGE_NAME': 0,
'PACKAGE_STRING': 0,
'PACKAGE_TARNAME': 0,
'PACKAGE_URL': 0,
'PACKAGE_VERSION': 0,
'PARSER_HEADERS': '\\',
'PARSER_OBJS': '\\ Parser/myreadline.o Parser/parsetok.o Parser/tokenizer.o',
'PGEN': 'Parser/pgen',
'PGENOBJS': '\\ \\',
'PGENSRCS': '\\ \\',
'PGOBJS': '\\',
'PGO_PROF_GEN_FLAG': '-fprofile-generate',
'PGO_PROF_USE_FLAG': '',
'PGSRCS': '\\',
'PLATDIR': 'plat-x86_64-linux-gnu',
'POBJS': '\\',
'POSIX_SEMAPHORES_NOT_ENABLED': 0,
'PROFILE_TASK': '-m test.regrtest --pgo -x test_asyncore test_gdb '
'test_multiprocessing_fork test_multiprocessing_forkserver '
'test_multiprocessing_main_handling '
'test_multiprocessing_spawn test_subprocess',
'PSRCS': '\\',
'PTHREAD_SYSTEM_SCHED_SUPPORTED': 1,
'PURIFY': '',
'PY3LIBRARY': 'libpython3.so',
'PYLONG_BITS_IN_DIGIT': 0,
'PYTHON': 'python',
'PYTHONFRAMEWORK': '',
'PYTHONFRAMEWORKDIR': 'no-framework',
'PYTHONFRAMEWORKINSTALLDIR': '',
'PYTHONFRAMEWORKPREFIX': '',
'PYTHONPATH': ':plat-x86_64-linux-gnu',
'PYTHON_FOR_BUILD': './python -E',
'PYTHON_HEADERS': '\\',
'PYTHON_OBJS': '\\',
'PY_CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O2 -Wall '
'-Wstrict-prototypes -g -fstack-protector-strong -Wformat '
'-Werror=format-security -g -flto -fuse-linker-plugin '
'-ffat-lto-objects',
'PY_CFLAGS_NODIST': '-Werror=declaration-after-statement',
'PY_CORE_CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O2 '
'-Wall -Wstrict-prototypes -g -fstack-protector-strong '
'-Wformat -Werror=format-security -g -flto '
'-fuse-linker-plugin -ffat-lto-objects '
'-Werror=declaration-after-statement -I. -IInclude '
'-I../Include -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC '
'-DPy_BUILD_CORE',
'PY_CPPFLAGS': '-I. -IInclude -I../Include -Wdate-time -D_FORTIFY_SOURCE=2',
'PY_FORMAT_LONG_LONG': '"ll"',
'PY_FORMAT_SIZE_T': '"z"',
'PY_LDFLAGS': '-Wl,-Bsymbolic-functions -Wl,-z,relro',
'Py_DEBUG': 0,
'Py_ENABLE_SHARED': 1,
'Py_HASH_ALGORITHM': 0,
'QUICKTESTOPTS': '-x test_subprocess test_io test_lib2to3 \\',
'RANLIB': 'x86_64-linux-gnu-gcc-ranlib',
'READELF': 'readelf',
'RESSRCDIR': 'Mac/Resources/framework',
'RETSIGTYPE': 'void',
'RUNSHARED': 'LD_LIBRARY_PATH=/build/python3.5-moRWPp/python3.5-3.5.2/build-shared',
'SCRIPTDIR': '/usr/lib',
'SETPGRP_HAVE_ARG': 0,
'SGI_ABI': '',
'SHELL': '/bin/sh',
'SHLIBS': '-lpthread -ldl -lutil',
'SHLIB_SUFFIX': '.so',
'SIGNAL_OBJS': '',
'SIGNED_RIGHT_SHIFT_ZERO_FILLS': 0,
'SITEPATH': '',
'SIZEOF_DOUBLE': 8,
'SIZEOF_FLOAT': 4,
'SIZEOF_FPOS_T': 16,
'SIZEOF_INT': 4,
'SIZEOF_LONG': 8,
'SIZEOF_LONG_DOUBLE': 16,
'SIZEOF_LONG_LONG': 8,
'SIZEOF_OFF_T': 8,
'SIZEOF_PID_T': 4,
'SIZEOF_PTHREAD_T': 8,
'SIZEOF_SHORT': 2,
'SIZEOF_SIZE_T': 8,
'SIZEOF_TIME_T': 8,
'SIZEOF_UINTPTR_T': 8,
'SIZEOF_VOID_P': 8,
'SIZEOF_WCHAR_T': 4,
'SIZEOF__BOOL': 1,
'SOABI': 'cpython-35m-x86_64-linux-gnu',
'SRCDIRS': 'Parser Grammar Objects Python Modules Mac Programs',
'SRC_GDB_HOOKS': '../Tools/gdb/libpython.py',
'STDC_HEADERS': 1,
'STRICT_SYSV_CURSES': "/* Don't use ncurses extensions */",
'STRIPFLAG': '-s',
'SUBDIRS': '',
'SUBDIRSTOO': 'Include Lib Misc',
'SYSLIBS': '-lm',
'SYS_SELECT_WITH_SYS_TIME': 1,
'TANH_PRESERVES_ZERO_SIGN': 1,
'TCLTK_INCLUDES': '',
'TCLTK_LIBS': '',
'TESTOPTS': '',
'TESTPATH': '',
'TESTPYTHON': 'LD_LIBRARY_PATH=/build/python3.5-moRWPp/python3.5-3.5.2/build-shared '
'./python',
'TESTPYTHONOPTS': '',
'TESTRUNNER': 'LD_LIBRARY_PATH=/build/python3.5-moRWPp/python3.5-3.5.2/build-shared '
'./python ../Tools/scripts/run_tests.py',
'TESTTIMEOUT': 3600,
'THREADOBJ': 'Python/thread.o',
'TIMEMODULE_LIB': 0,
'TIME_WITH_SYS_TIME': 1,
'TM_IN_SYS_TIME': 0,
'UNICODE_DEPS': '\\',
'UNIVERSALSDK': '',
'USE_COMPUTED_GOTOS': 1,
'USE_INLINE': 1,
'VA_LIST_IS_ARRAY': 1,
'VERSION': '3.5',
'VPATH': '..',
'WANT_SIGFPE_HANDLER': 1,
'WINDOW_HAS_FLAGS': 1,
'WITH_DOC_STRINGS': 1,
'WITH_DYLD': 0,
'WITH_LIBINTL': 0,
'WITH_NEXT_FRAMEWORK': 0,
'WITH_PYMALLOC': 1,
'WITH_THREAD': 1,
'WITH_TSC': 0,
'WITH_VALGRIND': 0,
'X87_DOUBLE_ROUNDING': 0,
'XMLLIBSUBDIRS': 'xml xml/dom xml/etree xml/parsers xml/sax',
'abs_builddir': '/build/python3.5-moRWPp/python3.5-3.5.2/build-shared',
'abs_srcdir': '/build/python3.5-moRWPp/python3.5-3.5.2/build-shared/..',
'cross_compiling': 'no',
'datarootdir': '/usr/share',
'exec_prefix': '/usr',
'prefix': '/usr',
'srcdir': '..'}
| arju88nair/projectCulminate | venv/lib/python3.5/plat-x86_64-linux-gnu/_sysconfigdata_m.py | Python | apache-2.0 | 21,469 | 0.032559 |
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds ad groups to a given campaign.
To get ad groups, run get_ad_groups.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README..
Tags: AdGroupService.mutate
"""
__author__ = ('api.kwinter@gmail.com (Kevin Winter)'
'Joseph DiLallo')
import uuid
from googleads import adwords
CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE'
def main(client, campaign_id):
# Initialize appropriate service.
ad_group_service = client.GetService('AdGroupService', version='v201406')
# Construct operations and add ad groups.
operations = [{
'operator': 'ADD',
'operand': {
'campaignId': campaign_id,
'name': 'Earth to Mars Cruises #%s' % uuid.uuid4(),
'status': 'ENABLED',
'biddingStrategyConfiguration': {
'bids': [
{
'xsi_type': 'CpcBid',
'bid': {
'microAmount': '1000000'
},
}
]
}
}
}, {
'operator': 'ADD',
'operand': {
'campaignId': campaign_id,
'name': 'Earth to Venus Cruises #%s' % uuid.uuid4(),
'status': 'ENABLED',
'biddingStrategyConfiguration': {
'bids': [
{
'xsi_type': 'CpcBid',
'bid': {
'microAmount': '1000000'
}
}
]
}
}
}]
ad_groups = ad_group_service.mutate(operations)
# Display results.
for ad_group in ad_groups['value']:
print ('Ad group with name \'%s\' and id \'%s\' was added.'
% (ad_group['name'], ad_group['id']))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, CAMPAIGN_ID)
| dietrichc/streamline-ppc-reports | examples/adwords/v201406/basic_operations/add_ad_groups.py | Python | apache-2.0 | 2,698 | 0.004077 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class Channel(models.Model):
_inherit = 'slide.channel'
nbr_certification = fields.Integer("Number of Certifications", compute='_compute_slides_statistics', store=True)
class Category(models.Model):
_inherit = 'slide.category'
nbr_certification = fields.Integer("Number of Certifications", compute='_count_presentations', store=True)
| t3dev/odoo | addons/website_slides_survey/models/slide_channel.py | Python | gpl-3.0 | 488 | 0.004098 |
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from indico.core.db import db
from indico.util.string import format_repr, return_ascii
class SuggestedCategory(db.Model):
__tablename__ = 'suggested_categories'
__table_args__ = {'schema': 'users'}
user_id = db.Column(
db.Integer,
db.ForeignKey('users.users.id'),
primary_key=True,
index=True,
autoincrement=False
)
category_id = db.Column(
db.Integer,
db.ForeignKey('categories.categories.id'),
primary_key=True,
index=True,
autoincrement=False
)
is_ignored = db.Column(
db.Boolean,
nullable=False,
default=False
)
score = db.Column(
db.Float,
nullable=False,
default=0
)
category = db.relationship(
'Category',
lazy=False,
backref=db.backref(
'suggestions',
lazy=True,
cascade='all, delete-orphan'
)
)
# relationship backrefs:
# - user (User.suggested_categories)
@return_ascii
def __repr__(self):
return format_repr(self, 'user_id', 'category_id', 'score', is_ignored=False)
@classmethod
def merge_users(cls, target, source):
"""Merge the suggestions for two users.
:param target: The target user of the merge.
:param source: The user that is being merged into `target`.
"""
target_suggestions = {x.category: x for x in target.suggested_categories}
for suggestion in source.suggested_categories:
new_suggestion = target_suggestions.get(suggestion.category) or cls(user=target,
category=suggestion.category)
new_suggestion.score = max(new_suggestion.score, suggestion.score)
new_suggestion.is_ignored = new_suggestion.is_ignored or suggestion.is_ignored
db.session.flush()
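
# Illustrative only (not part of the original model): merging the category
# suggestions of two accounts; `target_user` and `source_user` are assumed to
# be `User` instances obtained elsewhere:
#
#   SuggestedCategory.merge_users(target_user, source_user)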
| mvidalgarcia/indico | indico/modules/users/models/suggestions.py | Python | mit | 2,186 | 0.002287 |
from __future__ import print_function, absolute_import
from PySide2 import QtGui,QtCore,QtWidgets
from math import *
import pickle, os, json
import learnbot_dsl.guis.EditVar as EditVar
from learnbot_dsl.learnbotCode.Block import *
from learnbot_dsl.learnbotCode.Language import getLanguage
from learnbot_dsl.learnbotCode.toQImage import *
from learnbot_dsl.learnbotCode.Parser import parserLearntBotCodeOnlyUserFuntion
from learnbot_dsl.blocksConfig import pathImgBlocks
from learnbot_dsl.learnbotCode import getAprilTextDict
class KeyPressEater(QtCore.QObject):
def eventFilter(self, obj, event):
if isinstance(event, QtGui.QMouseEvent) and event.buttons() & QtCore.Qt.RightButton:
return True
return False
def toLBotPy(inst, ntab=1, offset=0):
text = inst[0]
if inst[1]["TYPE"] in [USERFUNCTION, LIBRARY]:
text = inst[0] + "()"
else:
inst[1]["VISUALBLOCK"].startOffset = offset
if inst[1]["TYPE"] is CONTROL:
if inst[1]["VARIABLES"] is not None:
text = inst[0] + "("
for var in inst[1]["VARIABLES"]:
text += var + ", "
text = text[0:-2] + ""
text += ")"
if inst[1]["TYPE"] is FUNTION:
text = "function." + inst[0] + "("
if inst[1]["VARIABLES"] is not None:
for var in inst[1]["VARIABLES"]:
text += var + ", "
text = text[0:-2] + ""
text += ")"
elif inst[1]["TYPE"] is VARIABLE:
text = inst[0]
if inst[1]["VARIABLES"] is not None:
text += " = "
for var in inst[1]["VARIABLES"]:
text += var
if inst[1]["RIGHT"] is not None:
text += " "
text += toLBotPy(inst[1]["RIGHT"], ntab, len(text) + offset)
if inst[1]["BOTTOMIN"] is not None:
text += ":\n" + "\t" * ntab
text += toLBotPy(inst[1]["BOTTOMIN"], ntab + 1, len(text) + offset)
if inst[0] in ["while", "while True"]:
text += "\n" + "\t" * (ntab - 1) + "end"
if inst[0] == "else" or (inst[0] in ["if", "elif"] and (inst[1]["BOTTOM"] is None or (
inst[1]["BOTTOM"] is not None and inst[1]["BOTTOM"][0] not in ["elif", "else"]))):
text += "\n" + "\t" * (ntab - 1) + "end"
inst[1]["VISUALBLOCK"].endOffset = len(text)-1 + offset
if inst[1]["BOTTOM"] is not None:
text += "\n" + "\t" * (ntab - 1)
text += toLBotPy(inst[1]["BOTTOM"], ntab, len(text) + offset)
return text
def EuclideanDist(p1, p2):
p = p1 - p2
return sqrt(pow(p.x(), 2) + pow(p.y(), 2))
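# Illustrative check: with QtCore.QPointF inputs,
#   EuclideanDist(QtCore.QPointF(0, 0), QtCore.QPointF(3, 4))  ->  5.0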
class VarGui(QtWidgets.QDialog, EditVar.Ui_Dialog):
def init(self):
self.setupUi(self)
def getTable(self):
return self.table
def setSlotToDeleteButton(self, fun):
self.deleteButton.clicked.connect(fun)
self.okButton.clicked.connect(self.close)
class VisualBlock(QtWidgets.QGraphicsPixmapItem, QtWidgets.QWidget):
def __init__(self, parentBlock, parent=None, scene=None):
self.startOffset = None
self.endOffset = None
self._notifications = []
self.parentBlock = parentBlock
self.__typeBlock = self.parentBlock.typeBlock
self.__type = self.parentBlock.type
self.id = self.parentBlock.id
self.connections = self.parentBlock.connections
self.highlighted = False
for c in self.connections:
c.setParent(self.parentBlock)
self.dicTrans = parentBlock.dicTrans
self.shouldUpdate = True
        if len(self.dicTrans) == 0:
self.showtext = self.parentBlock.name
else:
self.showtext = self.dicTrans[getLanguage()]
QtWidgets.QGraphicsPixmapItem.__init__(self)
QtWidgets.QWidget.__init__(self)
def foo(x):
return 32
# Load Image of block
im = cv2.imread(self.parentBlock.file, cv2.IMREAD_UNCHANGED)
r, g, b, a = cv2.split(im)
rgb = cv2.merge((r, g, b))
hsv = cv2.cvtColor(rgb, cv2.COLOR_RGB2HSV)
h, s, v = cv2.split(hsv)
h = h + self.parentBlock.hue
s = s + 160
hsv = cv2.merge((h, s, v))
im = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
r, g, b = cv2.split(im)
self.cvImg = cv2.merge((r, g, b, a))
self.cvImg = np.require(self.cvImg, np.uint8, 'C')
# if self.parentBlock.type is VARIABLE:
# self.showtext = self.parentBlock.name + " "+ self.showtext
img = generateBlock(self.cvImg, 34, self.showtext, self.parentBlock.typeBlock, None, self.parentBlock.type,
self.parentBlock.nameControl)
qImage = toQImage(img)
        # Multiplying by 0 is an easy way to get an ndarray initialized to 0,
        # similar to the original
try:
self.header = copy.copy(self.cvImg[0:39, 0:149])
self.foot = copy.copy(self.cvImg[69:104, 0:149])
except:
pass
self.img = QtGui.QPixmap(qImage)
self.scene = scene
self.setFlags(QtWidgets.QGraphicsItem.ItemIsMovable)
self.setZValue(1)
self.setPos(self.parentBlock.pos)
self.scene.activeShouldSave()
self.updatePixmap()
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.update)
self.posmouseinItem = None
self.DialogVar = None
self.popMenu = None
self.create_dialogs()
self.sizeIn = 0
self.shouldUpdateConnections = False
def addNotification(self, notification):
tooltip = self.toolTip()
if tooltip:
tooltip += '<hr />'
tooltip += notification.simpleHtml()
self.setToolTip(tooltip)
self._notifications.append(notification)
def clearNotifications(self):
self._notifications = []
self.setToolTip('')
def notifications(self):
return self._notifications
def highlight(self):
self.highlighted = True
self.updateImg(force=True)
self.updatePixmap()
def unhighlight(self):
self.highlighted = False
self.updateImg(force=True)
self.updatePixmap()
def updatePixmap(self):
self.setPixmap(self.img)
def create_dialogs(self):
if self.DialogVar is not None:
del self.DialogVar
vars = self.parentBlock.vars
self.DialogVar = VarGui()
self.DialogVar.init()
self.DialogVar.setSlotToDeleteButton(self.delete)
self.tabVar = self.DialogVar.getTable()
self.tabVar.verticalHeader().setVisible(False)
self.tabVar.horizontalHeader().setVisible(True)
self.tabVar.setColumnCount(4)
self.tabVar.setRowCount(len(vars))
self.tableHeader = [] #QtCore.QStringList()
self.tableHeader.append(self.tr('Name'))
self.tableHeader.append(self.tr('Constant'))
self.tableHeader.append(self.tr('Set to'))
self.tableHeader.append(self.tr('Type'))
self.tabVar.setHorizontalHeaderLabels(self.tableHeader)
self.tabVar.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
# i = 0
for i, var in zip(range(len(vars)),vars):
try:
if getLanguage() in var.translate:
self.tabVar.setCellWidget(i, 0, QtWidgets.QLabel(var.translate[getLanguage()]))
else:
self.tabVar.setCellWidget(i, 0, QtWidgets.QLabel(var.name))
except:
self.tabVar.setCellWidget(i, 0, QtWidgets.QLabel(var.name))
if var.type in ["float","int", "string"]:
edit = QtWidgets.QLineEdit()
if var.type == "float":
edit.setValidator(QtGui.QDoubleValidator())
self.tabVar.setCellWidget(i,3,QtWidgets.QLabel(self.tr('number')))
elif var.type == "int":
edit.setValidator(QtGui.QIntValidator())
self.tabVar.setCellWidget(i,3,QtWidgets.QLabel(self.tr('number')))
else:
self.tabVar.setCellWidget(i,3,QtWidgets.QLabel(self.tr('string')))
if var.type == "string":
edit.setText(var.defaul.replace('\"', ''))
else:
edit.setText(var.defaul)
self.tabVar.setCellWidget(i, 1, edit)
elif var.type == "boolean":
combobox = QtWidgets.QComboBox()
combobox.addItem("True")
combobox.addItem("False")
if var.defaul in ("0", "False"):
combobox.setCurrentIndex(1)
else:
combobox.setCurrentIndex(0)
self.tabVar.setCellWidget(i, 1, combobox)
self.tabVar.setCellWidget(i,3,QtWidgets.QLabel(self.tr('boolean')))
elif var.type == "list":
values = var.translateValues[getLanguage()]
combobox = QtWidgets.QComboBox()
combobox.addItems(values)
self.tabVar.setCellWidget(i, 1, combobox)
self.tabVar.setCellWidget(i,3,QtWidgets.QLabel(self.tr('list')))
elif var.type == "apriltext":
dictApriText = getAprilTextDict()
combobox = QtWidgets.QComboBox()
combobox.addItems([x for x in dictApriText])
self.tabVar.setCellWidget(i, 1, combobox)
self.tabVar.setCellWidget(i,3,QtWidgets.QLabel(self.tr('apriltext')))
combobox = QtWidgets.QComboBox()
combobox.addItem(self.tr('Constant'))
self.tabVar.setCellWidget(i, 2, combobox)
# self.tabVar.setCellWidget(i,3,QtWidgets.QLabel(var.type))
# i += 1
if self.popMenu is not None:
del self.popMenu
del self.keyPressEater
self.popMenu = QtWidgets.QMenu(self)
self.keyPressEater = KeyPressEater(self.popMenu)
self.popMenu.installEventFilter(self.keyPressEater)
action1 = QtWidgets.QAction(self.tr('Edit'), self)
action1.triggered.connect(self.on_clicked_menu_edit)
self.popMenu.addAction(action1)
if self.parentBlock.name not in ["main", "when"]:
if self.parentBlock.type is USERFUNCTION and self.parentBlock.typeBlock is COMPLEXBLOCK:
action3 = QtWidgets.QAction(self.tr('Export Block'), self)
action3.triggered.connect(self.on_clicked_menu_export_block)
self.popMenu.addAction(action3)
else:
action0 = QtWidgets.QAction(self.tr('Duplicate'), self)
action0.triggered.connect(self.on_clicked_menu_duplicate)
self.popMenu.addAction(action0)
self.popMenu.addSeparator()
action2 = QtWidgets.QAction(self.tr('Delete'), self)
action2.triggered.connect(self.on_clicked_menu_delete)
# action2.installEventFilter(self.keyPressEater)
self.popMenu.addAction(action2)
def on_clicked_menu_export_block(self):
if self.parentBlock.name not in ["main", "when"] and self.parentBlock.type is USERFUNCTION and self.parentBlock.typeBlock is COMPLEXBLOCK:
self.scene.stopAllblocks()
path = QtWidgets.QFileDialog.getExistingDirectory(self, self.tr('Select Library'), self.scene.parent.libraryPath, QtWidgets.QFileDialog.ShowDirsOnly)
self.scene.startAllblocks()
ret = None
try:
os.mkdir(os.path.join(path, self.parentBlock.name))
except:
msgBox = QtWidgets.QMessageBox()
msgBox.setWindowTitle(self.tr("Warning"))
msgBox.setIcon(QtWidgets.QMessageBox.Warning)
msgBox.setText(self.tr("This module already exists"))
msgBox.setInformativeText(self.tr("Do you want to overwrite the changes?"))
msgBox.setStandardButtons(QtWidgets.QMessageBox.Ok| QtWidgets.QMessageBox.Cancel)
msgBox.setDefaultButton(QtWidgets.QMessageBox.Ok)
ret = msgBox.exec_()
if ret is None or ret == QtWidgets.QMessageBox.Ok:
path = os.path.join(path, self.parentBlock.name)
# Save blockProject
lBInstance = self.scene.parent
with open(os.path.join(path, self.parentBlock.name + ".blockProject"), 'wb') as fichero:
dic = copy.deepcopy(lBInstance.scene.dicBlockItem)
for id in dic:
block = dic[id]
block.file = os.path.basename(block.file)
pickle.dump(
(dic, lBInstance.listNameWhens, lBInstance.listUserFunctions, lBInstance.listNameVars, lBInstance.listNameUserFunctions),
fichero, 0)
# Save config block
dictBlock = {}
dictBlock["name"] = self.parentBlock.name
dictBlock["type"] = "library"
dictBlock["shape"] = ["blockVertical"]
with open(os.path.join(path, self.parentBlock.name + ".conf"),'w') as f:
json.dump([dictBlock], f)
# Save script learnCode
inst = self.getInstructions()
code = "def " + toLBotPy(inst) + "\nend\n\n"
with open(os.path.join(path, self.parentBlock.name + ".lb"), 'w') as f:
f.write(code)
# Save script python
codePython = parserLearntBotCodeOnlyUserFuntion(code)
with open(os.path.join(path, self.parentBlock.name + ".py"), 'w') as f:
f.write(codePython)
pass
def on_clicked_menu_duplicate(self):
if self.parentBlock.name not in ["main", "when"] and not (self.parentBlock.type is USERFUNCTION and self.parentBlock.typeBlock is COMPLEXBLOCK):
self.duplicate()
self.scene.startAllblocks()
self.scene.parent.savetmpProject()
def duplicate(self, old_id=None, id=None, connection=None):
blockDuplicate = self.parentBlock.copy()
blockDuplicate.setPos(self.parentBlock.pos + QtCore.QPointF(50, 50))
self.scene.addItem(blockDuplicate, False, False, False)
id_new = blockDuplicate.id
new_connection = None
for c in blockDuplicate.connections:
if id is None and c.getType() in [TOP, LEFT]:
c.setItem(None)
c.setConnect(None)
elif old_id is not None and c.getIdItem() == old_id:
new_connection = c
c.setItem(id)
c.setConnect(connection)
elif c.getIdItem() is not None and c.getType() not in [TOP, LEFT]:
c_new, id_new2 = self.scene.getVisualItem(c.getIdItem()).duplicate(self.id, id_new, c)
c.setConnect(c_new)
c.setItem(id_new2)
return new_connection, id_new
def on_clicked_menu_edit(self):
self.scene.setIdItemSelected(None)
if self.DialogVar is not None and len(self.parentBlock.getVars())>0:
self.setCurrentParamInDialog()
self.DialogVar.open()
self.scene.setTable(self.DialogVar)
def setCurrentParamInDialog(self):
varS = self.parentBlock.getVars()
if len(varS)>0:
combo = self.tabVar.cellWidget(0, 2)
assignList = [combo.itemText(i) for i in range(combo.count())]
for cell, var in zip(range(len(varS)), varS):
if varS[cell].defaul in assignList:
index = assignList.index(varS[cell].defaul)
self.tabVar.cellWidget(cell, 2).setCurrentIndex(index)
if var.type in ["float","int", "string"]:
self.tabVar.cellWidget(cell, 1).setText("")
else:
self.tabVar.cellWidget(cell, 1).setCurrentIndex(0)
def on_clicked_menu_delete(self):
self.delete()
def start(self):
self.timer.start(5)
def stop(self):
self.timer.stop()
def activeUpdateConections(self):
self.shouldUpdateConnections = True
def getNameFuntion(self):
return self.parentBlock.name
def getIdItemBottomConnect(self):
for c in [conn for conn in self.connections if conn.getType() is BOTTOM]:
return self.scene.getVisualItem(c.getIdItem())
def getIdItemTopConnect(self):
for c in [conn for conn in self.connections if conn.getType() is TOP]:
return self.scene.getVisualItem(c.getIdItem())
def getNumSubBottom(self, n=0, size=0):
size += self.img.height() - 5
for c in [conn for conn in self.connections if conn.getType() is BOTTOM]:
if c.getConnect() is None:
return n + 1, size + 1
else:
return self.scene.getVisualItem(c.getIdItem()).getNumSubBottom(n + 1, size)
return n + 1, size + 1
def getNumSub(self, n=0):
for c in [conn for conn in self.connections if conn.getType() is BOTTOMIN and conn.getConnect() is not None]:
return self.scene.getVisualItem(c.getIdItem()).getNumSubBottom()
return 0, 0
def getInstructionsRIGHT(self, inst=[]):
for c in [conn for conn in self.connections if conn.getType() is RIGHT and conn.getIdItem() is not None]:
inst = self.scene.getVisualItem(c.getIdItem()).getInstructions()
        if len(inst) == 0:
return None
return inst
def getInstructionsBOTTOM(self, inst=[]):
for c in [conn for conn in self.connections if conn.getType() is BOTTOM and conn.getIdItem() is not None]:
inst = self.scene.getVisualItem(c.getIdItem()).getInstructions()
        if len(inst) == 0:
return None
return inst
def getInstructionsBOTTOMIN(self, inst=[]):
for c in [conn for conn in self.connections if conn.getType() is BOTTOMIN and conn.getIdItem() is not None]:
inst = self.scene.getVisualItem(c.getIdItem()).getInstructions()
        if len(inst) == 0:
return None
return inst
def getVars(self):
vars = []
varS = self.parentBlock.getVars()
# for cell in range(0, self.tabVar.rowCount()):
for cell, var in zip(range(len(varS)), varS):
if self.tabVar.cellWidget(cell, 2).currentText() == self.tr('Constant'):
if self.tabVar.cellWidget(cell, 3).text() == "boolean":
vars.append(self.tabVar.cellWidget(cell, 1).currentText())
elif self.tabVar.cellWidget(cell, 3).text() == "list":
vars.append('"' + var.values[self.tabVar.cellWidget(cell, 1).currentIndex()] + '"')
elif self.tabVar.cellWidget(cell, 3).text() == "apriltext":
vars.append('"' +self.tabVar.cellWidget(cell, 1).currentText() + '"')
elif self.tabVar.cellWidget(cell, 3).text() == "string":
vars.append('"'+self.tabVar.cellWidget(cell, 1).text()+'"')
else:
vars.append(self.tabVar.cellWidget(cell, 1).text())
else:
vars.append(self.tabVar.cellWidget(cell, 2).currentText())
        if len(vars) == 0:
vars = None
return vars
def getInstructions(self, inst=[]):
instRight = self.getInstructionsRIGHT()
instBottom = self.getInstructionsBOTTOM()
instBottomIn = self.getInstructionsBOTTOMIN()
nameControl = self.parentBlock.nameControl
        if nameControl == "":
nameControl = None
dic = {}
dic["NAMECONTROL"] = nameControl
dic["RIGHT"] = instRight
dic["BOTTOM"] = instBottom
dic["BOTTOMIN"] = instBottomIn
dic["VARIABLES"] = self.getVars()
dic["TYPE"] = self.__type
dic["VISUALBLOCK"] = self
return self.getNameFuntion(), dic
def getId(self):
return self.parentBlock.id
def updateImg(self, force=False):
if self.__typeBlock is COMPLEXBLOCK:
nSubBlock, size = self.getNumSub()
else:
size = 34
        if size == 0:
size = 34
if self.sizeIn != size or self.shouldUpdate or force:
self.sizeIn = size
im = generateBlock(self.cvImg, size, self.showtext, self.__typeBlock, None, self.getVars(), self.__type,
self.parentBlock.nameControl)
if self.highlighted:
im = generate_error_block(im)
if not self.isEnabled():
r, g, b, a = cv2.split(im)
im = cv2.cvtColor(im, cv2.COLOR_RGBA2GRAY)
im = cv2.cvtColor(im, cv2.COLOR_GRAY2RGB)
r, g, b= cv2.split(im)
im = cv2.merge((r, g, b, a))
qImage = toQImage(im)
self.img = QtGui.QPixmap(qImage)
self.updatePixmap()
if self.sizeIn != size or self.shouldUpdate:
for c in self.connections:
if c.getType() is BOTTOM:
c.setPoint(QtCore.QPointF(c.getPoint().x(), im.shape[0] - 5))
if c.getIdItem() is not None:
self.scene.getVisualItem(c.getIdItem()).moveToPos(
self.pos() + QtCore.QPointF(0, self.img.height() - 5))
if c.getType() is RIGHT:
c.setPoint(QtCore.QPointF(im.shape[1] - 5, c.getPoint().y()))
if c.getIdItem() is not None:
self.scene.getVisualItem(c.getIdItem()).moveToPos(
self.pos() + QtCore.QPointF(self.img.width() - 5, 0))
self.shouldUpdate = False
def updateVarValues(self):
vars = self.getVars()
prev_vars = self.parentBlock.getVars()
if vars is not None:
for i in range(0, len(vars)):
if vars[i] != prev_vars[i].defaul:
self.shouldUpdate = True
self.parentBlock.updateVars(vars)
break
def updateConnections(self):
for c in [conn for conn in self.connections if conn.getConnect() is not None and EuclideanDist(conn.getPosPoint(), conn.getConnect().getPosPoint()) > 7]:
c.getConnect().setItem(None)
c.getConnect().setConnect(None)
c.setItem(None)
c.setConnect(None)
def update(self):
        if len(self.dicTrans) != 0 and self.showtext != self.dicTrans[getLanguage()]:
            # Language changed
self.create_dialogs()
self.shouldUpdate = True
self.showtext = self.dicTrans[getLanguage()]
vars = self.parentBlock.vars
for i, var in zip(range(len(vars)), vars):
try:
if getLanguage() in var.translate:
self.tabVar.setCellWidget(i, 0, QtWidgets.QLabel(var.translate[getLanguage()]))
else:
self.tabVar.setCellWidget(i, 0, QtWidgets.QLabel(var.name))
if var.type == "list":
values = var.translateValues[getLanguage()]
val = self.tabVar.cellWidget(i, 1).currentIndex()
combobox = QtWidgets.QComboBox()
combobox.addItems(values)
self.tabVar.setCellWidget(i, 1, combobox)
combobox.setCurrentIndex(val)
except:
self.tabVar.setCellWidget(i, 0, QtWidgets.QLabel(var.name))
for row in range(0, self.tabVar.rowCount()):
combobox = self.tabVar.cellWidget(row, 2)
items = []
for i in reversed(range(1, combobox.count())):
items.append(combobox.itemText(i))
if combobox.itemText(i) not in self.scene.parent.listNameVars:
combobox.removeItem(i)
combobox.setCurrentIndex(0)
for var in self.scene.parent.listNameVars:
if var not in items:
combobox.addItem(var)
self.updateVarValues()
self.updateImg()
if self.shouldUpdateConnections:
self.updateConnections()
def moveToPos(self, pos, connect=False):
if self.highlighted:
self.unhighlight()
self.clearNotifications()
if connect is False and self.posmouseinItem is not None:
pos = pos - self.posmouseinItem
self.setPos(pos)
self.parentBlock.setPos(copy.deepcopy(self.pos()))
self.scene.activeShouldSave()
for c in self.connections:
if c.getType() in (TOP, LEFT) and self is self.scene.getItemSelected() and connect is not True:
if c.getIdItem() is not None:
c.getConnect().setItem(None)
c.getConnect().setConnect(None)
c.setItem(None)
c.setConnect(None)
elif c.getType() is BOTTOM:
if c.getIdItem() is not None:
self.scene.getVisualItem(c.getIdItem()).moveToPos(
self.pos() + QtCore.QPointF(0, self.img.height() - 5), connect)
elif c.getType() is BOTTOMIN:
if c.getIdItem() is not None:
self.scene.getVisualItem(c.getIdItem()).moveToPos(self.pos() + QtCore.QPointF(17, 33), connect)
elif c.getType() is RIGHT:
if c.getIdItem() is not None:
self.scene.getVisualItem(c.getIdItem()).moveToPos(
self.pos() + QtCore.QPointF(self.img.width() - 5, 0), connect)
def getLastItem(self):
for c in [conn for conn in self.connections if conn.getType() is BOTTOM]:
if c.getConnect() is None:
return c
else:
return self.scene.getVisualItem(c.getIdItem()).getLastItem()
return None
def getLastRightItem(self):
for c in [conn for conn in self.connections if conn.getType() is RIGHT]:
if c.getConnect() is None:
return c
else:
return self.scene.getVisualItem(c.getIdItem()).getLastRightItem()
return None
def moveToFront(self):
self.setZValue(1)
for c in [conn for conn in self.connections if conn.getType() in [BOTTOM, BOTTOMIN] and conn.getConnect() is not None]:
self.scene.getVisualItem(c.getIdItem()).moveToFront()
def mouseMoveEvent(self, event):
if self.isEnabled():
self.setPos(event.scenePos() - self.posmouseinItem)
self.parentBlock.setPos(self.pos())
self.scene.activeShouldSave()
def mousePressEvent(self, event):
if self.isEnabled():
if event.button() is QtCore.Qt.MouseButton.LeftButton:
self.posmouseinItem = event.scenePos() - self.pos()
self.scene.setIdItemSelected(self.id)
if self.DialogVar is not None:
self.DialogVar.close()
if event.button() is QtCore.Qt.MouseButton.RightButton:
self.popMenu.exec_(event.screenPos())
def mouseDoubleClickEvent(self, event):
if self.isEnabled():
if event.button() is QtCore.Qt.MouseButton.LeftButton:
self.scene.setIdItemSelected(None)
if self.DialogVar is not None:
self.DialogVar.open()
self.scene.setTable(self.DialogVar)
if event.button() is QtCore.Qt.MouseButton.RightButton:
pass
def mouseReleaseEvent(self, event):
if self.isEnabled():
if event.button() is QtCore.Qt.MouseButton.LeftButton:
self.posmouseinItem = None
self.scene.setIdItemSelected(None)
if event.button() is QtCore.Qt.MouseButton.RightButton:
self.posmouseinItem = None
self.scene.setIdItemSelected(None)
pass
def delete(self, savetmp=True):
self.DialogVar.close()
del self.cvImg
del self.img
del self.foot
del self.header
del self.timer
del self.DialogVar
for c in [conn for conn in self.connections if conn.getIdItem() is not None]:
if c.getType() in [BOTTOM, BOTTOMIN, RIGHT]:
self.scene.getVisualItem(c.getIdItem()).delete(savetmp=False)
else:
c.getConnect().setConnect(None)
c.getConnect().setItem(None)
if self.parentBlock.name == "when":
self.scene.parent.delWhen(self.parentBlock.nameControl)
if self.parentBlock.name == "main" and self.scene.parent.mainButton is not None:
self.scene.parent.mainButton.setEnabled(True)
self.scene.removeItem(self.id, savetmp)
del self.parentBlock
del self
def isBlockDef(self):
if self.parentBlock.name == "when":
return True
if len([conn for conn in self.connections if conn.getType() in [TOP, BOTTOM, RIGHT, LEFT]])>0:
return False
return True
def setEnabledDependentBlocks(self,enable):
self.shouldUpdate = True
self.setEnabled(enable)
for c in [conn for conn in self.connections if conn.getIdItem() is not None and conn.getType() not in [TOP, LEFT]]:
self.scene.getVisualItem(c.getIdItem()).setEnabledDependentBlocks(enable)
| robocomp/learnbot | learnbot_dsl/learnbotCode/VisualBlock.py | Python | gpl-3.0 | 29,715 | 0.00313 |
from __future__ import with_statement
from nose.tools import (
eq_ as eq,
)
from filesystem.test.util import (
maketemp,
assert_raises,
)
import errno
import os
import filesystem
import filesystem.copyonwrite
def test_mkdir():
tmp = maketemp()
filesystem.copyonwrite.path(filesystem.path(tmp)).child('foo').mkdir()
foo = os.path.join(tmp, 'foo')
assert not os.path.isdir(foo)
def test_mkdir_bad_exists():
tmp = maketemp()
p = filesystem.copyonwrite.path(filesystem.path(tmp)).child('foo')
with p.open('w') as f:
f.write('bar')
e = assert_raises(
OSError,
p.mkdir,
)
eq(e.errno, errno.EEXIST)
| nailor/filesystem | filesystem/test/test_copyonwrite_mkdir.py | Python | mit | 686 | 0.002915 |
from .utils import DslBase, BoolMixin, _make_dsl_class
from .function import SF, ScoreFunction
__all__ = [
'Q', 'Bool', 'Boosting', 'Common', 'ConstantScore', 'DisMax', 'Filtered',
'FunctionScore', 'Fuzzy', 'FuzzyLikeThis', 'FuzzyLikeThisField',
'GeoShape', 'HasChild', 'HasParent', 'Ids', 'Indices', 'Match', 'MatchAll',
'MatchPhrase', 'MatchPhrasePrefix', 'MoreLikeThis', 'MoreLikeThisField',
'MultiMatch', 'Nested', 'Prefix', 'Query', 'QueryString', 'Range',
'Regexp', 'SF', 'ScoreFunction', 'SimpleQueryString', 'SpanFirst',
'SpanMulti', 'SpanNear', 'SpanNot', 'SpanOr', 'SpanTerm', 'Template',
'Term', 'Terms', 'TopChildren', 'Wildcard'
]
def Q(name_or_query='match_all', **params):
# {"match": {"title": "python"}}
if isinstance(name_or_query, dict):
if params:
raise ValueError('Q() cannot accept parameters when passing in a dict.')
if len(name_or_query) != 1:
raise ValueError('Q() can only accept dict with a single query ({"match": {...}}). '
'Instead it got (%r)' % name_or_query)
name, params = name_or_query.copy().popitem()
return Query.get_dsl_class(name)(**params)
# MatchAll()
if isinstance(name_or_query, Query):
if params:
raise ValueError('Q() cannot accept parameters when passing in a Query object.')
return name_or_query
# s.query = Q('filtered', query=s.query)
if hasattr(name_or_query, '_proxied'):
return name_or_query._proxied
# "match", title="python"
return Query.get_dsl_class(name_or_query)(**params)
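# Illustrative usage ("title"/"python" are made-up values). The three call styles
# handled above build the same query object:
#
#   Q("match", title="python")
#   Q({"match": {"title": "python"}})
#   Q(Match(title="python"))          # Match is generated near the end of this module
#
# Each serializes to {"match": {"title": "python"}} via to_dict(), assuming the
# usual DslBase helpers imported from .utils.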
class Query(DslBase):
_type_name = 'query'
_type_shortcut = staticmethod(Q)
name = None
class MatchAll(Query):
name = 'match_all'
def __add__(self, other):
return other._clone()
__and__ = __rand__ = __radd__ = __add__
def __or__(self, other):
return self
__ror__ = __or__
EMPTY_QUERY = MatchAll()
class Bool(BoolMixin, Query):
name = 'bool'
_param_defs = {
'must': {'type': 'query', 'multi': True},
'should': {'type': 'query', 'multi': True},
'must_not': {'type': 'query', 'multi': True},
}
def __and__(self, other):
q = self._clone()
if isinstance(other, self.__class__):
q.must += other.must
q.must_not += other.must_not
q.should = []
for qx in (self, other):
min_should_match = getattr(qx, 'minimum_should_match', 0 if any((qx.must, qx.must_not)) else 1)
# all subqueries are required
if len(qx.should) <= min_should_match:
q.must.extend(qx.should)
# not all of them are required, use it and remember min_should_match
elif not q.should:
q.minimum_should_match = min_should_match
q.should = qx.should
# not all are required, add a should list to the must with proper min_should_match
else:
q.must.append(Bool(should=qx.should, minimum_should_match=min_should_match))
else:
q.must.append(other)
return q
__rand__ = __and__
# register this as Bool for Query
Query._bool = Bool
class FunctionScore(Query):
name = 'function_score'
_param_defs = {
'query': {'type': 'query'},
'filter': {'type': 'filter'},
'functions': {'type': 'score_function', 'multi': True},
}
def __init__(self, **kwargs):
if 'functions' in kwargs:
pass
else:
fns = kwargs['functions'] = []
for name in ScoreFunction._classes:
if name in kwargs:
fns.append({name: kwargs.pop(name)})
super(FunctionScore, self).__init__(**kwargs)
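# Hedged, illustrative example (field and value names are made up, and it assumes
# "field_value_factor" is one of the names registered in ScoreFunction._classes):
# because __init__ folds registered score-function names into "functions",
#
#   FunctionScore(query=Q("match", title="python"),
#                 field_value_factor={"field": "votes"})
#
# is handled like
#
#   FunctionScore(query=Q("match", title="python"),
#                 functions=[{"field_value_factor": {"field": "votes"}}])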
QUERIES = (
# compound queries
('boosting', {'positive': {'type': 'query'}, 'negative': {'type': 'query'}}),
('constant_score', {'query': {'type': 'query'}, 'filter': {'type': 'filter'}}),
('dis_max', {'queries': {'type': 'query', 'multi': True}}),
('filtered', {'query': {'type': 'query'}, 'filter': {'type': 'filter'}}),
('indices', {'query': {'type': 'query'}, 'no_match_query': {'type': 'query'}}),
# relationship queries
('nested', {'query': {'type': 'query'}}),
('has_child', {'query': {'type': 'query'}}),
('has_parent', {'query': {'type': 'query'}}),
('top_children', {'query': {'type': 'query'}}),
    # compound span queries
('span_first', {'match': {'type': 'query'}}),
('span_multi', {'match': {'type': 'query'}}),
('span_near', {'clauses': {'type': 'query', 'multi': True}}),
('span_not', {'exclude': {'type': 'query'}, 'include': {'type': 'query'}}),
('span_or', {'clauses': {'type': 'query', 'multi': True}}),
# core queries
('common', None),
('fuzzy', None),
('fuzzy_like_this', None),
('fuzzy_like_this_field', None),
('geo_shape', None),
('ids', None),
('match', None),
('match_phrase', None),
('match_phrase_prefix', None),
('more_like_this', None),
('more_like_this_field', None),
('multi_match', None),
('prefix', None),
('query_string', None),
('range', None),
('regexp', None),
('simple_query_string', None),
('span_term', None),
('template', None),
('term', None),
('terms', None),
('wildcard', None),
)
# generate the query classes dynamically
for qname, params_def in QUERIES:
qclass = _make_dsl_class(Query, qname, params_def)
globals()[qclass.__name__] = qclass
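# After this loop one class per entry above is available at module level, e.g.
# (illustrative, "user"/"kimchy" are made-up values; to_dict() comes from DslBase):
#
#   Term(user="kimchy").to_dict()  ->  {"term": {"user": "kimchy"}}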
| reflection/elasticsearch-dsl-py | elasticsearch_dsl/query.py | Python | apache-2.0 | 5,587 | 0.00358 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .version import __version__
from .core import * | barentsen/k2flix | k2flix/__init__.py | Python | mit | 99 | 0.010101 |
"""Test for `maas.client.viscera.boot_sources`."""
import random
from testtools.matchers import Equals, MatchesStructure
from .. import boot_sources
from ...testing import make_name_without_spaces, TestCase
from ..testing import bind
def make_origin():
# Create a new origin with BootSources and BootSource. The former refers
# to the latter via the origin, hence why it must be bound.
return bind(boot_sources.BootSources, boot_sources.BootSource)
class TestBootSource(TestCase):
def test__string_representation_includes_url_keyring_info_only(self):
source = boot_sources.BootSource(
{
"url": "http://images.maas.io/ephemeral-v3/daily/",
"keyring_filename": (
"/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg"
),
"keyring_data": "",
}
)
self.assertThat(
repr(source),
Equals(
"<BootSource keyring_data=%(keyring_data)r "
"keyring_filename=%(keyring_filename)r url=%(url)r>" % (source._data)
),
)
def test__read(self):
source_id = random.randint(0, 100)
url = "http://images.maas.io/ephemeral-v3/daily/"
keyring_filename = "/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg"
BootSource = make_origin().BootSource
BootSource._handler.read.return_value = {
"id": source_id,
"url": url,
"keyring_filename": keyring_filename,
"keyring_data": "",
}
source = BootSource.read(source_id)
BootSource._handler.read.assert_called_once_with(id=source_id)
self.assertThat(
source,
MatchesStructure.byEquality(
id=source_id,
url=url,
keyring_filename=keyring_filename,
keyring_data="",
),
)
def test__delete(self):
source_id = random.randint(0, 100)
BootSource = make_origin().BootSource
source = BootSource(
{
"id": source_id,
"url": "http://images.maas.io/ephemeral-v3/daily/",
"keyring_filename": (
"/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg"
),
"keyring_data": "",
}
)
source.delete()
BootSource._handler.delete.assert_called_once_with(id=source_id)
class TestBootSources(TestCase):
def test__read(self):
BootSources = make_origin().BootSources
BootSources._handler.read.return_value = [
{"id": random.randint(0, 9)},
{"id": random.randint(10, 19)},
]
sources = BootSources.read()
self.assertEquals(2, len(sources))
def test__create_calls_create_with_keyring_filename(self):
source_id = random.randint(0, 100)
url = "http://images.maas.io/ephemeral-v3/daily/"
keyring_filename = "/usr/share/keyrings/ubuntu-cloudimage-keyring.gpg"
BootSources = make_origin().BootSources
BootSources._handler.create.return_value = {
"id": source_id,
"url": url,
"keyring_filename": keyring_filename,
"keyring_data": "",
}
source = BootSources.create(url, keyring_filename=keyring_filename)
BootSources._handler.create.assert_called_once_with(
url=url, keyring_filename=keyring_filename, keyring_data=""
)
self.assertThat(
source,
MatchesStructure.byEquality(
id=source_id,
url=url,
keyring_filename=keyring_filename,
keyring_data="",
),
)
def test__create_calls_create_with_keyring_data(self):
source_id = random.randint(0, 100)
url = "http://images.maas.io/ephemeral-v3/daily/"
keyring_data = make_name_without_spaces("data")
BootSources = make_origin().BootSources
BootSources._handler.create.return_value = {
"id": source_id,
"url": url,
"keyring_filename": "",
"keyring_data": keyring_data,
}
source = BootSources.create(url, keyring_data=keyring_data)
BootSources._handler.create.assert_called_once_with(
url=url, keyring_filename="", keyring_data=keyring_data
)
self.assertThat(
source,
MatchesStructure.byEquality(
id=source_id, url=url, keyring_filename="", keyring_data=keyring_data
),
)
def test__create_calls_create_with_unsigned_url(self):
source_id = random.randint(0, 100)
url = "http://images.maas.io/ephemeral-v3/daily/streams/v1/index.json"
BootSources = make_origin().BootSources
BootSources._handler.create.return_value = {
"id": source_id,
"url": url,
"keyring_filename": "",
"keyring_data": "",
}
source = BootSources.create(url)
BootSources._handler.create.assert_called_once_with(
url=url, keyring_filename="", keyring_data=""
)
self.assertThat(
source,
MatchesStructure.byEquality(
id=source_id, url=url, keyring_filename="", keyring_data=""
),
)
| maas/python-libmaas | maas/client/viscera/tests/test_boot_sources.py | Python | agpl-3.0 | 5,431 | 0.000368 |
# -*- coding: utf-8 -*-
# ***********************************************************************
# Copyright (C) 2014 - 2017 Oscar Gerardo Lazo Arjona *
# <oscar.lazo@correo.nucleares.unam.mx> *
# *
# This file is part of FAST. *
# *
# FAST is free software: you can redistribute it and/or modify *
# it under the terms of the GNU General Public License as published by *
# the Free Software Foundation, either version 3 of the License, or *
# (at your option) any later version. *
# *
# FAST is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# GNU General Public License for more details. *
# *
# You should have received a copy of the GNU General Public License *
# along with FAST. If not, see <http://www.gnu.org/licenses/>. *
# *
# ***********************************************************************
"""The basic configuration of FAST."""
from fast import __file__
# Whether to use parallelization through OpenMP.
parallel = True
parallel = False
# Whether to use NETCDF binary files for data communication.
use_netcdf = True
use_netcdf = False
# An integer between 0 and 2 to control which tests are ran.
run_long_tests = 0
# The install directory for FAST:
fast_path = __file__[:-len("__init__.pyc")]
| oscarlazoarjona/fast | fast/config.py | Python | gpl-3.0 | 1,912 | 0 |
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib,random
from resources.lib.modules import client
def request(url, check):
try:
result = client.request(url)
if check in str(result): return result.decode('iso-8859-1').encode('utf-8')
result = client.request(get() + urllib.quote_plus(url))
if check in str(result): return result.decode('iso-8859-1').encode('utf-8')
result = client.request(get() + urllib.quote_plus(url))
if check in str(result): return result.decode('iso-8859-1').encode('utf-8')
except:
pass
def get():
return random.choice([
'http://4freeproxy.com/browse.php?b=20&u=',
'https://www.3proxy.us/index.php?hl=2e5&q=',
'https://www.4proxy.us/index.php?hl=2e5&q=',
'http://www.accessmeproxy.net/browse.php?b=20&u=',
'http://buka.link/browse.php?b=20&u=',
'http://fastrow.win/browse.php?b=20&u=',
'http://free-proxyserver.com/browse.php?b=20&u=',
'http://www.ipunblocker.com/browse.php?b=20&u=',
'http://www.mybriefonline.xyz/browse.php?b=20&u=',
'http://www.navigate-online.xyz/browse.php?b=20&u=',
'http://protectproxy.com/browse.php?b=20&u=',
'http://proxite.net/browse.php?b=20&u=',
'http://proxydash.com/browse.php?b=20&u=',
'http://www.proxywebsite.us/browse.php?b=20&u=',
'http://proxy-server.co/browse.php?b=20&u=',
'http://www.ruby-group.xyz/browse.php?b=20&u=',
'http://securefor.com/browse.php?b=20&u=',
'http://www.singleclick.info/browse.php?b=20&u=',
'http://www.socialcommunication.xyz/browse.php?b=20&u=',
'http://tbjr6.net/browse.php?b=20&u=',
'http://un404.com/browse.php?b=20&u=',
'http://www.unblockmyweb.com/browse.php?b=20&u=',
'http://unblockthatsite.net/ahora.php?b=20&u=',
'http://unblock-youtube.org/browse.php?b=20&u=',
'http://webproxy.stealthy.co/browse.php?b=20&u=',
'http://www.whyproxy.com/browse.php?b=20&u=',
'http://www.xxlproxy.com/index.php?hl=3e4&q=',
'http://zend2.com//open12.php?b=20&u=',
'https://zendproxy.com/bb.php?b=20&u=',
'https://zproxy.de/anon.php?b=20&u='
])
| JamesLinEngineer/RKMC | addons/plugin.video.phstreams/resources/lib/modules/proxy.py | Python | gpl-2.0 | 2,896 | 0.013812 |
from ctypes import *
import json
import ast
NN = CDLL('./libNN.so')
for distance in range(15):
file_rows = open("Data/tecator.csv", 'r').read().split('\n')
file_content = [
float(value)
for row in file_rows
for value in row.split(',')
if value != ''
]
numfil = len(file_rows) - 1
numcol = len(file_rows[0].split(','))
file_content_c = (
(c_float * len(file_content))(*file_content)
)
NN.main.restype=c_char_p
print(NN.main(8, distance, file_content_c, numfil, numcol))
'''
NN.main.restype=c_char_p
response = json.loads(
str(
NN.main(8, 0, file_content_c)
).replace("'", '"')
)
response = {
key.encode(): value.encode() if isinstance(value, unicode) else value
for key, value in response.items()
}
print(response)
''' | tech-teach/microservice-topology | tasks/core/cmetrics_run_test.py | Python | mit | 878 | 0.003417 |
"""
tests for the models
"""
import json
from datetime import datetime, timedelta
from django.utils.timezone import UTC
from mock import patch
from nose.plugins.attrib import attr
from student.roles import CourseCcxCoachRole
from student.tests.factories import (
AdminFactory,
)
from util.tests.test_date_utils import fake_ugettext
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import (
CourseFactory,
check_mongo_calls
)
from .factories import (
CcxFactory,
)
from ..overrides import override_field_for_ccx
@attr('shard_1')
class TestCCX(ModuleStoreTestCase):
"""Unit tests for the CustomCourseForEdX model
"""
def setUp(self):
"""common setup for all tests"""
super(TestCCX, self).setUp()
self.course = CourseFactory.create()
self.coach = AdminFactory.create()
role = CourseCcxCoachRole(self.course.id)
role.add_users(self.coach)
self.ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
def set_ccx_override(self, field, value):
"""Create a field override for the test CCX on <field> with <value>"""
override_field_for_ccx(self.ccx, self.course, field, value)
def test_ccx_course_is_correct_course(self):
"""verify that the course property of a ccx returns the right course"""
expected = self.course
actual = self.ccx.course
self.assertEqual(expected, actual)
def test_ccx_course_caching(self):
"""verify that caching the propery works to limit queries"""
with check_mongo_calls(1):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.course # pylint: disable=pointless-statement
with check_mongo_calls(0):
self.ccx.course # pylint: disable=pointless-statement
def test_ccx_start_is_correct(self):
"""verify that the start datetime for a ccx is correctly retrieved
Note that after setting the start field override microseconds are
truncated, so we can't do a direct comparison between before and after.
        For this reason we test the difference between the two and make sure it is less
than one second.
"""
expected = datetime.now(UTC())
self.set_ccx_override('start', expected)
actual = self.ccx.start # pylint: disable=no-member
diff = expected - actual
self.assertLess(abs(diff.total_seconds()), 1)
def test_ccx_start_caching(self):
"""verify that caching the start property works to limit queries"""
now = datetime.now(UTC())
self.set_ccx_override('start', now)
with check_mongo_calls(1):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.start # pylint: disable=pointless-statement, no-member
with check_mongo_calls(0):
self.ccx.start # pylint: disable=pointless-statement, no-member
def test_ccx_due_without_override(self):
"""verify that due returns None when the field has not been set"""
actual = self.ccx.due # pylint: disable=no-member
self.assertIsNone(actual)
def test_ccx_due_is_correct(self):
"""verify that the due datetime for a ccx is correctly retrieved"""
expected = datetime.now(UTC())
self.set_ccx_override('due', expected)
actual = self.ccx.due # pylint: disable=no-member
diff = expected - actual
self.assertLess(abs(diff.total_seconds()), 1)
def test_ccx_due_caching(self):
"""verify that caching the due property works to limit queries"""
expected = datetime.now(UTC())
self.set_ccx_override('due', expected)
with check_mongo_calls(1):
# these statements are used entirely to demonstrate the
# instance-level caching of these values on CCX objects. The
# check_mongo_calls context is the point here.
self.ccx.due # pylint: disable=pointless-statement, no-member
with check_mongo_calls(0):
self.ccx.due # pylint: disable=pointless-statement, no-member
def test_ccx_has_started(self):
"""verify that a ccx marked as starting yesterday has started"""
now = datetime.now(UTC())
delta = timedelta(1)
then = now - delta
self.set_ccx_override('start', then)
self.assertTrue(self.ccx.has_started()) # pylint: disable=no-member
def test_ccx_has_not_started(self):
"""verify that a ccx marked as starting tomorrow has not started"""
now = datetime.now(UTC())
delta = timedelta(1)
then = now + delta
self.set_ccx_override('start', then)
self.assertFalse(self.ccx.has_started()) # pylint: disable=no-member
def test_ccx_has_ended(self):
"""verify that a ccx that has a due date in the past has ended"""
now = datetime.now(UTC())
delta = timedelta(1)
then = now - delta
self.set_ccx_override('due', then)
self.assertTrue(self.ccx.has_ended()) # pylint: disable=no-member
def test_ccx_has_not_ended(self):
"""verify that a ccx that has a due date in the future has not eneded
"""
now = datetime.now(UTC())
delta = timedelta(1)
then = now + delta
self.set_ccx_override('due', then)
self.assertFalse(self.ccx.has_ended()) # pylint: disable=no-member
def test_ccx_without_due_date_has_not_ended(self):
"""verify that a ccx without a due date has not ended"""
self.assertFalse(self.ccx.has_ended()) # pylint: disable=no-member
# ensure that the expected localized format will be found by the i18n
# service
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "%b %d, %Y",
}))
def test_start_datetime_short_date(self):
"""verify that the start date for a ccx formats properly by default"""
start = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
expected = "Jan 01, 2015"
self.set_ccx_override('start', start)
actual = self.ccx.start_datetime_text() # pylint: disable=no-member
self.assertEqual(expected, actual)
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"DATE_TIME_FORMAT": "%b %d, %Y at %H:%M",
}))
def test_start_datetime_date_time_format(self):
"""verify that the DATE_TIME format also works as expected"""
start = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
expected = "Jan 01, 2015 at 12:00 UTC"
self.set_ccx_override('start', start)
actual = self.ccx.start_datetime_text('DATE_TIME') # pylint: disable=no-member
self.assertEqual(expected, actual)
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"SHORT_DATE_FORMAT": "%b %d, %Y",
}))
def test_end_datetime_short_date(self):
"""verify that the end date for a ccx formats properly by default"""
end = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
expected = "Jan 01, 2015"
self.set_ccx_override('due', end)
actual = self.ccx.end_datetime_text() # pylint: disable=no-member
self.assertEqual(expected, actual)
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"DATE_TIME_FORMAT": "%b %d, %Y at %H:%M",
}))
def test_end_datetime_date_time_format(self):
"""verify that the DATE_TIME format also works as expected"""
end = datetime(2015, 1, 1, 12, 0, 0, tzinfo=UTC())
expected = "Jan 01, 2015 at 12:00 UTC"
self.set_ccx_override('due', end)
actual = self.ccx.end_datetime_text('DATE_TIME') # pylint: disable=no-member
self.assertEqual(expected, actual)
@patch('util.date_utils.ugettext', fake_ugettext(translations={
"DATE_TIME_FORMAT": "%b %d, %Y at %H:%M",
}))
def test_end_datetime_no_due_date(self):
"""verify that without a due date, the end date is an empty string"""
expected = ''
actual = self.ccx.end_datetime_text() # pylint: disable=no-member
self.assertEqual(expected, actual)
actual = self.ccx.end_datetime_text('DATE_TIME') # pylint: disable=no-member
self.assertEqual(expected, actual)
def test_ccx_max_student_enrollment_correct(self):
"""
Verify the override value for max_student_enrollments_allowed
"""
expected = 200
self.set_ccx_override('max_student_enrollments_allowed', expected)
actual = self.ccx.max_student_enrollments_allowed # pylint: disable=no-member
self.assertEqual(expected, actual)
def test_structure_json_default_empty(self):
"""
By default structure_json does not contain anything
"""
self.assertEqual(self.ccx.structure_json, None) # pylint: disable=no-member
self.assertEqual(self.ccx.structure, None) # pylint: disable=no-member
def test_structure_json(self):
"""
Test a json stored in the structure_json
"""
dummy_struct = [
"block-v1:Organization+CN101+CR-FALL15+type@chapter+block@Unit_4",
"block-v1:Organization+CN101+CR-FALL15+type@chapter+block@Unit_5",
"block-v1:Organization+CN101+CR-FALL15+type@chapter+block@Unit_11"
]
json_struct = json.dumps(dummy_struct)
ccx = CcxFactory(
course_id=self.course.id,
coach=self.coach,
structure_json=json_struct
)
self.assertEqual(ccx.structure_json, json_struct) # pylint: disable=no-member
self.assertEqual(ccx.structure, dummy_struct) # pylint: disable=no-member
| solashirai/edx-platform | lms/djangoapps/ccx/tests/test_models.py | Python | agpl-3.0 | 10,002 | 0.0007 |
"""Offer time listening automation rules."""
from datetime import datetime
import logging
import voluptuous as vol
from homeassistant.const import CONF_AT, CONF_PLATFORM
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.event import (
async_track_point_in_time,
async_track_state_change,
async_track_time_change,
)
import homeassistant.util.dt as dt_util
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
_TIME_TRIGGER_SCHEMA = vol.Any(
cv.time,
vol.All(str, cv.entity_domain("input_datetime")),
msg="Expected HH:MM, HH:MM:SS or Entity ID from domain 'input_datetime'",
)
TRIGGER_SCHEMA = vol.Schema(
{
vol.Required(CONF_PLATFORM): "time",
vol.Required(CONF_AT): vol.All(cv.ensure_list, [_TIME_TRIGGER_SCHEMA]),
}
)
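# Illustrative configurations accepted by the schema above (values are made up):
#
#   TRIGGER_SCHEMA({"platform": "time", "at": "07:30:00"})
#   TRIGGER_SCHEMA({"platform": "time",
#                   "at": ["07:30:00", "input_datetime.next_alarm"]})
#
# "at" is coerced to a list; each entry must parse as HH:MM[:SS] or name an
# entity in the input_datetime domain.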
async def async_attach_trigger(hass, config, action, automation_info):
"""Listen for state changes based on configuration."""
entities = {}
removes = []
@callback
def time_automation_listener(now):
"""Listen for time changes and calls action."""
hass.async_run_job(action, {"trigger": {"platform": "time", "now": now}})
@callback
def update_entity_trigger(entity_id, old_state=None, new_state=None):
# If a listener was already set up for entity, remove it.
remove = entities.get(entity_id)
if remove:
remove()
removes.remove(remove)
remove = None
# Check state of entity. If valid, set up a listener.
if new_state:
has_date = new_state.attributes["has_date"]
if has_date:
year = new_state.attributes["year"]
month = new_state.attributes["month"]
day = new_state.attributes["day"]
has_time = new_state.attributes["has_time"]
if has_time:
hour = new_state.attributes["hour"]
minute = new_state.attributes["minute"]
second = new_state.attributes["second"]
else:
# If no time then use midnight.
hour = minute = second = 0
if has_date:
# If input_datetime has date, then track point in time.
trigger_dt = dt_util.DEFAULT_TIME_ZONE.localize(
datetime(year, month, day, hour, minute, second)
)
# Only set up listener if time is now or in the future.
if trigger_dt >= dt_util.now():
remove = async_track_point_in_time(
hass, time_automation_listener, trigger_dt
)
elif has_time:
# Else if it has time, then track time change.
remove = async_track_time_change(
hass,
time_automation_listener,
hour=hour,
minute=minute,
second=second,
)
# Was a listener set up?
if remove:
removes.append(remove)
entities[entity_id] = remove
for at_time in config[CONF_AT]:
if isinstance(at_time, str):
# input_datetime entity
update_entity_trigger(at_time, new_state=hass.states.get(at_time))
else:
# datetime.time
removes.append(
async_track_time_change(
hass,
time_automation_listener,
hour=at_time.hour,
minute=at_time.minute,
second=at_time.second,
)
)
# Track state changes of any entities.
removes.append(
async_track_state_change(hass, list(entities), update_entity_trigger)
)
@callback
def remove_track_time_changes():
"""Remove tracked time changes."""
for remove in removes:
remove()
return remove_track_time_changes
| titilambert/home-assistant | homeassistant/components/homeassistant/triggers/time.py | Python | apache-2.0 | 4,049 | 0.000247 |
#!/usr/bin/env python
import os
import shutil
import socket
from datetime import datetime
import subprocess as sp
import json
from pymongo import MongoClient
_ctest = '''
set(CTEST_SOURCE_DIRECTORY "{source}")
set(CTEST_BINARY_DIRECTORY "{build}")
include(${{CTEST_SOURCE_DIRECTORY}}/CTestConfig.cmake)
set(CTEST_SITE "{site}")
set(CTEST_BUILD_NAME "{name}")
set(CTEST_CMAKE_GENERATOR "Unix Makefiles")
ctest_start("Experimental")
ctest_configure()
ctest_build()
ctest_test(PARALLEL_LEVEL 1 RETURN_VALUE res)
ctest_submit()
if(NOT res EQUAL 0)
message(FATAL_ERROR "Test failures occurred.")
endif()
'''
_host = socket.gethostname().split('.')[0]
def config():
return {
'mongo-host': 'lusitania',
'mongo-port': 27017,
'mongo-database': 'geojs_dashboard',
'test-dir': '~/geojs-testing',
'repo': 'https://github.com/OpenGeoscience/geojs.git',
'kill-server': '/Users/jbeezley/bin/killtestserver',
'add-path': '/usr/local/bin',
'cmake': '/usr/local/bin/cmake',
'ctest': '/usr/local/bin/ctest',
'git': '/usr/local/bin/git'
}
def _communicate(cmd, **kw):
cfg = config()
pth = os.environ.get('PATH', '')
if cfg.get('add-path'):
pth = cfg['add-path'] + ':' + pth
kw['stderr'] = sp.STDOUT
kw['stdout'] = sp.PIPE
kw['shell'] = True
p = sp.Popen(
'/usr/bin/env PATH=' + pth + ' ' + cmd,
**kw
)
out, err = p.communicate()
return p.returncode, out
def run_test(repo, commit, testdir, branch):
cfg = config()
git = cfg.get('git', 'git')
cmake = cfg.get('cmake', 'cmake')
ctest = cfg.get('ctest', 'ctest')
print cmake
# ======================
# git clone and checkout
# ======================
s, out = _communicate(' '.join([
git, 'clone',
'--recursive',
repo, testdir
]))
if s != 0:
return (False, 'clone "%s" failed' % repo, out)
s, out = _communicate(' '.join([
git,
'-C', testdir,
'checkout',
commit
]))
if s != 0:
return (False, 'checkout "%s" failed' % commit, out)
s, out = _communicate(' '.join([
git,
'-C', testdir,
'submodule', 'update'
]))
if s != 0:
return (False, 'submodule update failed', out)
# =========
# configure
# =========
builddir = os.path.join(testdir, '_build')
os.makedirs(builddir)
s, out = _communicate(
' '.join([
cmake,
'-D', 'SELENIUM_TESTS=ON',
'-D', 'CHROME_TESTS=OFF',
'-D', 'FIREFOX_TESTS=ON',
'-D', 'COVERAGE_TESTS=OFF',
'..'
]),
cwd=builddir
)
if s != 0:
return (False, 'cmake configure failed', out)
# ==============
# build and test
# ==============
build_script = os.path.join(builddir, 'build.cmake')
kw = {
'source': testdir,
'build': builddir,
'site': _host,
'name': branch + '-' + commit[:6]
}
open(build_script, 'w').write(
_ctest.format(**kw)
)
s, out = _communicate(
ctest + ' -VV -S {}'.format(build_script),
cwd=builddir
)
test_result = s
test_output = out
if test_result != 0:
return (False, 'Test(s) failed', test_output)
return (True, 'All tests passed!', test_output)
def start_test(item, oldTest=None):
if oldTest:
status = {
'pass': oldTest['status']['pass'],
'output': oldTest['status']['output'],
'reason': 'Already tested in branch %s' % oldTest['branch'],
'skipped': True
}
else:
cfg = config()
basedir = os.path.expanduser(cfg['test-dir'])
testdir = os.path.join(basedir, item['commit'])
shutil.rmtree(testdir, ignore_errors=True)
try:
os.makedirs(testdir)
except OSError:
pass
result = run_test(cfg['repo'], item['commit'], testdir, item['branch'])
status = {
'pass': result[0],
'reason': result[1],
'output': result[2],
'skipped': False
}
return status
def notify(item, status):
'''
Do something to notify people, not sure what.
'''
pass
def nightly(queue, results):
for item in queue.find():
oldTest = results.find_one({'commit': item['commit']})
status = start_test(item, oldTest)
if not oldTest:
result = dict(item)
result.pop('_id')
result['time'] = datetime.now()
result['status'] = status
results.insert(result)
queue.remove(item)
notify(item, status)
def continuous(sha, branch, user, queue, results):
oldTest = results.find_one({'commit': sha})
item = {
'commit': sha,
'user': user,
'branch': branch,
'time': datetime.now()
}
status = start_test(item, oldTest)
if not oldTest:
result = dict(item)
result['time'] = datetime.now()
result['status'] = status
results.insert(result)
notify(item, status)
return status
def main(*args):
cfg = config()
cl = MongoClient(
host=cfg['mongo-host'],
port=cfg['mongo-port'],
)
db = cl[cfg['mongo-database']]
queue = db['queue']
results = db['results']
if cfg.get('kill-server'):
sp.call(cfg['kill-server'], shell=True)
if not len(args) or args[0] == 'nightly':
nightly(queue, results)
else:
return continuous(*args[:3], queue=queue, results=results)
if __name__ == '__main__':
import sys
print json.dumps(main(*sys.argv[1:]), indent=4)
| dcjohnston/geojs | dashboard/github_service/dashboard.py | Python | bsd-3-clause | 5,762 | 0 |
#!/usr/bin/env python3
from argparse import ArgumentParser
from re import compile
from sys import argv
def get_split(s):
for ch in ('_', '-'):
if ch in s:
return s.split(ch)
return compile('[A-Z]?[^A-Z]+').findall(s)
def get_cases(s):
split = get_split(s)
capital = [w.capitalize() for w in split]
lower = [w.lower() for w in split]
upper = [w.upper() for w in split]
return [
''.join([lower[0]] + capital[1:]),
''.join(capital),
''.join(lower),
''.join(upper),
'_'.join(lower),
'_'.join(upper),
'-'.join(lower),
'-'.join(upper),
]
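# Worked example (illustrative input):
#   get_cases('my_var') ->
#     ['myVar', 'MyVar', 'myvar', 'MYVAR', 'my_var', 'MY_VAR', 'my-var', 'MY-VAR']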
def get_zipped_cases(strings):
return zip(*[get_cases(s) for s in strings])
def parse_args(args):
parser = ArgumentParser(
description='Prints various string representation of provided strings')
parser.add_argument('strings', nargs='+')
return parser.parse_args(args)
def main(strings):
for items in get_zipped_cases(strings):
print(' '.join(items))
if __name__ == '__main__': # pragma: no cover
main(**parse_args(argv[1:]).__dict__)
| bjuvensjo/scripts | vang/misc/s.py | Python | apache-2.0 | 1,135 | 0 |
basedir = '/data/t3serv014/snarayan/deep/v_deepgen_4_akt_small/'
figsdir = '/home/snarayan/public_html/figs/deepgen/v4_akt/'
| sidnarayanan/BAdNet | train/gen/akt/paths.py | Python | mit | 125 | 0 |
# Visualize an annotated image with object sizes
import os
import cv2
import random
import numpy as np
from plantcv.plantcv import params
from plantcv.plantcv import color_palette
from plantcv.plantcv._debug import _debug
def obj_sizes(img, mask, num_objects=100):
"""
Label the size of objects in an image.
Inputs:
img = RGB or grayscale image data
mask = Binary mask made from selected contours
num_objects = Optional parameter to limit the number of objects that will get annotated.
Returns:
plotting_img = Plotting image with objects labeled by area
:param img: numpy.ndarray
:param mask: numpy.ndarray
:param num_objects: int
:return plotting_img: numpy.ndarray
"""
plotting_img = np.copy(img)
# Convert grayscale images to color
if len(np.shape(plotting_img)) == 2:
plotting_img = cv2.cvtColor(plotting_img, cv2.COLOR_GRAY2BGR)
# Store debug
debug = params.debug
params.debug = None
# ID contours and sort them from largest to smallest
id_objects, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
sorted_objects = sorted(id_objects, key=lambda x: cv2.contourArea(x))
# Function sorts smallest to largest so keep the last X objects listed
# sorted_objects = sorted_objects[len(sorted_objects) - num_objects: len(sorted_objects)]
# Reverse the sorted list to order contours from largest to smallest
sorted_objects.reverse()
rand_color = color_palette(num=num_objects, saved=False)
random.shuffle(rand_color)
label_coord_x = []
label_coord_y = []
area_vals = []
for i, contour in enumerate(sorted_objects):
# Break out of the for loop once the number of objects have been plotted
if i >= num_objects:
break
# ID and store area values and centers of mass for labeling them
m = cv2.moments(contour)
# Skip iteration if contour area is zero
# This is needed because cv2.contourArea can be > 0 while moments area is 0.
if m['m00'] == 0:
continue
area_vals.append(m['m00'])
label_coord_x.append(int(m["m10"] / m["m00"]))
label_coord_y.append(int(m["m01"] / m["m00"]))
# Fill in objects with color
cv2.drawContours(plotting_img, sorted_objects, i, rand_color[i], thickness=-1)
# Label with area values
for c, value in enumerate(area_vals):
text = "{:.0f}".format(value)
w = label_coord_x[c]
h = label_coord_y[c]
cv2.putText(img=plotting_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=params.text_size, color=(150, 150, 150), thickness=params.text_thickness)
print(f"There were {max(0, len(id_objects) - num_objects)} objects not annotated.")
params.debug = debug
_debug(visual=plotting_img, filename=os.path.join(params.debug_outdir, str(params.device) + '_object_sizes.png'))
return plotting_img
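# Minimal usage sketch (hypothetical inputs; assumes this function is exported as
# pcv.visualize.obj_sizes):
#
#   from plantcv import plantcv as pcv
#   img, _, _ = pcv.readimage("plant.png")    # "plant.png" is a made-up path
#   mask = ...                                # any binary mask, e.g. from a threshold step
#   labeled = pcv.visualize.obj_sizes(img=img, mask=mask, num_objects=50)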
| stiphyMT/plantcv | plantcv/plantcv/visualize/obj_sizes.py | Python | mit | 3,006 | 0.003327 |
import math
def even_numbers_only(thelist):
'''
Returns a list of even numbers in thelist
'''
return [x for x in thelist if x%2 == 0]
def is_perfect_square(x):
'''
Returns True if x is a perfect square, False otherwise
'''
thesqrt = int(math.sqrt(x))
return thesqrt * thesqrt == x
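# Examples (illustrative):
#   even_numbers_only([1, 2, 3, 4])  ->  [2, 4]
#   is_perfect_square(16)            ->  True
#   is_perfect_square(15)            ->  False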
| joequery/joequery.me | joequery/blog/posts/code/python-builtin-functions/simple_functions.py | Python | mit | 319 | 0.009404 |
from ctypes import c_uint, byref
from django.contrib.gis.geos.error import GEOSIndexError
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.libgeos import get_pointer_arr, GEOM_PTR
from django.contrib.gis.geos.linestring import LinearRing
from django.contrib.gis.geos import prototypes as capi
class Polygon(GEOSGeometry):
_minlength = 1
def __init__(self, *args, **kwargs):
"""
Initializes on an exterior ring and a sequence of holes (both
instances may be either LinearRing instances, or a tuple/list
that may be constructed into a LinearRing).
Examples of initialization, where shell, hole1, and hole2 are
valid LinearRing geometries:
>>> poly = Polygon(shell, hole1, hole2)
>>> poly = Polygon(shell, (hole1, hole2))
        Example where tuple parameters are used:
        >>> poly = Polygon(((0, 0), (0, 10), (10, 10), (10, 0), (0, 0)),
((4, 4), (4, 6), (6, 6), (6, 4), (4, 4)))
"""
if not args:
raise TypeError('Must provide at least one LinearRing, or a tuple, to initialize a Polygon.')
# Getting the ext_ring and init_holes parameters from the argument list
ext_ring = args[0]
init_holes = args[1:]
n_holes = len(init_holes)
# If initialized as Polygon(shell, (LinearRing, LinearRing)) [for backward-compatibility]
if n_holes == 1 and isinstance(init_holes[0], (tuple, list)):
if len(init_holes[0]) == 0:
init_holes = ()
n_holes = 0
elif isinstance(init_holes[0][0], LinearRing):
init_holes = init_holes[0]
n_holes = len(init_holes)
polygon = self._create_polygon(n_holes + 1, (ext_ring,) + init_holes)
super(Polygon, self).__init__(polygon, **kwargs)
def __iter__(self):
"Iterates over each ring in the polygon."
for i in xrange(len(self)):
yield self[i]
def __len__(self):
"Returns the number of rings in this Polygon."
return self.num_interior_rings + 1
@classmethod
def from_bbox(cls, bbox):
"Constructs a Polygon from a bounding box (4-tuple)."
x0, y0, x1, y1 = bbox
return GEOSGeometry( 'POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' % (
x0, y0, x0, y1, x1, y1, x1, y0, x0, y0) )
### These routines are needed for list-like operation w/ListMixin ###
def _create_polygon(self, length, items):
# Instantiate LinearRing objects if necessary, but don't clone them yet
# _construct_ring will throw a TypeError if a parameter isn't a valid ring
# If we cloned the pointers here, we wouldn't be able to clean up
# in case of error.
rings = []
for r in items:
if isinstance(r, GEOM_PTR):
rings.append(r)
else:
rings.append(self._construct_ring(r))
shell = self._clone(rings.pop(0))
n_holes = length - 1
if n_holes:
holes = get_pointer_arr(n_holes)
for i, r in enumerate(rings):
holes[i] = self._clone(r)
holes_param = byref(holes)
else:
holes_param = None
return capi.create_polygon(shell, holes_param, c_uint(n_holes))
def _clone(self, g):
if isinstance(g, GEOM_PTR):
return capi.geom_clone(g)
else:
return capi.geom_clone(g.ptr)
def _construct_ring(self, param, msg='Parameter must be a sequence of LinearRings or objects that can initialize to LinearRings'):
"Helper routine for trying to construct a ring from the given parameter."
if isinstance(param, LinearRing): return param
try:
ring = LinearRing(param)
return ring
except TypeError:
raise TypeError(msg)
def _set_list(self, length, items):
# Getting the current pointer, replacing with the newly constructed
# geometry, and destroying the old geometry.
prev_ptr = self.ptr
srid = self.srid
self.ptr = self._create_polygon(length, items)
if srid: self.srid = srid
capi.destroy_geom(prev_ptr)
def _get_single_internal(self, index):
"""
Returns the ring at the specified index. The first index, 0, will
always return the exterior ring. Indices > 0 will return the
interior ring at the given index (e.g., poly[1] and poly[2] would
return the first and second interior ring, respectively).
CAREFUL: Internal/External are not the same as Interior/Exterior!
_get_single_internal returns a pointer from the existing geometries for use
internally by the object's methods. _get_single_external returns a clone
of the same geometry for use by external code.
"""
if index == 0:
return capi.get_extring(self.ptr)
else:
# Getting the interior ring, have to subtract 1 from the index.
return capi.get_intring(self.ptr, index-1)
def _get_single_external(self, index):
return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)
_set_single = GEOSGeometry._set_single_rebuild
_assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild
#### Polygon Properties ####
@property
def num_interior_rings(self):
"Returns the number of interior rings."
# Getting the number of rings
return capi.get_nrings(self.ptr)
def _get_ext_ring(self):
"Gets the exterior ring of the Polygon."
return self[0]
def _set_ext_ring(self, ring):
"Sets the exterior ring of the Polygon."
self[0] = ring
# Properties for the exterior ring/shell.
exterior_ring = property(_get_ext_ring, _set_ext_ring)
shell = exterior_ring
@property
def tuple(self):
"Gets the tuple for each ring in this Polygon."
return tuple([self[i].tuple for i in xrange(len(self))])
coords = tuple
@property
def kml(self):
"Returns the KML representation of this Polygon."
inner_kml = ''.join(["<innerBoundaryIs>%s</innerBoundaryIs>" % self[i+1].kml
for i in xrange(self.num_interior_rings)])
return "<Polygon><outerBoundaryIs>%s</outerBoundaryIs>%s</Polygon>" % (self[0].kml, inner_kml)
| hunch/hunch-gift-app | django/contrib/gis/geos/polygon.py | Python | mit | 6,672 | 0.003447 |
import os
import autosar.rte.partition
import cfile as C
import io
import autosar.base
import autosar.bsw.com
innerIndentDefault=3 #default indendation (number of spaces)
def _genCommentHeader(comment):
lines = []
lines.append('/*********************************************************************************************************************')
lines.append('* %s'%comment)
lines.append('*********************************************************************************************************************/')
return lines
def _genCommentHeader2(comment):
"""
Same as _genCommentHeader but returns a C sequence instead of raw strings
"""
code = C.sequence()
code.append(C.line('/*********************************************************************************************************************'))
code.append(C.line('* %s'%comment))
code.append(C.line('*********************************************************************************************************************/'))
return code
class TypeGenerator:
def __init__(self, partition, useDefaultTypes=True):
self.partition = partition
self.defaultTypes = {}
if useDefaultTypes:
self._initDefaultType()
def generate(self, dest_dir = '.', file_name='Rte_Type.h'):
"""
Generates Rte_Type.h
        The output file name can be overridden with the file_name argument.
"""
if self.partition.isFinalized == False:
self.partition.finalize()
file_path = os.path.join(dest_dir, file_name)
with io.open(file_path, 'w', newline='\n') as fp:
hfile=C.hfile(file_name)
hfile.code.extend([C.line(x) for x in _genCommentHeader('Includes')])
hfile.code.append(C.include("Std_Types.h"))
hfile.code.append(C.blank())
(basicTypes,complexTypes,modeTypes) = self.partition.types.getTypes()
hfile.code.extend([C.line(x) for x in _genCommentHeader('Data Type Definitions')])
hfile.code.append(C.blank())
ws = self.partition.ws
unusedDefaultTypes = self._findUnusedDefaultTypes(ws, basicTypes)
first=True
for ref in sorted(basicTypes)+sorted(complexTypes):
dataType = ws.find(ref)
if dataType is not None:
typedef = None
if first:
first=False
else:
hfile.code.append(C.blank())
hfile.code.append('#define Rte_TypeDef_%s'%dataType.name)
if isinstance(dataType,autosar.datatype.BooleanDataType):
typedef = C.typedef('boolean', dataType.name)
hfile.code.append(C.statement(typedef))
elif isinstance(dataType,autosar.datatype.IntegerDataType):
valrange = dataType.maxVal-dataType.minVal
bitcount = valrange.bit_length()
typename = dataType.name
basetype = self._typename(bitcount,dataType.minVal)
typedef = C.typedef(basetype, typename)
hfile.code.append(C.statement(typedef))
isUnsigned = True if basetype in ('uint8','uint16','uint32') else False
if isUnsigned:
minval=str(dataType.minVal)+'u'
maxval=str(dataType.maxVal)+'u'
else:
minval=str(dataType.minVal)
maxval=str(dataType.maxVal)
hfile.code.append('#define %s_LowerLimit ((%s)%s)'%(typename,typename,minval))
hfile.code.append('#define %s_UpperLimit ((%s)%s)'%(typename,typename,maxval))
if dataType.compuMethodRef is not None:
compuMethod = ws.find(dataType.compuMethodRef)
if compuMethod is not None:
lines1=[]
lines2=[]
if isinstance(compuMethod,autosar.datatype.CompuMethodConst):
for elem in compuMethod.elements:
if isUnsigned:
value = str(elem.upperLimit)+'u'
else:
value = str(elem.upperLimit)
lines1.append('#define RTE_CONST_%s (%s)'%(elem.textValue,value))
lines2.append('#define %s ((%s)%s)'%(elem.textValue,typename,value))
if len(lines2)>0:
tmp=lines1+[C.blank()]+lines2
else:
tmp=lines1
for line in tmp:
hfile.code.append(line)
else:
raise ValueError(dataType.compuMethodRef)
elif isinstance(dataType, autosar.datatype.RecordDataType):
body = C.block(innerIndent=innerIndentDefault)
for elem in dataType.elements:
childType = ws.find(elem.typeRef, role='DataType')
body.append(C.statement(C.variable(elem.name, childType.name)))
struct = C.struct(None,body, typedef=dataType.name)
hfile.code.append(C.statement(struct))
elif isinstance(dataType, autosar.datatype.StringDataType):
hfile.code.append('typedef uint8 %s[%d];'%(dataType.name, dataType.length+1))
elif isinstance(dataType, autosar.datatype.ArrayDataType):
childType = ws.find(dataType.typeRef, role='DataType')
if childType is None:
raise ValueError('invalid type reference: '+dataType.typeRef)
hfile.code.append('typedef %s %s[%d];'%(childType.name, dataType.name, dataType.length))
elif isinstance(dataType, autosar.datatype.RealDataType):
if dataType.encoding == 'DOUBLE':
platform_typename = 'float64'
else:
platform_typename = 'float32'
hfile.code.append('typedef %s %s;'%(platform_typename, dataType.name))
else:
raise NotImplementedError(type(dataType))
#sys.stderr.write('not implemented: %s\n'%str(type(dataType)))
else:
raise ValueError(ref)
if len(modeTypes)>0:
lines=_genCommentHeader('Mode Types')
tmp=[]
hfile.code.extend(lines)
first=True
for ref in modeTypes:
if first:
first=False
else:
tmp.append(C.blank())
modeType = ws.find(ref)
hfile.code.append(C.statement(C.typedef('uint8', 'Rte_ModeType_'+modeType.name)))
for i,elem in enumerate(modeType.modeDeclarations):
# define RTE_MODE_EcuM_Mode_POST_RUN ((Rte_ModeType_EcuM_Mode)0)
                        tmp.append(C.define('RTE_MODE_%s_%s'%(modeType.name,elem.name),'((Rte_ModeType_%s)%d)'%(modeType.name, i)))
hfile.code.append(C.blank())
hfile.code.extend(tmp)
if len(unusedDefaultTypes)>0:
hfile.code.append(C.blank(2))
hfile.code.append(C.line('#ifndef RTE_SUPPRESS_UNUSED_DATATYPES'))
for name in sorted(unusedDefaultTypes):
hfile.code.append(C.blank())
hfile.code.extend(self.defaultTypes[name])
hfile.code.append(C.blank())
hfile.code.append(C.line('#endif'))
fp.write('\n'.join(hfile.lines()))
fp.write('\n')
def _initDefaultType(self):
self.defaultTypes['Boolean']=C.sequence().extend([C.statement(C.typedef('boolean', 'Boolean'))])
self.defaultTypes['UInt8']=C.sequence().extend([C.statement(C.typedef('uint8', 'UInt8')), C.define('UInt8_LowerLimit', '((UInt8)0u)'), C.define('UInt8_UpperLimit', '((UInt8)255u)')])
self.defaultTypes['UInt16']=C.sequence().extend([C.statement(C.typedef('uint16', 'UInt16')), C.define('UInt16_LowerLimit', '((UInt16)0u)'), C.define('UInt16_UpperLimit', '((UInt16)65535u)')])
self.defaultTypes['UInt32']=C.sequence().extend([C.statement(C.typedef('uint32', 'UInt32')), C.define('UInt32_LowerLimit', '((UInt32)0u)'), C.define('UInt32_UpperLimit', '((UInt32)4294967295u)')])
self.defaultTypes['SInt8']=C.sequence().extend([C.statement(C.typedef('sint8', 'SInt8')), C.define('SInt8_LowerLimit', '((SInt8)-128)'), C.define('SInt8_UpperLimit', '((SInt8)127)')])
self.defaultTypes['SInt16']=C.sequence().extend([C.statement(C.typedef('sint16', 'SInt16')), C.define('SInt16_LowerLimit', '((SInt16)-32768)'), C.define('SInt16_UpperLimit', '((SInt16)32767)')])
self.defaultTypes['SInt32']=C.sequence().extend([C.statement(C.typedef('sint32', 'SInt32')), C.define('SInt32_LowerLimit', '((SInt32)-2147483648)'), C.define('SInt32_UpperLimit', '((SInt32)2147483647)')])
def _findUnusedDefaultTypes(self, ws, typerefs):
defaultTypeNames = set(self.defaultTypes.keys())
usedTypeNames = set()
for ref in typerefs:
dataType = ws.find(ref)
if dataType is None:
raise ValueError('invalid type reference: '+ref)
usedTypeNames.add(dataType.name)
return defaultTypeNames-usedTypeNames
@staticmethod
def _typename(bitcount,minval):
if bitcount <=8:
return 'uint8' if minval >= 0 else 'sint8'
elif bitcount <=16:
return 'uint16' if minval >= 0 else 'sint16'
elif bitcount <=32:
return 'uint32' if minval >= 0 else 'sint32'
elif bitcount <=64:
return 'uint64' if minval >= 0 else 'sint64'
else:
raise ValueError(bitcount)
class RteGenerator:
"""
Generates Rte.c based on partition. The prefix argument can be used to change both
the file name and prefix name used for public function names
"""
def __init__(self, partition, prefix='Rte', include=None, mode_switch=True, os_enable=True):
self.partition=partition
self.includes = [] #array of tuples, first element is the name of include header, second element is True if this is a sysinclude
self.prefix=prefix
self.com_component = None
self.header_file_name = None
self.data_elements = []
self.extra_static_vars={}
self.extra_public_functions={}
self.extra_rte_start=C.sequence()
self.mode_switch_enable=mode_switch
self.os_enable = os_enable
#self.com_access = {'receive': {}, 'send': {}}
if include is not None:
for elem in include:
if isinstance(elem, str) or isinstance(elem, tuple):
self.includes.append(elem)
else:
raise ValueError("include items must be of type str or tuple(str,boolean)")
for component in partition.components:
if isinstance(component.inner, autosar.bsw.com.ComComponent):
if self.com_component is None:
self.com_component = component
else:
raise RuntimeError("More than one Com component allowed in a partition")
def generate(self, dest_dir='.'):
if self.os_enable:
self.extra_static_vars.update(self.partition.static_vars)
self._generate_header(dest_dir, 'RteApi.h')
self._generate_source(dest_dir, 'RteApi.c')
def _generate_header(self, dest_dir='.', file_name=None):
if file_name is None:
file_name = 'RteApi.h'
self.includes.append((file_name, False))
file_path = os.path.join(dest_dir,file_name)
with io.open(file_path, 'w', newline='\n') as fp:
header = C.hfile(file_path)
self._write_header_includes(header.code)
self._write_header_public_func(header.code)
for line in header.lines():
fp.write(line)
fp.write('\n')
fp.write('\n')
def _generate_source(self, dest_dir='.', file_name=None):
if file_name is None:
file_name = 'RteApi.c'
file_path = os.path.join(dest_dir,file_name)
with io.open(file_path, 'w', newline='\n') as fp:
self._write_includes(fp)
self._write_constants_and_typedefs(fp)
self._write_local_vars(fp)
self._write_public_funcs(fp)
def _write_includes(self, fp):
lines = _genCommentHeader('Includes')
fp.write('\n'.join(lines)+'\n')
code = C.sequence()
for include in self.includes:
code.append(C.include(*include))
if self.com_component is not None:
code.append(C.include(self.com_component.name+'.h'))
if self.os_enable:
code.append(C.include('os.h'))
fp.write('\n'.join(code.lines())+'\n\n')
def _write_constants_and_typedefs(self, fp):
fp.write('\n'.join(_genCommentHeader('Constants and Types'))+'\n\n')
def _write_local_vars(self, fp):
fp.write('\n'.join(_genCommentHeader('Local Variables'))+'\n')
code = C.sequence()
for data_element in sorted(self.partition.data_element_map.values(), key=lambda x: x.symbol):
var = C.variable(data_element.symbol, data_element.dataType.name, True)
code.append(C.statement(var))
for key in sorted(self.extra_static_vars.keys()):
code.append(C.statement(self.extra_static_vars[key]))
fp.write('\n'.join(code.lines())+'\n\n')
def _write_public_funcs(self, fp):
fp.write('\n'.join(_genCommentHeader('Public Functions'))+'\n')
self._write_rte_start(fp)
if len(self.partition.upperLayerAPI.read)>0:
self._genRead(fp, sorted(self.partition.upperLayerAPI.final['read'], key=lambda x: x.shortname))
if len(self.partition.upperLayerAPI.write)>0:
self._genWrite(fp, sorted(self.partition.upperLayerAPI.final['write'], key=lambda x: x.shortname))
if len(self.partition.upperLayerAPI.receive)>0:
self._genReceive(fp, sorted(self.partition.upperLayerAPI.final['receive'], key=lambda x: x.shortname))
if len(self.partition.upperLayerAPI.send)>0:
self._genSend(fp, sorted(self.partition.upperLayerAPI.final['send'], key=lambda x: x.shortname))
#if len(self.partition.upperLayerAPI.call)>0:
# self._genCall(fp, sorted(self.partition.upperLayerAPI.final['call'], key=lambda x: x.shortname))
if len(self.partition.upperLayerAPI.get)>0:
self._genGet(fp, sorted(self.partition.upperLayerAPI.final['get'], key=lambda x: x.shortname))
if len(self.partition.upperLayerAPI.setReadData)>0:
self._genFunctionBodies(fp, sorted(self.partition.upperLayerAPI.final['setReadData'], key=lambda x: x.shortname))
if len(self.partition.upperLayerAPI.setReadResult)>0:
self._genFunctionBodies(fp, sorted(self.partition.upperLayerAPI.setReadResult.values(), key=lambda x: x.shortname))
if self.mode_switch_enable and len(self.partition.mode_switch_functions)>0:
self._genFunctionBodies(fp, [self.partition.mode_switch_functions[key] for key in sorted(self.partition.mode_switch_functions.keys())])
if len(self.extra_public_functions)>0:
self._genFunctionBodies(fp, [self.extra_public_functions[key] for key in sorted(self.extra_public_functions.keys())])
def _write_rte_start(self, fp):
func = C.function(self.prefix+'_Start', 'void')
body = C.block(innerIndent=innerIndentDefault)
self._write_init_values(body)
if len(self.extra_rte_start)>0:
body.extend(self.extra_rte_start)
fp.write(str(func)+'\n')
fp.write('\n'.join(body.lines())+'\n\n')
def _write_init_values(self, body):
for data_element in sorted(self.partition.data_element_map.values(), key=lambda x: x.symbol):
if data_element.initValue is not None:
init_str = autosar.constant.initializer_string(data_element.initValue)
body.code.append(C.statement('%s = %s'%(data_element.symbol, init_str)))
def _genRead(self, fp, prototypes):
"""Generates all Rte_Read functions"""
for port_func in prototypes:
body = C.block(innerIndent=innerIndentDefault)
if port_func.data_element.com_access['Receive'] is not None:
com_func = port_func.data_element.com_access['Receive']
body.code.append(C.statement('return '+str(C.fcall(com_func.name, params=[port_func.proto.args[0].name]))))
else:
body.code.append(C.statement('*%s = %s'%(port_func.proto.args[0].name, port_func.data_element.symbol)))
if port_func.data_element.result_var is not None:
body.code.append(C.statement('return %s'%port_func.data_element.result_var.name))
else:
body.code.append(C.statement('return RTE_E_OK'))
fp.write(str(port_func.proto)+'\n')
fp.write('\n'.join(body.lines())+'\n\n')
def _genWrite(self, fp, prototypes):
for port_func in prototypes:
hasComSignal = False
body = C.block(innerIndent=innerIndentDefault)
if port_func.data_element.symbol is not None:
body.code.append(C.statement('%s = %s'%(port_func.data_element.symbol, port_func.proto.args[0].name)))
if port_func.data_element.com_access['Send'] is not None:
com_func = port_func.data_element.com_access['Send']
body.code.append(C.statement('return '+str(C.fcall(com_func.name, params=[port_func.proto.args[0].name]))))
else:
if port_func.data_element.result_var is not None:
body.code.append(C.statement('return %s'%port_func.data_element.result_var.name))
else:
body.code.append(C.statement('return RTE_E_OK'))
fp.write(str(port_func.proto)+'\n')
fp.write('\n'.join(body.lines())+'\n\n')
def _genReceive(self, fp, prototypes):
for proto in prototypes:
body = C.block(innerIndent=innerIndentDefault)
body.code.append(C.statement('return RTE_E_OK'))
fp.write(str(proto.func)+'\n')
fp.write('\n'.join(body.lines())+'\n\n')
def _genSend(self, fp, prototypes):
for proto in prototypes:
body = C.block(innerIndent=innerIndentDefault)
body.code.append(C.statement('return RTE_E_OK'))
fp.write(str(proto.func)+'\n')
fp.write('\n'.join(body.lines())+'\n\n')
def _genCall(self, fp, prototypes):
for proto in prototypes:
body = C.block(innerIndent=innerIndentDefault)
body.code.append(C.statement('return RTE_E_OK'))
fp.write(str(proto.func)+'\n')
fp.write('\n'.join(body.lines())+'\n\n')
def _genGet(self, fp, prototypes):
for port_func in prototypes:
body = C.block(innerIndent=innerIndentDefault)
prefix = '&' if port_func.data_element.dataType.isComplexType else ''
suffix = '[0]' if isinstance(port_func.data_element.dataType, autosar.datatype.ArrayDataType) else ''
body.code.append(C.statement('return %s%s%s'%(prefix, port_func.data_element.symbol, suffix)))
fp.write(str(port_func.proto)+'\n')
fp.write('\n'.join(body.lines())+'\n\n')
def _genFunctionBodies(self, fp, prototypes):
for func in prototypes:
fp.write(str(func.proto)+'\n')
fp.write('\n'.join(func.body.lines())+'\n\n')
def _write_header_includes(self, code):
code.extend(_genCommentHeader2("INCLUDES"))
code.append(C.include('Rte_Type.h'))
code.append(C.include('Rte.h'))
def _write_header_public_func(self, code):
code.append('')
code.extend(_genCommentHeader2("PUBLIC FUNCTION PROTOTYPES"))
code.append(C.statement(C.function('Rte_Start', 'void')))
if self.mode_switch_enable and len(self.partition.mode_switch_functions)>0:
for func in [self.partition.mode_switch_functions[key] for key in sorted(self.partition.mode_switch_functions.keys())]:
code.append(C.statement(func.proto))
class ComponentHeaderGenerator():
def __init__(self, partition):
self.partition = partition
self.useMockedAPI=False
def generate(self, destdir, mocked=None):
if mocked is not None:
self.useMockedAPI=bool(mocked)
for component in self.partition.components:
if not isinstance(component.inner, autosar.bsw.com.ComComponent):
with io.open(os.path.join(destdir, 'Rte_%s.h'%component.inner.name), 'w', newline='\n') as fp:
self._genComponentHeader(fp, component)
def _genComponentHeader(self, fp, component):
ws = component.inner.rootWS()
assert(ws is not None)
hfile=C.hfile(None, guard='RTE_%s_H'%(component.inner.name.upper()))
hfile.code.append(C.include('Rte.h'))
hfile.code.append(C.include('Rte_Type.h'))
hfile.code.append(C.blank())
lines = self._genInitValues(ws, component.inner.requirePorts+component.inner.providePorts)
if len(lines)>0:
hfile.code.extend([C.line(x) for x in _genCommentHeader('Init Values')])
hfile.code.extend(lines)
#Write API
num_funcs = sum(1 for x in component.clientAPI.get_all())
if num_funcs>0:
hfile.code.append(C.blank())
hfile.code.extend([C.line(x) for x in _genCommentHeader('API Prototypes')])
for func in component.clientAPI.get_all():
assert func.proto is not None
hfile.code.append(C.statement(func.proto))
if len(component.clientAPI.final['read'])>0:
hfile.code.append(C.blank())
hfile.code.extend([C.line(x) for x in _genCommentHeader('Rte_Read_<p>_<d>')])
hfile.code.extend([C.define(func.shortname, func.proto.name) for func in component.clientAPI.final['read']])
if len(component.clientAPI.final['write'])>0:
hfile.code.append(C.blank())
hfile.code.extend([C.line(x) for x in _genCommentHeader('Rte_Write_<p>_<d>')])
hfile.code.extend([C.define(func.shortname, func.proto.name) for func in component.clientAPI.final['write']])
if len(component.clientAPI.final['receive'])>0:
hfile.code.append(C.blank())
hfile.code.extend([C.line(x) for x in _genCommentHeader('Rte_Receive_<p>_<d>')])
hfile.code.extend([C.define(func.shortname, func.proto.name) for func in component.clientAPI.final['receive']])
if len(component.clientAPI.final['send'])>0:
hfile.code.append(C.blank())
hfile.code.extend([C.line(x) for x in _genCommentHeader('Rte_Send_<p>_<d>')])
hfile.code.extend([C.define(func.shortname, func.proto.name) for func in component.clientAPI.final['send']])
if len(component.clientAPI.final['mode'])>0:
hfile.code.append(C.blank())
hfile.code.extend([C.line(x) for x in _genCommentHeader('Rte_Mode_<p>_<d>')])
hfile.code.extend([C.define(func.shortname, func.proto.name) for func in component.clientAPI.final['mode']])
if len(component.clientAPI.final['mode'])>0:
hfile.code.append(C.blank())
hfile.code.extend([C.line(x) for x in _genCommentHeader('Rte_Mode_<mode>')])
hfile.code.extend([C.define(func.shortname, func.proto.name) for func in component.clientAPI.final['mode']])
if len(component.clientAPI.final['calprm'])>0:
hfile.code.append(C.blank())
hfile.code.extend([C.line(x) for x in _genCommentHeader('Rte_Calprm_<name>')])
hfile.code.extend([C.define(func.shortname, func.proto.name) for func in component.clientAPI.final['calprm']])
if len(component.clientAPI.final['call'])>0:
hfile.code.append(C.blank())
hfile.code.extend([C.line(x) for x in _genCommentHeader('Rte_Call_<p>_<o> ')])
hfile.code.extend([C.define(func.shortname, func.proto.name) for func in component.clientAPI.final['call']])
if len(component.runnables)>0:
for runnable in sorted(component.runnables, key=lambda x: x.symbol):
tmp = self._writeRunnableProto(runnable)
hfile.code.extend(tmp)
fp.write('\n'.join(hfile.lines()))
fp.write('\n')
def _genInitValues(self, ws, ports):
ports = sorted(ports, key=lambda port: port.name)
code = C.sequence()
for port in ports:
for comspec in port.comspec:
if isinstance(comspec, autosar.port.DataElementComSpec):
if comspec.initValueRef is not None:
initValue = ws.find(comspec.initValueRef)
if isinstance(initValue, autosar.constant.Constant):
#in case the ref is pointing to a Constant (the parent), grab the child instance using .value
initValue=initValue.value
if initValue is not None:
dataType = ws.find(initValue.typeRef)
if dataType is not None:
prefix = 'Rte_InitValue_%s_%s'%(port.name, comspec.name)
code.extend(self._getInitValue(ws, prefix, initValue, dataType))
return code
def _getInitValue(self, ws, def_name, value, dataType):
"""
returns a list or sequence
"""
code = C.sequence()
if isinstance(value, autosar.constant.IntegerValue):
if dataType.minVal>=0:
suffix='u'
else:
suffix=''
code.append(C.define(def_name,'((%s)%s%s)'%(dataType.name, value.value,suffix)))
elif isinstance(value, autosar.constant.StringValue):
code.append(C.define(def_name,'"%s"'%(value.value)))
elif isinstance(value, autosar.constant.BooleanValue):
if value.value:
text='((boolean) TRUE)'
else:
text='((boolean) FALSE)'
code.append(C.define(def_name,text))
elif isinstance(value, autosar.constant.RecordValue):
for element in value.elements:
prefix = '%s_%s'%(def_name, element.name)
dataType = ws.find(element.typeRef)
if dataType is not None:
code.extend(self._getInitValue(ws, prefix, element, dataType))
elif isinstance(value, autosar.constant.ArrayValue):
pass
else:
raise NotImplementedError(type(value))
return code
def _writeRunnableProto(self, runnable):
lines = []
lines.extend([C.line(x) for x in _genCommentHeader('Runnable %s'%runnable.name)])
lines.append(C.statement(runnable.prototype))
lines.append(C.blank())
return lines
class MockRteGenerator(RteGenerator):
def __init__(self, partition, api_prefix='Rte', file_prefix = 'MockRte', include=None, mode_switch=False, os_enable=False):
super().__init__(partition, api_prefix, include, mode_switch)
self.includes.append((file_prefix+'.h', False))
self.api_prefix = api_prefix
self.file_prefix = file_prefix
self.os_enable = os_enable
self.typedefs={}
for port in partition.unconnectedPorts():
if isinstance(port, autosar.rte.base.ProvidePort):
self._create_port_getter_api(port)
else:
if len(port.data_elements)>0 or len(port.operations)>0:
self._create_port_setter_api(port)
self.partition.upperLayerAPI.finalize()
def generate(self, dest_dir):
self._generateHeader(dest_dir)
super()._generate_source(dest_dir, self.file_prefix+'.c')
def _create_port_getter_api(self, port):
component = port.parent
for data_element in port.data_elements:
if "%s/%s"%(port.name, data_element.name) in component.data_element_port_access:
self._create_data_element_getter(component, port, data_element)
def _create_data_element_getter(self, component, port, data_element):
data_type = data_element.dataType
func_name='%s_GetWriteData_%s_%s_%s'%(self.prefix, component.name, port.name, data_element.name)
short_name='%s_GetWriteData_%s_%s'%(self.prefix, port.name, data_element.name)
suffix = '*' if data_type.isComplexType else ''
proto=C.function(func_name, data_type.name+suffix)
rte_func = autosar.rte.base.DataElementFunction(proto, port, data_element)
#self._createPortVariable(component, port, data_element)
var_name = self._createDataElementVariable(component, port, data_element)
self.partition.upperLayerAPI.get[short_name] = autosar.rte.base.GetPortFunction(short_name, proto, data_element)
def _create_port_setter_api(self, port):
component = port.parent
for data_element in port.data_elements:
if "%s/%s"%(port.name, data_element.name) in component.data_element_port_access:
self._create_data_element_setter(component, port, data_element)
for operation in port.operations:
key = "%s/%s"%(port.name, operation.name)
if key in component.operation_port_access:
self._create_operation_setter(component, port, operation, component.operation_port_access[key])
def _create_data_element_setter(self, component, port, data_element):
var_name = self._createDataElementVariable(component, port, data_element)
port_func = autosar.rte.base.SetReadDataFunction(self.prefix, component, port, data_element, var_name)
self.partition.upperLayerAPI.setReadData[port_func.shortname] = port_func
port_func = autosar.rte.base.SetReadResultFunction(self.prefix, component, port, data_element)
self.partition.upperLayerAPI.setReadResult[port_func.shortname]=port_func
self.extra_static_vars[port_func.static_var.name]=port_func.static_var
self.extra_rte_start.append(C.statement('%s = RTE_E_OK'%(data_element.result_var.name)))
def _create_operation_setter(self, component, port, operation, port_access):
func_name='%s_SetCallHandler_%s_%s_%s'%(self.prefix, component.name, port.name, operation.name)
short_name='%s_SetCallHandler_%s_%s'%(self.prefix, port.name, operation.name)
type_name = '%s_%s_ServerCallHandler_t'%(port.name, operation.name)
var_name = 'm_ServerCallHandler_%s_%s_%s'%(component.name, port.name, operation.name)
port_func = port.portAPI['Rte_Call_%s_%s'%(port.name, operation.name)]
tmp_proto = C.fptr.from_func(port_func.proto, type_name)
self.typedefs[type_name] = 'typedef %s'%str(tmp_proto)
proto = C.function(func_name, 'void', args=[C.variable('handler_func', type_name, pointer=True)])
func = autosar.rte.base.SetCallHandlerFunction(short_name, proto, operation, var_name)
self.extra_public_functions[short_name]=func
static_var = C.variable(var_name, type_name, static=True, pointer=True)
self.extra_static_vars[var_name]=static_var
self.extra_rte_start.append(C.statement('%s = (%s*) 0'%(var_name, type_name)))
body = self._createMockServerCallFunction(port_func.proto, var_name)
self.extra_public_functions[port_func.proto.name]=autosar.rte.base.ServerCallFunction(port_func.proto, body)
def _createMockServerCallFunction(self, proto, var_name):
body = C.block(innerIndent=innerIndentDefault)
body.append(C.line('if (%s != 0)'%(var_name)))
inner = C.block(innerIndent=innerIndentDefault)
fcall = C.fcall(var_name)
for arg in proto.args:
fcall.add_param(arg.name)
if proto.typename != 'void':
inner.append(C.statement('return %s'%str(fcall)))
else:
inner.append(C.statement(fcall))
body.append(inner)
if proto.typename != 'void':
body.append(C.statement('return RTE_E_OK'))
return body
def _createDataElementVariable(self, component, port, data_element):
data_element_map = self.partition.data_element_map
variable_name = '_'.join([component.name, port.name, data_element.name])
if variable_name not in data_element_map:
data_element.symbol = variable_name
data_element_map[variable_name] = data_element
return variable_name
def _generateHeader(self, dest_dir):
filepath = os.path.join(dest_dir,self.file_prefix+'.h')
with io.open(filepath, 'w', newline='\n') as fp:
for line in self._createHeaderLines(filepath):
fp.write(line)
fp.write('\n')
fp.write('\n')
def _createHeaderLines(self, filepath):
hfile = C.hfile(filepath)
code = hfile.code
code.extend([C.line(x) for x in _genCommentHeader('Includes')])
code.append(C.include("Std_Types.h"))
code.append(C.include("Rte_Type.h"))
code.append(C.include("Rte.h"))
code.append(C.blank())
code.extend(_genCommentHeader('Constants and Types'))
for key in sorted(self.typedefs.keys()):
code.append(C.statement(self.typedefs[key]))
code.append(C.blank())
code.extend([C.line(x) for x in _genCommentHeader('Public Function Declarations')])
code.append(C.blank())
code.append(C.statement(C.function('%s_Start'%self.api_prefix, 'void')))
for func in sorted(self.partition.upperLayerAPI.final['get'], key=lambda x: x.shortname):
assert func.proto is not None
hfile.code.append(C.statement(func.proto))
for func in sorted(self.partition.upperLayerAPI.final['setReadData'], key=lambda x: x.shortname):
assert func.proto is not None
hfile.code.append(C.statement(func.proto))
for func in sorted(self.partition.upperLayerAPI.final['setReadResult'], key=lambda x: x.shortname):
assert func.proto is not None
hfile.code.append(C.statement(func.proto))
for func in sorted(self.extra_public_functions.values(), key=lambda x: x.shortname):
assert func.proto is not None
hfile.code.append(C.statement(func.proto))
hfile.code.append(C.blank())
return hfile.lines()
class RteTaskGenerator:
"""
RteTask C code generator
"""
def __init__(self, partition, os_cfg, prefix='RteTask', include=None):
self.partition = partition
self.prefix = prefix
self.os_cfg = os_cfg
self.includes = [
#array of tuples, first element is the name of include header, second element is True if this is a sysinclude
('stdio.h', True),
('Rte.h', False),
('Rte_Type.h', False),
#('%s.h'%self.prefix, False),
('os.h', False),
]
for component in self.partition.components:
if not isinstance(component.inner, autosar.bsw.com.ComComponent):
self.includes.append(('Rte_%s.h'%component.name, False))
if include is not None:
            for elem in list(include):
if isinstance(elem, str):
self.includes.append((elem, False))
elif isinstance(elem, tuple):
self.includes.append(elem)
else:
raise ValueError("elem: expected string or tuple, got "+str(type(elem)))
def generate(self, dest_dir='.'):
#self._generate_header(dest_dir)
self._generate_source(dest_dir)
def _generate_source(self, dest_dir):
file_name = self.prefix+'.c'
file_path = os.path.join(dest_dir,file_name)
with open(file_path, 'w', newline='\n') as fp:
s1 = self._write_source_includes()
s2 = self._write_source_constants_and_typedefs()
s3 = self._write_source_local_funcs()
s4 = self._write_source_global_funcs()
for seq in [s1, s2, s3, s4]:
fp.write('\n'.join(seq.lines())+'\n')
def _write_source_includes(self):
code = C.sequence()
code.extend(_genCommentHeader2('INCLUDES'))
code.append(C.blank())
for include in self.includes:
code.append(C.include(*include))
code.append(C.blank())
return code
def _write_source_constants_and_typedefs(self):
code = C.sequence()
code.extend(_genCommentHeader2('CONSTANTS AND DATA TYPES'))
code.append(C.blank())
return code
def _write_source_local_funcs(self):
code = C.sequence()
code.extend(_genCommentHeader2('LOCAL FUNCTION PROTOTYPES'))
code.append(C.blank())
return code
def _write_source_global_funcs(self):
code = C.sequence()
code.extend(_genCommentHeader2('GLOBAL FUNCTIONS'))
code.append(C.blank())
for task in sorted(self.os_cfg.tasks, key=lambda x: x.name):
code.append(C.line('OS_TASK_HANDLER({0.name}, arg)'.format(task)))
code.append(self._generate_task_body(task))
code.append(C.blank(2))
return code
def _generate_task_body(self, task):
code = C.block(innerIndent=innerIndentDefault)
isRunning=C.variable('isRunning', 'boolean')
code.append(C.statement('{0} = TRUE'.format(str(isRunning))))
code.append(C.statement('os_task_t *self = (os_task_t*)arg'))
code.append('')
code.append(C.line('if (self == 0)'))
body = C.block(innerIndent=innerIndentDefault)
body.append(C.statement('THREAD_RETURN(1)'))
code.append(body)
code.append('')
code.append(C.line('while (isRunning == TRUE)'))
while_block = C.block(innerIndent=innerIndentDefault)
while_block.append(C.statement('uint32 eventMask'))
while_block.append(C.statement('int8_t result = os_task_waitEvent(self, &eventMask)'))
while_block.append(C.line('if (result == 0)'))
if_block = C.block(innerIndent=innerIndentDefault)
if_block.extend(self._generate_event_mask_triggers(task))
while_block.append(if_block)
while_block.append(C.line('else if(result > 0)'))
if_block = C.block(innerIndent=innerIndentDefault)
if_block.append(C.statement('printf("%s_QuitEvent\\n")'%task.name))
if_block.append(C.statement('isRunning = false'))
while_block.append(if_block)
while_block.append(C.line('else'))
if_block = C.block(innerIndent=innerIndentDefault)
if_block.append(C.statement(r'fprintf(stderr, "os_task_waitEvent failed\n")'))
while_block.append(if_block)
code.append(while_block)
code.append('')
code.append(C.statement('THREAD_RETURN(0)'))
return code
def _generate_event_mask_triggers(self, task):
code = C.sequence()
for runnable in task.runnables:
if runnable.processed:
continue
matching_runnables = self._find_compatible_runnables(task, runnable)
self._generate_runnable_calls(code, matching_runnables)
for matching in matching_runnables:
matching.processed=True
return code
def _find_compatible_runnables(self, task, current):
result = [current]
for other in task.runnables:
if (other is not current) and (not other.processed):
if len(current.event_triggers) == len(other.event_triggers):
is_compatible = True
for current_event in current.event_triggers:
found = False
for other_event in other.event_triggers:
if current_event.symbol == other_event.symbol:
found = True
break
if not found:
is_compatible = False
break
if is_compatible:
result.append(other)
return result
def _generate_runnable_calls(self, code, matching_runnables):
events = matching_runnables[0].event_triggers
if len(events) == 1:
event = events[0]
if not isinstance(event, autosar.rte.base.OperationInvokedEvent):
code.append(C.line('if (eventMask & %s)'%event.symbol))
block = C.block(innerIndent = innerIndentDefault)
for runnable in matching_runnables:
block.append(C.statement(C.fcall(runnable.symbol)))
code.append(block)
elif len(events) > 1:
raise NotImplementedError('multiple events')
def _generate_header(self, dest_dir):
file_name = self.prefix+'.h'
file_path = os.path.join(dest_dir,file_name)
with io.open(file_path, 'w', newline='\n') as fp:
print("#ifndef RTE_TASK_H", file=fp)
print("#define RTE_TASK_H", file=fp)
self._write_header_includes(fp)
self._write_header_constants_and_typedefs(fp)
self._write_header_global_var(fp)
self._write_header_global_proto(fp)
print("#endif //RTE_TASK_H", file=fp)
def _write_header_includes(self, fp):
lines = _genCommentHeader('INCLUDES')
lines.append('#ifdef _MSC_VER')
lines.append('#include <Windows.h>')
lines.append('#else')
lines.append('#include <pthread.h>')
lines.append('#endif //_MSC_VER')
lines.append('#include "osmacro.h"')
fp.write('\n'.join(lines)+'\n\n')
def _write_header_constants_and_typedefs(self, fp):
lines = _genCommentHeader('CONSTANTS AND DATA TYPES')
fp.write('\n'.join(lines)+'\n\n')
def _write_header_global_var(self, fp):
lines = _genCommentHeader('GLOBAL VARIABLES')
fp.write('\n'.join(lines)+'\n\n')
def _write_header_global_proto(self, fp):
lines = _genCommentHeader('GLOBAL FUNCTION PROTOTYPES')
for task in self.os_cfg.tasks:
lines.append('THREAD_PROTO(%s, arg);'%task.name)
fp.write('\n'.join(lines)+'\n\n')
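# Usage sketch (illustrative only). The generator classes above all operate on an
# autosar.rte.partition.Partition instance; how that partition and the os_cfg object
# are populated is project specific and assumed here. Only the generator calls
# themselves are defined in this module:
#
#     TypeGenerator(partition).generate(dest_dir='generated')
#     RteGenerator(partition).generate(dest_dir='generated')
#     ComponentHeaderGenerator(partition).generate('generated')
#     RteTaskGenerator(partition, os_cfg).generate(dest_dir='generated')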
| cogu/autosar | autosar/rte/generator.py | Python | mit | 42,335 | 0.023597 |
###############################################################################
##
## Copyright (c) Crossbar.io Technologies GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case9_4_1 import Case9_4_1
class Case9_4_3(Case9_4_1):
DESCRIPTION = """Send fragmented binary message message with message payload of length 4 * 2**20 (4M). Sent out in fragments of 1k."""
EXPECTATION = """Receive echo'ed binary message (with payload as sent)."""
def init(self):
self.DATALEN = 4 * 2**20
self.FRAGSIZE = 1 * 2**10
self.PAYLOAD = "*" * self.DATALEN
self.WAITSECS = 100
self.reportTime = True
| crossbario/autobahn-testsuite | autobahntestsuite/autobahntestsuite/case/case9_4_3.py | Python | apache-2.0 | 1,271 | 0.015736 |
__author__ = 'evren kutar'
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^(?P<cslug>[\w-]+)/$', 'blog.views.post_category', name='category'),
url(r'^(?P<cslug>[\w-]+)/(?P<slug>[\w-]+)/$', 'blog.views.post', name='post')
) | evrenkutar/blog | blog/urls.py | Python | gpl-2.0 | 299 | 0.013378 |
class ReportParser:
"""
Parser with generic functionality for all Report Types (Tabular, Summary, Matrix)
Parameters
----------
report: dict, return value of Connection.get_report()
"""
def __init__(self, report):
self.data = report
self.type = self.data["reportMetadata"]["reportFormat"]
self.has_details = self.data["hasDetailRows"]
def get_grand_total(self):
return self.data["factMap"]["T!T"]["aggregates"][0]["value"]
@staticmethod
def _flatten_record(record):
return [field["label"] for field in record]
def _get_field_labels(self):
columns = self.data["reportMetadata"]["detailColumns"]
column_details = self.data["reportExtendedMetadata"]["detailColumnInfo"]
return {key: column_details[value]["label"] for key, value in enumerate(columns)}
def records(self):
"""
Return a list of all records included in the report. If detail rows are not included
        in the report a ValueError is raised instead.
Returns
-------
records: list
"""
if not self.has_details:
raise ValueError('Report does not include details so cannot access individual records')
records = []
fact_map = self.data["factMap"]
for group in fact_map.values():
rows = group["rows"]
group_records = (self._flatten_record(row["dataCells"]) for row in rows)
for record in group_records:
records.append(record)
return records
def records_dict(self):
"""
Return a list of dictionaries for all records in the report in {field: value} format. If detail rows
        are not included in the report a ValueError is raised instead.
Returns
-------
records: list of dictionaries in {field: value, field: value...} format
"""
if not self.has_details:
raise ValueError('Report does not include details so cannot access individual records')
records = []
fact_map = self.data["factMap"]
field_labels = self._get_field_labels()
for group in fact_map.values():
rows = group["rows"]
group_records = (self._flatten_record(row["dataCells"]) for row in rows)
for record in group_records:
labelled_record = {field_labels[key]: value for key, value in enumerate(record)}
records.append(labelled_record)
return records
class MatrixParser(ReportParser):
"""
Parser with specific functionality for matrix reports
Parameters
----------
report: dict, return value of Connection.get_report()
"""
def __init__(self, report):
super().__init__(report)
self.data = report
self._check_type()
def _check_type(self):
expected = "MATRIX"
if self.type != expected:
raise ValueError("Incorrect report type. Expected {}, received {}.".format(expected, self.type))
else:
pass
def get_col_total(self, col_label, default=None):
"""
Return the total for the specified column. The default arg makes it possible to specify the return
value if the column label is not found.
Parameters
----------
col_label: string
default: string, optional, default None
If column is not found determines the return value
Returns
-------
total: int
"""
grp_across_list = self.data["groupingsAcross"]["groupings"]
col_dict = {grp['label']: int(grp['key']) for grp in grp_across_list}
try:
col_key = col_dict[col_label]
return self.data["factMap"]['T!{}'.format(col_key)]["aggregates"][0]["value"]
except KeyError:
return default
def get_row_total(self, row_label, default=None):
"""
Return the total for the specified row. The default arg makes it possible to specify the return
value if the column label is not found.
Parameters
----------
row_label: string
default: string, optional, default None
If row is not found determines the return value
Returns
-------
total: int
"""
grp_down_list = self.data["groupingsDown"]["groupings"]
row_dict = {grp["label"]: int(grp["key"]) for grp in grp_down_list}
try:
row_key = row_dict[row_label]
return self.data["factMap"]['{}!T'.format(row_key)]["aggregates"][0]["value"]
except KeyError:
return default
@staticmethod
def _convert_parameter(parameter):
if type(parameter) is str:
new_parameter = [parameter]
elif parameter is None:
new_parameter = []
elif type(parameter) is list:
new_parameter = parameter
else:
raise ValueError
return new_parameter
@staticmethod
def _get_subgroup_index(group_above, subgroup_name):
subgroups_with_index = {subgroup['label']: index for index, subgroup in enumerate(group_above)}
index = subgroups_with_index[subgroup_name]
return index
def _get_grouping(self, groups_of_interest, start_grouping, count):
current_grouping = start_grouping
while count > 1:
group_name = groups_of_interest[count - 2]
subgroup_index = self._get_subgroup_index(current_grouping, group_name)
current_grouping = current_grouping[subgroup_index]["groupings"]
count -= 1
return current_grouping
def _get_static_key(self, groups_of_interest, static_grouping_key):
grouping_depth = len(groups_of_interest)
group_index = grouping_depth - 1
top_grouping = self.data[static_grouping_key]["groupings"]
grouping = self._get_grouping(groups_of_interest, top_grouping, grouping_depth)
keys = {group['label']: group['key'] for group in grouping}
static_key = keys[groups_of_interest[group_index]]
return static_key
def _get_dynamic_keys(self, groups_of_interest, dynamic_grouping_key):
grouping_depth = len(groups_of_interest) + 1
top_grouping = self.data[dynamic_grouping_key]["groupings"]
grouping = self._get_grouping(groups_of_interest, top_grouping, grouping_depth)
dynamic_keys = [group["key"] for group in grouping]
labels = [group["label"] for group in grouping]
return {"keys": dynamic_keys, "labels": labels}
def _build_keys(self, static_groups_of_interest, dynamic_groups_of_interest, static_grouping_key,
dynamic_grouping_key):
static_key = self._get_static_key(static_groups_of_interest, static_grouping_key)
dynamic_keys = self._get_dynamic_keys(dynamic_groups_of_interest, dynamic_grouping_key)
keys = []
if static_grouping_key == "groupingsAcross":
for el in dynamic_keys["keys"]:
key = "{}!{}".format(el, static_key)
keys.append(key)
else:
for el in dynamic_keys["keys"]:
key = "{}!{}".format(static_key, el)
keys.append(key)
return {"keys": keys, "labels": dynamic_keys["labels"]}
def _series(self, static_groups_of_interest, static_grouping_key, dynamic_grouping_key,
dynamic_groups_of_interest=None, value_position=0):
static_groups_of_interest = self._convert_parameter(static_groups_of_interest)
dynamic_groups_of_interest = self._convert_parameter(dynamic_groups_of_interest)
keys_labels = self._build_keys(static_groups_of_interest, dynamic_groups_of_interest,
static_grouping_key, dynamic_grouping_key)
labels = keys_labels["labels"]
values = []
for key in keys_labels["keys"]:
value = self.data["factMap"][key]["aggregates"][value_position]["value"]
values.append(value)
series = dict(zip(labels, values))
return series
def series_down(self, column_groups, row_groups=None, value_position=0):
"""
Return selected slice of a report on a vertical axis
Parameters
----------
column_groups: string or list
The selected column to return series from
If multiple grouping levels a list is used to identify grouping of interest
row_groups: string, list or None, optional, default None
Limits rows included in Series to those within specified grouping
value_position: int, default 0
Index of value of interest, if only one value included by default will select
correct value
Returns
-------
series: dict, {label: value, ...}
"""
static_grouping_key = "groupingsAcross"
dynamic_grouping_key = "groupingsDown"
return self._series(column_groups, static_grouping_key, dynamic_grouping_key,
dynamic_groups_of_interest=row_groups, value_position=value_position)
def series_across(self, row_groups, col_groups=None, value_position=0):
"""
Return selected slice of a report on a horizontal axis
Parameters
----------
row_groups: string or list
The selected row to return series from
If multiple grouping levels a list is used to identify grouping of interest
col_groups: string, list or None, optional, default None
Limits cols included in Series to those within specified grouping
value_position: int, default 0
Index of value of interest, if only one value included by default will select
correct value
Returns
-------
series: dict, {label: value, ...}
"""
static_grouping_key = "groupingsDown"
dynamic_grouping_key = "groupingsAcross"
return self._series(row_groups, static_grouping_key, dynamic_grouping_key,
dynamic_groups_of_interest=col_groups, value_position=value_position) | cghall/salesforce-reporting | salesforce_reporting/parsers.py | Python | mit | 10,296 | 0.003594 |
"""
"""
import imp
import os
import sys
KEY_MAP = {0: [],
1: [],
2: [u'a', u'b', u'c'],
3: [u'd', u'e', u'f'],
4: [u'g', u'h', u'i'],
5: [u'j', u'k', u'l'],
6: [u'm', u'n', u'o'],
7: [u'p', u'q', u'r', u's'],
8: [u't', u'u', u'v'],
9: [u'w', u'x', u'y', u'z']
}
class T9:
def __init__(self, data_dir="./data"):
self.data_dir = data_dir
self.data_module = self.data_dir+"/data.py"
self.suggest_length = 10
self.word_length = 4
def load(self):
"""
Load up words and tri data structure that is saved to disk.
If it does not exist then use existing data to build it.
"""
if os.path.exists(self.data_module):
self.data = imp.load_source('data', self.data_module)
else:
msg = "WARNING: Data module is not loaded. "
msg += "Please build by running `make build-data`"
print msg
sys.exit(1)
def map_number(self, number):
"""
Map numbers from a dial pad to characters.
@param number: A string of numbers dialed from a key pad.
@type number: C{int}
"""
ret_chars = []
for num in str(number):
chars = KEY_MAP[int(num)]
if not chars:
break
ret_chars.append(chars)
return ret_chars
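    # Example: map_number(23) returns [[u'a', u'b', u'c'], [u'd', u'e', u'f']];
    # a 0 or 1 in the number stops the mapping early because those keys have no letters.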
def _sort(self, words):
return sorted(words,
key=lambda x: self.data.WORDS.get(x, 0),
reverse=True)
def map_words(self, number):
"""
Map a string of numbers from a phone's key pad to possible
words.
@param number: A string of numbers dialed from a key pad.
@type number: C{int}
"""
number_words = []
for i, chars in enumerate(self.map_number(number)):
if i == 0:
number_words = chars
else:
new_words = []
for word in number_words:
for c in chars:
new_word = word+c
# Only use words in our word trie
if self.data.TRIE.keys(new_word):
new_words.append(new_word)
number_words = new_words
return number_words
def words(self, number):
"""
Given a number return possible word combinations
sorted by usage frequency.
@param number: A string of numbers dialed from a key pad.
@type number: C{int}
"""
ret_words = []
number_words = self.map_words(number)
# Sort and filter words, adding extra words if our options are slim
suggested_words = []
for word in self._sort(number_words):
if word in self.data.WORDS:
ret_words.append(word)
word_keys = filter(lambda x: x != word,
self._sort(self.data.TRIE.keys(word)))
suggested_words += word_keys[:self.suggest_length]
ret_words = ret_words + self._sort(suggested_words)
return ret_words[:self.suggest_length]
def main_user_loop(t):
while True:
try:
number = int(input("Enter a number: "))
except EOFError:
break
except SyntaxError:
break
except TypeError:
if number != 'quit':
print "Invalid number"
break
for word in t.words(number):
print word
def stdin_loop(t):
for number in sys.stdin:
if not number.strip():
break
number = int(number.strip())
for word in t.words(number):
print word
def main(data_dir, user_input=None):
t = T9(data_dir=data_dir)
# Load data module. Remember to build it.
t.load()
if user_input:
main_user_loop(t)
else:
stdin_loop(t)
if __name__ == '__main__':
if len(sys.argv) == 3:
main(sys.argv[1], sys.argv[2])
elif len(sys.argv) == 2:
main(sys.argv[1])
else:
usage = "Usage: {0} <data_directory>"
print usage.format(sys.argv[0])
| twonds/trinine | trinine/t9.py | Python | mit | 4,257 | 0.000235 |
import unittest
"""
Given an array of positive integers, find the maximum sum of a sub-sequence with the
constraint that no two numbers in the sequence should be adjacent in the array.
Input: 3 2 7 10
Output: 13 (3 + 10)
Input 3 2 5 10 7
Output: 15 (3 + 5 + 7)
"""
"""
Approach:
1. Similar to 0-1 Knapsack problem.
2. F(S,i) = max(S[i] + F(S,i+2), F(S,i+1), S[i])
3. That is, for every element there are three cases:
(a) We add that element to previous best which is not adjacent
(b) We do not include the element in best subsequence, ie take adjacent best
(c) We start a new subsequence from the element
dp[i] = max(dp[i-2] + x[i], dp[i-1], x[i])
Finally, dp[n-1] will have the final answer.
"""
"""
Approach:
1. This one uses just 2 variables.
2. First variable tracks the maximum sum obtained by excluding current element
3. Second variable is current element added to first variable
4. Return max(first variable, second variable)
"""
def max_sum_not_adjacent_helper(list_of_numbers, index):
if index >= len(list_of_numbers):
return 0
return max(list_of_numbers[index] + max_sum_not_adjacent_helper(list_of_numbers, index+2),
max_sum_not_adjacent_helper(list_of_numbers, index+1))
def max_sum_not_adjacent(list_of_numbers):
return max_sum_not_adjacent_helper(list_of_numbers, 0)
def max_sum_not_adjacent_iterative(list_of_numbers):
# let excluding = Max sum excluding previous element
# and including = Max sum including previous element
# Then, max sum excluding current element = Max(excluding, including)
# And max sum including current element = excluding + arr[i]
including = list_of_numbers[0]
excluding = 0
for i in range(1, len(list_of_numbers)):
temp = max(including, excluding)
including = excluding + list_of_numbers[i]
excluding = temp
return max(including, excluding)
class TestMaxSumNotAdjacent(unittest.TestCase):
def test_max_sum_not_adjacent(self):
list_of_numbers = [3, 2, 7, 10]
self.assertEqual(max_sum_not_adjacent(list_of_numbers), 13)
list_of_numbers = [3, 2, 5, 10, 7]
self.assertEqual(max_sum_not_adjacent(list_of_numbers), 15)
list_of_numbers = [5, 5, 10, 40, 50, 35]
self.assertEqual(max_sum_not_adjacent(list_of_numbers), 80)
def test_max_sum_not_adjacent_iterative(self):
list_of_numbers = [3, 2, 7, 10]
self.assertEqual(max_sum_not_adjacent_iterative(list_of_numbers), 13)
list_of_numbers = [3, 2, 5, 10, 7]
self.assertEqual(max_sum_not_adjacent_iterative(list_of_numbers), 15)
list_of_numbers = [5, 5, 10, 40, 50, 35]
self.assertEqual(max_sum_not_adjacent_iterative(list_of_numbers), 80)
| prathamtandon/g4gproblems | Arrays/max_sum_not_adjacent.py | Python | mit | 2,744 | 0.001093 |
"""This utility aims to provide an easy way to encode text inside of images."""
# TODO Write test cases.
# TODO Write own version of cImage.
# TODO Add interface.
# TODO Add offset.
# TODO Add random seed.
from cImage import FileImage
from cImage import EmptyImage
class Encode:
"""Class"""
def __init__(self, key_file_name, result_file_name,
key_directory, result_directory):
self.EncodedFileName = result_file_name
self.keyDirectory = key_directory
self.key = FileImage(self.keyDirectory + key_file_name)
self.resultDirectory = result_directory
def Encode(self, data):
"""Take binary data and add it to an image."""
result = EmptyImage(self.key.getWidth(), self.key.getHeight())
count = 0
for row in range(self.key.getHeight()):
for col in range(self.key.getWidth()):
keyPixel = self.key.getPixel(col, row)
if count < (len(data)):
if (int(data[count]) == 1):
newPixel = self.flipLSB(keyPixel)
else:
newPixel = keyPixel
count += 1
result.setPixel(col, row, newPixel)
else:
result.setPixel(col, row, keyPixel)
result.save(self.resultDirectory + self.EncodedFileName)
    def backup(self):
        """Load a previously saved backup of the encoded image, if any."""
        # Stored on a separate attribute so this method is not overwritten by the result.
        try:
            self.backupImage = FileImage(self.EncodedFileName + ".bak")
        except Exception:
            self.backupImage = None
def textToBinary(self, data):
"""Convert text to binary."""
result = ''.join(format(ord(x), '08b') for x in data)
return str(result)
def checkStorageSize(self):
"""Check maximum amount of data that can be encoded into an image."""
width = self.key.getWidth()
height = self.key.getHeight()
maxSize = width * height
return maxSize
def flipLSB(self, pixel):
"""Invert the LSB of the red value of a pixel."""
tmp = pixel.getRed()
if (tmp > 120):
tmp -= 120
else:
tmp += 120
pixel.setRed(tmp)
return pixel
class Decode:
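    """Recover a hidden bit string by comparing an encoded image with its key image."""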
def __init__(self, key_file_name, result_file_name,
key_directory, result_directory):
self.EncodedFileName = result_file_name
self.keyDirectory = key_directory
self.key = FileImage(key_file_name)
self.resultDirectory = result_directory
def Decode(self):
"""Extract binary data from image."""
encoded = FileImage(self.resultDirectory + self.EncodedFileName)
result = []
for row in range(encoded.getHeight()):
for col in range(encoded.getWidth()):
encodedPixel = encoded.getPixel(col, row)
keyPixel = self.key.getPixel(col, row)
# 1
if encodedPixel.getRed() != keyPixel.getRed():
result.append(1)
# 0
else:
result.append(0)
return result
def binaryToText(self, data):
"""Convert binary to text."""
binaryString = ''.join(str(x) for x in data)
binaryList = [binaryString[i:i+8] for i in range(0, len(binaryString), 8)]
intList = []
for each in binaryList:
intList.append(int(each, 2))
charList = []
for each in intList:
charList.append(str(chr(each)))
cleanCharList = []
for each in charList:
            if each != '\x00':
cleanCharList.append(each)
result = ''.join(str(x) for x in cleanCharList)
return result
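# Minimal usage sketch (illustrative only; the file and directory names below
# are placeholders, not files shipped with this module).
if __name__ == "__main__":
    encoder = Encode("key.gif", "encoded.gif", "images/", "output/")
    bits = encoder.textToBinary("hello")
    if len(bits) <= encoder.checkStorageSize():
        encoder.Encode(bits)
    # Decode expects the full path to the key image as its first argument.
    decoder = Decode("images/key.gif", "encoded.gif", "images/", "output/")
    print(decoder.binaryToText(decoder.Decode()))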
| matttilton/pySteg | pySteg.py | Python | mit | 3,694 | 0.000541 |
# -*- coding: utf-8 -*-
#
# TGiT, Music Tagger for Professionals
# Copyright (C) 2013 Iconoclaste Musique Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
COUNTRIES = {
"AF": "Afghanistan",
"AX": "Aland Islan",
"AL": "Albania",
"DZ": "Algeria",
"AS": "American Samoa",
"AD": "Andorra",
"AO": "Angola",
"AI": "Anguilla",
"AQ": "Antarctica",
"AG": "Antigua and Barbuda",
"AR": "Argentina",
"AM": "Armenia",
"AW": "Aruba",
"AU": "Australia",
"AT": "Austria",
"AZ": "Azerbaijan",
"BS": "Bahamas",
"BH": "Bahrain",
"BD": "Bangladesh",
"BB": "Barbados",
"BY": "Belarus",
"BE": "Belgium",
"BZ": "Belize",
"BJ": "Benin",
"BM": "Bermuda",
"BT": "Bhutan",
"BO": "Bolivia",
"BA": "Bosnia and Herzegovina",
"BW": "Botswana",
"BV": "Bouvet Island",
"BR": "Brazil",
"VG": "British Virgin Islands",
"IO": "British Indian Ocean Territory",
"BN": "Brunei Darussalam",
"BG": "Bulgaria",
"BF": "Burkina Faso",
"BI": "Burundi",
"KH": "Cambodia",
"CM": "Cameroon",
"CA": "Canada",
"CV": "Cape Verde",
"KY": "Cayman Islands",
"CF": "Central African Republic",
"TD": "Chad",
"CL": "Chile",
"CN": "China",
"HK": "Hong Kong, Special Administrative Region of China",
"MO": "Macao, Special Administrative Region of China",
"CX": "Christmas Island",
"CC": "Cocos (Keeling) Islands",
"CO": "Colombia",
"KM": "Comoros",
"CG": "Congo (Brazzaville)",
"CD": "Congo, Democratic Republic of the",
"CK": "Cook Islands",
"CR": "Costa Rica",
"CI": "Côte d'Ivoire",
"HR": "Croatia",
"CU": "Cuba",
"CY": "Cyprus",
"CZ": "Czech Republic",
"DK": "Denmark",
"DJ": "Djibouti",
"DM": "Dominica",
"DO": "Dominican Republic",
"EC": "Ecuador",
"EG": "Egypt",
"SV": "El Salvador",
"GQ": "Equatorial Guinea",
"ER": "Eritrea",
"EE": "Estonia",
"ET": "Ethiopia",
"FK": "Falkland Islands (Malvinas)",
"FO": "Faroe Islands",
"FJ": "Fiji",
"FI": "Finland",
"FR": "France",
"GF": "French Guiana",
"PF": "French Polynesia",
"TF": "French Southern Territories",
"GA": "Gabon",
"GM": "Gambia",
"GE": "Georgia",
"DE": "Germany",
"GH": "Ghana",
"GI": "Gibraltar",
"GR": "Greece",
"GL": "Greenland",
"GD": "Grenada",
"GP": "Guadeloupe",
"GU": "Guam",
"GT": "Guatemala",
"GG": "Guernsey",
"GN": "Guinea",
"GW": "Guinea-Bissau",
"GY": "Guyana",
"HT": "Haiti",
"HM": "Heard Island and Mcdonald Islands",
"VA": "Holy See (Vatican City State)",
"HN": "Honduras",
"HU": "Hungary",
"IS": "Iceland",
"IN": "India",
"ID": "Indonesia",
"IR": "Iran, Islamic Republic of",
"IQ": "Iraq",
"IE": "Ireland",
"IM": "Isle of Man",
"IL": "Israel",
"IT": "Italy",
"JM": "Jamaica",
"JP": "Japan",
"JE": "Jersey",
"JO": "Jordan",
"KZ": "Kazakhstan",
"KE": "Kenya",
"KI": "Kiribati",
"KP": "Korea, Democratic People's Republic of",
"KR": "Korea, Republic of",
"KW": "Kuwait",
"KG": "Kyrgyzstan",
"LA": "Lao PDR",
"LV": "Latvia",
"LB": "Lebanon",
"LS": "Lesotho",
"LR": "Liberia",
"LY": "Libya",
"LI": "Liechtenstein",
"LT": "Lithuania",
"LU": "Luxembourg",
"MK": "Macedonia, Republic of",
"MG": "Madagascar",
"MW": "Malawi",
"MY": "Malaysia",
"MV": "Maldives",
"ML": "Mali",
"MT": "Malta",
"MH": "Marshall Islands",
"MQ": "Martinique",
"MR": "Mauritania",
"MU": "Mauritius",
"YT": "Mayotte",
"MX": "Mexico",
"FM": "Micronesia, Federated States of",
"MD": "Moldova",
"MC": "Monaco",
"MN": "Mongolia",
"ME": "Montenegro",
"MS": "Montserrat",
"MA": "Morocco",
"MZ": "Mozambique",
"MM": "Myanmar",
"NA": "Namibia",
"NR": "Nauru",
"NP": "Nepal",
"NL": "Netherlands",
"AN": "Netherlands Antilles",
"NC": "New Caledonia",
"NZ": "New Zealand",
"NI": "Nicaragua",
"NE": "Niger",
"NG": "Nigeria",
"NU": "Niue",
"NF": "Norfolk Island",
"MP": "Northern Mariana Islands",
"NO": "Norway",
"OM": "Oman",
"PK": "Pakistan",
"PW": "Palau",
"PS": "Palestinian Territory, Occupied",
"PA": "Panama",
"PG": "Papua New Guinea",
"PY": "Paraguay",
"PE": "Peru",
"PH": "Philippines",
"PN": "Pitcairn",
"PL": "Poland",
"PT": "Portugal",
"PR": "Puerto Rico",
"QA": "Qatar",
"RE": "Réunion",
"RO": "Romania",
"RU": "Russian Federation",
"RW": "Rwanda",
"BL": "Saint-Barthélemy",
"SH": "Saint Helena",
"KN": "Saint Kitts and Nevis",
"LC": "Saint Lucia",
"MF": "Saint-Martin (French part)",
"PM": "Saint Pierre and Miquelon",
"VC": "Saint Vincent and Grenadines",
"WS": "Samoa",
"SM": "San Marino",
"ST": "Sao Tome and Principe",
"SA": "Saudi Arabia",
"SN": "Senegal",
"RS": "Serbia",
"SC": "Seychelles",
"SL": "Sierra Leone",
"SG": "Singapore",
"SK": "Slovakia",
"SI": "Slovenia",
"SB": "Solomon Islands",
"SO": "Somalia",
"ZA": "South Africa",
"GS": "South Georgia and the South Sandwich Islands",
"SS": "South Sudan",
"ES": "Spain",
"LK": "Sri Lanka",
"SD": "Sudan",
"SR": "Suriname",
"SJ": "Svalbard and Jan Mayen Islands",
"SZ": "Swaziland",
"SE": "Sweden",
"CH": "Switzerland",
"SY": "Syrian Arab Republic (Syria)",
"TW": "Taiwan, Republic of China",
"TJ": "Tajikistan",
"TZ": "Tanzania, United Republic of",
"TH": "Thailand",
"TL": "Timor-Leste",
"TG": "Togo",
"TK": "Tokelau",
"TO": "Tonga",
"TT": "Trinidad and Tobago",
"TN": "Tunisia",
"TR": "Turkey",
"TM": "Turkmenistan",
"TC": "Turks and Caicos Islands",
"TV": "Tuvalu",
"UG": "Uganda",
"UA": "Ukraine",
"AE": "United Arab Emirates",
"GB": "United Kingdom",
"US": "United States of America",
"UM": "United States Minor Outlying Islands",
"UY": "Uruguay",
"UZ": "Uzbekistan",
"VU": "Vanuatu",
"VE": "Venezuela (Bolivarian Republic of)",
"VN": "Viet Nam",
"VI": "Virgin Islands, US",
"WF": "Wallis and Futuna Islands",
"EH": "Western Sahara",
"YE": "Yemen",
"ZM": "Zambia",
"ZW": "Zimbabwe"
}
ISO3166_2_A2_TO_ISO3166_2_A3 = {
"AF": "AFG",
"AX": "ALA",
"AL": "ALB",
"DZ": "DZA",
"AS": "ASM",
"AD": "AND",
"AO": "AGO",
"AI": "AIA",
"AQ": "ATA",
"AG": "ATG",
"AR": "ARG",
"AM": "ARM",
"AW": "ABW",
"AU": "AUS",
"AT": "AUT",
"AZ": "AZE",
"BS": "BHS",
"BH": "BHR",
"BD": "BGD",
"BB": "BRB",
"BY": "BLR",
"BE": "BEL",
"BZ": "BLZ",
"BJ": "BEN",
"BM": "BMU",
"BT": "BTN",
"BO": "BOL",
"BA": "BIH",
"BW": "BWA",
"BV": "BVT",
"BR": "BRA",
"VG": "VGB",
"IO": "IOT",
"BN": "BRN",
"BG": "BGR",
"BF": "BFA",
"BI": "BDI",
"KH": "KHM",
"CM": "CMR",
"CA": "CAN",
"CV": "CPV",
"KY": "CYM",
"CF": "CAF",
"TD": "TCD",
"CL": "CHL",
"CN": "CHN",
"HK": "HKG",
"MO": "MAC",
"CX": "CXR",
"CC": "CCK",
"CO": "COL",
"KM": "COM",
"CG": "COG",
"CD": "COD",
"CK": "COK",
"CR": "CRI",
"CI": "CIV",
"HR": "HRV",
"CU": "CUB",
"CY": "CYP",
"CZ": "CZE",
"DK": "DNK",
"DJ": "DJI",
"DM": "DMA",
"DO": "DOM",
"EC": "ECU",
"EG": "EGY",
"SV": "SLV",
"GQ": "GNQ",
"ER": "ERI",
"EE": "EST",
"ET": "ETH",
"FK": "FLK",
"FO": "FRO",
"FJ": "FJI",
"FI": "FIN",
"FR": "FRA",
"GF": "GUF",
"PF": "PYF",
"TF": "ATF",
"GA": "GAB",
"GM": "GMB",
"GE": "GEO",
"DE": "DEU",
"GH": "GHA",
"GI": "GIB",
"GR": "GRC",
"GL": "GRL",
"GD": "GRD",
"GP": "GLP",
"GU": "GUM",
"GT": "GTM",
"GG": "GGY",
"GN": "GIN",
"GW": "GNB",
"GY": "GUY",
"HT": "HTI",
"HM": "HMD",
"VA": "VAT",
"HN": "HND",
"HU": "HUN",
"IS": "ISL",
"IN": "IND",
"ID": "IDN",
"IR": "IRN",
"IQ": "IRQ",
"IE": "IRL",
"IM": "IMN",
"IL": "ISR",
"IT": "ITA",
"JM": "JAM",
"JP": "JPN",
"JE": "JEY",
"JO": "JOR",
"KZ": "KAZ",
"KE": "KEN",
"KI": "KIR",
"KP": "PRK",
"KR": "KOR",
"KW": "KWT",
"KG": "KGZ",
"LA": "LAO",
"LV": "LVA",
"LB": "LBN",
"LS": "LSO",
"LR": "LBR",
"LY": "LBY",
"LI": "LIE",
"LT": "LTU",
"LU": "LUX",
"MK": "MKD",
"MG": "MDG",
"MW": "MWI",
"MY": "MYS",
"MV": "MDV",
"ML": "MLI",
"MT": "MLT",
"MH": "MHL",
"MQ": "MTQ",
"MR": "MRT",
"MU": "MUS",
"YT": "MYT",
"MX": "MEX",
"FM": "FSM",
"MD": "MDA",
"MC": "MCO",
"MN": "MNG",
"ME": "MNE",
"MS": "MSR",
"MA": "MAR",
"MZ": "MOZ",
"MM": "MMR",
"NA": "NAM",
"NR": "NRU",
"NP": "NPL",
"NL": "NLD",
"AN": "ANT",
"NC": "NCL",
"NZ": "NZL",
"NI": "NIC",
"NE": "NER",
"NG": "NGA",
"NU": "NIU",
"NF": "NFK",
"MP": "MNP",
"NO": "NOR",
"OM": "OMN",
"PK": "PAK",
"PW": "PLW",
"PS": "PSE",
"PA": "PAN",
"PG": "PNG",
"PY": "PRY",
"PE": "PER",
"PH": "PHL",
"PN": "PCN",
"PL": "POL",
"PT": "PRT",
"PR": "PRI",
"QA": "QAT",
"RE": "REU",
"RO": "ROU",
"RU": "RUS",
"RW": "RWA",
"BL": "BLM",
"SH": "SHN",
"KN": "KNA",
"LC": "LCA",
"MF": "MAF",
"PM": "SPM",
"VC": "VCT",
"WS": "WSM",
"SM": "SMR",
"ST": "STP",
"SA": "SAU",
"SN": "SEN",
"RS": "SRB",
"SC": "SYC",
"SL": "SLE",
"SG": "SGP",
"SK": "SVK",
"SI": "SVN",
"SB": "SLB",
"SO": "SOM",
"ZA": "ZAF",
"GS": "SGS",
"SS": "SSD",
"ES": "ESP",
"LK": "LKA",
"SD": "SDN",
"SR": "SUR",
"SJ": "SJM",
"SZ": "SWZ",
"SE": "SWE",
"CH": "CHE",
"SY": "SYR",
"TW": "TWN",
"TJ": "TJK",
"TZ": "TZA",
"TH": "THA",
"TL": "TLS",
"TG": "TGO",
"TK": "TKL",
"TO": "TON",
"TT": "TTO",
"TN": "TUN",
"TR": "TUR",
"TM": "TKM",
"TC": "TCA",
"TV": "TUV",
"UG": "UGA",
"UA": "UKR",
"AE": "ARE",
"GB": "GBR",
"US": "USA",
"UM": "UMI",
"UY": "URY",
"UZ": "UZB",
"VU": "VUT",
"VE": "VEN",
"VN": "VNM",
"VI": "VIR",
"WF": "WLF",
"EH": "ESH",
"YE": "YEM",
"ZM": "ZMB",
"ZW": "ZWE",
}
| Iconoclasteinc/tgit | tgit/countries.py | Python | gpl-3.0 | 11,846 | 0.000084 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
type 'pytest -v' to run the test series
"""
import codecs
import json
import os
import pytest
import tempfile
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import core
class TestHelpers:
@staticmethod
def file_read(fd):
"""
from current descriptor, create a new descriptor in read mode and read file content
:return file content
:rtype str
"""
with codecs.open(fd.name, 'r', 'utf-8') as f:
res = f.read()
return res
@staticmethod
def same_class(instance, should_cls):
return str(instance.__class__) == str(should_cls)
@staticmethod
def json_compare(a, b):
return json.dumps(a) == json.dumps(b)
class Fake(object):
pass
@pytest.fixture()
def fixture_dict_data():
return dict(
id=1,
active=True,
name='foobar',
nested=dict(id=1, name='nested'),
items=[dict(id=1, name='item1'),],
)
def fixture_dict_data_check_matching(o, data):
assert o
# check __dict__
assert isinstance(o.__dict__, dict)
assert len(o.__dict__.keys()) == len(data.keys())
assert o.__dict__['id'] == data['id']
assert o.__dict__['active'] == data['active']
assert o.__dict__['name'] == data['name']
assert TestHelpers.same_class(o.__dict__['nested'], core.Object)
assert len(o.__dict__['nested'].__dict__.keys()) == len(data['nested'].keys())
assert o.__dict__['nested'].__dict__['id'] == data['nested']['id']
assert o.__dict__['nested'].__dict__['name'] == data['nested']['name']
assert isinstance(o.__dict__['items'], list)
assert len(o.__dict__['items']) == len(data['items'])
assert TestHelpers.same_class(o.__dict__['items'][0], core.Object)
assert o.__dict__['items'][0].__dict__['id'] == data['items'][0]['id']
assert o.__dict__['items'][0].__dict__['name'] == data['items'][0]['name']
# check attrs
assert hasattr(o, 'id')
assert hasattr(o, 'active')
assert hasattr(o, 'name')
assert hasattr(o, 'nested')
assert hasattr(o, 'items')
assert o.id == data['id']
assert o.active == data['active']
assert o.name == data['name']
assert TestHelpers.same_class(o.nested, core.Object)
assert hasattr(o.nested, 'id')
assert hasattr(o.nested, 'name')
assert o.nested.id == data['nested']['id']
assert o.nested.name == data['nested']['name']
assert isinstance(o.items, list)
assert len(o.items) == len(data['items'])
assert hasattr(o.items[0], 'id')
assert hasattr(o.items[0], 'name')
assert o.items[0].id == data['items'][0]['id']
assert o.items[0].name == data['items'][0]['name']
@pytest.fixture()
def fixture_json_data():
return json.dumps(dict(
id=1,
active=True,
name='foobar',
nested=dict(id=1, name='nested'),
items=[dict(id=1, name='item1')],
))
@pytest.fixture()
def fixture_repr_data():
return "<class 'core.Object'>, 5 attrs: active: True, id: 1," \
" items: [<class 'core.Object'>, 2 attrs: id: 1, name: 'item1']," \
" name: 'foobar', nested: <class 'core.Object'>, 2 attrs: id: 1, name: 'nested'"
@pytest.fixture()
def fixture_str_data():
return "<class 'core.Object'>, 5 attrs: active: True, id: 1," \
" items: [{'id': 1, 'name': 'item1'}], name: 'foobar'," \
" nested: {'id': 1, 'name': 'nested'}"
@pytest.fixture()
def fixture_update_merge_data():
return {
'data': {'foo': {'bar': {'message': 'foobar'}}},
'data2': {
'foo': {'bar': {'color': 'green'}},
'foo2': {'bar': {'message': 'foobar 2', 'color': 'orange'}},
},
'merge': {
'foo': {'bar': {'message': 'foobar', 'color': 'green'}},
'foo2': {'bar': {'message': 'foobar 2', 'color': 'orange'}},
},
}
@pytest.fixture()
def fixture_config_file(request):
fd = tempfile.NamedTemporaryFile(mode='w', suffix='.ini', delete=False)
with fd:
fd.write("""
[foo]
foo1=Fee
foo2=Fie
[bar]
bar1=Foe
bar2=Foo
""")
def delete():
if not fd.closed:
fd.close()
os.remove(fd.name)
request.addfinalizer(delete)
return fd
@pytest.fixture()
def fixture_config_file_expected_data():
return dict(
foo=dict(foo1='Fee', foo2='Fie'),
bar=dict(bar1='Foe', bar2='Foo'),
)
class Test01ObjectContract():
def test_00_of_class_ko(self):
assert not core.Object.of_class(None)
assert not core.Object.of_class(False)
assert not core.Object.of_class(True)
assert not core.Object.of_class(1)
assert not core.Object.of_class('a')
assert not core.Object.of_class(object())
assert not core.Object.of_class(Fake())
class Test02ObjectConstructor():
def test_00_contract_ko(self):
with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_DICT):
core.Object(False)
with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_DICT):
core.Object(True)
with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_DICT):
core.Object(1)
with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_DICT):
core.Object('a')
with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_DICT):
core.Object(object())
def test_01_empty(self):
o = core.Object()
assert o
# alias
o = core.o()
assert o
o = core.Object(None)
assert o
o = core.Object(dict())
assert o
def test_02_of_class(self):
assert core.Object.of_class(core.Object())
assert core.Object.of_class(core.o())
@pytest.mark.usefixtures('fixture_dict_data')
def test_03_from_dict(self, fixture_dict_data):
fixture_dict_data_check_matching(core.Object(fixture_dict_data), fixture_dict_data)
fixture_dict_data_check_matching(core.o(fixture_dict_data), fixture_dict_data)
# get_dict: will be used for following test so at serie start
@pytest.mark.usefixtures('fixture_dict_data')
def test_04_get_dict(self, fixture_dict_data):
o = core.o(fixture_dict_data)
assert o.get_dict() == fixture_dict_data
def test_05_kwargs(self):
o = core.o(id=1, name='kwarg')
assert o.get_dict() == dict(id=1, name='kwarg')
o = core.o(dict(), id=1, name='kwarg')
assert o.get_dict() == dict(id=1, name='kwarg')
o = core.o(dict(description='from dict'), id=1, name='kwarg')
assert o.get_dict() == dict(description='from dict', id=1, name='kwarg')
class Test02ObjectUpdateContent():
@pytest.mark.usefixtures('fixture_dict_data')
def test_00_setattr(self, fixture_dict_data):
o = core.o(fixture_dict_data)
# change exiting attribute
o.name = 'changed'
assert o.name == 'changed'
o.nested.name = 'changed2'
assert o.nested.name == 'changed2'
o.items[0].name = 'changed3'
assert o.items[0].name == 'changed3'
# new attribute
o.description = 'description'
assert o.description == 'description'
o.nested2 = core.o(dict(id=2, name='nested2'))
assert o.nested2.id == 2
assert o.nested2.name == 'nested2'
o.nested3 = core.o()
o.nested3.id = 3
assert o.nested3.id == 3
o.items2 = [core.o(dict(id=2, name='item2'))]
assert o.items2[0].id == 2
assert o.items2[0].name == 'item2'
@pytest.mark.usefixtures('fixture_update_merge_data')
def test_01_update(self, fixture_update_merge_data):
with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_SELF):
core.o().update(1)
data = fixture_update_merge_data['data']
data2 = fixture_update_merge_data['data2']
o = core.o(data)
assert o.get_dict() == data
o.update(data2)
assert o.get_dict() == data2
assert core.o(data).update(data2).get_dict() == data2 # chained style
o = core.o()
assert o.get_dict() == {}
o.update(data)
assert o.get_dict() == data
o.update(data2)
assert o.get_dict() == data2
assert core.o().update(data).update(data2).get_dict() == data2 # chained style
o = core.o(data)
o.update(core.o(data2))
assert o.get_dict() == data2
assert core.o(data).update(core.o(data2)).get_dict() == data2 # chained style
o = core.o()
assert o.get_dict() == {}
o.update(core.o(data))
assert o.get_dict() == data
o.update(core.o(data2))
assert o.get_dict() == data2
assert core.o().update(core.o(data)).update(core.o(data2)).get_dict() == data2 # chained style
@pytest.mark.usefixtures('fixture_update_merge_data')
def test_02_merge(self, fixture_update_merge_data):
with pytest.raises(AssertionError, message=core.Object.CONTRACT_DATA_SELF):
core.o().merge(1)
data = fixture_update_merge_data['data']
data2 = fixture_update_merge_data['data2']
merge = fixture_update_merge_data['merge']
o = core.o(data)
assert o.get_dict() == data
o.merge(data2)
assert o.get_dict() == merge
assert core.o(data).merge(data2).get_dict() == merge # chained style
o = core.o()
o.merge(data)
assert o.get_dict() == data
o.merge(data2)
assert o.get_dict() == merge
assert core.o().merge(data).merge(data2).get_dict() == merge # chained style
o = core.o(data)
o.merge(core.o(data2))
assert o.get_dict() == merge
assert core.o(data).merge(core.o(data2)).get_dict() == merge # chained style
o = core.o()
assert o.get_dict() == {}
o.merge(core.o(data))
assert o.get_dict() == data
o.merge(core.o(data2))
assert o.get_dict() == merge
assert core.o().merge(core.o(data)).merge(core.o(data2)).get_dict() == merge
class Test03Json():
@pytest.mark.usefixtures('fixture_dict_data')
@pytest.mark.usefixtures('fixture_json_data')
def test_00_jsonify(self, fixture_dict_data, fixture_json_data):
assert TestHelpers.json_compare(core.o(fixture_dict_data).jsonify(), fixture_json_data)
@pytest.mark.usefixtures('fixture_dict_data')
@pytest.mark.usefixtures('fixture_json_data')
def test_01_unjsonify(self, fixture_dict_data, fixture_json_data):
with pytest.raises(AssertionError):
core.Object.unjsonify(1)
assert core.Object.unjsonify(fixture_json_data).get_dict() == fixture_dict_data
assert core.unjsonify(fixture_json_data).get_dict() == fixture_dict_data
class Test04ObjectMagic():
@pytest.mark.usefixtures('fixture_dict_data')
@pytest.mark.usefixtures('fixture_repr_data')
def test_00_repr(self, fixture_dict_data, fixture_repr_data):
assert repr(core.o(fixture_dict_data)) == fixture_repr_data
@pytest.mark.usefixtures('fixture_dict_data')
@pytest.mark.usefixtures('fixture_str_data')
def test_01_str(self, fixture_dict_data, fixture_str_data):
assert str(core.o(fixture_dict_data)) == fixture_str_data
def test_02_eq_ne(self):
o1 = core.o(id=1, name='foobar')
o2 = core.o(id=1, name='foobar')
o3 = core.o(id=3, name='foobar3')
assert o1 == o2
assert o1 != o3
@pytest.mark.usefixtures('fixture_update_merge_data')
def test_03_add(self, fixture_update_merge_data):
data = fixture_update_merge_data['data']
data2 = fixture_update_merge_data['data2']
merge = fixture_update_merge_data['merge']
assert (core.o(data) + core.o(data2)).get_dict() == merge
@pytest.mark.usefixtures('fixture_update_merge_data')
def test_04_iadd(self, fixture_update_merge_data):
data = fixture_update_merge_data['data']
data2 = fixture_update_merge_data['data2']
merge = fixture_update_merge_data['merge']
o = core.o(data)
o += core.o(data2)
assert o.get_dict() == merge
class Test05ObjectGetContent():
def test_00_get(self):
data = {'foo': {'bar': {'message': 'foobar'}}}
o = core.o(data)
assert o.get('ko') is None
assert TestHelpers.same_class(o.get('foo'), core.Object)
assert o.get('foo').get_dict() == {'bar': {'message': 'foobar'}}
assert TestHelpers.same_class(o.get('foo').get('bar'), core.Object)
assert o.get('foo').get('bar').get_dict() == {'message': 'foobar'}
assert o.get('foo').get('bar').get('message') == 'foobar'
def test_01_get_dot(self):
data = {'foo': {'bar': {'message': 'foobar'}}}
o = core.o(data)
assert o.get_dot('ko') is None
assert o.get_dot('ko.ko') is None
assert TestHelpers.same_class(o.get_dot('foo'), core.Object)
assert o.get_dot('foo').get_dict() == {'bar': {'message': 'foobar'}}
assert TestHelpers.same_class(o.get_dot('foo.bar'), core.Object)
assert o.get_dot('foo.bar').get_dict() == {'message': 'foobar'}
assert o.get_dot('foo.bar.message') == 'foobar'
@pytest.mark.usefixtures('fixture_dict_data')
def test_02_attrs(self, fixture_dict_data):
o = core.o(fixture_dict_data)
assert o.attrs() == sorted(fixture_dict_data.keys())
class Test06ObjectExtra():
@pytest.mark.usefixtures('fixture_config_file')
@pytest.mark.usefixtures('fixture_config_file_expected_data')
def test_00_cfg_read_get(self, fixture_config_file, fixture_config_file_expected_data):
with pytest.raises(AssertionError):
core.o().read_cfg(1)
assert core.o().read_cfg(fixture_config_file.name).get_dict() == fixture_config_file_expected_data
| pyseed/objify | objify/test/test_core.py | Python | mit | 13,959 | 0.002651 |
"""
Form widgets for ToscaWidgets.
To download and install::
easy_install twForms
"""
from tw.api import Widget
from tw.forms.core import *
from tw.forms.fields import *
from tw.forms.datagrid import *
from tw.forms.calendars import *
# build all so doc tools introspect me properly
from tw.forms.core import __all__ as __core_all
from tw.forms.fields import __all__ as __fields_all
from tw.forms.datagrid import __all__ as __datagrid_all
from tw.forms.calendars import __all__ as __calendars_all
__all__ = __core_all + __fields_all + __datagrid_all + __calendars_all
| jokajak/itweb | data/env/lib/python2.6/site-packages/tw.forms-0.9.9-py2.6.egg/tw/forms/__init__.py | Python | gpl-3.0 | 578 | 0.00173 |
"""
This page is in the table of contents.
Hop is a script to raise the extruder when it is not extruding.
Note: In some cases where you have a thin overhang, this plugin can help solve the problem of the object being knocked off by the head.
The hop manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Hop
==Operation==
The default 'Activate Hop' checkbox is off.
It is off because Vik and Nophead found better results without hopping, and numerous users reported better output without this plugin.
When activated the extruder will hop when traveling. When it is off, nothing will be done.
==Settings==
===Hop Over Layer Thickness===
Default is one.
Defines the ratio of the hop height over the layer height; this is the most important hop setting.
===Minimum Hop Angle===
Default is 20 degrees.
Defines the minimum angle at which the path of the extruder will be raised. An angle of ninety means that the extruder will go straight up as soon as it is not extruding, and a low angle means the extruder path will gradually rise to the hop height.
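For example, with a layer height of 0.4 mm and the default 'Hop Over Layer Thickness' of one, the hop height is 0.4 mm; at the default minimum hop angle of 20 degrees, the path rises to that height over roughly 0.4 / tan(20 degrees), about 1.1 mm, of horizontal travel.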
==Examples==
The following examples hop the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and hop.py.
> python hop.py
This brings up the hop dialog.
> python hop.py Screw Holder Bottom.stl
The hop tool is parsing the file:
Screw Holder Bottom.stl
..
The hop tool has created the file:
.. Screw Holder Bottom_hop.gcode
"""
from __future__ import absolute_import
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities import archive
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import math
import sys
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftedText( fileName, text, hopRepository = None ):
"Hop a gcode linear move text."
return getCraftedTextFromText( archive.getTextIfEmpty(fileName, text), hopRepository )
def getCraftedTextFromText( gcodeText, hopRepository = None ):
"Hop a gcode linear move text."
if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'hop'):
return gcodeText
if hopRepository == None:
hopRepository = settings.getReadRepository( HopRepository() )
if not hopRepository.activateHop.value:
return gcodeText
return HopSkein().getCraftedGcode( gcodeText, hopRepository )
def getNewRepository():
'Get new repository.'
return HopRepository()
def writeOutput(fileName, shouldAnalyze=True):
"Hop a gcode linear move file. Chain hop the gcode if it is not already hopped."
skeinforge_craft.writeChainTextWithNounMessage(fileName, 'hop', shouldAnalyze)
class HopRepository(object):
"A class to handle the hop settings."
def __init__(self):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.hop.html', self )
self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Hop', self, '')
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Hop')
self.activateHop = settings.BooleanSetting().getFromValue('Activate Hop', self, False )
self.hopOverLayerThickness = settings.FloatSpin().getFromValue( 0.5, 'Hop Over Layer Thickness (ratio):', self, 1.5, 1.0 )
self.minimumHopAngle = settings.FloatSpin().getFromValue( 20.0, 'Minimum Hop Angle (degrees):', self, 60.0, 30.0 )
self.executeTitle = 'Hop'
def execute(self):
"Hop button has been clicked."
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
for fileName in fileNames:
writeOutput(fileName)
class HopSkein(object):
"A class to hop a skein of extrusions."
def __init__(self):
'Initialize'
self.distanceFeedRate = gcodec.DistanceFeedRate()
self.extruderActive = False
self.feedRateMinute = 961.0
self.hopHeight = 0.4
self.hopDistance = self.hopHeight
self.justDeactivated = False
self.layerCount = settings.LayerCount()
self.lineIndex = 0
self.lines = None
self.oldLocation = None
def getCraftedGcode( self, gcodeText, hopRepository ):
"Parse gcode text and store the hop gcode."
self.lines = archive.getTextLines(gcodeText)
self.minimumSlope = math.tan( math.radians( hopRepository.minimumHopAngle.value ) )
self.parseInitialization( hopRepository )
for self.lineIndex in xrange(self.lineIndex, len(self.lines)):
line = self.lines[self.lineIndex]
self.parseLine(line)
return self.distanceFeedRate.output.getvalue()
def getHopLine(self, line):
"Get hopped gcode line."
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
self.feedRateMinute = gcodec.getFeedRateMinute( self.feedRateMinute, splitLine )
if self.extruderActive:
return line
location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
highestZ = location.z
if self.oldLocation != None:
highestZ = max( highestZ, self.oldLocation.z )
highestZHop = highestZ + self.hopHeight
locationComplex = location.dropAxis()
if self.justDeactivated:
oldLocationComplex = self.oldLocation.dropAxis()
distance = abs( locationComplex - oldLocationComplex )
if distance < self.minimumDistance:
if self.isNextTravel() or distance == 0.0:
return self.distanceFeedRate.getLineWithZ( line, splitLine, highestZHop )
alongRatio = min( 0.41666666, self.hopDistance / distance )
oneMinusAlong = 1.0 - alongRatio
closeLocation = oldLocationComplex * oneMinusAlong + locationComplex * alongRatio
self.distanceFeedRate.addLine( self.distanceFeedRate.getLineWithZ( line, splitLine, highestZHop ) )
if self.isNextTravel():
return self.distanceFeedRate.getLineWithZ( line, splitLine, highestZHop )
farLocation = oldLocationComplex * alongRatio + locationComplex * oneMinusAlong
self.distanceFeedRate.addGcodeMovementZWithFeedRate( self.feedRateMinute, farLocation, highestZHop )
return line
if self.isNextTravel():
return self.distanceFeedRate.getLineWithZ( line, splitLine, highestZHop )
return line
def isNextTravel(self):
"Determine if there is another linear travel before the thread ends."
for afterIndex in xrange( self.lineIndex + 1, len(self.lines) ):
line = self.lines[ afterIndex ]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
if firstWord == 'G1':
return True
if firstWord == 'M101':
return False
return False
def parseInitialization( self, hopRepository ):
'Parse gcode initialization and store the parameters.'
for self.lineIndex in xrange(len(self.lines)):
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
if firstWord == '(<layerHeight>':
layerHeight = float(splitLine[1])
self.hopHeight = hopRepository.hopOverLayerThickness.value * layerHeight
self.hopDistance = self.hopHeight / self.minimumSlope
self.minimumDistance = 0.5 * layerHeight
elif firstWord == '(</extruderInitialization>)':
self.distanceFeedRate.addTagBracketedProcedure('hop')
return
self.distanceFeedRate.addLine(line)
def parseLine(self, line):
"Parse a gcode line and add it to the bevel gcode."
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = splitLine[0]
if self.distanceFeedRate.getIsAlteration(line):
return
if firstWord == 'G1':
line = self.getHopLine(line)
self.oldLocation = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
self.justDeactivated = False
elif firstWord == '(<layer>':
self.layerCount.printProgressIncrement('hop')
elif firstWord == 'M101':
self.extruderActive = True
elif firstWord == 'M103':
self.extruderActive = False
self.justDeactivated = True
self.distanceFeedRate.addLineCheckAlteration(line)
def main():
"Display the hop dialog."
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == "__main__":
main()
| tinkerinestudio/Tinkerine-Suite | TinkerineSuite/Cura/cura_sf/skeinforge_application/skeinforge_plugins/craft_plugins/hop.py | Python | agpl-3.0 | 8,736 | 0.025412 |
DOCUMENTATION='''
---
module: win_reboot
short_description: Reboot a windows machine
description:
- Reboot a Windows machine, wait for it to go down, come back up, and respond to commands.
version_added: "2.1"
options:
pre_reboot_delay_sec:
description:
- Seconds for shutdown to wait before requesting reboot
default: 2
shutdown_timeout_sec:
description:
- Maximum seconds to wait for shutdown to occur
- Increase this timeout for very slow hardware, large update applications, etc
default: 600
reboot_timeout_sec:
description:
- Maximum seconds to wait for machine to re-appear on the network and respond to a test command
- This timeout is evaluated separately for both network appearance and test command success (so maximum clock time is actually twice this value)
default: 600
connect_timeout_sec:
description:
- Maximum seconds to wait for a single successful TCP connection to the WinRM endpoint before trying again
default: 5
test_command:
description:
- Command to expect success for to determine the machine is ready for management
default: whoami
author:
- Matt Davis (@nitzmahone)
'''
EXAMPLES='''
# unconditionally reboot the machine with all defaults
- win_reboot:
# apply updates and reboot if necessary
- win_updates:
register: update_result
- win_reboot:
when: update_result.reboot_required
# reboot a slow machine that might have lots of updates to apply
- win_reboot:
shutdown_timeout_sec: 3600
reboot_timeout_sec: 3600
'''
RETURN='''
rebooted:
description: true if the machine was rebooted
returned: always
type: boolean
sample: true
'''
| welex91/ansible-modules-core | windows/win_reboot.py | Python | gpl-3.0 | 1,679 | 0.00536 |
#!/usr/bin/env python2
# multi-regime.py
# Author: Jonah Miller (jonah.maxwell.miller@gmail.com)
# Time-stamp: <2013-12-14 16:06:28 (jonah)>
# This is a library to plot and fit the omega(rho) variable. We choose
# omega so that we get the three distinct regimes for which we know
# the analytic solution with continuous transition regions.
# Imports
# ----------------------------------------------------------------------
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import plot_all_variables as pav
from scipy.special import erf
# ----------------------------------------------------------------------
# Constants
# ----------------------------------------------------------------------
RHO_MIN=0
OMEGA_MIN = -1
ERF_MIN = -1
RHO_DARK_ENERGY = [10,20,25,37]
RHO_MATTER = [20,40,80,92]
RHO_RADIATION = 200
TRANSITION_WIDTH_DE=[1,5,12,20]
TRANSITION_WIDTH_MR=[1,5,12,20]
NUM_ERFS = 2
DE_AMPLITUDE = 1.0/2.0
MATTER_AMPLITUDE = (1.0/3.0)* DE_AMPLITUDE
XLABEL=r'$\rho$'
YLABEL=r'$\omega$'
# ----------------------------------------------------------------------
def omega(rho,
rho_dark_energy,transition_width_de,
rho_matter,transition_width_mr):
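    """
    Equation-of-state parameter omega as a function of energy density rho.

    Two smoothed steps (error functions) interpolate between three constant
    regimes: omega = -1 (dark energy) at low rho, omega = 0 (matter) at
    intermediate rho, and omega = 1/3 (radiation) at high rho, with the
    transition locations and widths set by the remaining parameters.
    """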
return OMEGA_MIN \
+ DE_AMPLITUDE + MATTER_AMPLITUDE\
+ DE_AMPLITUDE*erf((rho - rho_dark_energy)/transition_width_de)\
+ MATTER_AMPLITUDE*erf((rho - rho_matter)/transition_width_mr)
def plot_rho():
mpl.rcParams.update({'font.size': pav.fontsize})
x = np.linspace(0,RHO_RADIATION,100)
ys = [omega(x,RHO_DARK_ENERGY[i],TRANSITION_WIDTH_DE[i],
RHO_MATTER[i],TRANSITION_WIDTH_MR[i]) for i in range(len(RHO_DARK_ENERGY))]
lines = [plt.plot(x,y,linewidth=pav.my_linewidth) for y in ys]
plt.axis([RHO_MIN,RHO_RADIATION,
1.1*OMEGA_MIN,
1.1*(OMEGA_MIN + 2*(DE_AMPLITUDE + MATTER_AMPLITUDE))])
plt.xlabel(XLABEL)
plt.ylabel(YLABEL)
plt.legend(["Abrupt transition",
"Moderate transition",
"Mild transition",
"No well-defined regimes"],
loc=4)
plt.show()
return
if __name__ == "__main__":
plot_rho()
| Yurlungur/FLRW | MultiRegime.py | Python | mit | 2,168 | 0.01476 |
# -*- coding: utf-8 -*-
# Copyright(C) 2014 Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.json import json
from weboob.capabilities.base import UserError
from weboob.capabilities.collection import Collection
from weboob.browser import LoginBrowser, URL, need_login
from .pages import EssentialsPage, TokenPage, ContentsPage, PreferencesPage, MarkerPage
__all__ = ['FeedlyBrowser']
class FeedlyBrowser(LoginBrowser):
BASEURL = 'http://www.feedly.com/'
essentials = URL('http://s3.feedly.com/essentials/essentials_fr.json', EssentialsPage)
token = URL('v3/auth/token', TokenPage)
contents = URL('v3/streams/contents', ContentsPage)
preferences = URL('v3/preferences', PreferencesPage)
marker = URL('v3/markers', MarkerPage)
def __init__(self, username, password, login_browser, *args, **kwargs):
super(FeedlyBrowser, self).__init__(username, password, *args, **kwargs)
self.user_id = None
self.login_browser = login_browser
def do_login(self):
if self.login_browser:
if self.login_browser.code is None or self.user_id is None:
self.login_browser.do_login()
params = {'code': self.login_browser.code,
'client_id': 'feedly',
'client_secret': '0XP4XQ07VVMDWBKUHTJM4WUQ',
'redirect_uri': 'http://dev.feedly.com/feedly.html',
'grant_type': 'authorization_code'}
token, self.user_id = self.token.go(data=params).get_token()
self.session.headers['X-Feedly-Access-Token'] = token
else:
raise UserError(r'You need to fill your username and password to access this page')
@need_login
def iter_threads(self):
params = {'streamId': 'user/%s/category/global.all' % self.user_id,
'unreadOnly': 'true',
'ranked': 'newest',
'count': '100'}
return self.contents.go(params=params).get_articles()
@need_login
def get_unread_feed(self, url):
params = {'streamId': url,
'backfill': 'true',
'boostMustRead': 'true',
'unreadOnly': 'true'}
return self.contents.go(params=params).get_articles()
def get_categories(self):
if self.username is not None and self.password is not None:
return self.get_logged_categories()
return self.essentials.go().get_categories()
@need_login
def get_logged_categories(self):
user_categories = list(self.preferences.go().get_categories())
user_categories.append(Collection([u'global.saved'], u'Saved'))
return user_categories
def get_feeds(self, category):
if self.username is not None and self.password is not None:
return self.get_logged_feeds(category)
return self.essentials.go().get_feeds(category)
@need_login
def get_logged_feeds(self, category):
if category == 'global.saved':
type = 'tag'
else:
type = 'category'
url = 'user/%s/%s/%s' % (self.user_id, type, category)
return self.get_unread_feed(url)
def get_feed_url(self, category, feed):
return self.essentials.go().get_feed_url(category, feed)
@need_login
def set_message_read(self, _id):
datas = {'action': 'markAsRead',
'type': 'entries',
'entryIds': [_id]}
self.marker.open(data=json.dumps(datas))
| laurent-george/weboob | modules/feedly/browser.py | Python | agpl-3.0 | 4,193 | 0.000954 |
"""
WSGI config for hackathon project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hackathon.settings")
application = get_wsgi_application()
| python-frederick/hackathon-2016 | hackathon/wsgi.py | Python | bsd-2-clause | 395 | 0 |
'''simpler & deprecated python script controllers'''
import Sofa
import inspect
def deprecated(cls):
# TODO maybe we should print a backtrace to locate the origin?
# or even better, use: https://docs.python.org/2/library/warnings.html#warnings.warn
line = '''class `{0}` from module `{1}` is deprecated. You may now derive from `Sofa.PythonScriptController` and instantiate derived classes directly.'''.format(cls.__name__, cls.__module__)
Sofa.msg_deprecated('SofaPython', line)
Sofa.msg_deprecated('SofaPython',
'note: `createGraph` will no longer be called automatically. You need to call manually from __init__ instead.')
Sofa.msg_deprecated('SofaPython',
'note: `onLoaded` will no longer be called automatically. You need to call manually from __init__ instead.')
# uncomment to get the location where the deprecated class is created
# import traceback; traceback.print_stack()
return cls
@deprecated
class Controller(Sofa.PythonScriptController):
# to stack data for recursive creations of Controllers
instances = []
kwargs = []
def __new__(cls, node, name='pythonScriptController', filename='', **kwarg):
"""
:param filename: you may have to define it (at least once) to create
a controller for which the class is defined in an external
file. Be aware the file will then be read several times.
"""
# temporary variable to store optional arguments
Controller.kwargs.append( kwarg )
node.createObject('PythonScriptController',
filename = filename,
classname = cls.__name__,
name = name)
# note the previous calls callbacks onLoaded and createGraph
try:
return Controller.instances.pop() # let's trust the garbage collector
except AttributeError:
# if this fails, you need to call
# Controller.onLoaded(self, node) in derived classes
print "[SofaPython.script.Controller.__new__] instance not found, did you call 'SofaPython.script.Controller.onLoaded' on your overloaded 'onLoaded' in {} ?".format(cls)
raise
def onLoaded(self, node):
Controller.instances.append(self)
self.additionalArguments(Controller.kwargs.pop()) # let's trust the garbage collector
def additionalArguments(self,kwarg):
""" to handle optional constructor arguments before createGraph
"""
pass
| Anatoscope/sofa | applications/plugins/SofaPython/python/SofaPython/script.py | Python | lgpl-2.1 | 2,609 | 0.010732 |
# Generated by Django 2.2.17 on 2021-02-02 03:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0036_fix_reg_invoice_link'),
]
operations = [
migrations.AlterField(
model_name='registration',
name='final',
field=models.BooleanField(null=False, default=False, verbose_name='Registration has been finalized'),
),
migrations.AlterField(
model_name='eventregistration',
name='invoiceItem',
field=models.OneToOneField(null=False, on_delete=django.db.models.deletion.CASCADE, related_name='eventRegistration', to='core.InvoiceItem', verbose_name='Invoice item'),
),
migrations.AlterField(
model_name='registration',
name='invoice',
field=models.OneToOneField(null=False, on_delete=django.db.models.deletion.CASCADE, related_name='registration', to='core.Invoice', verbose_name='Invoice'),
),
migrations.RemoveField(
model_name='registration',
name='expirationDate',
),
]
| django-danceschool/django-danceschool | danceschool/core/migrations/0037_remove_registration_expirationdate.py | Python | bsd-3-clause | 1,183 | 0.002536 |
"""
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble", "RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| depet/scikit-learn | sklearn/ensemble/__init__.py | Python | bsd-3-clause | 1,055 | 0 |
# -*- test-case-name: twisted.names.test.test_tap -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Domain Name Server
"""
import os, traceback
from twisted.python import usage
from twisted.names import dns
from twisted.application import internet, service
from twisted.names import server
from twisted.names import authority
from twisted.names import secondary
class Options(usage.Options):
optParameters = [
["interface", "i", "", "The interface to which to bind"],
["port", "p", "53", "The port on which to listen"],
["resolv-conf", None, None,
"Override location of resolv.conf (implies --recursive)"],
["hosts-file", None, None, "Perform lookups with a hosts file"],
]
optFlags = [
["cache", "c", "Enable record caching"],
["recursive", "r", "Perform recursive lookups"],
["verbose", "v", "Log verbosely"],
]
compData = usage.Completions(
optActions={"interface" : usage.CompleteNetInterfaces()}
)
zones = None
zonefiles = None
def __init__(self):
usage.Options.__init__(self)
self['verbose'] = 0
self.bindfiles = []
self.zonefiles = []
self.secondaries = []
def opt_pyzone(self, filename):
"""Specify the filename of a Python syntax zone definition"""
if not os.path.exists(filename):
raise usage.UsageError(filename + ": No such file")
self.zonefiles.append(filename)
def opt_bindzone(self, filename):
"""Specify the filename of a BIND9 syntax zone definition"""
if not os.path.exists(filename):
raise usage.UsageError(filename + ": No such file")
self.bindfiles.append(filename)
def opt_secondary(self, ip_domain):
"""Act as secondary for the specified domain, performing
        zone transfers from the specified IP (IP[:port]/domain)
"""
args = ip_domain.split('/', 1)
if len(args) != 2:
raise usage.UsageError("Argument must be of the form IP[:port]/domain")
address = args[0].split(':')
if len(address) == 1:
address = (address[0], dns.PORT)
else:
try:
port = int(address[1])
except ValueError:
raise usage.UsageError(
"Specify an integer port number, not %r" % (address[1],))
address = (address[0], port)
self.secondaries.append((address, [args[1]]))
def opt_verbose(self):
"""Increment verbosity level"""
self['verbose'] += 1
def postOptions(self):
if self['resolv-conf']:
self['recursive'] = True
self.svcs = []
self.zones = []
for f in self.zonefiles:
try:
self.zones.append(authority.PySourceAuthority(f))
except Exception:
traceback.print_exc()
raise usage.UsageError("Invalid syntax in " + f)
for f in self.bindfiles:
try:
self.zones.append(authority.BindAuthority(f))
except Exception:
traceback.print_exc()
raise usage.UsageError("Invalid syntax in " + f)
for f in self.secondaries:
svc = secondary.SecondaryAuthorityService.fromServerAddressAndDomains(*f)
self.svcs.append(svc)
self.zones.append(self.svcs[-1].getAuthority())
try:
self['port'] = int(self['port'])
except ValueError:
raise usage.UsageError("Invalid port: %r" % (self['port'],))
def _buildResolvers(config):
"""
Build DNS resolver instances in an order which leaves recursive
resolving as a last resort.
@type config: L{Options} instance
@param config: Parsed command-line configuration
    @return: Two-item tuple of a list of cache resolvers and a list of client
resolvers
"""
from twisted.names import client, cache, hosts
ca, cl = [], []
if config['cache']:
ca.append(cache.CacheResolver(verbose=config['verbose']))
if config['hosts-file']:
cl.append(hosts.Resolver(file=config['hosts-file']))
if config['recursive']:
cl.append(client.createResolver(resolvconf=config['resolv-conf']))
return ca, cl
def makeService(config):
ca, cl = _buildResolvers(config)
f = server.DNSServerFactory(config.zones, ca, cl, config['verbose'])
p = dns.DNSDatagramProtocol(f)
f.noisy = 0
ret = service.MultiService()
for (klass, arg) in [(internet.TCPServer, f), (internet.UDPServer, p)]:
s = klass(config['port'], arg, interface=config['interface'])
s.setServiceParent(ret)
for svc in config.svcs:
svc.setServiceParent(ret)
return ret
| skycucumber/Messaging-Gateway | webapp/venv/lib/python2.7/site-packages/twisted/names/tap.py | Python | gpl-2.0 | 4,832 | 0.001863 |
class OrderError(Exception):
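    """Base exception for order-related errors."""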
pass
class OrderIdentifierError(OrderError):
"""
Order exception that is raised if order identifier was not found.
"""
pass
| druids/django-pyston | pyston/order/exceptions.py | Python | bsd-3-clause | 175 | 0 |
# =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
import copy
import eos.db
from eos.saveddata.damagePattern import DamagePattern as es_DamagePattern
class ImportError(Exception):
pass
class DamagePattern():
instance = None
@classmethod
def getInstance(cls):
if cls.instance is None:
cls.instance = DamagePattern()
return cls.instance
def getDamagePatternList(self):
return eos.db.getDamagePatternList()
def getDamagePattern(self, name):
return eos.db.getDamagePattern(name)
def newPattern(self, name):
p = es_DamagePattern(0, 0, 0, 0)
p.name = name
eos.db.save(p)
return p
def renamePattern(self, p, newName):
p.name = newName
eos.db.save(p)
def deletePattern(self, p):
eos.db.remove(p)
def copyPattern(self, p):
newP = copy.deepcopy(p)
eos.db.save(newP)
return newP
def saveChanges(self, p):
eos.db.save(p)
def importPatterns(self, text):
lookup = {}
current = self.getDamagePatternList()
for pattern in current:
lookup[pattern.name] = pattern
imports, num = es_DamagePattern.importPatterns(text)
for pattern in imports:
if pattern.name in lookup:
match = lookup[pattern.name]
match.__dict__.update(pattern.__dict__)
else:
eos.db.save(pattern)
eos.db.commit()
lenImports = len(imports)
if lenImports == 0:
raise ImportError("No patterns found for import")
if lenImports != num:
raise ImportError("%d patterns imported from clipboard; %d had errors" % (num, num - lenImports))
def exportPatterns(self):
patterns = self.getDamagePatternList()
for i in xrange(len(patterns) - 1, -1, -1):
if patterns[i].name in ("Uniform", "Selected Ammo"):
del patterns[i]
patterns.sort(key=lambda p: p.name)
return es_DamagePattern.exportPatterns(*patterns)
| Ebag333/Pyfa | service/damagePattern.py | Python | gpl-3.0 | 2,873 | 0.000348 |
# This is the Twisted Get Poetry Now! client, version 2.0
import datetime
import optparse
import os
import traceback
from twisted.internet.protocol import Protocol, ClientFactory
def parse_args():
usage = """usage: %prog [options] [hostname]:port ...
This is the Get Poetry Now! client, Twisted version 2.0.
Run it like this:
python get-poetry.py port1 port2 port3 ...
If you are in the base directory of the twisted-intro package,
you could run it like this:
python twisted-client-2/get-poetry.py 10001 10002 10003
to grab poetry from servers on ports 10001, 10002, and 10003.
Of course, there need to be servers listening on those ports
for that to work.
"""
parser = optparse.OptionParser(usage)
_, addresses = parser.parse_args()
if not addresses:
print(parser.format_help())
parser.exit()
def parse_address(addr):
if ':' not in addr:
host = '127.0.0.1'
port = addr
else:
host, port = addr.split(':', 1)
if not port.isdigit():
parser.error('Ports must be integers.')
return host, int(port)
return list(map(parse_address, addresses))
class PoetryProtocol(Protocol):
poem = b""
task_num = 0
def dataReceived(self, data):
        # Called whenever data is received from the transport
self.poem += data
traceback.print_stack()
os._exit(0)
def connectionLost(self, reason):
        self.poemReceived(self.poem)
    def poemReceived(self, poem):
self.factory.poem_finished(self.task_num, poem)
class PoetryClientFactory(ClientFactory):
task_num = 1 # Initial task id
protocol = PoetryProtocol # Tell the base-classes to use this protocol
def __init__(self, poetry_count):
self.poetry_count = poetry_count
self.poems = {} # task_num -> poem
def buildProtocol(self, address):
        # Create an object of the Protocol
# The returned instance will handle input on an incoming server
# connection, and an attribute "factory" pointing to the creating
# factory.
# Alternatively, L{None} may be returned to immediately close the
# new connection.
# Call the base-class's buildProtocol since our Protocol is basic
proto = ClientFactory.buildProtocol(self, address)
proto.task_num = self.task_num # Assign the new protocol its id
self.task_num += 1 # Increment the id
return proto # Return the built protocol
def poem_finished(self, task_num=None, poem=None):
if task_num is not None:
self.poems[task_num] = poem
self.poetry_count -= 1
if self.poetry_count == 0:
self.report()
from twisted.internet import reactor
reactor.stop()
def report(self):
for i in self.poems:
print("Task %d: %d bytes of poetry" %(i, len(self.poems[i])))
def clientConnectionFailed(self, connector, reason):
print("Failed to connect to:", connector.getDestination())
self.poem_finished()
if __name__ == '__main__':
addresses = parse_args()
start = datetime.datetime.now()
factory = PoetryClientFactory(len(addresses))
from twisted.internet import reactor
for address in addresses:
# Get the host and port from the returned tuples
host, port = address
# The .connectTCP method is of interest here.
        # It takes the host and port as the first two parameters
# And also a protocol factory to create the protocol objects on-demand
reactor.connectTCP(host, port, factory)
reactor.run()
elapsed = datetime.datetime.now() - start
print("Got %d poems in %s" %(len(addresses), elapsed))
| GreenJoey/My-Simple-Programs | python/Twisted/krondo Twisted Introduction/twisted-client-2/get-poetry-stack.py | Python | gpl-2.0 | 3,806 | 0.001051 |
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_analyticsprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of AnalyticsProfile Avi RESTful Object
description:
- This module is used to configure AnalyticsProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
apdex_response_threshold:
description:
- If a client receives an http response in less than the satisfactory latency threshold, the request is considered satisfied.
- It is considered tolerated if it is not satisfied and less than tolerated latency factor multiplied by the satisfactory latency threshold.
- Greater than this number and the client's request is considered frustrated.
- Default value when not specified in API or module is interpreted by Avi Controller as 500.
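            - For example, with the default threshold of 500 and the default tolerated factor of 4.0, a response under 500 is satisfied, between 500 and 2000 is tolerated, and anything above 2000 is frustrated.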
apdex_response_tolerated_factor:
description:
- Client tolerated response latency factor.
- Client must receive a response within this factor times the satisfactory threshold (apdex_response_threshold) to be considered tolerated.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
apdex_rtt_threshold:
description:
- Satisfactory client to avi round trip time(rtt).
- Default value when not specified in API or module is interpreted by Avi Controller as 250.
apdex_rtt_tolerated_factor:
description:
- Tolerated client to avi round trip time(rtt) factor.
- It is a multiple of apdex_rtt_tolerated_factor.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
apdex_rum_threshold:
description:
- If a client is able to load a page in less than the satisfactory latency threshold, the pageload is considered satisfied.
            - It is considered tolerated if it is greater than satisfied but less than the tolerated latency multiplied by satisfied latency.
- Greater than this number and the client's request is considered frustrated.
- A pageload includes the time for dns lookup, download of all http objects, and page render time.
- Default value when not specified in API or module is interpreted by Avi Controller as 5000.
apdex_rum_tolerated_factor:
description:
- Virtual service threshold factor for tolerated page load time (plt) as multiple of apdex_rum_threshold.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
apdex_server_response_threshold:
description:
- A server http response is considered satisfied if latency is less than the satisfactory latency threshold.
- The response is considered tolerated when it is greater than satisfied but less than the tolerated latency factor * s_latency.
- Greater than this number and the server response is considered frustrated.
- Default value when not specified in API or module is interpreted by Avi Controller as 400.
apdex_server_response_tolerated_factor:
description:
- Server tolerated response latency factor.
            - Server must respond within this factor times the satisfactory threshold (apdex_server_response_threshold) to be considered tolerated.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
apdex_server_rtt_threshold:
description:
- Satisfactory client to avi round trip time(rtt).
- Default value when not specified in API or module is interpreted by Avi Controller as 125.
apdex_server_rtt_tolerated_factor:
description:
- Tolerated client to avi round trip time(rtt) factor.
- It is a multiple of apdex_rtt_tolerated_factor.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
client_log_config:
description:
- Clientlogconfiguration settings for analyticsprofile.
conn_lossy_ooo_threshold:
description:
- A connection between client and avi is considered lossy when more than this percentage of out of order packets are received.
- Default value when not specified in API or module is interpreted by Avi Controller as 50.
conn_lossy_timeo_rexmt_threshold:
description:
- A connection between client and avi is considered lossy when more than this percentage of packets are retransmitted due to timeout.
- Default value when not specified in API or module is interpreted by Avi Controller as 20.
conn_lossy_total_rexmt_threshold:
description:
- A connection between client and avi is considered lossy when more than this percentage of packets are retransmitted.
- Default value when not specified in API or module is interpreted by Avi Controller as 50.
conn_lossy_zero_win_size_event_threshold:
description:
            - A client connection is considered lossy when percentage of times a packet could not be transmitted due to tcp zero window is above this threshold.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.
conn_server_lossy_ooo_threshold:
description:
- A connection between avi and server is considered lossy when more than this percentage of out of order packets are received.
- Default value when not specified in API or module is interpreted by Avi Controller as 50.
conn_server_lossy_timeo_rexmt_threshold:
description:
- A connection between avi and server is considered lossy when more than this percentage of packets are retransmitted due to timeout.
- Default value when not specified in API or module is interpreted by Avi Controller as 20.
conn_server_lossy_total_rexmt_threshold:
description:
- A connection between avi and server is considered lossy when more than this percentage of packets are retransmitted.
- Default value when not specified in API or module is interpreted by Avi Controller as 50.
conn_server_lossy_zero_win_size_event_threshold:
description:
            - A server connection is considered lossy when percentage of times a packet could not be transmitted due to tcp zero window is above this threshold.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.
description:
description:
- User defined description for the object.
disable_se_analytics:
description:
            - Disable node (service engine) level analytics for vs metrics.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
disable_server_analytics:
description:
- Disable analytics on backend servers.
- This may be desired in container environment when there are large number of ephemeral servers.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_client_close_before_request_as_error:
description:
- Exclude client closed connection before an http request could be completed from being classified as an error.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_gs_down_as_error:
description:
- Exclude queries to gslb services that are operationally down from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_http_error_codes:
description:
- List of http status codes to be excluded from being classified as an error.
            - Error connections or responses impact health score, are included as significant logs, and may be classified as part of a dos attack.
exclude_invalid_dns_domain_as_error:
description:
- Exclude dns queries to domains outside the domains configured in the dns application profile from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_invalid_dns_query_as_error:
description:
- Exclude invalid dns queries from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_no_dns_record_as_error:
description:
- Exclude queries to domains that did not have configured services/records from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_no_valid_gs_member_as_error:
description:
- Exclude queries to gslb services that have no available members from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_persistence_change_as_error:
description:
            - Exclude 'persistence server changed while load balancing' from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_server_dns_error_as_error:
description:
- Exclude server dns error response from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_server_tcp_reset_as_error:
description:
- Exclude server tcp reset from errors.
- It is common for applications like ms exchange.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_syn_retransmit_as_error:
description:
- Exclude 'server unanswered syns' from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_tcp_reset_as_error:
description:
- Exclude tcp resets by client from the list of potential errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
exclude_unsupported_dns_query_as_error:
description:
- Exclude unsupported dns queries from the list of errors.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
hs_event_throttle_window:
description:
- Time window (in secs) within which only unique health change events should occur.
- Default value when not specified in API or module is interpreted by Avi Controller as 1209600.
hs_max_anomaly_penalty:
description:
- Maximum penalty that may be deducted from health score for anomalies.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
hs_max_resources_penalty:
description:
- Maximum penalty that may be deducted from health score for high resource utilization.
- Default value when not specified in API or module is interpreted by Avi Controller as 25.
hs_max_security_penalty:
description:
- Maximum penalty that may be deducted from health score based on security assessment.
- Default value when not specified in API or module is interpreted by Avi Controller as 100.
hs_min_dos_rate:
description:
- Dos connection rate below which the dos security assessment will not kick in.
- Default value when not specified in API or module is interpreted by Avi Controller as 1000.
hs_performance_boost:
description:
- Adds free performance score credits to health score.
- It can be used for compensating health score for known slow applications.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
hs_pscore_traffic_threshold_l4_client:
description:
- Threshold number of connections in 5min, below which apdexr, apdexc, rum_apdex, and other network quality metrics are not computed.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.0.
hs_pscore_traffic_threshold_l4_server:
description:
- Threshold number of connections in 5min, below which apdexr, apdexc, rum_apdex, and other network quality metrics are not computed.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.0.
hs_security_certscore_expired:
description:
- Score assigned when the certificate has expired.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.0.
hs_security_certscore_gt30d:
description:
- Score assigned when the certificate expires in more than 30 days.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_certscore_le07d:
description:
- Score assigned when the certificate expires in less than or equal to 7 days.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.0.
hs_security_certscore_le30d:
description:
- Score assigned when the certificate expires in less than or equal to 30 days.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.0.
hs_security_chain_invalidity_penalty:
description:
- Penalty for allowing certificates with invalid chain.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
hs_security_cipherscore_eq000b:
description:
- Score assigned when the minimum cipher strength is 0 bits.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.0.
hs_security_cipherscore_ge128b:
description:
- Score assigned when the minimum cipher strength is greater than equal to 128 bits.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_cipherscore_lt128b:
description:
- Score assigned when the minimum cipher strength is less than 128 bits.
- Default value when not specified in API or module is interpreted by Avi Controller as 3.5.
hs_security_encalgo_score_none:
description:
- Score assigned when no algorithm is used for encryption.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.0.
hs_security_encalgo_score_rc4:
description:
- Score assigned when rc4 algorithm is used for encryption.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.5.
hs_security_hsts_penalty:
description:
- Penalty for not enabling hsts.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
hs_security_nonpfs_penalty:
description:
- Penalty for allowing non-pfs handshakes.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
hs_security_selfsignedcert_penalty:
description:
- Deprecated.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
hs_security_ssl30_score:
description:
- Score assigned when supporting ssl3.0 encryption protocol.
- Default value when not specified in API or module is interpreted by Avi Controller as 3.5.
hs_security_tls10_score:
description:
- Score assigned when supporting tls1.0 encryption protocol.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_tls11_score:
description:
- Score assigned when supporting tls1.1 encryption protocol.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_tls12_score:
description:
- Score assigned when supporting tls1.2 encryption protocol.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.0.
hs_security_weak_signature_algo_penalty:
description:
- Penalty for allowing weak signature algorithm(s).
- Default value when not specified in API or module is interpreted by Avi Controller as 1.0.
name:
description:
- The name of the analytics profile.
required: true
ranges:
description:
- List of http status code ranges to be excluded from being classified as an error.
resp_code_block:
description:
- Block of http response codes to be excluded from being classified as an error.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the analytics profile.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a custom Analytics profile object
avi_analyticsprofile:
controller: ''
username: ''
password: ''
apdex_response_threshold: 500
apdex_response_tolerated_factor: 4.0
apdex_rtt_threshold: 250
apdex_rtt_tolerated_factor: 4.0
apdex_rum_threshold: 5000
apdex_rum_tolerated_factor: 4.0
apdex_server_response_threshold: 400
apdex_server_response_tolerated_factor: 4.0
apdex_server_rtt_threshold: 125
apdex_server_rtt_tolerated_factor: 4.0
conn_lossy_ooo_threshold: 50
conn_lossy_timeo_rexmt_threshold: 20
conn_lossy_total_rexmt_threshold: 50
conn_lossy_zero_win_size_event_threshold: 2
conn_server_lossy_ooo_threshold: 50
conn_server_lossy_timeo_rexmt_threshold: 20
conn_server_lossy_total_rexmt_threshold: 50
conn_server_lossy_zero_win_size_event_threshold: 2
disable_se_analytics: false
disable_server_analytics: false
exclude_client_close_before_request_as_error: false
exclude_persistence_change_as_error: false
exclude_server_tcp_reset_as_error: false
exclude_syn_retransmit_as_error: false
exclude_tcp_reset_as_error: false
hs_event_throttle_window: 1209600
hs_max_anomaly_penalty: 10
hs_max_resources_penalty: 25
hs_max_security_penalty: 100
hs_min_dos_rate: 1000
hs_performance_boost: 20
hs_pscore_traffic_threshold_l4_client: 10.0
hs_pscore_traffic_threshold_l4_server: 10.0
hs_security_certscore_expired: 0.0
hs_security_certscore_gt30d: 5.0
hs_security_certscore_le07d: 2.0
hs_security_certscore_le30d: 4.0
hs_security_chain_invalidity_penalty: 1.0
hs_security_cipherscore_eq000b: 0.0
hs_security_cipherscore_ge128b: 5.0
hs_security_cipherscore_lt128b: 3.5
hs_security_encalgo_score_none: 0.0
hs_security_encalgo_score_rc4: 2.5
hs_security_hsts_penalty: 0.0
hs_security_nonpfs_penalty: 1.0
hs_security_selfsignedcert_penalty: 1.0
hs_security_ssl30_score: 3.5
hs_security_tls10_score: 5.0
hs_security_tls11_score: 5.0
hs_security_tls12_score: 5.0
hs_security_weak_signature_algo_penalty: 1.0
name: jason-analytics-profile
tenant_ref: Demo
'''
RETURN = '''
obj:
description: AnalyticsProfile (api/analyticsprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
apdex_response_threshold=dict(type='int',),
apdex_response_tolerated_factor=dict(type='float',),
apdex_rtt_threshold=dict(type='int',),
apdex_rtt_tolerated_factor=dict(type='float',),
apdex_rum_threshold=dict(type='int',),
apdex_rum_tolerated_factor=dict(type='float',),
apdex_server_response_threshold=dict(type='int',),
apdex_server_response_tolerated_factor=dict(type='float',),
apdex_server_rtt_threshold=dict(type='int',),
apdex_server_rtt_tolerated_factor=dict(type='float',),
client_log_config=dict(type='dict',),
conn_lossy_ooo_threshold=dict(type='int',),
conn_lossy_timeo_rexmt_threshold=dict(type='int',),
conn_lossy_total_rexmt_threshold=dict(type='int',),
conn_lossy_zero_win_size_event_threshold=dict(type='int',),
conn_server_lossy_ooo_threshold=dict(type='int',),
conn_server_lossy_timeo_rexmt_threshold=dict(type='int',),
conn_server_lossy_total_rexmt_threshold=dict(type='int',),
conn_server_lossy_zero_win_size_event_threshold=dict(type='int',),
description=dict(type='str',),
disable_se_analytics=dict(type='bool',),
disable_server_analytics=dict(type='bool',),
exclude_client_close_before_request_as_error=dict(type='bool',),
exclude_gs_down_as_error=dict(type='bool',),
exclude_http_error_codes=dict(type='list',),
exclude_invalid_dns_domain_as_error=dict(type='bool',),
exclude_invalid_dns_query_as_error=dict(type='bool',),
exclude_no_dns_record_as_error=dict(type='bool',),
exclude_no_valid_gs_member_as_error=dict(type='bool',),
exclude_persistence_change_as_error=dict(type='bool',),
exclude_server_dns_error_as_error=dict(type='bool',),
exclude_server_tcp_reset_as_error=dict(type='bool',),
exclude_syn_retransmit_as_error=dict(type='bool',),
exclude_tcp_reset_as_error=dict(type='bool',),
exclude_unsupported_dns_query_as_error=dict(type='bool',),
hs_event_throttle_window=dict(type='int',),
hs_max_anomaly_penalty=dict(type='int',),
hs_max_resources_penalty=dict(type='int',),
hs_max_security_penalty=dict(type='int',),
hs_min_dos_rate=dict(type='int',),
hs_performance_boost=dict(type='int',),
hs_pscore_traffic_threshold_l4_client=dict(type='float',),
hs_pscore_traffic_threshold_l4_server=dict(type='float',),
hs_security_certscore_expired=dict(type='float',),
hs_security_certscore_gt30d=dict(type='float',),
hs_security_certscore_le07d=dict(type='float',),
hs_security_certscore_le30d=dict(type='float',),
hs_security_chain_invalidity_penalty=dict(type='float',),
hs_security_cipherscore_eq000b=dict(type='float',),
hs_security_cipherscore_ge128b=dict(type='float',),
hs_security_cipherscore_lt128b=dict(type='float',),
hs_security_encalgo_score_none=dict(type='float',),
hs_security_encalgo_score_rc4=dict(type='float',),
hs_security_hsts_penalty=dict(type='float',),
hs_security_nonpfs_penalty=dict(type='float',),
hs_security_selfsignedcert_penalty=dict(type='float',),
hs_security_ssl30_score=dict(type='float',),
hs_security_tls10_score=dict(type='float',),
hs_security_tls11_score=dict(type='float',),
hs_security_tls12_score=dict(type='float',),
hs_security_weak_signature_algo_penalty=dict(type='float',),
name=dict(type='str', required=True),
ranges=dict(type='list',),
resp_code_block=dict(type='list',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'analyticsprofile',
set([]))
if __name__ == '__main__':
main()
| bjolivot/ansible | lib/ansible/modules/network/avi/avi_analyticsprofile.py | Python | gpl-3.0 | 25,965 | 0.003967 |
VERSION = None
BRANCH = 'master' | julien78910/CouchPotatoServer | version.py | Python | gpl-3.0 | 32 | 0.03125 |
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=1, faild_point=100000, path_list=[
[TestAction.create_mini_vm, 'vm1', 'cluster=cluster1'],
[TestAction.reboot_vm, 'vm1'],
[TestAction.create_mini_vm, 'vm2', 'cluster=cluster2'],
[TestAction.create_vm_backup, 'vm2', 'vm2-backup1'],
[TestAction.destroy_vm, 'vm2'],
[TestAction.expunge_vm, 'vm2'],
[TestAction.create_image_from_volume, 'vm1', 'vm1-image1'],
[TestAction.poweroff_only, 'cluster=cluster1'],
[TestAction.create_volume, 'volume1', 'cluster=cluster1', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume, 'volume2', 'cluster=cluster2', 'flag=thick,scsi'],
[TestAction.add_image, 'image2', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
[TestAction.start_vm, 'vm1'],
[TestAction.create_vm_backup, 'vm1', 'vm1-backup2'],
[TestAction.stop_vm, 'vm1'],
[TestAction.use_vm_backup, 'vm1-backup2'],
[TestAction.delete_image, 'image2'],
[TestAction.expunge_image, 'image2'],
[TestAction.create_mini_vm, 'vm3', 'cluster=cluster2'],
[TestAction.create_vm_backup, 'vm3', 'vm3-backup4'],
[TestAction.destroy_vm, 'vm1'],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.create_mini_vm, 'vm4', 'cluster=cluster1'],
[TestAction.migrate_vm, 'vm4'],
[TestAction.attach_volume, 'vm4', 'volume1'],
[TestAction.detach_volume, 'volume1'],
[TestAction.delete_volume, 'volume1'],
[TestAction.create_volume, 'volume3', 'cluster=cluster1', 'flag=scsi'],
[TestAction.attach_volume, 'vm4', 'volume3'],
[TestAction.create_volume_backup, 'volume3', 'volume3-backup5'],
[TestAction.delete_volume_backup, 'volume3-backup5'],
[TestAction.create_mini_vm, 'vm5', 'network=random', 'cluster=cluster2'],
[TestAction.expunge_volume, 'volume1'],
[TestAction.reboot_vm, 'vm5'],
[TestAction.create_volume, 'volume4', 'cluster=cluster1', 'flag=scsi'],
[TestAction.attach_volume, 'vm4', 'volume4'],
[TestAction.create_volume_backup, 'volume4', 'volume4-backup6'],
[TestAction.resize_data_volume, 'volume2', 5*1024*1024],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.attach_volume, 'vm5', 'volume2'],
[TestAction.start_vm, 'vm5'],
[TestAction.create_volume_backup, 'volume2', 'volume2-backup7'],
[TestAction.stop_vm, 'vm5'],
[TestAction.use_volume_backup, 'volume2-backup7'],
])
'''
The final status:
Running:['vm4']
Stopped:['vm3', 'vm5']
Enabled:['vm2-backup1', 'vm1-backup2', 'volume1-backup2', 'vm3-backup4', 'volume4-backup6', 'volume2-backup7', 'vm1-image1']
attached:['volume3', 'volume4', 'volume2']
Detached:[]
Deleted:['vm1', 'volume3-backup5']
Expunged:['vm2', 'volume1', 'image2']
Ha:[]
Group:
vm_backup2:['vm1-backup2', 'volume1-backup2']---vm1@volume1
vm_backup3:['vm3-backup4']---vm3@
vm_backup1:['vm2-backup1']---vm2@
''' | zstackio/zstack-woodpecker | integrationtest/vm/mini/multiclusters/paths/multi_path173.py | Python | apache-2.0 | 2,948 | 0.018996 |
import asyncio
import random
import checks
from TextChecker import TextChecker
from database import Database
from checks import *
from data.rp_texts import *
from data.links import *
from web import Web
from roleplay.Player import Player
import discord
from discord.ext import commands
class Roleplay(commands.Cog):
def __init__(self, bot: commands.Bot):
self.parameters = {}
self.bot = bot
self.delta = 10
self.players = {}
self.playerids = []
self.announce_message = None
self.system_message = None
self.turn_number = 0
self.prompt = None
@commands.command(name='say', case_insensitive=True)
@commands.check(checks.can_manage_rp)
@commands.check(checks.in_say_channel)
async def _say(self, ctx, channel: discord.TextChannel, *, message):
"""
*RP Moderator only* | *#rp-scripting only* | Sends a message as a bot in specified channel.
"""
await channel.send(message)
@commands.command(name='dm', case_insensitive=True)
@commands.check(checks.can_manage_rp)
@commands.check(checks.in_say_channel)
async def _dm(self, ctx, user: discord.User, *, message):
"""
        *RP Moderator only* | *#rp-scripting only* | Sends a direct message as the bot to the specified member. Use the full name (name#number), a ping, or an ID.
"""
channel = user.dm_channel
if channel is None:
await user.create_dm()
channel = user.dm_channel
await channel.send(message)
await ctx.message.add_reaction('✅')
@commands.command(name='rm', case_insensitive=True)
@commands.check(checks.can_manage_rp)
@commands.check(checks.in_say_channel)
async def _remove_message(self, ctx, message: discord.Message):
"""
*RP Moderator only* | *#rp-scripting only* | Removes a specified message that the bot posted. Use message ID.
"""
if message.author.id == self.bot.user.id:
await message.delete()
else:
to_delete = await ctx.send("Can't remove that message.")
await asyncio.sleep(7)
await to_delete.delete()
if not isinstance(ctx.channel, discord.DMChannel):
await ctx.message.delete()
@commands.command(name='medit', case_insensitive=True)
@commands.check(checks.can_manage_rp)
@commands.check(checks.in_say_channel)
async def _edit_message(self, ctx, message: discord.Message, *, text):
"""
        *RP Moderator only* | *#rp-scripting only* | Edits a specified message that the bot posted. Use the message ID.
"""
await message.edit(content=text)
@commands.group(name='rp', case_insensitive=True)
async def _rp(self, ctx):
"""
Base command for RP utilities. Use `?help rp` for details.
Mind that parameters [who] and [channel] are admin exclusive.
Here is a list of all possible subcommands:
"""
await ctx.message.delete()
if ctx.invoked_subcommand is None:
await ctx.send("Subcommand required!")
@_rp.command(name='turn')
@commands.check(checks.can_manage_rp)
async def _turn(self, ctx):
"""
        *RP Moderator only* | Tells the bot to post the actions used this turn and start a new turn.
"""
message = '**::TURN {}::**\n'.format(self.turn_number)
message += 'Turn ended with these actions taking place:\n'
message += '```\n'
message += "{:^35}|{:^25}\n".format('Player', 'Action')
message += "{:*^35}|{:*^25}\n".format('', '')
for player_id, (player, action) in self.players.items():
player = self.system_message.guild.get_member(player_id)
message += "{:<35}|{:<25}\n".format(player.nick or player.name, action or '<no action set>')
message += '```\n'
message += 'New turn has begun, please state your actions.'
await self.bot.get_channel(326954112744816643).send(message)
# await self.bot.get_channel(config.ANNOUNCE_CHANNEL).send(message)
for player_id in self.playerids:
player, action = self.players[player_id]
self.players[player_id] = (player, None)
self.turn_number += 1
await self.post_players(True)
@_rp.command(name='start')
@commands.check(checks.can_manage_rp)
async def _start(self, ctx):
"""
*RP Moderator only* | Creates a new RP session if there is not one running already. Use `?help rp start` for more information about RP sessions.
Players can join the session via `?rp join`
        Players are supposed to state their action with the `?rp use` command each turn.
        Turns are ended by the `?rp turn` command.
        The session is over when the `?rp end` command is used.
In case the session wasn't closed properly (bot crash, etc.) use `?rp clean` to reset it.
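        An illustrative example of one turn once the session is running (player
        names and slot numbers here are hypothetical):
        Player A: `?rp use 1` - queues their equipped Light weapon.
        Player B: `?rp pass` - skips the turn.
        Moderator: `?rp turn` - posts both actions and opens the next turn.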
"""
announce_channel = self.bot.get_channel(326954112744816643)
system_channel = self.bot.get_channel(374691520055345162)
# announce_channel = self.bot.get_channel(config.ANNOUNCE_CHANNEL)
# system_channel = self.bot.get_channel(config.ADMINISTRATION_CHANNEL)
db = await Database.get_connection(self.bot.loop)
insert = "INSERT INTO roleplay_session(announce_id, system_id) values ($1, $2)"
select = "SELECT 1 FROM roleplay_session WHERE done is FALSE"
async with db.transaction():
if await db.fetchval(select) is None:
announce_message = await announce_channel.send("Session started. To participate, use `?rp join`")
system_message = await system_channel.send("Session participants")
self.announce_message = announce_message
self.system_message = system_message
self.turn_number = 1
await db.execute(insert, *(str(announce_message.id), str(system_message.id)))
else:
                await ctx.send('There is already an unfinished session. Please end it before starting a new one.')
await Database.close_connection(db)
@_rp.command(name='join')
async def _join(self, ctx):
"""
        Joins you to the currently open session, if there is one at the moment.
"""
if self.announce_message is None:
await ctx.send('No active session')
return
player_id = ctx.author.id
for player in self.playerids:
if player == player_id:
to_delete = await ctx.send('Player is already in session')
await asyncio.sleep(1)
await to_delete.delete()
return
args = {
'discord_id': ctx.author.id,
'key': config.TRANSACTION_KEY
# 'discord_id': '144229491907100672'
}
response = await Web.get_response(user_data_link, args)
await Web.get_response(lock_link, args)
player = Player(player_id, response['Inventory'])
self.players[player_id] = (player, None)
self.playerids.append(player_id)
await self.announce_message.edit(
content='{}\n{} joined'.format(self.announce_message.content, ctx.author.nick or ctx.author.name))
await self.post_players()
async def post_players(self, new=False):
if new:
self.system_message = await self.bot.get_channel(374691520055345162).send('placeholder')
# self.system_message = await self.bot.get_channel(config.ADMINISTRATION_CHANNEL).send('placeholder')
message = '```\n'
message += "{:^35}|{:^25}\n".format('Player', 'Action')
message += "{:*^35}|{:*^25}\n".format('', '')
for player_id, (player, action) in self.players.items():
player = self.system_message.guild.get_member(player_id)
message += "{:<35}|{:<25}\n".format(player.nick or player.name, action or '<no action set>')
message += '```'
await self.system_message.edit(content=message)
@_rp.command(name='use')
async def _use(self, ctx, *, what=None):
"""
        Queues an action for you this turn. Please use `?help rp use` for more information.
        Options for 'what' are:
        a number 1-9 for equipped items, in the order of the character list:
        1 - Light
2 - Medium
3 - Heavy
4 - Melee
5 - Defense
6, 7, 8, 9 - Consumable !!IMPORTANT - will consume your item on use
        To use equipped utility slots, use the abbreviated form of the name:
Chaff launcher -> chaff
Auto Field Maintenance Unit -> afmu
Environmental Layout Scanner -> els
Heat Sink Launcher -> hsl
Kill Warrant Scanner -> kws
Shield Cell Bank -> scb
Encryption Cracking Unit -> ecu
Holo-Me Decoy Projector -> hdp
Virtual Distortion Cloak -> vdc
Electronic Ghost Generator -> egg
        The last option is 'hack', which will use your equipped hacking tool.
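        Illustrative examples (hypothetical - the exact items depend on your locked inventory):
        `?rp use 1` - queue your equipped Light weapon
        `?rp use 7` - queue and consume your second consumable slot
        `?rp use chaff` - queue your equipped Chaff Launcher
        `?rp use hack` - start a hack; a moderator is prompted for the level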
"""
if what is None:
return
if ctx.author.id in self.playerids and self.players[ctx.author.id][1] is None:
player, action = self.players[ctx.author.id]
if what == '1':
action = player.light[1] if player.light else None
elif what == '2':
action = player.medium[1] if player.medium else None
elif what == '3':
action = player.heavy[1] if player.heavy else None
elif what == '4':
action = player.melee[1] if player.melee else 'Punch'
elif what == '5':
action = player.defense[1] if player.defense else None
elif what == '6':
action = player.disposable1[1] if player.disposable1 else None
if action:
await player.use_item(1)
elif what == '7':
action = player.disposable2[1] if player.disposable2 else None
if action:
await player.use_item(2)
elif what == '8':
action = player.disposable3[1] if player.disposable3 else None
if action:
await player.use_item(3)
elif what == '9':
action = player.disposable4[1] if player.disposable4 else None
if action:
await player.use_item(4)
elif what == 'hack':
if player.gloves:
if player.gloves[1].lower().__contains__("hacking") and player.gloves[1].lower().__contains__("system"):
action = await self._hack(ctx)
if not action:
return
elif player.have_util(what):
action = what
await getattr(self, '_' + action)(ctx)
self.players[ctx.author.id] = (player, action)
if action is None:
to_delete = await ctx.send("You don't own a tool required to do this action")
await asyncio.sleep(2)
await to_delete.delete()
else:
to_delete = await ctx.send("Action registered.")
await asyncio.sleep(2)
await to_delete.delete()
await self.post_players()
@_rp.command(name='pass')
async def _pass(self, ctx):
"""
        Passes the current turn for you.
"""
if ctx.author.id in self.playerids and self.players[ctx.author.id][1] is None:
player, action = self.players[ctx.author.id]
action = 'pass'
self.players[ctx.author.id] = (player, action)
to_delete = await ctx.send("Action registered.")
await asyncio.sleep(2)
await to_delete.delete()
await self.post_players()
@_rp.command(name='end')
@commands.check(checks.can_manage_rp)
async def _end(self, ctx):
"""
        *RP Moderator only* | Ends the currently open RP session.
"""
db = await Database.get_connection(self.bot.loop)
probe = "SELECT 1 FROM roleplay_session WHERE done IS FALSE"
select = "SELECT session_id, announce_id, system_id FROM roleplay_session WHERE done = FALSE"
update = "UPDATE roleplay_session SET done = TRUE WHERE session_id = $1"
async with db.transaction():
if await db.fetchval(probe) is None:
to_delete = await ctx.send("There is no open session to close.")
await asyncio.sleep(2)
await to_delete.delete()
else:
async for (session_id, announce_id, system_id) in db.cursor(select):
sys_message = await self.bot.get_channel(374691520055345162).fetch_message(int(system_id))
# sys_message = await self.bot.get_channel(config.ADMINISTRATION_CHANNEL).fetch_message(int(system_id))
self.players.clear()
self.playerids = []
self.announce_message = None
self.system_message = None
await db.execute(update, session_id)
await self.bot.get_channel(326954112744816643).send('Session ended. Thanks for participating')
# await self.bot.get_channel(config.ANNOUNCE_CHANNEL).send('Session ended. Thanks for participating')
await sys_message.edit(content='{}\nSession ended'.format(sys_message.content))
await Database.close_connection(db)
args = {
'discord_id': 'all',
'key' : config.TRANSACTION_KEY
}
await Web.get_response(unlock_link, args)
@_rp.command(name='tool')
@commands.check(checks.can_manage_rp)
async def _tool(self, ctx, what, who, channel: discord.TextChannel):
"""
*RP Moderator only* | Used in place of old RP utility commands.
Please refer to `?help rp use` for the names of utilities.
        This command can only be used for utilities (like hack, EGG, ECU...). It will not work on other equipment.
"""
try:
await getattr(self, '_' + what)(ctx, who, channel)
except AttributeError as ex:
print(ex)
await ctx.send("tool {} doesn't exist".format(what))
@_rp.command(name='clean')
@commands.check(checks.can_manage_rp)
async def _clean(self, ctx):
"""
*RP Moderator only* | Force-closes all RP sessions
"""
db = await Database.get_connection(self.bot.loop)
update = "UPDATE roleplay_session SET done = TRUE WHERE done IS FALSE"
async with db.transaction():
await db.execute(update)
self.players.clear()
self.playerids = []
self.announce_message = None
self.system_message = None
await ctx.send('Sessions cleaned')
await Database.close_connection(db)
args = {
'discord_id': 'all',
'key' : config.TRANSACTION_KEY
}
await Web.get_response(unlock_link, args)
# @_rp.command(name='set')
@commands.check(checks.can_manage_rp)
async def _set(self, ctx, what: str, *params):
"""
        *RP Moderator only* | Helper command to set various parameters of the RP session.
        Currently it can be used only to set the hacking target, for example `?rp set target 3` sets the hack target to 3.
"""
if what == 'target' and params[0] is not None:
self.parameters[what] = params[0]
await ctx.send('target set to {}'.format(params[0]))
elif what == 'channel' and params[0] is not None:
self.parameters[what] = params[0]
else:
to_delete = await ctx.send('Unknown parameter!')
await asyncio.sleep(2)
await to_delete.delete()
def react_check(self, reaction, user):
if user is None or user.id == self.bot.user.id:
return False
if reaction.message.id != self.prompt.id:
return False
for emoji in ['1\u20e3', '2\u20e3', '3\u20e3', '4\u20e3', '5\u20e3', '6\u20e3']:
if reaction.emoji == emoji:
return True
return False
async def _hack(self, ctx, who=None, channel: discord.TextChannel = None):
"""
        Initiates a hack with the specified difficulty.
"""
prompt = await self.bot.get_channel(374691520055345162).send('{} is trying to hack something. Please select the level.'.format(ctx.author))
await prompt.add_reaction('1\u20e3')
await prompt.add_reaction('2\u20e3')
await prompt.add_reaction('3\u20e3')
await prompt.add_reaction('4\u20e3')
await prompt.add_reaction('5\u20e3')
await prompt.add_reaction('6\u20e3')
self.prompt = prompt
difficulty = 0
try:
reaction, user = await self.bot.wait_for('reaction_add', check=self.react_check, timeout=60.0)
reaction = reaction.emoji
if reaction == '1\u20e3':
difficulty = 1
elif reaction == '2\u20e3':
difficulty = 2
elif reaction == '3\u20e3':
difficulty = 3
elif reaction == '4\u20e3':
difficulty = 4
elif reaction == '5\u20e3':
difficulty = 5
elif reaction == '6\u20e3':
difficulty = 6
except asyncio.TimeoutError:
            await ctx.send('Admin is busy. Please try to set the hack again later!')
return
if not await can_manage_bot(ctx):
who = None
channel = None
difficulty = abs(difficulty)
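        # Per-stage failure thresholds (percent): each core process rolls 0-100 and
        # fails when the roll lands at or below the stage's threshold, so the later
        # stages of a high-level hack are the riskiest; -1 means a stage cannot fail.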
if difficulty > 0:
limit = [
10, 25, 40, 50, 70, 75,
]
else:
limit = [
-1, -1, -1, -1, -1, -1,
]
who = who or ctx.message.author
channel = channel or ctx.channel
embed = discord.Embed(title="**::Hacking sequence initiated for Security Level {}::**".format(abs(difficulty)),
description=TextChecker.replace_emotes(
":rp_utility1: Encryption Cracking Unit paired with device.\n"
":rp_utility1: Emulation Program Functional.\n"
":rp_utility1: Data Package Compilation Program Functional.\n"
":rp_utility1: Virus Defense Program Functional.", self.bot),
colour=discord.Colour.orange())
if isinstance(who, discord.Member):
embed.set_author(name=who.nick or who.display_name, icon_url=who.avatar_url)
else:
if who is not None:
embed.set_author(name=who)
else:
embed.set_author(name='Unknown user')
message = await channel.send('', embed=embed)
await asyncio.sleep(self.delta)
for i in range(abs(difficulty)):
if i > 0:
embed.remove_field(0)
await asyncio.sleep(self.delta)
prob = random.randint(0, 100)
if prob > limit[i]:
embed.add_field(name="**[Core Process {} of {}]**".format(i + 1, abs(difficulty)),
value=hacks_pass[i].format(embed.author.name), inline=False)
await message.edit(embed=embed)
else:
embed.add_field(name="**[Core Process {} of {}]**".format(i + 1, abs(difficulty)),
value=hacks_fail[i].format(embed.author.name), inline=False)
await message.edit(embed=embed)
await asyncio.sleep(self.delta)
embed.colour = discord.Colour.red()
embed.add_field(name="**::Hacking sequence failed::**",
value=TextChecker.replace_emotes(
":rp_utility0: Encryption Cracking Unit disconnected from device.\n"
":rp_utility0: Emulation Program was locked out of the system.\n"
":rp_utility0: Data Package Failed, purging corrupted data.\n"
":rp_utility1: All hostile viruses quarantined and purged.\n"
":rp_utility1: Heat Surge Detected.\n"
"Allow **30 seconds** for utility to cool for optimal performance.", self.bot))
await message.edit(embed=embed)
return 'hack'
await asyncio.sleep(self.delta)
embed.colour = discord.Colour.green()
embed.add_field(name="**::Hacking sequence was completed successfully::**",
value=TextChecker.replace_emotes(
":rp_utility1: Encryption Cracking Unit paired with device.\n"
":rp_utility1: Emulation Program Operated Successfully.\n"
":rp_utility1: Data Package Created, ready for download to memory drive.\n"
":rp_utility1: All hostile viruses quarantined and purged.\n"
":rp_utility1: Heat Surge Detected.\n"
"Allow **60 seconds** for utility to cool for optimal performance.", self.bot))
await message.edit(embed=embed)
return 'hack'
async def _scb(self, ctx, who=None, channel: discord.TextChannel = None):
"""
Activates Shield Cell Bank.
"""
if not await can_manage_bot(ctx):
who = None
channel = None
who = who or ctx.message.author
channel = channel or ctx.channel
chance = random.randint(1, 100)
embed = discord.Embed(title="**::Shield Cell Bank::**")
if isinstance(who, discord.Member):
embed.set_author(name=who.nick or who.display_name, icon_url=who.avatar_url)
else:
embed.set_author(name=who)
if chance <= 60:
embed.description = TextChecker.replace_emotes(
"Processing…\n"
"Processing…\n"
"Processing…\n"
":rp_utility1: Personal Shield Devices are recharged to full capacity.\n"
"Processing…\n"
"Processing…\n"
"Processing…\n"
":rp_utility1: Thermal Weaponry are recharged to full capacity.\n"
":rp_utility1: Massive Heat Surge Detected.\n"
"Allow **5 minutes** for utility to cool for optimal performance.",
self.bot)
embed.colour = discord.Colour.green()
elif chance > 90:
embed.description = TextChecker.replace_emotes(
"Processing…\n"
"Processing…\n"
":rp_utility0: Module Malfunction Detected.\n"
":rp_utility0: Personal Shield Devices failed to recharge.\n"
":rp_utility0: Thermal Weaponry failed to recharge.\n"
":rp_utility1: Massive Heat Surge Detected.\n"
":rp_utility0: Meltdown Detected.\n"
"Allow **5 minutes** for utility to cool before triggering.",
self.bot)
embed.colour = discord.Colour.red()
else:
            embed.description = TextChecker.replace_emotes(
                "Processing…\n"
                "Processing…\n"
                "Processing…\n"
                ":rp_utility1: Personal Shield Devices are recharged to full capacity.\n"
                "Processing…\n"
                "Processing…\n"
                "Processing…\n"
                ":rp_utility1: Thermal Weaponry are recharged to full capacity.\n"
":rp_utility1: Heat Surge Detected.\n"
"Allow **60 seconds** for utility to cool for optimal performance.",
self.bot)
embed.colour = discord.Colour.orange()
await channel.send('', embed=embed)
async def _afmu(self, ctx, who=None, channel: discord.TextChannel = None):
"""
Activates Auto Field Maintenance Unit.
"""
if not await can_manage_bot(ctx):
who = None
channel = None
who = who or ctx.message.author
channel = channel or ctx.channel
chance = random.randint(1, 100)
embed = discord.Embed(title="**::Auto Field Maintenance Unit::**")
if isinstance(who, discord.Member):
embed.set_author(name=who.nick or who.display_name, icon_url=who.avatar_url)
else:
embed.set_author(name=who)
if chance <= 50:
embed.description = TextChecker.replace_emotes(
"Processing…\n"
"Processing…\n"
"Processing…\n"
":rp_utility1: Armor Integrity restored to 100%.\n"
"Processing…\n"
"Processing…\n"
"Processing…\n"
":rp_utility1: Armor Modifier Integrity restored to 100%.\n"
"Processing…\n"
"Processing…\n"
"Processing…\n"
":rp_utility1: Malfunctioned Accessories restored to 100%.\n"
"Processing…\n"
"Processing…\n"
":rp_utility1: Malfunctioned Utilities restored to 100%.\n"
":rp_utility1: Large Heat Surge Detected.\n"
":rp_utility0: Meltdown Detected.\n"
"Allow **10 minutes** for utility to cool for optimal performance.",
self.bot)
embed.colour = discord.Colour.green()
elif chance > 75:
embed.description = TextChecker.replace_emotes(
"Processing…\n"
"Processing…\n"
":rp_utility0: Module Malfunction Detected.\n"
":rp_utility0: Armor Integrity ignored by device.\n"
":rp_utility0: Armor Modifier Integrity ignored by device.\n"
":rp_utility0: Malfunctioned Accessories ignored by device.\n"
":rp_utility0: Malfunctioned Utilities ignored by device.\n"
":rp_utility1: Large Heat Surge Detected.\n"
":rp_utility0: Meltdown Detected.\n"
"Allow **10 minutes** for utility to cool before triggering.",
self.bot)
embed.colour = discord.Colour.red()
else:
embed.description = TextChecker.replace_emotes(
"Processing…\n"
":rp_utility0: Armor Integrity ignored by device.\n"
"Processing…\n"
":rp_utility0: Armor Modifier Integrity ignored by device.\n"
"Processing…\n"
"Processing…\n"
"Processing…\n"
":rp_utility1: Malfunctioned Accessories restored to 100%.\n"
"Processing…\n"
"Processing…\n"
":rp_utility1: Malfunctioned Utilities restored to 100%.\n"
":rp_utility1: Large Heat Surge Detected.\n"
":rp_utility0: Meltdown Detected.\n"
"Allow **10 minutes** for utility to cool for optimal performance.",
self.bot)
embed.colour = discord.Colour.orange()
await channel.send('', embed=embed)
async def _chaff(self, ctx, who=None, channel: discord.TextChannel = None):
"""
Activates Chaff Launcher.
"""
if not await can_manage_bot(ctx):
who = None
channel = None
who = who or ctx.message.author
channel = channel or ctx.channel
chance = random.randint(1, 100)
embed = discord.Embed(title="**::Chaff Launcher::**")
if isinstance(who, discord.Member):
embed.set_author(name=who.nick or who.display_name, icon_url=who.avatar_url)
else:
embed.set_author(name=who)
if chance <= 90:
embed.description = TextChecker.replace_emotes(
":rp_utility1: Chaff launched successfully.\n"
":rp_utility1: Hostile Sensors are unable to track for 20 Seconds.\n"
":rp_utility1: Minor Heat Surge Detected.\n"
"Allow **30 seconds** for utility to cool for optimal performance.", self.bot)
embed.colour = discord.Colour.green()
else:
embed.description = TextChecker.replace_emotes(
":rp_utility0: Module Malfunction Detected.\n"
":rp_utility0: Chaff failed to launch.\n"
":rp_utility1: Minor Heat Surge Detected.\n"
"Allow **30 seconds** for utility to cool before triggering.", self.bot)
embed.colour = discord.Colour.red()
await channel.send('', embed=embed)
async def _els(self, ctx, who=None, channel: discord.TextChannel = None):
"""
Activates Environmental Layout Scanner.
"""
if not await can_manage_bot(ctx):
who = None
channel = None
who = who or ctx.message.author
channel = channel or ctx.channel
chance = random.randint(1, 100)
embed = discord.Embed(title="**::Environmental Layout Scanner::**")
if isinstance(who, discord.Member):
embed.set_author(name=who.nick or who.display_name, icon_url=who.avatar_url)
else:
embed.set_author(name=who)
if chance <= 50:
embed.description = TextChecker.replace_emotes(
"Processing…\n"
"Processing…\n"
"Processing…\n"
":rp_utility1: Scan completed successfully.\n"
"Processing…\n"
":rp_utility1: Landscape and structure layout updated.\n"
"Processing…\n"
":rp_utility1: Data Package created, ready to download to a memory drive.\n"
":rp_utility1: Information updated to any detected Visual Assistant Systems in the squad.\n"
":rp_utility1: Heat Surge Detected.\n"
"Allow **60 seconds** for utility to cool for optimal performance.",
self.bot)
embed.colour = discord.Colour.green()
elif chance > 90:
embed.description = TextChecker.replace_emotes(
"Processing…\n"
"Processing…\n"
":rp_utility0: Module Malfunction Detected.\n"
":rp_utility0: Scan failed.\n"
":rp_utility0: Landscape and structure layout failed to update.\n"
"Processing…\n"
"Processing…\n"
":rp_utility0: Data Package failed, purging corrupted data.\n"
":rp_utility1: Heat Surge Detected.\n"
"Allow **60 seconds** for utility to cool before triggering.",
self.bot)
embed.colour = discord.Colour.red()
else:
embed.description = TextChecker.replace_emotes(
"Processing…\n"
"Processing…\n"
"Processing…\n"
":rp_utility1: Scan completed successfully.\n"
"Processing…\n"
":rp_utility1: Landscape and structure layout updated.\n"
"Processing…\n"
"Processing…\n"
":rp_utility1: Valuable insight on environment detected.\n"
"Processing…\n"
":rp_utility1: Data Package created, ready to download to a memory drive.\n"
":rp_utility1: Information updated to any detected Visual Assistant Systems in the squad.\n"
":rp_utility1: Heat Surge Detected.\n"
"Allow **60 seconds** for utility to cool for optimal performance.",
self.bot)
embed.colour = discord.Colour.orange()
await channel.send('', embed=embed)
async def _ecu(self, ctx, who=None, channel: discord.TextChannel = None):
"""
Activates Encryption Cracking Unit.
"""
if not await can_manage_bot(ctx):
who = None
channel = None
who = who or ctx.message.author
channel = channel or ctx.channel
chance = random.randint(1, 100)
embed = discord.Embed(title="**::Encryption Cracking Unit::**")
if isinstance(who, discord.Member):
embed.set_author(name=who.nick or who.display_name, icon_url=who.avatar_url)
else:
embed.set_author(name=who)
if chance <= 50:
embed.description = TextChecker.replace_emotes(
":rp_utility1: Detected encrypted data deciphered.\n"
":rp_utility1: Any communications chatter intercepted.\n"
":rp_utility1: Hostile Viruses Purged.\n"
"Processing…\n"
"Processing…\n"
":rp_utility1: All deciphered data stored on memory device.\n"
":rp_utility1: Heat Surge Detected.\n"
"Allow **2 Minutes** for utility to cool for optimal performance.", self.bot)
embed.colour = discord.Colour.green()
elif chance > 80:
embed.description = TextChecker.replace_emotes(
"Processing…\n"
"Processing…\n"
"Processing…\n"
":rp_utility0: Module Malfunction Detected\n"
"Processing…\n"
":rp_utility0: The device has failed to respond.\n"
"Processing…\n"
":rp_utility1: Massive Heat Surge Detected.\n"
"Allow **2 Minutes** for utility to cool for optimal performance.",
self.bot)
embed.colour = discord.Colour.red()
else:
embed.description = TextChecker.replace_emotes(
":rp_utility1: Detected encrypted data deciphered.\n"
"Processing…\n"
":rp_utility0: Failed to intercept communications chatter.\n"
":rp_utility1: Hostile Viruses Purged.\n"
"Processing…\n"
"Processing…\n"
":rp_utility1: All deciphered data stored on memory device.\n"
":rp_utility1: Heat Surge Detected.\n"
"Allow **2 Minutes** for utility to cool for optimal performance.", self.bot)
embed.colour = discord.Colour.orange()
await channel.send('', embed=embed)
async def _hsl(self, ctx, who=None, channel: discord.TextChannel = None):
"""
Activates Heat Sink Launcher.
"""
if not await can_manage_bot(ctx):
who = None
channel = None
who = who or ctx.message.author
channel = channel or ctx.channel
chance = random.randint(1, 100)
embed = discord.Embed(title="**::Heat Sink Launcher::**")
if isinstance(who, discord.Member):
embed.set_author(name=who.nick or who.display_name, icon_url=who.avatar_url)
else:
embed.set_author(name=who)
if chance <= 90:
embed.description = TextChecker.replace_emotes(
"Processing…\n"
"Processing...\n"
":rp_utility1: All Generated Heat successfully pulled from Utilities.\n"
"Processing…\n"
"Processing…\n"
"Processing...\n"
":rp_utility1: All Generated Heat successfully pulled from Thermal Weaponry.\n"
"Processing…\n"
":rp_utility1: Heat Sink spin cycle initiated, preparing to launch.\n"
"Processing…\n"
"Processing…\n"
":rp_utility1: Heat Sink launched successfully.",
self.bot)
embed.colour = discord.Colour.green()
else:
embed.description = TextChecker.replace_emotes(
"Processing…\n"
"Processing...\n"
":rp_utility1: All Generated Heat successfully pulled from Utilities.\n"
"Processing…\n"
"Processing…\n"
"Processing...\n"
":rp_utility1: All Generated Heat successfully pulled from Thermal Weaponry.\n"
"Processing…\n"
":rp_utility1: Heat Sink spin cycle initiated, preparing to launch.\n"
"Processing…\n"
"Processing…\n"
":rp_utility0: Heat buildup exceeds Heat Sink capacity. Preparing to Overcharge disk.\n"
"WARNING: Keep clear of Heat Sink when launched;\n"
":rp_utility1: Overcharged Heat Sink launched, certain to explode on contact.\n"
"Utility ready for use.", self.bot)
embed.colour = discord.Colour.red()
await channel.send('', embed=embed)
async def _kws(self, ctx, who=None, channel: discord.TextChannel = None):
"""
Activates Kill Warrant Scanner.
"""
if not await can_manage_bot(ctx):
who = None
channel = None
who = who or ctx.message.author
channel = channel or ctx.channel
chance = random.randint(1, 100)
embed = discord.Embed(title="**::Kill Warrant Scanner::**")
if isinstance(who, discord.Member):
embed.set_author(name=who.nick or who.display_name, icon_url=who.avatar_url)
else:
embed.set_author(name=who)
if chance <= 90:
embed.description = TextChecker.replace_emotes(
"Processing…\n"
"Processing…\n"
"Processing…\n"
"Processing…\n"
"Processing…\n"
"Processing...\n"
":rp_utility1: Identity Scan Completed.\n"
":rp_utility1: Information updated to any detected Visual Assistant Systems in the squad.\n"
":rp_utility1: Heat Surge Detected.\n"
"Allow **30 seconds** for utility to cool for optimal performance.",
self.bot)
embed.colour = discord.Colour.green()
else:
embed.description = TextChecker.replace_emotes(
"Processing…\n"
"Processing…\n"
"Processing…\n"
":rp_utility0: Module Malfunction Detected.\n"
":rp_utility1: Identity Scan Failed.\n"
":rp_utility1: Heat Surge Detected.\n"
"Allow **60 seconds** for utility to cool before triggering.",
self.bot)
embed.colour = discord.Colour.red()
await channel.send('', embed=embed)
async def _egg(self, ctx, who=None, channel: discord.TextChannel = None):
"""
Activates Electronic Ghost Generator.
"""
if not await can_manage_bot(ctx):
who = None
channel = None
who = who or ctx.message.author
channel = channel or ctx.channel
chance = random.randint(1, 100)
embed = discord.Embed(title="**::Electronic Ghost Generator::**")
if isinstance(who, discord.Member):
embed.set_author(name=who.nick or who.display_name, icon_url=who.avatar_url)
else:
embed.set_author(name=who)
if chance <= 90:
embed.description = TextChecker.replace_emotes(
":rp_utility1: Frequencies generated successfully.\n"
":rp_utility1: Effective range is **100 Meters**.\n"
":rp_utility1: All individuals within 100 Meters are delirious and will experience hallucinations.\n"
":rp_utility1: Massive Heat Surge Detected.\n"
"Allow **2 Minutes** for utility to cool for optimal performance.\n"
, self.bot)
embed.colour = discord.Colour.green()
else:
embed.description = TextChecker.replace_emotes(
":rp_utility1: Frequencies generated successfully.\n"
"Processing…\n"
"Processing…\n"
":rp_utility0: Module Malfunction Detected\n"
"Processing…\n"
":rp_utility0: Effective range is **5 Meters**.\n"
":rp_utility1: " + (
who.nick or who.display_name) + " is delirious and will experience hallucinations.\n"
":rp_utility1: Massive Heat Surge Detected.\n"
"Allow **2 Minutes** for utility to cool for optimal performance.\n",
self.bot)
embed.colour = discord.Colour.red()
await channel.send('', embed=embed)
async def _hdp(self, ctx, who=None, channel: discord.TextChannel = None):
"""
Activates Holo-Me Decoy Projector.
"""
if not await can_manage_bot(ctx):
who = None
channel = None
who = who or ctx.message.author
channel = channel or ctx.channel
chance = random.randint(1, 100)
embed = discord.Embed(title="**::Holo-Me Decoy Projector::**")
if isinstance(who, discord.Member):
embed.set_author(name=who.nick or who.display_name, icon_url=who.avatar_url)
else:
embed.set_author(name=who)
if chance <= 90:
embed.description = TextChecker.replace_emotes(
":rp_utility1: 28 Decoy Clones projected successfully.\n"
":rp_utility1: Audio Shimmering transmitted successfully.\n"
":rp_utility1: Immune to targeting for 10 Seconds.\n"
":rp_utility1: Massive Heat Surge Detected.\n"
"Allow **2 Minutes** for utility to cool for optimal performance.", self.bot)
embed.colour = discord.Colour.green()
else:
embed.description = TextChecker.replace_emotes(
"Processing…\n"
"Processing…\n"
":rp_utility0: Module Malfunction Detected.\n"
":rp_utility0: Decoy Clones failed to project.\n"
":rp_utility0: Audio Shimmering failed to transmit.\n"
":rp_utility1: Massive Heat Surge Detected.\n"
"Allow **2 Minutes** for utility to cool before triggering.", self.bot)
embed.colour = discord.Colour.red()
await channel.send('', embed=embed)
async def _vdc(self, ctx, who=None, channel: discord.TextChannel = None):
"""
Activates Virtual Distortion Cloak.
"""
if not await can_manage_bot(ctx):
who = None
channel = None
who = who or ctx.message.author
channel = channel or ctx.channel
chance = random.randint(1, 100)
embed = discord.Embed(title="**::Virtual Distortion Cloak::**")
if isinstance(who, discord.Member):
embed.set_author(name=who.nick or who.display_name, icon_url=who.avatar_url)
else:
embed.set_author(name=who)
if chance <= 90:
embed.description = TextChecker.replace_emotes(
":rp_utility1: 60 Corrupted Holograms projected per minute.\n"
":rp_utility1: Generating disruptive audio successfully.\n"
":rp_utility1: Immune to recognition software for ten minutes.\n"
":rp_utility1: Massive Heat Surge Detected.\n"
"Allow **2 Minutes** for utility to cool for optimal performance.", self.bot)
embed.colour = discord.Colour.green()
else:
embed.description = TextChecker.replace_emotes(
":rp_utility0: Module Malfunction Detected.\n"
":rp_utility0: 400 Corrupted Holograms erratically projected before jamming projector orb.\n"
":rp_utility1: Disrupted audio hauntingly transmitted before overloading system memory.\n"
":rp_utility1: Failed to conceal identity, drawing attention.\n"
":rp_utility1: Massive Heat Surge Detected.\n"
"Allow **2 Minutes** for utility to cool before triggering.", self.bot)
embed.colour = discord.Colour.red()
await channel.send('', embed=embed)
def setup(bot: commands.Bot):
bot.add_cog(Roleplay(bot))
| nisanick/Prisma_Machina | cogs/roleplay.py | Python | mit | 45,290 | 0.004455 |
""" Optimzier Tests
These tests were adapted from PyTorch' optimizer tests.
"""
import math
import pytest
import functools
from copy import deepcopy
import torch
from torch.testing._internal.common_utils import TestCase
from torch.autograd import Variable
from timm.scheduler import PlateauLRScheduler
from timm.optim import create_optimizer_v2
# HACK relying on internal PyTorch test functionality for comparisons that I don't want to write
torch_tc = TestCase()
def _test_basic_cases_template(weight, bias, input, constructor, scheduler_constructors):
weight = Variable(weight, requires_grad=True)
bias = Variable(bias, requires_grad=True)
input = Variable(input)
optimizer = constructor(weight, bias)
schedulers = []
for scheduler_constructor in scheduler_constructors:
schedulers.append(scheduler_constructor(optimizer))
# to check if the optimizer can be printed as a string
optimizer.__repr__()
def fn():
optimizer.zero_grad()
y = weight.mv(input)
if y.is_cuda and bias.is_cuda and y.get_device() != bias.get_device():
y = y.cuda(bias.get_device())
loss = (y + bias).pow(2).sum()
loss.backward()
return loss
initial_value = fn().item()
for _i in range(200):
for scheduler in schedulers:
if isinstance(scheduler, PlateauLRScheduler):
val_loss = fn()
scheduler.step(val_loss)
else:
scheduler.step()
optimizer.step(fn)
assert fn().item() < initial_value
def _test_state_dict(weight, bias, input, constructor):
weight = Variable(weight, requires_grad=True)
bias = Variable(bias, requires_grad=True)
input = Variable(input)
def fn_base(optimizer, weight, bias):
optimizer.zero_grad()
i = input_cuda if weight.is_cuda else input
loss = (weight.mv(i) + bias).pow(2).sum()
loss.backward()
return loss
optimizer = constructor(weight, bias)
fn = functools.partial(fn_base, optimizer, weight, bias)
# Prime the optimizer
for _i in range(20):
optimizer.step(fn)
# Clone the weights and construct new optimizer for them
weight_c = Variable(weight.data.clone(), requires_grad=True)
bias_c = Variable(bias.data.clone(), requires_grad=True)
optimizer_c = constructor(weight_c, bias_c)
fn_c = functools.partial(fn_base, optimizer_c, weight_c, bias_c)
# Load state dict
state_dict = deepcopy(optimizer.state_dict())
state_dict_c = deepcopy(optimizer.state_dict())
optimizer_c.load_state_dict(state_dict_c)
# Run both optimizations in parallel
for _i in range(20):
optimizer.step(fn)
optimizer_c.step(fn_c)
#assert torch.equal(weight, weight_c)
#assert torch.equal(bias, bias_c)
torch_tc.assertEqual(weight, weight_c)
torch_tc.assertEqual(bias, bias_c)
# Make sure state dict wasn't modified
torch_tc.assertEqual(state_dict, state_dict_c)
# Make sure state dict is deterministic with equal but not identical parameters
torch_tc.assertEqual(optimizer.state_dict(), optimizer_c.state_dict())
# Make sure repeated parameters have identical representation in state dict
optimizer_c.param_groups.extend(optimizer_c.param_groups)
torch_tc.assertEqual(optimizer.state_dict()['param_groups'][-1], optimizer_c.state_dict()['param_groups'][-1])
# Check that state dict can be loaded even when we cast parameters
# to a different type and move to a different device.
if not torch.cuda.is_available():
return
input_cuda = Variable(input.data.float().cuda())
weight_cuda = Variable(weight.data.float().cuda(), requires_grad=True)
bias_cuda = Variable(bias.data.float().cuda(), requires_grad=True)
optimizer_cuda = constructor(weight_cuda, bias_cuda)
fn_cuda = functools.partial(fn_base, optimizer_cuda, weight_cuda, bias_cuda)
state_dict = deepcopy(optimizer.state_dict())
state_dict_c = deepcopy(optimizer.state_dict())
optimizer_cuda.load_state_dict(state_dict_c)
# Make sure state dict wasn't modified
torch_tc.assertEqual(state_dict, state_dict_c)
for _i in range(20):
optimizer.step(fn)
optimizer_cuda.step(fn_cuda)
torch_tc.assertEqual(weight, weight_cuda)
torch_tc.assertEqual(bias, bias_cuda)
# validate deepcopy() copies all public attributes
def getPublicAttr(obj):
return set(k for k in obj.__dict__ if not k.startswith('_'))
assert getPublicAttr(optimizer) == getPublicAttr(deepcopy(optimizer))
def _test_basic_cases(constructor, scheduler_constructors=None):
if scheduler_constructors is None:
scheduler_constructors = []
_test_state_dict(
torch.randn(10, 5),
torch.randn(10),
torch.randn(5),
constructor
)
_test_basic_cases_template(
torch.randn(10, 5),
torch.randn(10),
torch.randn(5),
constructor,
scheduler_constructors
)
# non-contiguous parameters
_test_basic_cases_template(
torch.randn(10, 5, 2)[..., 0],
torch.randn(10, 2)[..., 0],
torch.randn(5),
constructor,
scheduler_constructors
)
# CUDA
if not torch.cuda.is_available():
return
_test_basic_cases_template(
torch.randn(10, 5).cuda(),
torch.randn(10).cuda(),
torch.randn(5).cuda(),
constructor,
scheduler_constructors
)
def _test_model(optimizer, params, device=torch.device('cpu')):
weight = torch.tensor(
[[-0.2109, -0.4976], [-0.1413, -0.3420], [-0.2524, 0.6976]],
device=device, requires_grad=True)
bias = torch.tensor([-0.1085, -0.2979, 0.6892], device=device, requires_grad=True)
weight2 = torch.tensor([[-0.0508, -0.3941, -0.2843]], device=device, requires_grad=True)
bias2 = torch.tensor([-0.0711], device=device, requires_grad=True)
input = torch.tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], device=device).reshape(3, 2)
model = torch.nn.Sequential(torch.nn.Linear(2, 3),
torch.nn.Sigmoid(),
torch.nn.Linear(3, 1),
torch.nn.Sigmoid())
model.to(device)
pretrained_dict = model.state_dict()
pretrained_dict['0.weight'] = weight
pretrained_dict['0.bias'] = bias
pretrained_dict['2.weight'] = weight2
pretrained_dict['2.bias'] = bias2
model.load_state_dict(pretrained_dict)
optimizer = create_optimizer_v2(model, opt=optimizer, **params)
prev_loss = float('inf')
for i in range(20):
optimizer.zero_grad()
output = model(input)
loss = output.sum()
loss.backward()
loss = loss.item()
assert loss < prev_loss
prev_loss = loss
optimizer.step()
def rosenbrock(tensor):
x, y = tensor
return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2
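# drosenbrock below is the analytic gradient of rosenbrock:
#   df/dx = -400*x*(y - x**2) - 2*(1 - x),   df/dy = 200*(y - x**2)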
def drosenbrock(tensor):
x, y = tensor
return torch.tensor((-400 * x * (y - x ** 2) - 2 * (1 - x), 200 * (y - x ** 2)))
def _test_rosenbrock(constructor, scheduler_constructors=None):
if scheduler_constructors is None:
scheduler_constructors = []
params_t = torch.tensor([1.5, 1.5])
params = Variable(params_t, requires_grad=True)
optimizer = constructor([params])
schedulers = []
for scheduler_constructor in scheduler_constructors:
schedulers.append(scheduler_constructor(optimizer))
solution = torch.tensor([1, 1])
initial_dist = params.data.dist(solution)
def eval(params, w):
# Depending on w, provide only the x or y gradient
optimizer.zero_grad()
loss = rosenbrock(params)
loss.backward()
grad = drosenbrock(params.data)
# NB: We torture test the optimizer by returning an
# uncoalesced sparse tensor
if w:
i = torch.LongTensor([[0, 0]])
x = grad[0]
v = torch.tensor([x / 4., x - x / 4.])
else:
i = torch.LongTensor([[1, 1]])
y = grad[1]
v = torch.tensor([y - y / 4., y / 4.])
x = torch.sparse.DoubleTensor(i, v, torch.Size([2])).to(dtype=v.dtype)
with torch.no_grad():
params.grad = x.to_dense()
return loss
for i in range(2000):
# Do cyclic coordinate descent
w = i % 2
optimizer.step(functools.partial(eval, params, w))
for scheduler in schedulers:
if isinstance(scheduler, PlateauLRScheduler):
scheduler.step(rosenbrock(params))
else:
scheduler.step()
torch_tc.assertLessEqual(params.data.dist(solution), initial_dist)
def _build_params_dict(weight, bias, **kwargs):
return [{'params': [weight]}, dict(params=[bias], **kwargs)]
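# e.g. _build_params_dict(w, b, lr=1e-2) -> [{'params': [w]}, {'params': [b], 'lr': 1e-2}],
# i.e. two parameter groups where only the bias group overrides the optimizer-level lr.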
def _build_params_dict_single(weight, bias, **kwargs):
return [dict(params=bias, **kwargs)]
#@pytest.mark.parametrize('optimizer', ['sgd', 'momentum'])
# FIXME momentum variant frequently fails in GitHub runner, but never local after many attempts
@pytest.mark.parametrize('optimizer', ['sgd'])
def test_sgd(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=1e-2),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=1e-2),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=1e-2), optimizer)
)
# _test_basic_cases(
# lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3),
# [lambda opt: StepLR(opt, gamma=0.9, step_size=10)]
# )
# _test_basic_cases(
# lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3),
# [lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4, warmup_method="linear")]
# )
# _test_basic_cases(
# lambda weight, bias: optimizer([weight, bias], lr=1e-3),
# [lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4, warmup_method="constant")]
# )
# _test_basic_cases(
# lambda weight, bias: optimizer([weight, bias], lr=1e-3),
# [lambda opt: StepLR(opt, gamma=0.9, step_size=10),
# lambda opt: WarmUpLR(opt, warmup_factor=0.4, warmup_iters=4)]
# )
# _test_basic_cases(
# lambda weight, bias: optimizer([weight, bias], lr=1e-3),
# [lambda opt: StepLR(opt, gamma=0.9, step_size=10),
# lambda opt: ReduceLROnPlateau(opt)]
# )
# _test_basic_cases(
# lambda weight, bias: optimizer([weight, bias], lr=1e-3),
# [lambda opt: StepLR(opt, gamma=0.99, step_size=10),
# lambda opt: ExponentialLR(opt, gamma=0.99),
# lambda opt: ReduceLROnPlateau(opt)]
# )
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=3e-3, momentum=1)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=3e-3, momentum=1, weight_decay=.1)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
_test_model(optimizer, dict(lr=1e-3))
@pytest.mark.parametrize('optimizer', ['adamw', 'adam', 'nadam', 'adamax'])
def test_adam(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=5e-2)
)
_test_model(optimizer, dict(lr=5e-2))
@pytest.mark.parametrize('optimizer', ['adabelief'])
def test_adabelief(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=5e-2)
)
_test_model(optimizer, dict(lr=5e-2))
@pytest.mark.parametrize('optimizer', ['radam', 'radabelief'])
def test_rectified(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
_test_model(optimizer, dict(lr=1e-3))
@pytest.mark.parametrize('optimizer', ['adadelta', 'adagrad'])
def test_adaother(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-1)
)
_test_model(optimizer, dict(lr=5e-2))
@pytest.mark.parametrize('optimizer', ['adafactor'])
def test_adafactor(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(_build_params_dict_single(weight, bias), optimizer)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3, weight_decay=1)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=5e-2)
)
_test_model(optimizer, dict(lr=5e-2))
@pytest.mark.parametrize('optimizer', ['lamb', 'lambc'])
def test_lamb(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=1e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=1e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=1e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
_test_model(optimizer, dict(lr=1e-3))
@pytest.mark.parametrize('optimizer', ['lars', 'larc', 'nlars', 'nlarc'])
def test_lars(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=1e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=1e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=1e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
_test_model(optimizer, dict(lr=1e-3))
@pytest.mark.parametrize('optimizer', ['madgrad', 'madgradw'])
def test_madgrad(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-2)
)
_test_model(optimizer, dict(lr=1e-2))
@pytest.mark.parametrize('optimizer', ['novograd'])
def test_novograd(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
_test_model(optimizer, dict(lr=1e-3))
@pytest.mark.parametrize('optimizer', ['rmsprop', 'rmsproptf'])
def test_rmsprop(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-2)
)
_test_model(optimizer, dict(lr=1e-2))
@pytest.mark.parametrize('optimizer', ['adamp'])
def test_adamp(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=5e-2)
)
_test_model(optimizer, dict(lr=5e-2))
@pytest.mark.parametrize('optimizer', ['sgdp'])
def test_sgdp(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
_test_model(optimizer, dict(lr=1e-3))
@pytest.mark.parametrize('optimizer', ['lookahead_sgd', 'lookahead_momentum'])
def test_lookahead_sgd(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-3)
)
@pytest.mark.parametrize('optimizer', ['lookahead_adamw', 'lookahead_adam'])
def test_lookahead_adam(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=5e-2)
)
@pytest.mark.parametrize('optimizer', ['lookahead_radam'])
def test_lookahead_radam(optimizer):
_test_basic_cases(
lambda weight, bias: create_optimizer_v2([weight, bias], optimizer, lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3),
optimizer,
lr=1e-3)
)
_test_basic_cases(
lambda weight, bias: create_optimizer_v2(
_build_params_dict_single(weight, bias, lr=3e-3), optimizer)
)
_test_rosenbrock(
lambda params: create_optimizer_v2(params, optimizer, lr=1e-4)
)
| rwightman/pytorch-image-models | tests/test_optim.py | Python | apache-2.0 | 24,464 | 0.001635 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancersOperations:
"""LoadBalancersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2016_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.LoadBalancer":
"""Gets the specified load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LoadBalancer, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2016_09_01.models.LoadBalancer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
load_balancer_name: str,
parameters: "_models.LoadBalancer",
**kwargs: Any
) -> "_models.LoadBalancer":
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'LoadBalancer')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
load_balancer_name: str,
parameters: "_models.LoadBalancer",
**kwargs: Any
) -> AsyncLROPoller["_models.LoadBalancer"]:
"""Creates or updates a load balancer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param parameters: Parameters supplied to the create or update load balancer operation.
:type parameters: ~azure.mgmt.network.v2016_09_01.models.LoadBalancer
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either LoadBalancer or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2016_09_01.models.LoadBalancer]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancer"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('LoadBalancer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
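    # Illustrative usage sketch (not part of the generated operations class; the client and
    # model names below are assumptions based on the azure-mgmt-network package layout):
    #   client = NetworkManagementClient(credential, subscription_id)
    #   poller = await client.load_balancers.begin_create_or_update(
    #       "my-resource-group", "my-lb", LoadBalancer(location="westus"))
    #   load_balancer = await poller.result()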
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.LoadBalancerListResult"]:
"""Gets all the load balancers in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2016_09_01.models.LoadBalancerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/loadBalancers'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.LoadBalancerListResult"]:
"""Gets all the load balancers in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2016_09_01.models.LoadBalancerListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers'} # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2016_09_01/aio/operations/_load_balancers_operations.py | Python | mit | 23,464 | 0.004773 |
#!/usr/bin/env python3
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Summarize groups failed tests together by finding edit distances between their failure strings,
and emits JSON for rendering in a browser.
"""
# pylint: disable=invalid-name,missing-docstring
import argparse
import functools
import hashlib
import json
import logging
import os
import re
import sys
import time
import zlib
import berghelroach
editdist = berghelroach.dist
flakeReasonDateRE = re.compile(
r'[A-Z][a-z]{2}, \d+ \w+ 2\d{3} [\d.-: ]*([-+]\d+)?|'
r'\w{3}\s+\d{1,2} \d+:\d+:\d+(\.\d+)?|(\d{4}-\d\d-\d\d.|.\d{4} )\d\d:\d\d:\d\d(.\d+)?')
# Find random noisy strings that should be replaced with renumbered strings, for more similarity.
flakeReasonOrdinalRE = re.compile(
r'0x[0-9a-fA-F]+' # hex constants
r'|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(:\d+)?' # IPs + optional port
r'|[0-9a-fA-F]{8}-\S{4}-\S{4}-\S{4}-\S{12}(-\d+)?' # UUIDs + trailing digits
r'|[0-9a-f]{12,32}' # hex garbage
r'|(?<=minion-group-|default-pool-)[-0-9a-z]{4,}' # node names
)
def normalize(s):
"""
    Given a traceback or error message from a test, reduce excess entropy to make
clustering easier.
This includes:
- blanking dates and timestamps
- renumbering unique information like
- pointer addresses
- UUIDs
- IP addresses
- sorting randomly ordered map[] strings.
"""
# blank out dates
s = flakeReasonDateRE.sub('TIME', s)
# do alpha conversion-- rename random garbage strings (hex pointer values, node names, etc)
# into 'UNIQ1', 'UNIQ2', etc.
matches = {}
def repl(m):
s = m.group(0)
if s not in matches:
matches[s] = 'UNIQ%d' % (len(matches) + 1)
return matches[s]
if 'map[' in s:
# Go's maps are in a random order. Try to sort them to reduce diffs.
s = re.sub(r'map\[([^][]*)\]',
lambda m: 'map[%s]' % ' '.join(sorted(m.group(1).split())),
s)
s = flakeReasonOrdinalRE.sub(repl, s)
if len(s) > 10000:
# for long strings, remove repeated lines!
s = re.sub(r'(?m)^(.*\n)\1+', r'\1', s)
if len(s) > 10000: # ridiculously long test output
s = s[:5000] + '\n...[truncated]...\n' + s[-5000:]
return s
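# Illustrative example (assuming the regexes above):
#   normalize("Mar  1 12:00:00 err at 0x7ffdeadbeef from 10.0.0.1:8080")
#   -> "TIME err at UNIQ1 from UNIQ2"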
def normalize_name(name):
"""
Given a test name, remove [...]/{...}.
Matches code in testgrid and kubernetes/hack/update_owners.py.
"""
name = re.sub(r'\[.*?\]|{.*?\}', '', name)
name = re.sub(r'\s+', ' ', name)
return name.strip()
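# e.g. normalize_name('[sig-storage] Volumes {Serial}') -> 'Volumes'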
def make_ngram_counts(s, ngram_counts={}):
"""
Convert a string into a histogram of frequencies for different byte combinations.
This can be used as a heuristic to estimate edit distance between two strings in
constant time.
Instead of counting each ngram individually, they are hashed into buckets.
This makes the output count size constant.
"""
# Yes, I'm intentionally memoizing here.
# pylint: disable=dangerous-default-value
size = 64
if s not in ngram_counts:
counts = [0] * size
for x in range(len(s)-3):
counts[zlib.crc32(s[x:x+4].encode('utf8')) & (size - 1)] += 1
ngram_counts[s] = counts # memoize
return ngram_counts[s]
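# Illustrative: make_ngram_counts("abcdef") hashes the 4-grams "abcd", "bcde", "cdef" into a
# fixed 64-bucket histogram, so two strings can later be compared in constant time.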
def ngram_editdist(a, b):
"""
Compute a heuristic lower-bound edit distance using ngram counts.
An insert/deletion/substitution can cause up to 4 ngrams to differ:
abcdefg => abcefg
(abcd, bcde, cdef, defg) => (abce, bcef, cefg)
This will underestimate the edit distance in many cases:
- ngrams hashing into the same bucket will get confused
- a large-scale transposition will barely disturb ngram frequencies,
but will have a very large effect on edit distance.
It is useful to avoid more expensive precise computations when they are
guaranteed to exceed some limit (being a lower bound), or as a proxy when
the exact edit distance computation is too expensive (for long inputs).
"""
counts_a = make_ngram_counts(a)
counts_b = make_ngram_counts(b)
return sum(abs(x-y) for x, y in zip(counts_a, counts_b))//4
def make_ngram_counts_digest(s):
"""
Returns a hashed version of the ngram counts.
"""
return hashlib.sha1(str(make_ngram_counts(s)).encode()).hexdigest()[:20]
def file_memoize(description, name):
"""
Decorator to save a function's results to a file.
"""
def inner(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if os.path.exists(name):
with open(name) as f:
data = json.load(f)
logging.info('done (cached) %s', description)
return data
data = func(*args, **kwargs)
with open(name, 'w') as f:
json.dump(data, f)
logging.info('done %s', description)
return data
wrapper.__wrapped__ = func
return wrapper
return inner
@file_memoize('loading failed tests', 'memo_load_failures.json')
def load_failures(builds_file, tests_files):
"""
Load builds and failed tests files.
Group builds by path, group test failures by test name.
Args:
filenames
Returns:
{ build_path: [{ path: build_path, started: 12345, ...} ...], ...},
{ test_name: [{build: gs://foo/bar, name: test_name, failure_text: xxx}, ...], ...}
"""
builds = {}
with open(builds_file) as f:
for build in json.load(f):
if not build['started'] or not build['number']:
continue
for attr in ('started', 'tests_failed', 'number', 'tests_run'):
build[attr] = int(build[attr])
build['elapsed'] = int(float(build['elapsed']))
if 'pr-logs' in build['path']:
build['pr'] = build['path'].split('/')[-3]
builds[build['path']] = build
failed_tests = {}
for tests_file in tests_files:
with open(tests_file) as f:
for line in f:
test = json.loads(line)
failed_tests.setdefault(test['name'], []).append(test)
for tests in failed_tests.values():
tests.sort(key=lambda t: t['build'])
return builds, failed_tests
def find_match(fnorm, clusters):
for ngram_dist, other in sorted((ngram_editdist(fnorm, x), x) for x in clusters):
# allow up to 10% differences
limit = int((len(fnorm)+len(other))/2.0 * 0.10)
if ngram_dist > limit:
continue
if limit <= 1 and other != fnorm: # no chance
continue
dist = editdist(fnorm, other, limit)
if dist < limit:
return other
return None
def cluster_test(tests):
"""
Compute failure clusters given a list of failures for one test.
Normalize the failure text prior to clustering to avoid needless entropy.
Args:
[{name: test_name, build: gs://foo/bar, failure_text: xxx}, ...]
Returns:
{cluster_text_1: [test1, test2, ...]}
"""
clusters = {}
start = time.time()
for test in tests:
ftext = test['failure_text']
fnorm = normalize(ftext)
if fnorm in clusters:
clusters[fnorm].append(test)
else:
other = find_match(fnorm, clusters)
if other:
clusters[other].append(test)
else:
clusters[fnorm] = [test]
if time.time() > start + 60:
logging.info('bailing early, taking too long!')
break
return clusters
@file_memoize('clustering inside each test', 'memo_cluster_local.json')
def cluster_local(failed_tests):
"""
Cluster together the failures for each test.
Args:
{test_1: [{name: test_1, build: gs://foo/bar, failure_text: xxx}, ...], ...}
Returns:
{test_1: {cluster_text_1: [test1, test2], ... }, test_2: ...}
"""
clustered = {}
num_failures = 0
start = time.time()
logging.info("Clustering failures for %d unique tests...", len(failed_tests))
# Look at tests with the most failures first
for n, (test_name, tests) in enumerate(
sorted(failed_tests.items(),
key=lambda x: len(x[1]),
reverse=True),
1):
num_failures += len(tests)
logging.info('%4d/%4d, %d failures, %s', n, len(failed_tests), len(tests), test_name)
sys.stdout.flush()
clustered[test_name] = cluster_test(tests)
elapsed = time.time() - start
logging.info('Finished locally clustering %d unique tests (%d failures) in %dm%ds',
len(clustered), num_failures, elapsed / 60, elapsed % 60)
return clustered
@file_memoize('clustering across tests', 'memo_cluster_global.json')
def cluster_global(clustered, previous_clustered):
"""Combine together clustered failures for each test.
This is done hierarchically for efficiency-- each test's failures are likely to be similar,
reducing the number of clusters that need to be paired up at this stage.
Args:
{test_name: {cluster_text_1: [test1, test2, ...], ...}, ...}
Returns:
{cluster_text_1: [{test_name: [test1, test2, ...]}, ...], ...}
"""
clusters = {}
num_failures = 0
logging.info("Combining clustered failures for %d unique tests...", len(clustered))
start = time.time()
if previous_clustered:
# seed clusters using output from the previous run
n = 0
for cluster in previous_clustered:
key = cluster['key']
if key != normalize(key):
logging.info(key)
logging.info(normalize(key))
n += 1
continue
clusters[cluster['key']] = {}
logging.info('Seeding with %d previous clusters', len(clusters))
if n:
logging.warning('!!! %d clusters lost from different normalization! !!!', n)
# Look at tests with the most failures over all clusters first
for n, (test_name, test_clusters) in enumerate(
sorted(clustered.items(),
key=lambda kv: sum(len(x) for x in kv[1].values()),
reverse=True),
1):
logging.info('%4d/%4d, %d clusters, %s', n, len(clustered), len(test_clusters), test_name)
# Look at clusters with the most failures first
for key, tests in sorted(test_clusters.items(),
key=lambda x: len(x[1]), reverse=True):
num_failures += len(tests)
if key in clusters:
clusters[key].setdefault(test_name, []).extend(tests)
else:
other = find_match(key, clusters)
if other:
clusters[other].setdefault(test_name, []).extend(tests)
else:
clusters[key] = {test_name: list(tests)}
# If we seeded clusters using the previous run's keys, some of those
# clusters may have disappeared. Remove the resulting empty entries.
for k in {k for k, v in clusters.items() if not v}:
clusters.pop(k)
elapsed = time.time() - start
logging.info('Finished clustering %d unique tests (%d failures) into %d clusters in %dm%ds',
len(clustered), num_failures, len(clusters), elapsed / 60, elapsed % 60)
return clusters
def tests_group_by_job(tests, builds):
"""Turn a list of test failures into {job: [buildnumber, ...], ...}"""
groups = {}
for test in tests:
try:
build = builds[test['build']]
except KeyError:
continue
if 'number' in build:
groups.setdefault(build['job'], set()).add(build['number'])
return sorted(((key, sorted(value, reverse=True)) for key, value in groups.items()),
key=lambda kv: (-len(kv[1]), kv[0]))
SPAN_RE = re.compile(r'\w+|\W+')
def common_spans(xs):
"""
Finds something similar to the longest common subsequence of xs, but much faster.
    Returns a list of [matchlen_1, mismatchlen_1, matchlen_2, mismatchlen_2, ...], representing
sequences of the first element of the list that are present in all members.
"""
common = None
for x in xs:
x_split = SPAN_RE.findall(x)
if common is None: # first iteration
common = set(x_split)
else:
common.intersection_update(x_split)
spans = []
match = True
span_len = 0
for x in SPAN_RE.findall(xs[0]):
if x in common:
if not match:
match = True
spans.append(span_len)
span_len = 0
span_len += len(x)
else:
if match:
match = False
spans.append(span_len)
span_len = 0
span_len += len(x)
if span_len:
spans.append(span_len)
return spans
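# Illustrative: common_spans(["abc def", "abc xyz"]) == [4, 3]
# ("abc " is common to both members; the trailing 3 characters are not).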
def clusters_to_display(clustered, builds):
"""Transpose and sort the output of cluster_global."""
return [{
"key": key,
"id": key_id,
"spans": common_spans([f['failure_text'] for _, fs in clusters for f in fs]),
"text": clusters[0][1][0]['failure_text'],
"tests": [{
"name": test_name,
"jobs": [{"name": n, "builds": [str(x) for x in b]}
for n, b in tests_group_by_job(tests, builds)]
}
for test_name, tests in sorted(clusters, key=lambda nt: (-len(nt[1]), nt[0]))
]
}
for key, key_id, clusters in clustered if sum(len(x[1]) for x in clusters) > 1
]
def builds_to_columns(builds):
"""Convert a list of build dictionaries into a columnar form.
This compresses much better with gzip."""
jobs = {}
cols = {v: [] for v in 'started tests_failed elapsed tests_run result executor pr'.split()}
out = {'jobs': jobs, 'cols': cols, 'job_paths': {}}
for build in sorted(builds.values(), key=lambda b: (b['job'], b['number'])):
if 'number' not in build:
continue
index = len(cols['started'])
for key, entries in cols.items():
entries.append(build.get(key))
job = jobs.setdefault(build['job'], {})
if not job:
out['job_paths'][build['job']] = build['path'][:build['path'].rindex('/')]
job[build['number']] = index
for k, indexes in jobs.items():
numbers = sorted(indexes)
base = indexes[numbers[0]]
count = len(numbers)
# optimization: if we have a dense sequential mapping of builds=>indexes,
# store only the first build number, the run length, and the first index number.
if numbers[-1] == numbers[0] + count - 1 and \
all(indexes[k] == n + base for n, k in enumerate(numbers)):
jobs[k] = [numbers[0], count, base]
for n in numbers:
assert n <= numbers[0] + len(numbers), (k, n, jobs[k], len(numbers), numbers)
return out
def render(builds, clustered):
clustered_sorted = sorted(
clustered.items(),
key=lambda kv: (-sum(len(ts) for ts in kv[1].values()), kv[0]))
clustered_tuples = [(k,
make_ngram_counts_digest(k),
sorted(clusters.items(), key=lambda nt: (-len(nt[1]), nt[0])))
for k, clusters in clustered_sorted]
return {'clustered': clusters_to_display(clustered_tuples, builds),
'builds': builds_to_columns(builds)}
SIG_LABEL_RE = re.compile(r'\[sig-([^]]*)\]')
def annotate_owners(data, builds, owners):
"""
Assign ownership to a cluster based on the share of hits in the last day.
"""
owner_re = re.compile(r'(?:%s)' % '|'.join(
'(?P<%s>%s)' % (
sig.replace('-', '_'), # regex group names can't have -
'|'.join(re.escape(p) for p in prefixes)
)
for sig, prefixes in owners.items()
))
job_paths = data['builds']['job_paths']
yesterday = max(data['builds']['cols']['started']) - (60 * 60 * 24)
for cluster in data['clustered']:
owner_counts = {}
for test in cluster['tests']:
m = SIG_LABEL_RE.search(test['name'])
if m:
owner = m.group(1)
else:
m = owner_re.match(normalize_name(test['name']))
if not m or not m.groupdict():
continue
owner = next(k for k, v in m.groupdict().items() if v)
owner = owner.replace('_', '-')
counts = owner_counts.setdefault(owner, [0, 0])
for job in test['jobs']:
if ':' in job['name']: # non-standard CI
continue
job_path = job_paths[job['name']]
for build in job['builds']:
if builds['%s/%s' % (job_path, build)]['started'] > yesterday:
counts[0] += 1
else:
counts[1] += 1
if owner_counts:
owner = max(owner_counts.items(), key=lambda oc: (oc[1], oc[0]))[0]
cluster['owner'] = owner
else:
cluster['owner'] = 'testing'
def render_slice(data, builds, prefix='', owner=''):
clustered = []
builds_out = {}
jobs = set()
for cluster in data['clustered']:
# print [cluster['id'], prefix]
if owner and cluster.get('owner') == owner:
clustered.append(cluster)
elif prefix and cluster['id'].startswith(prefix):
clustered.append(cluster)
else:
continue
for test in cluster['tests']:
for job in test['jobs']:
jobs.add(job['name'])
for path, build in builds.items():
if build['job'] in jobs:
builds_out[path] = build
return {'clustered': clustered, 'builds': builds_to_columns(builds_out)}
def setup_logging():
"""Initialize logging to screen"""
# See https://docs.python.org/2/library/logging.html#logrecord-attributes
# [IWEF]mmdd HH:MM:SS.mmm] msg
fmt = '%(levelname).1s%(asctime)s.%(msecs)03d] %(message)s' # pylint: disable=line-too-long
datefmt = '%m%d %H:%M:%S'
logging.basicConfig(
level=logging.INFO,
format=fmt,
datefmt=datefmt,
)
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('builds', help='builds.json file from BigQuery')
parser.add_argument('tests', help='tests.json file from BigQuery', nargs='+')
parser.add_argument('--previous', help='previous output', type=argparse.FileType('r'))
parser.add_argument('--owners', help='test owner SIGs', type=argparse.FileType('r'))
parser.add_argument('--output', default='failure_data.json')
parser.add_argument('--output_slices',
help='Output slices to this path (must include PREFIX in template)')
return parser.parse_args(args)
def main(args):
setup_logging()
builds, failed_tests = load_failures(args.builds, args.tests)
previous_clustered = None
if args.previous:
logging.info('loading previous')
previous_clustered = json.load(args.previous)['clustered']
clustered_local = cluster_local(failed_tests)
clustered = cluster_global(clustered_local, previous_clustered)
logging.info("Rendering results...")
start = time.time()
data = render(builds, clustered)
if args.owners:
owners = json.load(args.owners)
annotate_owners(data, builds, owners)
with open(args.output, 'w') as f:
json.dump(data, f, sort_keys=True)
if args.output_slices:
assert 'PREFIX' in args.output_slices
for subset in range(256):
id_prefix = '%02x' % subset
with open(args.output_slices.replace('PREFIX', id_prefix), 'w') as f:
json.dump(render_slice(data, builds, id_prefix), f, sort_keys=True)
if args.owners:
owners.setdefault('testing', []) # for output
for owner in owners:
with open(args.output_slices.replace('PREFIX', 'sig-' + owner), 'w') as f:
json.dump(render_slice(data, builds, prefix='', owner=owner), f, sort_keys=True)
elapsed = time.time() - start
logging.info('Finished rendering results in %dm%ds', elapsed / 60, elapsed % 60)
if __name__ == '__main__':
main(parse_args(sys.argv[1:]))
| krzyzacy/test-infra | triage/summarize.py | Python | apache-2.0 | 21,103 | 0.002606 |
# coding: utf-8
import random
from deworld.layers.base_layer import BaseLayer
class VEGETATION_TYPE:
DESERT = 0
GRASS = 1
FOREST = 2
class VegetationLayer(BaseLayer):
MIN = 0.0
MAX = 1.0
HEIGHT_FOREST_BARIER_START = None
HEIGHT_FOREST_BARIER_END = None
HEIGHT_GRASS_BARIER_START = None
HEIGHT_GRASS_BARIER_END = None
TEMPERATURE_FOREST_BARIER_START = None
TEMPERATURE_FOREST_BARIER_END = None
TEMPERATURE_GRASS_BARIER_START = None
TEMPERATURE_GRASS_BARIER_END = None
WETNESS_FOREST_BARIER_START = None
WETNESS_FOREST_BARIER_END = None
WETNESS_GRASS_BARIER_START = None
WETNESS_GRASS_BARIER_END = None
FOREST_BORDER = None
GRASS_BORDER = None
SPAWN_PROBABILITY = None
CURRENT_GRASS_POWER_BONUS = None
CURRENT_FOREST_POWER_BONUS = None
def __init__(self, **kwargs):
super(VegetationLayer, self).__init__(default=VEGETATION_TYPE.DESERT, default_power=(0.0, 0.0), **kwargs)
self._merge_config(self.config.LAYERS.VEGETATION)
def serialize(self):
return super(VegetationLayer, self).serialize()
@classmethod
def deserialize(cls, world, data):
return cls(world=world, data=data['data'], power=data.get('power'))
def add_power(self, x, y, power):
old_power = self.power[y][x]
self.power[y][x] = (old_power[0] + power[0], old_power[1] + power[1])
def _border_right_power(self, power, value, border_start, border_end):
if value > border_start:
if value > border_end:
power = 0
else:
power *= 1 - float(value - border_start) / (border_end - border_start)
return power
def _border_left_power(self, power, value, border_start, border_end):
if value < border_start:
if value < border_end:
power = 0
else:
power *= 1 - float(border_start - value) / (border_start - border_end)
return power
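    # Worked example of the barrier helpers above (illustrative numbers only):
    # _border_right_power(1.0, value=0.75, border_start=0.5, border_end=1.0)
    # scales the power to 1.0 * (1 - (0.75 - 0.5) / (1.0 - 0.5)) = 0.5, and any
    # value beyond border_end zeroes it; _border_left_power mirrors this for
    # values below border_start.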
def can_spawn(self, x, y, type_):
for y in range(y-1, y+1+1):
for x in range(x-1, x+1+1):
if not (0 <= y < self.h and 0 <= x < self.w):
continue
if self.data[y][x] in type_:
return True
return random.uniform(0, 1) < self.SPAWN_PROBABILITY
def power_from_current_situation(self, x, y):
grass, forest = 0.0, 0.0
for y in range(y-1, y+1+1):
for x in range(x-1, x+1+1):
if not (0 <= y < self.h and 0 <= x < self.w):
continue
if self.data[y][x] == VEGETATION_TYPE.GRASS: grass += self.CURRENT_GRASS_POWER_BONUS
elif self.data[y][x] == VEGETATION_TYPE.FOREST: forest += self.CURRENT_FOREST_POWER_BONUS
return random.uniform(0, grass), random.uniform(0, forest)
def sync(self):
for y in range(0, self.h):
for x in range(0, self.w):
power_points = self.power[y][x]
power_grass, power_forest = power_points
if self.data[y][x] == VEGETATION_TYPE.DESERT:
power_grass = max(power_grass, power_forest)
power_forest = max(power_grass, power_forest)
height = self.world.layer_height.data[y][x]
power_forest = self._border_right_power(power_forest, height, self.HEIGHT_FOREST_BARIER_START, self.HEIGHT_FOREST_BARIER_END)
power_grass = self._border_right_power(power_grass, height, self.HEIGHT_GRASS_BARIER_START, self.HEIGHT_GRASS_BARIER_END)
temperature = self.world.layer_temperature.data[y][x]
power_forest = self._border_right_power(power_forest, temperature, self.TEMPERATURE_FOREST_BARIER_START, self.TEMPERATURE_FOREST_BARIER_END)
power_grass = self._border_right_power(power_grass, temperature, self.TEMPERATURE_GRASS_BARIER_START, self.TEMPERATURE_GRASS_BARIER_END)
wetness = self.world.layer_wetness.data[y][x]
power_forest = self._border_left_power(power_forest, wetness, self.WETNESS_FOREST_BARIER_START, self.WETNESS_FOREST_BARIER_END)
power_grass = self._border_left_power(power_grass, wetness, self.WETNESS_GRASS_BARIER_START, self.WETNESS_GRASS_BARIER_END)
bonus_grass, bonus_forest = self.power_from_current_situation(x, y)
power_grass += bonus_grass
power_forest += bonus_forest
if power_forest > power_grass and power_forest > self.FOREST_BORDER and self.can_spawn(x, y, [VEGETATION_TYPE.FOREST]):
self.next_data[y][x] = VEGETATION_TYPE.FOREST
elif power_grass > self.GRASS_BORDER and self.can_spawn(x, y, [VEGETATION_TYPE.GRASS, VEGETATION_TYPE.FOREST]):
self.next_data[y][x] = VEGETATION_TYPE.GRASS
else:
self.next_data[y][x] = VEGETATION_TYPE.DESERT
self.power[y][x] = (power_grass, power_forest)
| Tiendil/deworld | deworld/layers/vegetation_layer.py | Python | bsd-2-clause | 5,066 | 0.003948 |
# stdlib
from collections import namedtuple
# project
from resources import (
agg,
ResourcePlugin,
SnapshotDescriptor,
SnapshotField,
)
from utils.subprocess_output import get_subprocess_output
class Processes(ResourcePlugin):
RESOURCE_KEY = "processes"
FLUSH_INTERVAL = 1 # in minutes
def describe_snapshot(self):
return SnapshotDescriptor(
1,
SnapshotField("user", 'str', aggregator=agg.append, temporal_aggregator=agg.append),
SnapshotField("pct_cpu", 'float'),
SnapshotField("pct_mem", 'float'),
SnapshotField("vsz", 'int'),
SnapshotField("rss", 'int'),
SnapshotField("family", 'str', aggregator=None, temporal_aggregator=None,
group_on=True, temporal_group_on=True),
SnapshotField("ps_count", 'int'))
def _get_proc_list(self):
# Get output from ps
try:
process_exclude_args = self.config.get('exclude_process_args', False)
if process_exclude_args:
ps_arg = 'aux'
else:
ps_arg = 'auxww'
output, _, _ = get_subprocess_output(['ps', ps_arg], self.log)
processLines = output.splitlines() # Also removes a trailing empty line
except Exception:
self.log.exception('Cannot get process list')
raise
del processLines[0] # Removes the headers
processes = []
for line in processLines:
line = line.split(None, 10)
processes.append(map(lambda s: s.strip(), line))
return processes
@staticmethod
def group_by_family(o):
return o[5]
@staticmethod
def filter_by_usage(o):
# keep everything over 1% (cpu or ram)
return o[0] > 1 or o[1] > 1
def _parse_proc_list(self, processes):
def _compute_family(command):
if command.startswith('['):
return 'kernel'
else:
return (command.split()[0]).split('/')[-1]
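        # Illustrative mapping for _compute_family (hypothetical ps output): a
        # kernel thread like "[kworker/0:1]" maps to "kernel", while a command
        # such as "/usr/sbin/nginx -g daemon off;" maps to "nginx".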
PSLine = namedtuple("PSLine", "user,pid,pct_cpu,pct_mem,vsz,rss,tty,stat,started,time,command")
self.start_snapshot()
for line in processes:
try:
psl = PSLine(*line)
self.add_to_snapshot([psl.user,
float(psl.pct_cpu),
float(psl.pct_mem),
int(psl.vsz),
int(psl.rss),
_compute_family(psl.command),
1])
except Exception:
pass
self.end_snapshot(group_by=self.group_by_family)
def flush_snapshots(self, snapshot_group):
self._flush_snapshots(snapshot_group=snapshot_group,
group_by=self.group_by_family,
filter_by=self.filter_by_usage)
def check(self):
self._parse_proc_list(self._get_proc_list())
| huhongbo/dd-agent | resources/processes.py | Python | bsd-3-clause | 3,086 | 0.00162 |
import pycaption
from django import http
from django.shortcuts import get_object_or_404
from airmozilla.closedcaptions.models import ClosedCaptions
class TxtWriter(pycaption.base.BaseWriter):
def write(self, caption_set):
lang = caption_set.get_languages()[0]
captions = caption_set.get_captions(lang)
output = 'Language: {}\n\n'.format(lang)
for caption in captions:
line = caption.get_text().replace('\n', ' ')
if line.startswith('- '):
output += '\n\n'
output += line + ' '
return output
SUPPORTED_WRITERS = {
'dfxp': pycaption.DFXPWriter,
'ttml': pycaption.DFXPWriter,
'sami': pycaption.SAMIWriter,
'srt': pycaption.SRTWriter,
'scc': pycaption.SCCWriter,
'webvtt': pycaption.WebVTTWriter,
'txt': TxtWriter,
}
FILE_EXTENSIONS = {
    'dfxp': 'dfxp',
'ttml': 'dfxp',
'sami': 'sami',
'srt': 'srt',
'scc': 'scc',
'webvtt': 'vtt',
'txt': 'txt',
}
CONTENT_TYPES = {
'txt': 'text/plain',
    'sami': 'text/xml',
'dfxp': 'application/ttml+xml; charset=utf-8',
'vtt': 'text/vtt',
}
def download(request, filename_hash, id, slug, extension):
closedcaptions = get_object_or_404(
ClosedCaptions,
id=id,
event__slug__iexact=slug,
)
if extension not in FILE_EXTENSIONS.values():
raise http.Http404('Unrecognized extension')
if closedcaptions.filename_hash != filename_hash:
raise http.Http404('Unrecognized hash')
for key, ext in FILE_EXTENSIONS.items():
if ext == extension:
output_writer = SUPPORTED_WRITERS[key]
content = closedcaptions.file.read()
if not (
closedcaptions.file.name.lower().endswith('.ttml') or
closedcaptions.file.name.lower().endswith('.dfxp')
):
content = content.decode('utf-8')
reader = pycaption.detect_format(content)
assert reader
converter = pycaption.CaptionConverter()
converter.read(content, reader())
response = http.HttpResponse()
response['Content-Type'] = CONTENT_TYPES.get(extension, 'text/plain')
response.write(converter.write(output_writer()))
return response
| blossomica/airmozilla | airmozilla/closedcaptions/views.py | Python | bsd-3-clause | 2,235 | 0 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# exceptions.py
"""PyPhi exceptions."""
class StateUnreachableError(ValueError):
"""The current state cannot be reached from any previous state."""
def __init__(self, state):
self.state = state
msg = "The state {} cannot be reached in the given TPM."
super().__init__(msg.format(state))
class ConditionallyDependentError(ValueError):
"""The TPM is conditionally dependent."""
class JSONVersionError(ValueError):
"""JSON was serialized with a different version of PyPhi."""
class WrongDirectionError(ValueError):
"""The wrong direction was provided."""
| wmayner/pyphi | pyphi/exceptions.py | Python | gpl-3.0 | 653 | 0 |
from app import application
if __name__=="__main__":
application.run()
| jerrylei98/Dailydos | wsgi.py | Python | mit | 77 | 0.025974 |
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import Permission, Group
from django.contrib.sites.models import Site
from django.db.models.signals import post_save, post_migrate
from django.core.exceptions import ObjectDoesNotExist
from main.models import SiteSettings, UserProfile, User
# custom user related permissions
def add_user_permissions(sender, **kwargs):
pass
def add_groups(sender, **kwargs):
ct = ContentType.objects.get(app_label='auth', model='user')
perm, created = Permission.objects.get_or_create(codename='can_view', name='Can View Users', content_type=ct)
group, created = Group.objects.get_or_create(name='Volunteer')
if created:
p = Permission.objects.get(codename='add_userevent')
group.permissions.add(p)
group, created = Group.objects.get_or_create(name='Org_Admin')
if created:
p = Permission.objects.get(codename='add_organization')
group.permissions.add(p)
group, created = Group.objects.get_or_create(name='NHS_Admin')
if created:
p = Permission.objects.get(codename='can_view')
group.permissions.add(p)
if not SiteSettings.objects.exists():
settings = SiteSettings(site=Site.objects.get(pk=1), candidate_leadership_hours=50,
candidate_service_hours=100, member_service_hours=6).save()
def create_userprof(sender, instance, created, **kwargs):
    """Create a user profile when the superuser is created on the first syncdb."""
if created and instance.is_superuser:
try:
up = instance.user_profile
except ObjectDoesNotExist:
UserProfile(user=instance, email_valid=True, grad_class=2000, membership_status='MEM').save()
#post_migrate.connect(add_user_permissions, sender=auth_models)
# post_migrate.connect(add_groups)
post_save.connect(create_userprof, sender=User, dispatch_uid="create_userprof")
| mattr555/AtYourService-school | main/__init__.py | Python | mit | 1,906 | 0.005771 |
# Generated by Django 3.0.2 on 2020-03-04 20:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('files', '0004_image_compressed'),
('userprofile', '0029_auto_20200304_2007'),
]
operations = [
migrations.CreateModel(
name='Skill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.TextField()),
('category', models.ManyToManyField(to='userprofile.Category')),
('prerequisites', models.ManyToManyField(blank=True, to='userprofile.Skill')),
('thumb', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='files.Image')),
],
),
]
| hackerspace-ntnu/website | userprofile/migrations/0030_skill.py | Python | mit | 955 | 0.004188 |
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
import visvis as vv
import numpy as np
import os
# Try importing imageio
imageio = None
try:
import imageio
except ImportError:
pass
def volread(filename):
""" volread(filename)
Read volume from a file. If filename is 'stent', read a dedicated
test dataset. For reading any other kind of volume, the imageio
package is required.
"""
if filename == 'stent':
# Get full filename
path = vv.misc.getResourceDir()
filename2 = os.path.join(path, 'stent_vol.ssdf')
if os.path.isfile(filename2):
filename = filename2
else:
raise IOError("File '%s' does not exist." % filename)
# Load
s = vv.ssdf.load(filename)
return s.vol.astype('int16') * s.colorscale
elif imageio is not None:
return imageio.volread(filename)
else:
raise RuntimeError("visvis.volread needs the imageio package to read arbitrary files.")
if __name__ == '__main__':
vol = vv.volread('stent')
t = vv.volshow(vol)
t.renderStyle = 'mip' # maximum intensity projection (is the default)
| chrisidefix/visvis | functions/volread.py | Python | bsd-3-clause | 1,322 | 0.008321 |
__version__ = '1.0.4'
KERNEL_NAME = 'aimlbot'
LANGUAGE = 'chatbot'
DISPLAY_NAME= 'AIML Chatbot'
| paulovn/aiml-chatbot-kernel | aimlbotkernel/__init__.py | Python | bsd-3-clause | 97 | 0.010309 |
import logging
import os
import re
import subprocess
import time
from ctypes.wintypes import MAX_PATH
from commander import Commander, Response
logger = logging.getLogger(__name__)
class CommanderImpl(Commander):
def __init__(self):
super().__init__()
def call(self, cmd: str) -> Response:
logger.info('Executing command, command: %s', cmd)
start: int = time.time()
completed = subprocess.run(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
end: int = time.time()
duration: int = end - start
response: Response = Response(
completed.returncode,
completed.stdout.decode('utf-8').split(os.linesep),
completed.stderr.decode('utf-8').split(os.linesep)
)
self.log_output(cmd=cmd, response=response)
logger.info('Finished, execution_time: %d, return_code: %d', duration, response.return_code)
return response
def log_output(self, cmd: str, response: Response) -> None:
dir: str = os.path.join('F:\\workspace\\blu_two\\log', self.slugify(cmd))
if len(dir) > MAX_PATH:
dir = dir[0:MAX_PATH]
os.makedirs(dir, exist_ok=True)
with open('{}/return_code'.format(dir), 'w') as file:
file.write(str(response.return_code))
with open('{}/std_out'.format(dir), 'w') as file:
for line in response.std_out:
file.write("{line}\n".format(line=line))
with open('{}/std_err'.format(dir), 'w') as file:
for line in response.std_err:
file.write("{line}\n".format(line=line))
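# Minimal usage sketch (illustrative only; kept as a comment because call()
# also writes logs under the hard-coded path above, so running it elsewhere
# would fail). Response attribute names follow the construction in call():
#
#     runner = CommanderImpl()
#     result = runner.call('echo hello')
#     print(result.return_code, result.std_out)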
| coolman565/blu_two | commander/commander_impl.py | Python | mit | 1,705 | 0.001173 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 27 12:16:26 2017
@author: Anand A Joshi, Divya Varadarajan
"""
import glob
from os.path import isfile, split
import configparser
config_file = u'/big_disk/ajoshi/ABIDE2/study.cfg'
Config = configparser.ConfigParser()
Config.read(config_file)
Config.sections()
STUDY_DIR = Config.get('CSESVREG', 'STUDY_DIR')
NPROC = int(Config.get('CSESVREG', 'NPROC'))
BST_INSTALL = Config.get('CSESVREG', 'BST_INSTALL')
SVREG_ATLAS = Config.get('CSESVREG', 'SVREG_ATLAS')
SVREG_FLAGS = Config.get('CSESVREG', 'SVREG_FLAGS')
CSE_EXE = Config.get('CSESVREG', 'CSE_EXE')
SVREG_EXE = Config.get('CSESVREG', 'SVREG_EXE')
sublist = lst = glob.glob(STUDY_DIR+'/*')
SMOOTHNESS = '6'
ind = 0
cmdln1 = []
cmdln2 = []
incom = 0
com = 0
for sub in sublist:
img = sub + '/anat/t1.roiwise.stats.txt'
subpath, filename = split(img)
outsurfname = subpath + '/t1.heat_sol_comp.mat'
# print img
if not isfile(outsurfname):
incom += 1
print outsurfname
continue
com += 1
print str(incom) + ' remaining ' + str(com) + ' done'
| ajoshiusc/brainsuite-workflows | utility_scripts/main_check_remaining.py | Python | mit | 1,120 | 0.000893 |
import requests
from fdep.backends import StorageBackend
class HTTPBackend(StorageBackend):
"""Implement HTTP/HTTPS."""
SCHEME_NAME = 'http'
def get_to(self, local_path):
r = requests.get(self.url, stream=True)
total_length = int(r.headers.get('content-length', 0))
self.progressbar.start_progress(total_length)
with open(local_path, 'wb') as f:
for chunk in r.iter_content(10240):
f.write(chunk)
self.progressbar.progress_callback(len(chunk))
self.progressbar.end_progress()
def put_from(self, local_path):
raise NotImplementedError("HTTP backend does not support uploading")
class HTTPSBackend(HTTPBackend):
SCHEME_NAME = 'https'
| checkr/fdep | fdep/backends/http.py | Python | mit | 750 | 0 |
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_intfiles
# Purpose: From Spidering and from searching search engines, identifies
# files of potential interest.
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 06/04/2014
# Copyright: (c) Steve Micallef 2014
# Licence: GPL
# -------------------------------------------------------------------------------
import re
import urllib
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_intfiles(SpiderFootPlugin):
"""Interesting Files:Footprint:Identifies potential files of interest, e.g. office documents, zip files."""
# Default options
opts = {
'pages': 20, # Number of search results pages to iterate
'fileexts': ["doc", "docx", "ppt", "pptx", "pdf", 'xls', 'xlsx', 'zip'],
'usesearch': True,
'searchengine': "yahoo"
}
# Option descriptions
optdescs = {
'pages': "Number of search engine results pages to iterate through if using one.",
'fileexts': "File extensions of files you consider interesting.",
'usesearch': "Use search engines to quickly find files. If false, only spidering will be used.",
'searchengine': "If using a search engine, which one? google, yahoo or bing."
}
results = list()
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = list()
for opt in userOpts.keys():
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
def watchedEvents(self):
return ["INTERNET_NAME", "LINKED_URL_INTERNAL"]
# What events this module produces
# This is to support the end user in selecting modules based on events
# produced.
def producedEvents(self):
return ["SEARCH_ENGINE_WEB_CONTENT", "INTERESTING_FILE"]
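    # Rewrites a Yahoo redirect fragment (the "RU=<encoded url>/RK=" match fed
    # in by the re.sub() call in handleEvent) into a plain url="..." attribute
    # so that parseLinks() can extract the real target.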
def yahooCleaner(self, string):
return " url=\"" + urllib.unquote(string.group(1)) + "\" "
# Handle events sent to this module
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
self.sf.debug("Received event, " + eventName + ", from " + srcModuleName)
if eventName == "INTERNET_NAME" and not self.opts['usesearch']:
self.sf.debug("Not using a search engine to find interesting files.")
return None
if eventData in self.results:
return None
else:
self.results.append(eventData)
if eventName == "LINKED_URL_INTERNAL":
for fileExt in self.opts['fileexts']:
if "." + fileExt.lower() in eventData.lower():
if eventData in self.results:
continue
else:
self.results.append(eventData)
evt = SpiderFootEvent("INTERESTING_FILE", eventData,
self.__name__, event)
self.notifyListeners(evt)
return None
# Handling INTERNET_NAME event..
for fileExt in self.opts['fileexts']:
# Sites hosted on the domain
if self.opts['searchengine'].lower() == "google":
pages = self.sf.googleIterate("site:" + eventData + "+" +
"%2Bext:" + fileExt, dict(limit=self.opts['pages'],
useragent=self.opts['_useragent'],
timeout=self.opts['_fetchtimeout']))
if self.opts['searchengine'].lower() == "bing":
pages = self.sf.bingIterate("site:" + eventData + "+" +
"%2Bext:" + fileExt, dict(limit=self.opts['pages'],
useragent=self.opts['_useragent'],
timeout=self.opts['_fetchtimeout']))
if self.opts['searchengine'].lower() == "yahoo":
pages = self.sf.yahooIterate("site:" + eventData + "+" +
"%2Bext:" + fileExt, dict(limit=self.opts['pages'],
useragent=self.opts['_useragent'],
timeout=self.opts['_fetchtimeout']))
if pages is None:
self.sf.info("No results returned from " + self.opts['searchengine'] +
" for " + fileExt + " files.")
continue
for page in pages.keys():
if page in self.results:
continue
else:
self.results.append(page)
# Check if we've been asked to stop
if self.checkForStop():
return None
                # Submit the results for analysis
evt = SpiderFootEvent("SEARCH_ENGINE_WEB_CONTENT", pages[page],
self.__name__, event)
self.notifyListeners(evt)
if self.opts['searchengine'].lower() == "yahoo":
res = re.sub("RU=(.[^\/]+)\/RK=", self.yahooCleaner, pages[page], 0)
else:
res = pages[page]
links = self.sf.parseLinks(page, res, eventData)
if len(links) == 0:
continue
for link in links:
if link in self.results:
continue
else:
self.results.append(link)
if self.sf.urlFQDN(link).endswith(eventData) and \
"." + fileExt.lower() in link.lower():
self.sf.info("Found an interesting file: " + link)
evt = SpiderFootEvent("INTERESTING_FILE", link,
self.__name__, event)
self.notifyListeners(evt)
# End of sfp_intfiles class
| Reality9/spiderfoot | modules/sfp_intfiles.py | Python | gpl-2.0 | 6,481 | 0.003395 |
# -*- coding: utf-8 -*-
from django import forms
class SearchForm(forms.Form):
query = forms.CharField(required=False)
| estebistec/django-get-forms | examples/demo/demo/forms.py | Python | bsd-3-clause | 127 | 0 |
# -*-coding:utf-8 -*-
import re
import json
from bs4 import BeautifulSoup
from page_parse import status
from decorators.decorator import parse_decorator
from db.models import UserRelation
from utils.filters import url_filter
from db.user_relation import save_relations
def get_userid(html):
return status.get_userid(html)
def get_username(html):
return status.get_username(html)
def get_userdomain(html):
return status.get_userdomain(html)
@parse_decorator(1)
def _get_header(html):
soup = BeautifulSoup(html, "html.parser")
scripts = soup.find_all('script')
pattern = re.compile(r'FM.view\((.*)\)')
cont = ''
for script in scripts:
m = pattern.search(script.string)
if m and 'pl.header.head.index' in script.string:
all_info = m.group(1)
cont = json.loads(all_info)['html']
return cont
def get_verifytype(html):
"""
:param html: page source
    :return: 0 stands for unauthorized, 1 stands for personal authentication, 2 stands for enterprise authentication
"""
if 'icon_pf_approve_co' in html:
return 2
elif 'icon_pf_approve' in html:
return 1
else:
return 0
@parse_decorator(1)
def get_verifyreason(html, verify_type):
"""
details for authentication
:param html: page source
:param verify_type: authentication type
:return: authentication info
"""
if verify_type == 1 or verify_type == 2:
soup = BeautifulSoup(_get_header(html), 'html.parser')
return soup.find(attrs={'class': 'pf_intro'})['title']
else:
return ''
@parse_decorator(1)
def get_headimg(html):
"""
Get the head img url of current user
:param html: page source
:return: head img url
"""
soup = BeautifulSoup(_get_header(html), 'html.parser')
try:
headimg = url_filter(soup.find(attrs={'class': 'photo_wrap'}).find(attrs={'class': 'photo'})['src'])
except AttributeError:
headimg = ''
return headimg
@parse_decorator(1)
def get_left(html):
"""
The left part of the page, which is public
"""
soup = BeautifulSoup(html, "html.parser")
scripts = soup.find_all('script')
pattern = re.compile(r'FM.view\((.*)\)')
cont = ''
l_id = ''
# first ensure the left part
for script in scripts:
m = pattern.search(script.string)
if m and 'WB_frame_b' in script.string:
all_info = m.group(1)
cont = json.loads(all_info)['html']
lsoup = BeautifulSoup(cont, 'html.parser')
l_id = lsoup.find(attrs={'class': 'WB_frame_b'}).div['id']
for script in scripts:
m = pattern.search(script.string)
if m and l_id in script.string:
all_info = m.group(1)
try:
cont = json.loads(all_info)['html']
except KeyError:
return ''
return cont
@parse_decorator(1)
def get_right(html):
"""
Parse the right part of user detail
:param html: page source
:return: the right part of user info page
"""
soup = BeautifulSoup(html, "html.parser")
scripts = soup.find_all('script')
pattern = re.compile(r'FM.view\((.*)\)')
cont = ''
    # first ensure the right part; enterprise users may have two r_ids
rids = []
for script in scripts:
m = pattern.search(script.string)
if m and 'WB_frame_c' in script.string:
all_info = m.group(1)
cont = json.loads(all_info).get('html', '')
if not cont:
return ''
rsoup = BeautifulSoup(cont, 'html.parser')
r_ids = rsoup.find(attrs={'class': 'WB_frame_c'}).find_all('div')
for r in r_ids:
rids.append(r['id'])
for script in scripts:
for r_id in rids:
m = pattern.search(script.string)
if m and r_id in script.string:
all_info = m.group(1)
cont += json.loads(all_info).get('html', '')
return cont
@parse_decorator(0)
def get_level(html):
"""
Get the level of users
"""
pattern = '<span>Lv.(.*?)<\\\/span>'
rs = re.search(pattern, html)
if rs:
return rs.group(1)
else:
return 0
@parse_decorator(2)
def get_fans_or_follows(html, uid, type):
"""
Get fans or follows and store their relationships
:param html: current page source
:param uid: current user id
:param type: type of relations
:return: list of fans or followers
"""
if html == '':
return list()
pattern = re.compile(r'FM.view\((.*)\)')
soup = BeautifulSoup(html, "html.parser")
scripts = soup.find_all('script')
user_ids = list()
relations = list()
for script in scripts:
m = re.search(pattern, script.string)
if m and 'pl.content.followTab.index' in script.string:
all_info = m.group(1)
cont = json.loads(all_info).get('html', '')
soup = BeautifulSoup(cont, 'html.parser')
follows = soup.find(attrs={'class': 'follow_box'}).find_all(attrs={'class': 'follow_item'})
pattern = 'uid=(.*?)&'
for follow in follows:
m = re.search(pattern, str(follow))
if m:
r = m.group(1)
# filter invalid ids
if r.isdigit():
user_ids.append(r)
relations.append(UserRelation(uid, r, type))
save_relations(relations)
return user_ids
def get_max_crawl_pages(html):
"""
Get the max page we can crawl
:param html: current page source
:return: max page number we can crawl
"""
if html == '':
return 1
pattern = re.compile(r'FM.view\((.*)\)')
soup = BeautifulSoup(html, "html.parser")
scripts = soup.find_all('script')
length = 1
for script in scripts:
m = re.search(pattern, script.string)
if m and 'pl.content.followTab.index' in script.string:
all_info = m.group(1)
cont = json.loads(all_info).get('html', '')
soup = BeautifulSoup(cont, 'html.parser')
pattern = 'uid=(.*?)&'
if 'pageList' in cont:
urls2 = soup.find(attrs={'node-type': 'pageList'}).find_all(attrs={
'class': 'page S_txt1', 'bpfilter': 'page'})
length += len(urls2)
return length
| xtuyaowu/jtyd_python_spider | page_parse/user/public.py | Python | mit | 6,454 | 0.000775 |
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
from collections import deque
from functools import partial
import logging
import os
import socket
import sys
from threading import Lock, Thread, Event
import time
import weakref
import sys
from six.moves import range
try:
from weakref import WeakSet
except ImportError:
from cassandra.util import WeakSet # noqa
import asyncore
try:
import ssl
except ImportError:
ssl = None # NOQA
from cassandra.connection import Connection, ConnectionShutdown, NONBLOCKING, Timer, TimerManager
log = logging.getLogger(__name__)
_dispatcher_map = {}
def _cleanup(loop_weakref):
try:
loop = loop_weakref()
except ReferenceError:
return
loop._cleanup()
class WaitableTimer(Timer):
def __init__(self, timeout, callback):
Timer.__init__(self, timeout, callback)
self.callback = callback
self.event = Event()
self.final_exception = None
def finish(self, time_now):
try:
finished = Timer.finish(self, time_now)
if finished:
self.event.set()
return True
return False
except Exception as e:
self.final_exception = e
self.event.set()
return True
def wait(self, timeout=None):
self.event.wait(timeout)
if self.final_exception:
raise self.final_exception
class _PipeWrapper(object):
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
def close(self):
os.close(self.fd)
def getsockopt(self, level, optname, buflen=None):
# act like an unerrored socket for the asyncore error handling
if level == socket.SOL_SOCKET and optname == socket.SO_ERROR and not buflen:
return 0
raise NotImplementedError()
class _AsyncoreDispatcher(asyncore.dispatcher):
def __init__(self, socket):
asyncore.dispatcher.__init__(self, map=_dispatcher_map)
# inject after to avoid base class validation
self.set_socket(socket)
self._notified = False
def writable(self):
return False
def validate(self):
assert not self._notified
self.notify_loop()
assert self._notified
self.loop(0.1)
assert not self._notified
def loop(self, timeout):
asyncore.loop(timeout=timeout, use_poll=True, map=_dispatcher_map, count=1)
class _AsyncorePipeDispatcher(_AsyncoreDispatcher):
def __init__(self):
self.read_fd, self.write_fd = os.pipe()
_AsyncoreDispatcher.__init__(self, _PipeWrapper(self.read_fd))
def writable(self):
return False
def handle_read(self):
while len(os.read(self.read_fd, 4096)) == 4096:
pass
self._notified = False
def notify_loop(self):
if not self._notified:
self._notified = True
os.write(self.write_fd, b'x')
class _AsyncoreUDPDispatcher(_AsyncoreDispatcher):
"""
Experimental alternate dispatcher for avoiding busy wait in the asyncore loop. It is not used by default because
it relies on local port binding.
Port scanning is not implemented, so multiple clients on one host will collide. This address would need to be set per
instance, or this could be specialized to scan until an address is found.
To use::
from cassandra.io.asyncorereactor import _AsyncoreUDPDispatcher, AsyncoreLoop
AsyncoreLoop._loop_dispatch_class = _AsyncoreUDPDispatcher
"""
bind_address = ('localhost', 10000)
def __init__(self):
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.bind(self.bind_address)
self._socket.setblocking(0)
_AsyncoreDispatcher.__init__(self, self._socket)
def handle_read(self):
try:
d = self._socket.recvfrom(1)
while d and d[1]:
d = self._socket.recvfrom(1)
except socket.error as e:
pass
self._notified = False
def notify_loop(self):
if not self._notified:
self._notified = True
self._socket.sendto(b'', self.bind_address)
def loop(self, timeout):
asyncore.loop(timeout=timeout, use_poll=False, map=_dispatcher_map, count=1)
class _BusyWaitDispatcher(object):
max_write_latency = 0.001
"""
Timeout pushed down to asyncore select/poll. Dictates the amount of time it will sleep before coming back to check
if anything is writable.
"""
def notify_loop(self):
pass
def loop(self, timeout):
if not _dispatcher_map:
time.sleep(0.005)
count = timeout // self.max_write_latency
asyncore.loop(timeout=self.max_write_latency, use_poll=True, map=_dispatcher_map, count=count)
def validate(self):
pass
def close(self):
pass
class AsyncoreLoop(object):
timer_resolution = 0.1 # used as the max interval to be in the io loop before returning to service timeouts
_loop_dispatch_class = _AsyncorePipeDispatcher if os.name != 'nt' else _BusyWaitDispatcher
def __init__(self):
self._pid = os.getpid()
self._loop_lock = Lock()
self._started = False
self._shutdown = False
self._thread = None
self._timers = TimerManager()
try:
dispatcher = self._loop_dispatch_class()
dispatcher.validate()
log.debug("Validated loop dispatch with %s", self._loop_dispatch_class)
except Exception:
log.exception("Failed validating loop dispatch with %s. Using busy wait execution instead.", self._loop_dispatch_class)
dispatcher.close()
dispatcher = _BusyWaitDispatcher()
self._loop_dispatcher = dispatcher
atexit.register(partial(_cleanup, weakref.ref(self)))
def maybe_start(self):
should_start = False
did_acquire = False
try:
did_acquire = self._loop_lock.acquire(False)
if did_acquire and not self._started:
self._started = True
should_start = True
finally:
if did_acquire:
self._loop_lock.release()
if should_start:
self._thread = Thread(target=self._run_loop, name="cassandra_driver_event_loop")
self._thread.daemon = True
self._thread.start()
def wake_loop(self):
self._loop_dispatcher.notify_loop()
def _run_loop(self):
log.debug("Starting asyncore event loop")
with self._loop_lock:
while not self._shutdown:
try:
self._loop_dispatcher.loop(self.timer_resolution)
self._timers.service_timeouts()
except Exception:
                    log.debug("Asyncore event loop stopped unexpectedly", exc_info=True)
break
self._started = False
log.debug("Asyncore event loop ended")
def add_timer(self, timer):
self._timers.add_timer(timer)
# This function is called from a different thread than the event loop
# thread, so for this call to be thread safe, we must wake up the loop
# in case it's stuck at a select
self.wake_loop()
def _cleanup(self):
global _dispatcher_map
self._shutdown = True
if not self._thread:
return
log.debug("Waiting for event loop thread to join...")
self._thread.join(timeout=1.0)
if self._thread.is_alive():
log.warning(
"Event loop thread could not be joined, so shutdown may not be clean. "
"Please call Cluster.shutdown() to avoid this.")
log.debug("Event loop thread was joined")
# Ensure all connections are closed and in-flight requests cancelled
for conn in tuple(_dispatcher_map.values()):
if conn is not self._loop_dispatcher:
conn.close()
self._timers.service_timeouts()
# Once all the connections are closed, close the dispatcher
self._loop_dispatcher.close()
log.debug("Dispatchers were closed")
class AsyncoreConnection(Connection, asyncore.dispatcher):
"""
An implementation of :class:`.Connection` that uses the ``asyncore``
module in the Python standard library for its event loop.
"""
_loop = None
_writable = False
_readable = False
@classmethod
def initialize_reactor(cls):
if not cls._loop:
cls._loop = AsyncoreLoop()
else:
current_pid = os.getpid()
if cls._loop._pid != current_pid:
log.debug("Detected fork, clearing and reinitializing reactor state")
cls.handle_fork()
cls._loop = AsyncoreLoop()
@classmethod
def handle_fork(cls):
global _dispatcher_map
_dispatcher_map = {}
if cls._loop:
cls._loop._cleanup()
cls._loop = None
@classmethod
def create_timer(cls, timeout, callback):
timer = Timer(timeout, callback)
cls._loop.add_timer(timer)
return timer
def __init__(self, *args, **kwargs):
Connection.__init__(self, *args, **kwargs)
self.deque = deque()
self.deque_lock = Lock()
self._connect_socket()
# start the event loop if needed
self._loop.maybe_start()
init_handler = WaitableTimer(
timeout=0,
callback=partial(asyncore.dispatcher.__init__,
self, self._socket, _dispatcher_map)
)
self._loop.add_timer(init_handler)
init_handler.wait(kwargs["connect_timeout"])
self._writable = True
self._readable = True
self._send_options_message()
def close(self):
with self.lock:
if self.is_closed:
return
self.is_closed = True
log.debug("Closing connection (%s) to %s", id(self), self.host)
self._writable = False
self._readable = False
# We don't have to wait for this to be closed, we can just schedule it
self.create_timer(0, partial(asyncore.dispatcher.close, self))
log.debug("Closed socket to %s", self.host)
if not self.is_defunct:
self.error_all_requests(
ConnectionShutdown("Connection to %s was closed" % self.host))
            # This happens when the connection is shut down while waiting for the ReadyMessage
if not self.connected_event.is_set():
self.last_error = ConnectionShutdown("Connection to %s was closed" % self.host)
# don't leave in-progress operations hanging
self.connected_event.set()
def handle_error(self):
self.defunct(sys.exc_info()[1])
def handle_close(self):
log.debug("Connection %s closed by server", self)
self.close()
def handle_write(self):
while True:
with self.deque_lock:
try:
next_msg = self.deque.popleft()
except IndexError:
self._writable = False
return
try:
sent = self.send(next_msg)
self._readable = True
except socket.error as err:
if (err.args[0] in NONBLOCKING):
with self.deque_lock:
self.deque.appendleft(next_msg)
else:
self.defunct(err)
return
else:
if sent < len(next_msg):
with self.deque_lock:
self.deque.appendleft(next_msg[sent:])
if sent == 0:
return
def handle_read(self):
try:
while True:
buf = self.recv(self.in_buffer_size)
self._iobuf.write(buf)
if len(buf) < self.in_buffer_size:
break
except socket.error as err:
if ssl and isinstance(err, ssl.SSLError):
if err.args[0] not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
self.defunct(err)
return
elif err.args[0] not in NONBLOCKING:
self.defunct(err)
return
if self._iobuf.tell():
self.process_io_buffer()
if not self._requests and not self.is_control_connection:
self._readable = False
def push(self, data):
sabs = self.out_buffer_size
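        # Split the outgoing payload into out_buffer_size-sized chunks so each
        # deque entry can be sent (and, on a partial send, re-queued) by
        # handle_write() without ever holding one oversized buffer.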
if len(data) > sabs:
chunks = []
for i in range(0, len(data), sabs):
chunks.append(data[i:i + sabs])
else:
chunks = [data]
with self.deque_lock:
self.deque.extend(chunks)
self._writable = True
self._loop.wake_loop()
def writable(self):
return self._writable
def readable(self):
return self._readable or (self.is_control_connection and not (self.is_defunct or self.is_closed))
| thelastpickle/python-driver | cassandra/io/asyncorereactor.py | Python | apache-2.0 | 13,797 | 0.001667 |
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from MaKaC.webinterface.rh import conferenceDisplay
from indico.web.flask.blueprints.event.display import event
# My conference
event.add_url_rule('/my-conference/', 'myconference', conferenceDisplay.RHMyStuff)
event.add_url_rule('/my-conference/contributions', 'myconference-myContributions',
conferenceDisplay.RHConfMyStuffMyContributions)
event.add_url_rule('/my-conference/sessions', 'myconference-mySessions', conferenceDisplay.RHConfMyStuffMySessions)
event.add_url_rule('/my-conference/tracks', 'myconference-myTracks', conferenceDisplay.RHConfMyStuffMyTracks)
# Other views
event.add_url_rule('/other-view', 'conferenceOtherViews', conferenceDisplay.RHConferenceOtherViews)
# EMail form
event.add_url_rule('/email', 'EMail', conferenceDisplay.RHConferenceEmail, methods=('GET', 'POST'))
event.add_url_rule('/email/send', 'EMail-send', conferenceDisplay.RHConferenceSendEmail, methods=('POST',))
# Participation invitation
event.add_url_rule('/invitation/participant/<participantId>', 'confModifParticipants-invitation',
conferenceDisplay.RHConfParticipantsInvitation, methods=('GET', 'POST'))
event.add_url_rule('/invitation/participant/<participantId>/refuse', 'confModifParticipants-refusal',
conferenceDisplay.RHConfParticipantsRefusal, methods=('GET', 'POST'))
| XeCycle/indico | indico/web/flask/blueprints/event/display/misc.py | Python | gpl-3.0 | 2,063 | 0.005332 |
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
# This is the shard name for when the keyrange covers the entire space
# for unsharded database.
SHARD_ZERO = '0'
# Keyrange that spans the entire space, used
# for unsharded database.
NON_PARTIAL_KEYRANGE = ''
MIN_KEY = ''
MAX_KEY = ''
KIT_UNSET = ''
KIT_UINT64 = 'uint64'
KIT_BYTES = 'bytes'
| ptomasroos/vitess | py/vtdb/keyrange_constants.py | Python | bsd-3-clause | 450 | 0 |
"""Elementwise operators"""
from __future__ import absolute_import as _abs
import tvm
from .. import tag
from ..util import get_const_int
@tvm.tag_scope(tag=tag.ELEMWISE)
def relu(x):
"""Take relu of input x.
Parameters
----------
x : tvm.Tensor
Input argument.
Returns
-------
y : tvm.Tensor
The result.
"""
return tvm.compute(x.shape, lambda *i: tvm.max(x(*i), tvm.const(0, x.dtype)))
@tvm.tag_scope(tag=tag.ELEMWISE)
def leaky_relu(x, alpha):
"""Take leaky relu of input x.
Parameters
----------
x : tvm.Tensor
Input argument.
alpha : float
The slope for the small gradient when x < 0
Returns
-------
y : tvm.Tensor
The result.
"""
def _compute(*indices):
value = x(*indices)
calpha = tvm.const(alpha, value.dtype)
return tvm.select(value > 0, value, value * calpha)
return tvm.compute(x.shape, _compute)
@tvm.tag_scope(tag=tag.BROADCAST)
def prelu(x, slope, axis=1):
""" PReLU.
It accepts two arguments: an input ``x`` and a weight array ``W``
    and computes the output as :math:`y = x > 0 ? x : W * x`,
where :math:`*` is an elementwise multiplication for each sample in the
batch.
Arguments:
x : tvm.Tensor
Input argument.
slope : tvm.Tensor
Channelised slope tensor for prelu
axis : int
The axis where the channel data needs to be applied
Returns:
y : tvm.Tensor
The result.
Links:
[http://arxiv.org/pdf/1502.01852v1.pdf]
"""
assert len(x.shape) == 4 and len(slope.shape) == 1
assert axis < len(x.shape)
assert get_const_int(slope.shape[0]) == get_const_int(x.shape[axis])
def _compute_channelwise(*indices):
return tvm.select(x(*indices) > 0, x(*indices), x(*indices) * slope(indices[axis]))
return tvm.compute(x.shape, _compute_channelwise)
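# Minimal usage sketch (illustrative only, kept as a comment since this module
# uses relative imports; assumes the classic tvm.placeholder / create_schedule /
# build API of this TVM generation):
#
#     x = tvm.placeholder((1, 3, 224, 224), name="x")
#     w = tvm.placeholder((3,), name="w")
#     y = prelu(x, w, axis=1)
#     s = tvm.create_schedule(y.op)
#     f = tvm.build(s, [x, w, y], target="llvm")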
| mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/topi/python/topi/nn/elemwise.py | Python | apache-2.0 | 1,936 | 0.002066 |
import unittest
from vase.util import MultiDict
class MultiDictTests(unittest.TestCase):
def test_setitem(self):
md = MultiDict()
key = 'hello'
value = 'world'
md[key] = value
self.assertEqual(md[key], value)
self.assertEqual(md.get(key), value)
self.assertEqual(md.getlist(key), [value])
self.assertRaises(KeyError, md.__getitem__, "vasya")
self.assertEqual(md.get("vasya"), None)
self.assertEqual(list(md.items()), [(key, value)])
self.assertEqual(list(md.lists()), [(key, [value])])
self.assertEqual(list(md.values()), [value])
| vkryachko/Vase | tests/test_multidict.py | Python | bsd-2-clause | 640 | 0 |
# -*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : contact@netzob.org |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Standard library imports
#+---------------------------------------------------------------------------+
import os
import logging
#+---------------------------------------------------------------------------+
#| Related third party imports
#+---------------------------------------------------------------------------+
from gi.repository import Gtk, Gdk, GLib, Pango
import gi
from netzob.Common.SignalsManager import SignalsManager
gi.require_version('Gtk', '3.0')
#+---------------------------------------------------------------------------+
#| Local application imports
#+---------------------------------------------------------------------------+
from netzob.Common.NetzobException import NetzobException
from netzob.Common.ResourcesConfiguration import ResourcesConfiguration
from netzob.Common.Type.Format import Format
class MessageTableView(object):
MAX_DISPLAYED_FIELDS = 200
def __init__(self, controller):
self.controller = controller
self.builder = Gtk.Builder()
self.builder.add_from_file(os.path.join(
ResourcesConfiguration.getStaticResources(),
"ui", "vocabulary",
"messageTable.glade"))
self._getObjects(self.builder, ["messageTableBox",
"fieldNameLabel",
"messageTableScrolledWindow"])
self.builder.connect_signals(self.controller)
self.displayedField = None
# Make an empty treeview
self.messageTableTreeView = self.__makeMessageTreeView()
self.messageTableScrolledWindow.add(self.messageTableTreeView)
self.messageTableTreeView.show()
self.treeViewHeaderGroup = TreeViewHeaderWidgetGroup()
def _getObjects(self, builder, objectsList):
for object in objectsList:
setattr(self, object, builder.get_object(object))
def __makeMessageTreeView(self):
# Instanciate treeview
messageTableTreeView = Gtk.TreeView()
messageTableTreeView.connect("enter-notify-event", self.controller.messageTableTreeView_enter_notify_event_cb)
messageTableTreeView.connect("leave-notify-event", self.controller.messageTableTreeView_leave_notify_event_cb)
messageTableTreeView.connect("button-press-event", self.controller.messageTableTreeView_button_press_event_cb)
messageTableTreeView.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE)
messageTableTreeView.get_selection().connect("changed", self.controller.messageTableTreeView_changed_event_cb)
messageTableTreeView.set_rules_hint(True)
messageTableTreeView.set_grid_lines(Gtk.TreeViewGridLines.BOTH)
# Configures it as a Drag Source
messageTableTreeView.enable_model_drag_source(Gdk.ModifierType.BUTTON1_MASK, [], Gdk.DragAction.MOVE)
messageTableTreeView.connect("drag-data-get", self.__drag_data_get_event)
messageTableTreeView.drag_source_add_text_targets()
# Create columns
if self.displayedField is None or len(self.displayedField.getExtendedFields()) < 1:
return messageTableTreeView
startOfColumns = 1 + self.displayedField.getExtendedFields()[0].getIndex()
numOfColumns = startOfColumns + min(self.MAX_DISPLAYED_FIELDS, len(self.displayedField.getExtendedFields()))
self.treeViewHeaderGroup.clear()
for colIdx in range(startOfColumns, numOfColumns):
(tvc, head) = self.__makeTreeViewColumn(startOfColumns, colIdx)
#tvc.set_clickable(True)
messageTableTreeView.append_column(tvc)
but = tvc.get_button()
box = but.get_children()[0]
align = box.get_children()[0]
align.connect("size-allocate", propagate_size_allocation)
self.treeViewHeaderGroup.add(head)
# Setup column headers.
columns = messageTableTreeView.get_columns()
for column in columns:
column_widget = column.get_widget()
column_header = find_closest_ancestor(column_widget, Gtk.Button)
if column_header:
column_header.connect('button-press-event', propagate_button_press_event)
column_header.set_focus_on_click(False)
return messageTableTreeView
def refreshProperties(self):
"""refresh the properties like background color"""
self.messageTableTreeView.queue_draw()
def __makeTreeViewColumn(self, startOfColumns, i):
i = i - 1
markupCellRenderer = Gtk.CellRendererText()
treeViewColumn = Gtk.TreeViewColumn()
field = self.displayedField.getFieldByIndex(i)
headerWidget = TreeViewHeaderWidget(field,
treeViewColumn,
self)
treeViewColumn.set_widget(headerWidget)
treeViewColumn.set_resizable(True)
treeViewColumn.pack_start(markupCellRenderer, True)
treeViewColumn.add_attribute(markupCellRenderer, "markup", i + 2 - startOfColumns)
markupCellRenderer.set_property("font", "monospace")
return (treeViewColumn, headerWidget)
def setDisplayedField(self, field):
"""Memorizes field as the displayed field in this message table
and updates itself to display this field."""
self.displayedField = field
self.update()
def getDisplayedField(self):
"""Returns the currently displayed field in this message table"""
return self.displayedField
def updateMessageTableTreeView(self):
"""Performs a full update on the treeview displaying messages.
You should call this method only if you need a full update
of the table"""
logging.debug("Start to update the message table")
## Remove former TreeView if necessary
if self.messageTableTreeView is not None:
self.messageTableScrolledWindow.remove(self.messageTableTreeView)
if self.displayedField is None:
return
## Create a new treeview
self.messageTableTreeView = self.__makeMessageTreeView()
## Create and fill store for the create tree view
self.updateMessageTableListStore()
## Display newly created treeview
self.messageTableScrolledWindow.add(self.messageTableTreeView)
self.messageTableTreeView.show()
logging.debug("End to update the message table")
def updateMessageTableListStore(self):
"""Updates the liststore containing the displayed messages.
You should call this method when you need the messages displayed
in the treeview to be refreshed and the fields of the symbol have not
changed. (ie the columns of the treeview won't be updated)"""
splitMessagesMatrix = []
messages = self.displayedField.getMessages()
# Split every message
logging.debug("Align {0} messages with regex {1}".format(len(messages), self.displayedField.getRegex()))
for message in messages:
try:
splitMessage = [str(message.getID())]
tmpSplitMessage = message.applyAlignment(styled=True, encoded=True)
tmpSplitMessage = tmpSplitMessage[self.displayedField.getExtendedFields()[0].getIndex():self.displayedField.getExtendedFields()[-1].getIndex() + 1]
splitMessage.extend(tmpSplitMessage)
except NetzobException:
logging.warn("Impossible to display one of messages since it cannot be cut according to the computed regex.")
logging.warn("Message: {0}".format(str(message.getStringData())))
continue # We don't display the message in error
splitMessagesMatrix.append(splitMessage)
logging.debug("Alignent computed")
# Setup listStore
numOfColumns = min(self.MAX_DISPLAYED_FIELDS,
len(self.displayedField.getExtendedFields()))
# the list store must include the ID and a column for every field
listStoreTypes = [str] * (numOfColumns + 1)
self.messageTableListStore = Gtk.ListStore(*listStoreTypes)
# Fill listStore with split messages
for splitMessage in splitMessagesMatrix:
self.messageTableListStore.append(splitMessage)
self.messageTableTreeView.set_model(self.messageTableListStore)
def sortMessagesByField(self, field, sortType):
"""Sorts the messages displayed in the treeview by field field in the
order specified by sortType.
If field is None, the treeview is restored to its original
"unsorted" state."""
sortTypeMap = {TreeViewHeaderWidget.SORT_ASCENDING: Gtk.SortType.ASCENDING,
TreeViewHeaderWidget.SORT_DESCENDING: Gtk.SortType.DESCENDING}
if field is None:
sortIndex = -2
else:
sortIndex = field.getIndex()
self.messageTableListStore.set_sort_column_id(sortIndex, sortTypeMap[sortType])
self.treeViewHeaderGroup.setAllColumnsSortIndicator(sortIndex, sortType)
    def updateFieldNameLabel(self):
        """Updates the label displaying the field name."""
if self.displayedField is None:
fieldName = "Empty Message Table"
else:
fieldName = self.displayedField.getName()
self.fieldNameLabel.set_text(fieldName)
def setSelected(self, selected):
"""Selects or unselects the message table."""
if selected:
boldFont = Pango.FontDescription()
boldFont.set_weight(Pango.Weight.BOLD)
self.fieldNameLabel.modify_font(boldFont)
else:
selection = self.messageTableTreeView.get_selection()
if selection is not None:
selection.unselect_all()
for header in self.treeViewHeaderGroup.getSelectedHeaders():
header.setSelected(False)
normalFont = Pango.FontDescription()
normalFont.set_weight(Pango.Weight.NORMAL)
self.fieldNameLabel.modify_font(normalFont)
    def __drag_data_get_event(self, widget, drag_context, data, info, time):
        """Callback executed when the user requests this treeview as the
        source of its drag and drop operation"""
(model, rows) = widget.get_selection().get_selected_rows()
if rows is not None:
for row in rows:
msgID = model[row][0]
if msgID is not None:
data.set_text("m:{0}".format(msgID), -1)
def updateBackgroundColor(self, currentSelectedHeader):
# Retrieve first and last selected headers
selectedHeaders = []
for header in self.treeViewHeaderGroup.getHeaders():
if header.getSelected():
selectedHeaders.append(header)
if len(selectedHeaders) < 1:
firstSelectedHeader = None
lastSelectedHeader = None
else:
firstSelectedHeader = selectedHeaders[0]
lastSelectedHeader = selectedHeaders[-1]
# Retrieve selected header range
goSelect = False
for header in self.treeViewHeaderGroup.getHeaders():
if header == firstSelectedHeader:
goSelect = True
if header == currentSelectedHeader and not currentSelectedHeader.getSelected():
goSelect = False
if goSelect:
header.setSelected(True)
# change background of column
if header.treeViewColumn is not None and header.treeViewColumn.get_cells() is not None and len(header.treeViewColumn.get_cells()) > 0:
cellRenderer = header.treeViewColumn.get_cells()[0]
cellRenderer.set_property("background", "grey")
self.refreshProperties()
boldFont = Pango.FontDescription()
boldFont.set_weight(Pango.Weight.BOLD)
header.titleLabel.modify_font(boldFont)
else:
header.setSelected(False)
# change background of column
if header.treeViewColumn is not None and header.treeViewColumn.get_cells() is not None and len(header.treeViewColumn.get_cells()) > 0:
cellRenderer = header.treeViewColumn.get_cells()[0]
cellRenderer.set_property("background", "white")
self.refreshProperties()
normalFont = Pango.FontDescription()
normalFont.set_weight(Pango.Weight.NORMAL)
header.titleLabel.modify_font(normalFont)
if header == lastSelectedHeader:
goSelect = False
def update(self):
self.updateFieldNameLabel()
self.updateMessageTableTreeView()
def getPanel(self):
return self.messageTableBox
def destroy(self):
self.messageTableBox.destroy()
class TreeViewHeaderWidget(Gtk.VBox):
"""Header Widget for columns of TreeViews displaying symbol messages.
This header is designed to display properties of a symbol field, like :
- Name
- Corresponding Regex
- Display format
    The following actions can be performed on the field/column:
- Sort messages
- Change display format
- Collapse column
    This header widget has different "display" states controlling its size,
and the amount of information displayed:
- In collapsed state, the column is collapsed horizontally and the only
widget displayed is an arrow to expand it
- In unfocused state, the field name and it regex are displayed as well
as arrow controls to collapse the column and sort it.
- In focused state, the display format is added to the information displayed
in unfocused state.
"""
# This header has different states, controlling the amount
# of information displayed and therefore its size
STATE_FOCUSED = 0 # Fully displayed header (ie including display format)
STATE_UNFOCUSED = 1 # "Reduced" header (display format hidden)
STATE_COLLAPSED = 2 # Collapsed column
# Constants defining the sort status (not sorted, sorted in ascending order,
# sorted in descending order).
SORT_NONE = 0
SORT_ASCENDING = 1
SORT_DESCENDING = 2
def __init__(self, field, treeViewColumn, messageTableView):
Gtk.VBox.__init__(self)
self.field = field
self.treeViewColumn = treeViewColumn
self.messageTableView = messageTableView
self.__setupUI()
self.__setState(self.STATE_FOCUSED)
self.__setRegexMarkup(field.getEncodedVersionOfTheRegex())
self.__setTitle(field.getName())
self.setFormat(field.getFormat())
self.focused = True
self.collapsed = False
self.setSortIndicator(self.SORT_NONE)
self.setSelected(False)
def __setupUI(self):
"""Setup GTK widgets"""
# Title and arrows
self.titleLabel = Gtk.Label()
self.titleLabel.set_property("margin-right", 20)
self.titleEventBox = Gtk.EventBox()
self.titleEventBox.add(self.titleLabel)
self.titleEventBox.connect("button-press-event", self.titleEventBox_button_press_event_cb)
self.titleEventBox.set_visible_window(False)
self.collapseArrow = Gtk.Arrow(Gtk.ArrowType.LEFT, Gtk.ShadowType.NONE)
self.sortArrow = Gtk.Arrow(Gtk.ArrowType.DOWN, Gtk.ShadowType.NONE)
self.collapseEventBox = Gtk.EventBox()
self.collapseEventBox.add(self.collapseArrow)
self.collapseEventBox.connect("button-press-event", self.collapseEventBox_button_press_event_cb)
self.collapseEventBox.set_visible_window(False)
self.sortEventBox = Gtk.EventBox()
self.sortEventBox.add(self.sortArrow)
self.sortEventBox.connect("button-press-event", self.sortEventBox_button_press_event_cb)
self.sortEventBox.set_visible_window(False)
self.titleArrowBox = Gtk.HBox()
# Regex
self.regexEventBox = Gtk.EventBox()
self.regexEventBox.modify_bg(Gtk.StateType.NORMAL, Gdk.Color.parse("#c8c8c8")[1])
self.regexLabel = Gtk.Label()
self.regexEventBox.add(self.regexLabel)
boldFont = Pango.FontDescription()
boldFont.set_weight(Pango.Weight.BOLD)
self.regexLabel.modify_font(boldFont)
# Format button
self.formatLabel = Gtk.Label()
self.formatEventBox = Gtk.EventBox()
self.formatEventBox.add(self.formatLabel)
self.formatEventBox.connect("button-press-event", self.formatEventBox_button_press_event_cb)
def __setState(self, state):
"""Set the display state, see comment in class header
(ie collapsed, focused, unfocused) and update
its contents accordingly"""
self.state = state
# Remove all children
for child in self.get_children():
self.remove(child)
for child in self.titleArrowBox.get_children():
self.titleArrowBox.remove(child)
if state == self.STATE_COLLAPSED:
self.collapseArrow.set(Gtk.ArrowType.RIGHT, Gtk.ShadowType.NONE)
self.pack_start(self.collapseEventBox, False, False, 0)
w, _ = self.get_preferred_width()
self.treeViewColumn.set_max_width(w + 6)
self.treeViewColumn.set_min_width(w + 6)
if state == self.STATE_FOCUSED or state == self.STATE_UNFOCUSED:
self.collapseArrow.set(Gtk.ArrowType.LEFT, Gtk.ShadowType.NONE)
self.titleArrowBox.pack_start(self.titleEventBox, True, True, 0)
self.titleArrowBox.pack_start(self.sortEventBox, False, False, 0)
self.titleArrowBox.pack_start(self.collapseEventBox, False, False, 0)
self.treeViewColumn.set_max_width(-1)
self.treeViewColumn.set_min_width(-1)
if state == self.STATE_UNFOCUSED:
self.pack_start(self.titleArrowBox, False, False, 0)
self.pack_start(self.regexEventBox, False, False, 0)
if state == self.STATE_FOCUSED:
self.pack_start(self.titleArrowBox, False, False, 0)
self.pack_start(self.regexEventBox, False, False, 0)
self.pack_start(self.formatEventBox, False, False, 0)
self.show_all()
def __resetSortArrow(self):
"""Re-create the arrow widget used to display the current.
This method"""
self.sortEventBox.remove(self.sortArrow)
self.sortArrow = Gtk.Arrow(Gtk.ArrowType.DOWN, Gtk.ShadowType.NONE)
self.sortArrow.show()
self.sortEventBox.add(self.sortArrow)
def __setTitle(self, title):
"""Sets the string displayed in the title label"""
self.titleLabel.set_text(title)
def __setRegexMarkup(self, regex):
""""Sets the string displayed in the regex label"""
self.regexLabel.set_text(GLib.markup_escape_text(regex))
def setFormat(self, format_):
"""Sets the string displayed in the format label"""
self.formatLabel.set_text(format_)
def setSortIndicator(self, sortStatus):
"""Sets the sort indicator arrow to reflect sortStatus."""
self.sortStatus = sortStatus
if sortStatus == self.SORT_ASCENDING:
self.sortArrow.set(Gtk.ArrowType.UP, Gtk.ShadowType.NONE)
self.sortArrow.modify_fg(Gtk.StateType.NORMAL, Gdk.Color.parse("red")[1])
elif sortStatus == self.SORT_DESCENDING:
self.sortArrow.set(Gtk.ArrowType.DOWN, Gtk.ShadowType.NONE)
self.sortArrow.modify_fg(Gtk.StateType.NORMAL, Gdk.Color.parse("red")[1])
elif sortStatus == self.SORT_NONE:
self.__resetSortArrow()
def setFocus(self, focused):
"""Set the 'focused' state of the header, and modify its appearance
accordingly. If a column header is in unfocused mode, some information
        it contains will be hidden. The header widget remembers its focused state
        after it was collapsed. For example, if you set the state of a header
        to unfocused, collapse it and then expand it, it will be restored in
        unfocused mode."""
self.focused = focused
if focused and not self.collapsed:
self.__setState(self.STATE_FOCUSED)
elif not focused and not self.collapsed:
# self.__setState(self.STATE_UNFOCUSED)
pass
def setCollapsed(self, collapsed):
"""Set the 'collapsed' state of the header, and modify its appearance
accordingly. If a column header is in collapsed mode,
the column is collapsed horizontally and the only widget displayed
is an arrow to expand it."""
self.collapsed = collapsed
if collapsed:
self.__setState(self.STATE_COLLAPSED)
elif not collapsed and not self.focused:
self.__setState(self.STATE_UNFOCUSED)
elif not collapsed and self.focused:
self.__setState(self.STATE_FOCUSED)
def getSelected(self):
"""Returns True if the column is selected, False otherwise."""
return self.selected
def setSelected(self, selected):
"""Selects or unselects the column"""
self.selected = selected
## Callbacks
def formatEventBox_button_press_event_cb(self, *args):
supportedFormats = Format.getSupportedFormats()
currentFormat = self.field.getFormat()
currentFormatIdx = supportedFormats.index(currentFormat)
newFormat = supportedFormats[(currentFormatIdx + 1) % len(supportedFormats)]
self.field.setFormat(newFormat)
self.setFormat(newFormat)
self.messageTableView.updateMessageTableListStore()
def sortEventBox_button_press_event_cb(self, *args):
if self.sortStatus == self.SORT_NONE:
self.messageTableView.sortMessagesByField(self.field, self.SORT_DESCENDING)
self.setSortIndicator(self.SORT_DESCENDING)
elif self.sortStatus == self.SORT_DESCENDING:
self.messageTableView.sortMessagesByField(self.field, self.SORT_ASCENDING)
self.setSortIndicator(self.SORT_ASCENDING)
elif self.sortStatus == self.SORT_ASCENDING:
self.messageTableView.sortMessagesByField(None, self.SORT_ASCENDING)
self.setSortIndicator(self.SORT_NONE)
def collapseEventBox_button_press_event_cb(self, *args):
self.setCollapsed(not self.collapsed)
def titleEventBox_button_press_event_cb(self, *args):
self.messageTableView.controller.vocabularyPerspective.setSelectedMessageTable(self.messageTableView)
self.setSelected(not self.selected)
self.messageTableView.updateBackgroundColor(self)
# Emit Signals to update toolbar status
nbSelectedFields = len(self.messageTableView.treeViewHeaderGroup.getSelectedFields())
signalsManager = self.messageTableView.controller.getSignalsManager()
if nbSelectedFields == 0:
signalsManager.emitSignal(SignalsManager.SIG_FIELDS_NO_SELECTION)
elif nbSelectedFields == 1:
signalsManager.emitSignal(SignalsManager.SIG_FIELDS_SINGLE_SELECTION)
elif nbSelectedFields > 1:
signalsManager.emitSignal(SignalsManager.SIG_FIELDS_MULTIPLE_SELECTION)
class TreeViewHeaderWidgetGroup(object):
def __init__(self):
self.headerList = []
def add(self, headerWidget):
"""Adds a header widget in the group"""
self.headerList.append(headerWidget)
def remove(self, headerWidget):
"""Removes a header widget from the group"""
self.headerList.remove(headerWidget)
def clear(self):
"""Empties the group"""
self.headerList = []
def getHeaders(self):
"""Returns the header list"""
return self.headerList
def getSelectedHeaders(self):
"""Returns the header widgets which are selected"""
return [header for header in self.headerList
if header.getSelected()]
def getSelectedFields(self):
"""Returns the fields displayed in columns whose header are selected"""
return [header.field for header in self.getSelectedHeaders()]
def setAllColumnsFocus(self, focused):
"""Set the focused state of all the headers in the group"""
for header in self.headerList:
header.setFocus(focused)
def setAllColumnsSortIndicator(self, index, sortType):
"""Sets the sort indicator of all columns belonging to
this group. It can be used to reset the sort indicator of all columns
to its unsorted base state"""
for header in self.headerList:
if header.field.getIndex() != index:
header.setSortIndicator(header.SORT_NONE)
else:
header.setSortIndicator(sortType)
## The following functions are GTK utility functions, which are "hacks"
## to allow the treeview header widget to work properly
def propagate_button_press_event(parent, event, *data):
"""Propagates a button-press-event from the widget parent
to the first Gtk.Button or Gtk.EventBox child encountered"""
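    # Typical usage (assumed): connected as a container's "button-press-event"
    # handler so that clicks are forwarded to its Gtk.Button / Gtk.EventBox
    # children, e.g.:
    #   someContainer.connect("button-press-event", propagate_button_press_event)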
parent_alloc = parent.get_allocation()
x = parent_alloc.x + int(event.x)
y = parent_alloc.y + int(event.y)
children = parent.get_children()
while children:
for child in children:
child_alloc = child.get_allocation()
if child_alloc.x <= x <= child_alloc.x + child_alloc.width and \
child_alloc.y <= y <= child_alloc.y + child_alloc.height:
if isinstance(child, Gtk.Button) or isinstance(child, Gtk.EventBox):
cevent = Gdk.Event()
cevent.type = Gdk.EventType.FOCUS_CHANGE
cevent.send_event = True
cevent.in_ = True
cevent.window = event.window
child.emit('button-press-event', cevent, *data)
return True
else:
if hasattr(child, 'get_children') and callable(child.get_children):
children = child.get_children()
else:
                        children = None
else:
children = None
return False
def propagate_size_allocation(parent, allocation, *data):
"""Force the child widget of parent to occupy the same space as its
parent."""
children = parent.get_children()
for child in children:
gdkRectangle = Gdk.Rectangle()
gdkRectangle.x = allocation.x
gdkRectangle.y = allocation.y
gdkRectangle.height = allocation.height
gdkRectangle.width = allocation.width
        child.size_allocate(gdkRectangle)
def find_closest_ancestor(widget, ancestor_class):
"""Returns the closest parent of widget
    which inherits from ancestor_class"""
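    # Example (hypothetical): find_closest_ancestor(headerWidget, Gtk.TreeView)
    # returns the enclosing Gtk.TreeView, or None if there is none.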
if not isinstance(widget, Gtk.Widget):
raise TypeError("%r is not a Gtk.Widget" % widget)
ancestor = widget.get_parent()
while ancestor is not None:
if isinstance(ancestor, ancestor_class):
break
if hasattr(ancestor, 'get_parent') and callable(ancestor.get_parent):
ancestor = ancestor.get_parent()
else:
            ancestor = None
return ancestor
| nagyistoce/netzob | src/netzob/UI/Vocabulary/Views/MessageTableView.py | Python | gpl-3.0 | 29,245 | 0.003112 |
from pupa.scrape import Jurisdiction, Organization
import scrapelib
import lxml.html
from .people import SDLegislatorScraper
from .bills import SDBillScraper
class SouthDakota(Jurisdiction):
division_id = "ocd-division/country:us/state:sd"
classification = "government"
name = "South Dakota"
url = "http://www.sdlegislature.gov/"
scrapers = {
'people': SDLegislatorScraper,
'bills': SDBillScraper
}
parties = [
{'name': 'Republican'},
{'name': 'Democratic'}
]
legislative_sessions = [
{
"_scraped_name": "2009 (84th) Session",
"identifier": "2009",
"name": "2009 Regular Session"
},
{
"_scraped_name": "2010 (85th) Session",
"identifier": "2010",
"name": "2010 Regular Session"
},
{
"_scraped_name": "2011 (86th) Session",
"identifier": "2011",
"name": "2011 Regular Session",
"start_date": "2011-01-11"
},
{
"_scraped_name": "2011 (86th) Special Session",
"identifier": "2011s",
"name": "2011 Special Session"
},
{
"_scraped_name": "2012 (87th) Session",
"identifier": "2012",
"name": "2012 Regular Session"
},
{
"_scraped_name": "2013 (88th) Session",
"identifier": "2013",
"name": "2013 Regular Session"
},
{
"_scraped_name": "2014 (89th) Session",
"identifier": "2014",
"name": "2014 Regular Session"
},
{
"_scraped_name": "2015 (90th) Session",
"identifier": "2015",
"name": "2015 Regular Session"
},
{
"_scraped_name": "2016 (91st) Session",
"identifier": "2016",
"name": "2016 Regular Session"
},
{
"_scraped_name": "2017 (92nd) Session",
"identifier": "2017",
"name": "2017 Regular Session"
}
]
ignored_scraped_sessions = [
"Previous Years"
]
def get_organizations(self):
legislature_name = "South Dakota State Legislature"
lower_chamber_name = "House"
lower_seats = 0
lower_title = "Representative"
upper_chamber_name = "Senate"
upper_seats = 0
upper_title = "Senator"
legislature = Organization(name=legislature_name,
classification="legislature")
upper = Organization(upper_chamber_name, classification='upper',
parent_id=legislature._id)
lower = Organization(lower_chamber_name, classification='lower',
parent_id=legislature._id)
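        # NB: upper_seats and lower_seats are left at 0 above, so the loops below
        # currently add no posts; the counts look like placeholders to be filled in.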
for n in range(1, upper_seats + 1):
upper.add_post(
label=str(n), role=upper_title,
division_id='{}/sldu:{}'.format(self.division_id, n))
for n in range(1, lower_seats + 1):
lower.add_post(
label=str(n), role=lower_title,
division_id='{}/sldl:{}'.format(self.division_id, n))
yield legislature
yield upper
yield lower
def get_session_list(self):
html = scrapelib.Scraper().get('http://www.sdlegislature.gov/'
'Legislative_Session/Menu.aspx').text
doc = lxml.html.fromstring(html)
sessions = doc.xpath('//div[@id="ctl00_ContentPlaceHolder1_BlueBoxLeft"]//ul/li'
'/a/div/text()')
return sessions
| cliftonmcintosh/openstates | openstates/sd/__init__.py | Python | gpl-3.0 | 3,672 | 0.000272 |
"""
UTILITIES
"""
def distinct(inlist):
"""
returns a list of distinct values
(no duplicated values)
"""
outlist = []
for elem in inlist:
        if elem not in outlist:
outlist.append(elem)
return outlist
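# NB: this keeps the first occurrence of each value and works for unhashable
# elements too; for hashable elements on Python 3.7+, list(dict.fromkeys(inlist))
# is an equivalent one-liner.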
def list_evolution(list1,list2):
"""
returns the index evolution of each element
    of list1 compared to its index within list2
    NB: if an element of list1 is not in list2, None is placed at its index
"""
# return [list2.index(x) - list1.index(x) for x in list1 if x in list2]
evo = []
for x in list1:
if x in list2:
evo.append(list2.index(x) - list1.index(x))
else:
evo.append(None)
return evo
def list_to_dict(keyslist,valueslist):
"""
convert lists of keys and values to a dict
"""
if len(keyslist) != len(valueslist):
return {}
mydict = {}
for idx in range(0,len(keyslist)):
mydict[keyslist[idx]] = valueslist[idx]
return mydict
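# NB: for equal-length lists, list_to_dict(keys, values) is equivalent to dict(zip(keys, values)).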
#evo_progress: return the evolution of each item of a list compared to its left value
evo_progress = lambda pos: [] if pos == [] else [j for i in [[0],[pos[i-1]-pos[i] for i in range(1,len(pos))]] for j in i]
if __name__ == '__main__':
mylist = ['A','B','A','C','A','A','D']
print('ORIGINAL:', mylist)
print('DISTINCT', distinct(mylist))
mylist1 = ['A','B','C','D']
mylist2 = ['A','D','B','C']
print(list_evolution(mylist2,mylist1))
print(list_to_dict(['a','b','c'], [1,2,3]))
print(evo_progress([]))
print(evo_progress([1]))
print(evo_progress([1,4,2,4,8,5,5,3])) | dmartin35/pronosfoot | tools/utils.py | Python | mit | 1,623 | 0.02711 |
# coding=utf-8
#
# (c) Copyright 2008 by Daniel J. Rocco
# Licensed under the Creative Commons Attribution-
# Noncommercial-Share Alike 3.0 United States License, see
# <http://creativecommons.org/licenses/by-nc-sa/3.0/us/>
#
"""
Command-module for controlling **TortoiseSVN** from Windows Explorer
============================================================================
This module implements various voice-commands for using the
Windows Explorer extensions of the TortoiseSVN subversion client.
(c) Copyright 2008 by Daniel J. Rocco
Licensed under the Creative Commons Attribution-
Noncommercial-Share Alike 3.0 United States License, see
<http://creativecommons.org/licenses/by-nc-sa/3.0/us/>
"""
import os.path
import subprocess
import os
import win32gui
import urllib
#from subprocess import Popen
from dragonfly import (Grammar, ConnectionGrammar, AppContext, CompoundRule,
Choice, Window, Config, Section, Item)
#---------------------------------------------------------------------------
# Set up this module's configuration.
config = Config("TortoiseSVN")
config.tortoisesvn = Section("TortoiseSVN configuration")
config.tortoisesvn.path = Item(r'C:\Program Files\TortoiseSVN\bin\TortoiseProc.exe')
config.tortoisesvn.command = Item("(tortoise | subversion) <command>")
config.tortoisesvn.global_command = Item("(tortoise | subversion) <command> <predef>")
config.tortoisesvn.actions = Item({
"add": "add",
"checkout": "checkout",
"commit": "commit",
"revert": "revert",
"merge": "merge",
"delete": "delete",
"diff": "diff",
"log": "log",
"import": "import",
"update": "update",
"revert": "revert",
"ignore": "ignore",
"rename": "rename",
"properties": "properties",
"repository": "repobrowser",
"edit conflict": "conflicteditor",
},
)
config.tortoisesvn.predef = Item({
"dragonfly | dee fly": r"C:\data\projects\Dragonfly\work dragonfly",
},
)
#config.generate_config_file()
config.load()
#---------------------------------------------------------------------------
# Utility generator function for iterating over COM collections.
def collection_iter(collection):
for index in xrange(collection.Count):
yield collection.Item(index)
#---------------------------------------------------------------------------
# This module's grammar for use within Windows Explorer.
class ExplorerGrammar(ConnectionGrammar):
def __init__(self):
ConnectionGrammar.__init__(
self,
name="Explorer subversion",
context=AppContext(executable="explorer"),
app_name="Shell.Application"
)
def get_active_explorer(self):
handle = Window.get_foreground().handle
for window in collection_iter(self.application.Windows()):
if window.HWND == handle:
return window
self._log.warning("%s: no active explorer." % self)
return None
def get_current_directory(self):
window = self.get_active_explorer()
        # LocationURL is typically a "file:///..." URL; unquote it first, then
        # strip the scheme prefix below.
        path = urllib.unquote(window.LocationURL)
if path.startswith("file:///"):
path = path[8:]
return path
def get_selected_paths(self):
window = self.get_active_explorer()
items = window.Document.SelectedItems()
paths = []
for item in collection_iter(items):
paths.append(item.Path)
return paths
def get_selected_filenames(self):
paths = self.get_selected_paths()
return [os.path.basename(p) for p in paths]
#---------------------------------------------------------------------------
# Create the rule from which the other rules will be derived.
# This rule implements the method to execute TortoiseSVN.
class TortoiseRule(CompoundRule):
def _execute_command(self, path_list, command):
# Construct arguments and spawn TortoiseSVN.
path_arg = '/path:"%s"' % str('*'.join(path_list))
command_arg = "/command:" + command
os.spawnv(os.P_NOWAIT, config.tortoisesvn.path,
[config.tortoisesvn.path, command_arg, path_arg])
# For some reason the subprocess module gives quote-related errors.
#Popen([tortoise_path, command_arg, path_arg])
#---------------------------------------------------------------------------
# Create the rule for controlling TortoiseSVN from Windows Explorer.
class ExplorerCommandRule(TortoiseRule):
spec = config.tortoisesvn.command
extras = [
Choice("command", config.tortoisesvn.actions),
]
def _process_recognition(self, node, extras):
selection = self.grammar.get_selected_paths()
if not selection:
selection = [self.grammar.get_current_directory()]
self._execute_command(selection, extras["command"])
#---------------------------------------------------------------------------
# Create the rule for controlling TortoiseSVN from anywhere.
class GlobalCommandRule(TortoiseRule):
spec = config.tortoisesvn.global_command
extras = [
Choice("command", config.tortoisesvn.actions),
Choice("predef", config.tortoisesvn.predef),
]
def _process_recognition(self, node, extras):
path_list = [extras["predef"]]
command = extras["command"]
self._execute_command(path_list, command)
#---------------------------------------------------------------------------
# Load the grammar instance and define how to unload it.
explorer_grammar = ExplorerGrammar()
explorer_grammar.add_rule(ExplorerCommandRule())
global_grammar = Grammar("TortoiseSVN global")
global_grammar.add_rule(GlobalCommandRule())
explorer_grammar.load()
global_grammar.load()
# Unload function which will be called by natlink at unload time.
def unload():
global explorer_grammar, global_grammar
if explorer_grammar:
explorer_grammar.unload()
explorer_grammar = None
if global_grammar:
global_grammar.unload()
global_grammar = None
| Erotemic/local | depricated/speech/Examples/_tortoisesvn.py | Python | gpl-3.0 | 6,848 | 0.003505 |
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("AdaBoostClassifier" , "FourClass_100" , "db2")
| antoinecarme/sklearn2sql_heroku | tests/classification/FourClass_100/ws_FourClass_100_AdaBoostClassifier_db2_code_gen.py | Python | bsd-3-clause | 144 | 0.013889 |
'''
Profile Formula Validation is an example of a plug-in to GUI menu that will profile formula execution.
(c) Copyright 2012 Mark V Systems Limited, All rights reserved.
'''
import os
from tkinter import simpledialog, messagebox
def profileFormulaMenuEntender(cntlr, menu):
# Extend menu with an item for the profile formula plugin
menu.add_command(label="Profile formula validation",
underline=0,
command=lambda: profileFormulaMenuCommand(cntlr) )
def profileFormulaMenuCommand(cntlr):
    # profile formula validation menu item has been invoked
if cntlr.modelManager is None or cntlr.modelManager.modelXbrl is None:
cntlr.addToLog("No taxonomy loaded.")
return
# get file name into which to save log file while in foreground thread
profileReportFile = cntlr.uiFileDialog("save",
title=_("arelle - Save Formula Profile Report"),
initialdir=cntlr.config.setdefault("formulaProfileReportDir","."),
filetypes=[(_("Profile report file .log"), "*.log")],
defaultextension=".log")
if not profileReportFile:
return False
errMsg = ""
maxRunTime = 0
    while True:
timeout = simpledialog.askstring(_("arelle - Set formula run time limit"),
_("{0}You may enter the maximum number of minutes to run formulas.\n"
"(Leave empty for no run time limitation.)".format(errMsg)),
parent=cntlr.parent)
        if not timeout:
            break
        try:
            maxRunTime = float(timeout)
            break
        except ValueError as err:
            errMsg = str(err) + "\n\n"
excludeCompileTime = messagebox.askyesno(_("arelle - Exclude formula compile statistics"),
_("Should formula compiling be excluded from the statistics?\n"
"(Yes will make a separate compiling \"pass\" so that statistics include execution only.)".format(errMsg)),
parent=cntlr.parent)
cntlr.config["formulaProfileReportDir"] = os.path.dirname(profileReportFile)
cntlr.saveConfig()
# perform validation and profiling on background thread
import threading
thread = threading.Thread(target=lambda c=cntlr, f=profileReportFile, t=maxRunTime, e=excludeCompileTime: backgroundProfileFormula(c,f,t,e))
thread.daemon = True
thread.start()
def backgroundProfileFormula(cntlr, profileReportFile, maxRunTime, excludeCompileTime):
from arelle import Locale, XPathParser, ValidateXbrlDimensions, ValidateFormula
# build grammar before profiling (if this is the first pass, so it doesn't count in profile statistics)
XPathParser.initializeParser(cntlr.modelManager)
# load dimension defaults
ValidateXbrlDimensions.loadDimensionDefaults(cntlr.modelManager)
import cProfile, pstats, sys, time
# a minimal validation class for formula validator parameters that are needed
class Validate:
def __init__(self, modelXbrl, maxRunTime):
self.modelXbrl = modelXbrl
self.parameters = None
self.validateSBRNL = False
self.maxFormulaRunTime = maxRunTime
def close(self):
self.__dict__.clear()
val = Validate(cntlr.modelManager.modelXbrl, maxRunTime)
formulaOptions = val.modelXbrl.modelManager.formulaOptions
if excludeCompileTime:
startedAt = time.time()
cntlr.addToLog(_("pre-compiling formulas before profiling"))
val.validateFormulaCompileOnly = True
ValidateFormula.validate(val)
del val.validateFormulaCompileOnly
cntlr.addToLog(Locale.format_string(cntlr.modelManager.locale,
_("formula pre-compiling completed in %.2f secs"),
time.time() - startedAt))
cntlr.addToLog(_("executing formulas for profiling"))
else:
cntlr.addToLog(_("compiling and executing formulas for profiling"))
startedAt = time.time()
statsFile = profileReportFile + ".bin"
cProfile.runctx("ValidateFormula.validate(val)", globals(), locals(), statsFile)
cntlr.addToLog(Locale.format_string(cntlr.modelManager.locale,
_("formula profiling completed in %.2f secs"),
time.time() - startedAt))
# dereference val
val.close()
# specify a file for log
priorStdOut = sys.stdout
sys.stdout = open(profileReportFile, "w")
statObj = pstats.Stats(statsFile)
statObj.strip_dirs()
statObj.sort_stats("time")
statObj.print_stats()
statObj.print_callees()
statObj.print_callers()
sys.stdout.flush()
sys.stdout.close()
del statObj
sys.stdout = priorStdOut
os.remove(statsFile)
__pluginInfo__ = {
'name': 'Profile Formula Validation',
'version': '1.0',
'description': "This plug-in adds a profiled formula validation. "
"Includes XPath compilation in the profile if it is the first validation of instance; "
"to exclude XPath compile statistics, validate first the normal way (e.g., toolbar button) "
"and then validate again using this profile formula validation plug-in. ",
'license': 'Apache-2',
'author': 'Mark V Systems Limited',
'copyright': '(c) Copyright 2012 Mark V Systems Limited, All rights reserved.',
# classes of mount points (required)
'CntlrWinMain.Menu.Validation': profileFormulaMenuEntender,
}
| sternshus/arelle2.7 | svr-2.7/arelle/plugin/profileFormula.py | Python | apache-2.0 | 5,608 | 0.008559 |