text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses: 1 value) | license (stringclasses: 15 values) | size (int64 6-947k) | score (float64 0-0.34)
---|---|---|---|---|---|---|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
from positive_alert_test_case import PositiveAlertTestCase
from negative_alert_test_case import NegativeAlertTestCase
from alert_test_suite import AlertTestSuite
class TestAlertBruteforceSsh(AlertTestSuite):
alert_filename = "bruteforce_ssh"
# This event is the default positive event that will cause the
# alert to trigger
default_event = {
"_source": {
"summary": 'login invalid ldap_count_entries failed by 1.2.3.4',
"hostname": "exhostname",
"details": {
"program": "sshd",
"sourceipaddress": "1.2.3.4",
}
}
}
# This alert is the expected result from running this task
default_alert = {
"category": "bruteforce",
"severity": "NOTICE",
"summary": "10 ssh bruteforce attempts by 1.2.3.4 exhostname (10 hits)",
"tags": ['ssh'],
}
test_cases = []
test_cases.append(
PositiveAlertTestCase(
description="Positive test with default event and default alert expected",
events=AlertTestSuite.create_events(default_event, 10),
expected_alert=default_alert
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda(date_timedelta={'minutes': 1})
event['_source']['receivedtimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda(date_timedelta={'minutes': 1})
test_cases.append(
PositiveAlertTestCase(
description="Positive test with events a minute earlier",
events=events,
expected_alert=default_alert
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = 'login failed'
test_cases.append(
PositiveAlertTestCase(
description="Positive test with events with a summary of 'login failed'",
events=events,
expected_alert=default_alert
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = 'invalid failed'
test_cases.append(
PositiveAlertTestCase(
description="Positive test with events with a summary of 'invalid failed'",
events=events,
expected_alert=default_alert
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = 'ldap_count_entries failed'
test_cases.append(
PositiveAlertTestCase(
description="Positive test with events with a summary of 'ldap_count_entries failed'",
events=events,
expected_alert=default_alert
)
)
events = AlertTestSuite.create_events(default_event, 10)
events[8]['_source']['details']['sourceipaddress'] = "127.0.0.1"
events[9]['_source']['details']['sourceipaddress'] = "127.0.0.1"
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with 10 events however one has different sourceipaddress",
events=events,
)
)
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with not enough events",
events=AlertTestSuite.create_events(default_event, 9),
),
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = 'login good ldap_count_entries'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with summary without 'failed'",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = 'failed'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with summary with only 'failed'",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = 'login'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with summary with only 'login'",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = 'invalid'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with summary with only 'invalid'",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = 'ldap_count_entries'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with summary with only 'ldap_count_entries'",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['details']['program'] = 'badprogram'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with bad program",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({'minutes': 3})
event['_source']['receivedtimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({'minutes': 3})
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with old timestamp",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = event['_source']['summary'].replace('1.2.3.4', '11.22.33.44')
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with 11.22.33.44 as a whitelisted ip",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['summary'] = event['_source']['summary'].replace('1.2.3.4', '55.66.77.88')
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with 55.66.77.88 as a whitelisted ip",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['details']['sourceipaddress'] = None
test_cases.append(
NegativeAlertTestCase(
description="Negative test case aggregation key excluded",
events=events,
)
)
| Phrozyn/MozDef | tests/alerts/test_bruteforce_ssh.py | Python | mpl-2.0 | 7,170 | 0.00265 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, division, absolute_import
from bgfiles.http import create_content_disposition
from django.test import SimpleTestCase
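# create_content_disposition is expected to emit a plain ASCII filename plus an
# RFC 5987 filename* parameter (percent-encoded UTF-8) whenever the name
# contains non-ASCII characters; pure-bytes names get only the plain filename.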
class CreateContentDispositionTest(SimpleTestCase):
def test(self):
header = create_content_disposition('Fußball.pdf')
self.assertEqual(b'attachment; filename="Fuball.pdf"; filename*=UTF-8\'\'Fu%C3%9Fball.pdf', header)
header = create_content_disposition('Fußball.pdf', attachment=False)
self.assertEqual(b'inline; filename="Fuball.pdf"; filename*=UTF-8\'\'Fu%C3%9Fball.pdf', header)
header = create_content_disposition(b'Fussball.pdf')
self.assertEqual(b'attachment; filename="Fussball.pdf"', header)
header = create_content_disposition(b'Fussball.pdf', attachment=False)
self.assertEqual(b'inline; filename="Fussball.pdf"', header)
expected = (b'attachment; filename="Leery Jenkins My Man .pdf"; '
b'filename*=UTF-8\'\'L%C3%A9%C3%ABr%C5%93%C3%B8y%20%20Jenkins%20%20My%20Man%20.pdf')
self.assertEqual(create_content_disposition('Léërœøy \\Jenkins/"My Man".pdf'), expected)
expected = (b'inline; filename="Leery Jenkins My Man .pdf"; '
b'filename*=UTF-8\'\'L%C3%A9%C3%ABr%C5%93%C3%B8y%20%20Jenkins%20%20My%20Man%20.pdf')
self.assertEqual(create_content_disposition('Léërœøy \\Jenkins/"My Man".pdf', attachment=False), expected)
| climapulse/dj-bgfiles | tests/test_http.py | Python | bsd-3-clause | 1,483 | 0.004752 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import requests
from datetime import datetime
from cachetools import LFUCache
from requests_futures.sessions import FuturesSession
import threading
from .utils import get_args
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
log = logging.getLogger(__name__)
# How low do we want the queue size to stay?
wh_warning_threshold = 100
# How long can it be over the threshold, in seconds?
# Default: 5 seconds per 100 in threshold.
wh_threshold_lifetime = int(5 * (wh_warning_threshold / 100.0))
wh_lock = threading.Lock()
args = get_args()
def send_to_webhook(session, message_type, message):
if not args.webhooks:
# What are you even doing here...
log.warning('Called send_to_webhook() without webhooks.')
return
req_timeout = args.wh_timeout
data = {
'type': message_type,
'message': message
}
for w in args.webhooks:
try:
session.post(w, json=data, timeout=(None, req_timeout),
background_callback=__wh_completed)
except requests.exceptions.ReadTimeout:
log.exception('Response timeout on webhook endpoint %s.', w)
except requests.exceptions.RequestException as e:
log.exception(repr(e))
def wh_updater(args, queue, key_caches):
wh_threshold_timer = datetime.now()
wh_over_threshold = False
# Set up one session to use for all requests.
# Requests to the same host will reuse the underlying TCP
# connection, giving a performance increase.
session = __get_requests_session(args)
# Extract the proper identifier. This list also controls which message
# types are getting cached.
ident_fields = {
'pokestop': 'pokestop_id',
'pokemon': 'encounter_id',
'gym': 'gym_id',
'gym_details': 'gym_id'
}
# Instantiate WH LFU caches for all cached types. We separate the caches
# by ident_field types, because different ident_field (message) types can
# use the same name for their ident field.
for key in ident_fields:
key_caches[key] = LFUCache(maxsize=args.wh_lfu_size)
# The forever loop.
while True:
try:
# Loop the queue.
whtype, message = queue.get()
# Get the proper cache if this type has one.
key_cache = None
if whtype in key_caches:
key_cache = key_caches[whtype]
# Get the unique identifier to check our cache, if it has one.
ident = message.get(ident_fields.get(whtype), None)
# cachetools in Python2.7 isn't thread safe, so we add a lock.
with wh_lock:
# Only send if identifier isn't already in cache.
if ident is None or key_cache is None:
# We don't know what it is, or it doesn't have a cache,
# so let's just log and send as-is.
log.debug(
'Sending webhook item of uncached type: %s.', whtype)
send_to_webhook(session, whtype, message)
elif ident not in key_cache:
key_cache[ident] = message
log.debug('Sending %s to webhook: %s.', whtype, ident)
send_to_webhook(session, whtype, message)
else:
# Make sure to call key_cache[ident] in all branches so it
# updates the LFU usage count.
# If the object has changed in an important way, send new
# data to webhooks.
if __wh_object_changed(whtype, key_cache[ident], message):
key_cache[ident] = message
send_to_webhook(session, whtype, message)
log.debug('Sending updated %s to webhook: %s.',
whtype, ident)
else:
log.debug('Not resending %s to webhook: %s.',
whtype, ident)
# Helping out the GC.
del whtype
del message
del ident
# Webhook queue moving too slow.
if (not wh_over_threshold) and (
queue.qsize() > wh_warning_threshold):
wh_over_threshold = True
wh_threshold_timer = datetime.now()
elif wh_over_threshold:
if queue.qsize() < wh_warning_threshold:
wh_over_threshold = False
else:
timediff = datetime.now() - wh_threshold_timer
if timediff.total_seconds() > wh_threshold_lifetime:
log.warning('Webhook queue has been > %d (@%d);'
+ ' for over %d seconds,'
+ ' try increasing --wh-concurrency'
+ ' or --wh-threads.',
wh_warning_threshold,
queue.qsize(),
wh_threshold_lifetime)
queue.task_done()
except Exception as e:
log.exception('Exception in wh_updater: %s.', repr(e))
# Helpers
# Background handler for completed webhook requests.
# Currently doesn't do anything.
def __wh_completed():
pass
def __get_requests_session(args):
# Config / arg parser
num_retries = args.wh_retries
backoff_factor = args.wh_backoff_factor
pool_size = args.wh_concurrency
# Use requests & urllib3 to auto-retry.
# If the backoff_factor is 0.1, then sleep() will sleep for [0.1s, 0.2s,
# 0.4s, ...] between retries. It will also force a retry if the status
# code returned is 500, 502, 503 or 504.
session = FuturesSession(max_workers=pool_size)
# If any regular response is generated, no retry is done. Without using
# the status_forcelist, even a response with status 500 will not be
# retried.
retries = Retry(total=num_retries, backoff_factor=backoff_factor,
status_forcelist=[500, 502, 503, 504])
# Mount handler on both HTTP & HTTPS.
session.mount('http://', HTTPAdapter(max_retries=retries,
pool_connections=pool_size,
pool_maxsize=pool_size))
session.mount('https://', HTTPAdapter(max_retries=retries,
pool_connections=pool_size,
pool_maxsize=pool_size))
return session
def __get_key_fields(whtype):
key_fields = {
# lure_expiration is a UTC timestamp so it's good (Y).
'pokestop': ['enabled', 'latitude',
'longitude', 'lure_expiration', 'active_fort_modifier'],
'pokemon': ['spawnpoint_id', 'pokemon_id', 'latitude', 'longitude',
'disappear_time', 'move_1', 'move_2',
'individual_stamina', 'individual_defense',
'individual_attack', 'form', 'cp', 'pokemon_level'],
'gym': ['team_id', 'guard_pokemon_id',
'gym_points', 'enabled', 'latitude', 'longitude'],
'gym_details': ['latitude', 'longitude', 'team', 'pokemon']
}
return key_fields.get(whtype, [])
# Determine if a webhook object has changed in any important way (and
# requires a resend).
def __wh_object_changed(whtype, old, new):
# Only test for important fields: don't trust last_modified fields.
fields = __get_key_fields(whtype)
if not fields:
log.debug('Received an object of unknown type %s.', whtype)
return True
return not __dict_fields_equal(fields, old, new)
# Determine if two dicts have equal values for all keys in a list.
def __dict_fields_equal(keys, a, b):
for k in keys:
if a.get(k) != b.get(k):
return False
return True
| pgandev/RocketMap | pogom/webhook.py | Python | agpl-3.0 | 8,054 | 0 |
# Embedded file name: /usr/lib/enigma2/python/Components/NetworkTime.py
from Components.Console import Console
from config import config
from enigma import eTimer, eDVBLocalTimeHandler, eEPGCache
from Tools.StbHardware import setRTCtime
from time import time, ctime
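# Poll periodically and sync the receiver clock either via the external
# ntpdate-sync helper (SyncTimeUsing == '1') or by falling back to DVB
# transponder time; the RTC is updated on every successful pass.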
def AutoNTPSync(session = None, **kwargs):
global ntpsyncpoller
ntpsyncpoller = NTPSyncPoller()
ntpsyncpoller.start()
class NTPSyncPoller:
def __init__(self):
self.timer = eTimer()
self.Console = Console()
def start(self):
if self.timecheck not in self.timer.callback:
self.timer.callback.append(self.timecheck)
self.timer.startLongTimer(0)
def stop(self):
if self.timecheck in self.timer.callback:
self.timer.callback.remove(self.timecheck)
self.timer.stop()
def timecheck(self):
if config.misc.SyncTimeUsing.value == '1':
print '[NTP]: Updating'
self.Console.ePopen('/usr/bin/ntpdate-sync', self.update_schedule)
else:
self.update_schedule()
def update_schedule(self, result = None, retval = None, extra_args = None):
nowTime = time()
nowTimereal = ctime(nowTime)
if nowTime > 10000:
print '[NTP]: setting E2 unixtime:', nowTime
print '[NTP]: setting E2 realtime:', nowTimereal
setRTCtime(nowTime)
if config.misc.SyncTimeUsing.value == '1':
eDVBLocalTimeHandler.getInstance().setUseDVBTime(False)
else:
eDVBLocalTimeHandler.getInstance().setUseDVBTime(True)
eEPGCache.getInstance().timeUpdated()
self.timer.startLongTimer(int(config.misc.useNTPminutes.value) * 60)
else:
print 'NO TIME SET'
self.timer.startLongTimer(10) | kingvuplus/boom2 | lib/python/Components/NetworkTime.py | Python | gpl-2.0 | 1,875 | 0.005867 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr
from frappe import msgprint, _
def execute(filters=None):
if not filters: filters = {}
salary_slips = get_salary_slips(filters)
columns, earning_types, ded_types = get_columns(salary_slips)
ss_earning_map = get_ss_earning_map(salary_slips)
ss_ded_map = get_ss_ded_map(salary_slips)
data = []
for ss in salary_slips:
row = [ss.employee, ss.employee_name, ss.branch, ss.department, ss.designation,
ss.company, ss.month, ss.leave_withut_pay, ss.payment_days]
for e in earning_types:
row.append(ss_earning_map.get(ss.name, {}).get(e))
row += [ss.arrear_amount, ss.leave_encashment_amount, ss.gross_pay]
for d in ded_types:
row.append(ss_ded_map.get(ss.name, {}).get(d))
row += [ss.total_deduction, ss.net_pay]
data.append(row)
return columns, data
def get_columns(salary_slips):
columns = [
_("Employee") + ":Link/Employee:120", _("Employee Name") + "::140", _("Branch") + ":Link/Branch:120",
_("Department") + ":Link/Department:120", _("Designation") + ":Link/Designation:120",
_("Company") + ":Link/Company:120", _("Month") + "::80", _("Leave Without Pay") + ":Float:130",
_("Payment Days") + ":Float:120"
]
earning_types = frappe.db.sql_list("""select distinct e_type from `tabSalary Slip Earning`
where e_modified_amount != 0 and parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]))
ded_types = frappe.db.sql_list("""select distinct d_type from `tabSalary Slip Deduction`
where d_modified_amount != 0 and parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]))
columns = columns + [(e + ":Currency:120") for e in earning_types] + \
["Arrear Amount:Currency:120", "Leave Encashment Amount:Currency:150",
"Gross Pay:Currency:120"] + [(d + ":Currency:120") for d in ded_types] + \
["Total Deduction:Currency:120", "Net Pay:Currency:120"]
return columns, earning_types, ded_types
def get_salary_slips(filters):
conditions, filters = get_conditions(filters)
salary_slips = frappe.db.sql("""select * from `tabSalary Slip` where docstatus = 1 %s
order by employee, month""" % conditions, filters, as_dict=1)
if not salary_slips:
msgprint(_("No salary slip found for month: ") + cstr(filters.get("month")) +
_(" and year: ") + cstr(filters.get("fiscal_year")), raise_exception=1)
return salary_slips
def get_conditions(filters):
conditions = ""
if filters.get("month"):
month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov",
"Dec"].index(filters["month"]) + 1
filters["month"] = month
conditions += " and month = %(month)s"
if filters.get("fiscal_year"): conditions += " and fiscal_year = %(fiscal_year)s"
if filters.get("company"): conditions += " and company = %(company)s"
if filters.get("employee"): conditions += " and employee = %(employee)s"
return conditions, filters
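# Build a nested map of {salary slip name: {earning type: amount}} so each
# report row can look up its earning amounts directly.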
def get_ss_earning_map(salary_slips):
ss_earnings = frappe.db.sql("""select parent, e_type, e_modified_amount
from `tabSalary Slip Earning` where parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)
ss_earning_map = {}
for d in ss_earnings:
ss_earning_map.setdefault(d.parent, frappe._dict()).setdefault(d.e_type, [])
ss_earning_map[d.parent][d.e_type] = flt(d.e_modified_amount)
return ss_earning_map
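# Deduction amounts, in the same nested {salary slip name: {deduction type: amount}}
# layout as the earnings map above.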
def get_ss_ded_map(salary_slips):
ss_deductions = frappe.db.sql("""select parent, d_type, d_modified_amount
from `tabSalary Slip Deduction` where parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)
ss_ded_map = {}
for d in ss_deductions:
ss_ded_map.setdefault(d.parent, frappe._dict()).setdefault(d.d_type, [])
ss_ded_map[d.parent][d.d_type] = flt(d.d_modified_amount)
return ss_ded_map | mahabuber/erpnext | erpnext/hr/report/monthly_salary_register/monthly_salary_register.py | Python | agpl-3.0 | 4,082 | 0.038707 |
###############################################################################
# Name: vbscript.py #
# Purpose: Define VBScript syntax for highlighting and other features #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2008 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
FILE: vbscript.py
AUTHOR: Cody Precord
@summary: Lexer configuration module for VBScript.
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: _vbscript.py 63834 2010-04-03 06:04:33Z CJP $"
__revision__ = "$Revision: 63834 $"
#-----------------------------------------------------------------------------#
# Imports
import wx.stc as stc
# Local Imports
import synglob
import syndata
#-----------------------------------------------------------------------------#
#---- Keyword Specifications ----#
VBS_KW = ("addressof alias and as attribute base begin binary boolean byref "
"byte byval call case cdbl cint clng compare const csng cstr "
"currency date decimal declare defbool defbyte defcur defdate defdbl "
"defdec defint deflng defobj defsng defstr defvar dim do double each "
"else elseif empty end enum eqv erase error event exit explicit "
"false for friend function get global gosub goto if imp implements "
"in input integer is len let lib like load lock long loop lset me "
"mid midb mod new next not nothing null object on option optional "
"or paramarray preserve print private property public raiseevent "
"randomize redim rem resume return rset seek select set single "
"static step stop string sub text then time to true type typeof "
"unload until variant wend while with withevents xor")
# Syntax specifications
SYNTAX_ITEMS = [ (stc.STC_B_ASM, 'asm_style'),
(stc.STC_B_BINNUMBER, 'default_style'), # STYLE NEEDED
(stc.STC_B_COMMENT, 'comment_style'),
(stc.STC_B_CONSTANT, 'const_style'),
(stc.STC_B_DATE, 'default_style'), # STYLE NEEDED
(stc.STC_B_DEFAULT, 'default_style'),
(stc.STC_B_ERROR, 'error_style'),
(stc.STC_B_HEXNUMBER, 'number_style'),
(stc.STC_B_IDENTIFIER, 'default_style'),
(stc.STC_B_KEYWORD, 'keyword_style'),
(stc.STC_B_KEYWORD2, 'class_style'), # STYLE NEEDED
(stc.STC_B_KEYWORD3, 'funct_style'), # STYLE NEEDED
(stc.STC_B_KEYWORD4, 'scalar_style'), # STYLE NEEDED
(stc.STC_B_LABEL, 'directive_style'), # STYLE NEEDED
(stc.STC_B_NUMBER, 'number_style'),
(stc.STC_B_OPERATOR, 'operator_style'),
(stc.STC_B_PREPROCESSOR, 'pre_style'),
(stc.STC_B_STRING, 'string_style'),
(stc.STC_B_STRINGEOL, 'stringeol_style')
]
#---- Extra Properties ----#
FOLD = ("fold", "1")
#-----------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
"""SyntaxData object for VbScript"""
def __init__(self, langid):
syndata.SyntaxDataBase.__init__(self, langid)
# Setup
self.SetLexer(stc.STC_LEX_VBSCRIPT)
def GetKeywords(self):
"""Returns Specified Keywords List """
return [(0, VBS_KW),]
def GetSyntaxSpec(self):
"""Syntax Specifications """
return SYNTAX_ITEMS
def GetProperties(self):
"""Returns a list of Extra Properties to set """
return [FOLD]
def GetCommentPattern(self):
"""Returns a list of characters used to comment a block of code """
return [u'\'']
| 163gal/Time-Line | libs/wx/tools/Editra/src/syntax/_vbscript.py | Python | gpl-3.0 | 3,982 | 0.004269 |
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
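# Minimal JSON-RPC 1.1 client for the coin daemon, authenticating with HTTP basic auth.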
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblock(self, hash, verbose=True):
return self.rpc('getblock', [hash, verbose])
def getblockhash(self, index):
return self.rpc('getblockhash', [index])
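# Query the daemon for each height in the configured range and print one block hash per line.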
def get_block_hashes(settings):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
for height in xrange(settings['min_height'], settings['max_height']+1):
hash = rpc.getblockhash(height)
print(hash)
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: linearize-hashes.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 15715
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 319000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
| TheAltcoinBoard/XAB-withoutSecp256k1 | contrib/linearize/linearize-hashes.py | Python | mit | 2,868 | 0.034519 |
#!/usr/bin/env python
#
# Analyse benchio output files
#
# System modules for grabbing data
import sys
import os.path
import re
from glob import glob
import seaborn as sns
# Modules for analysing and visualising data
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
matplotlib.rcParams.update({'font.size': 9})
matplotlib.rcParams.update({'figure.autolayout': True})
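# Parse benchio output files, keep only runs matching the requested local array
# size, then summarise and plot write bandwidth per writer count and striping scheme.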
def main(argv):
resdir = sys.argv[1]
usesize = int(sys.argv[2])
files = get_filelist(resdir, "benchio_")
csvdump = open('csvdump.csv', 'w')
csvdump.write('"Writers","Scheme","Write Bandwidth (MiB/s)"\n')
# Loop over files getting data
resframe_proto = []
for file in files:
infile = open(file, 'r')
resdict = {}
for line in infile:
if re.search('MPI-IO', line):
break
elif re.search('Starting job', line):
tokens = line.split()
resdict['JobID'] = tokens[2]
elif re.search('Running', line):
tokens = line.split()
resdict['Writers'] = int(tokens[2])
elif re.search('Array', line):
tokens = line.split()
x = int(tokens[4])
y = int(tokens[6])
z = int(tokens[8])
resdict['LocalSize'] = (x, y, z)
elif re.search('Global', line):
tokens = line.split()
x = int(tokens[4])
y = int(tokens[6])
z = int(tokens[8])
resdict['GlobalSize'] = (x, y, z)
elif re.search('Total', line):
tokens = line.split()
resdict['TotData'] = float(tokens[5])
infile.close()
infile = open(file, 'r')
timedict = resdict.copy()
for line in infile:
if re.search('HDF5', line):
break
elif re.search('Writing to', line):
tokens = line.split()
nstripe = 0
if re.match('striped', tokens[2]):
timedict['Striping'] = 'Stripe Count = -1'
nstripe = -1
elif re.match('defstriped', tokens[2]):
timedict['Striping'] = 'Stripe Count = 4'
nstripe = 4
elif re.match('unstriped', tokens[2]):
timedict['Striping'] = 'Stripe Count = 1'
nstripe = 1
elif re.match(' time', line):
tokens = line.split()
timedict['Write'] = float(tokens[6])
timedict['File'] = os.path.abspath(file)
timedict['Count'] = 1
resframe_proto.append(timedict)
csvstring = '{0},"SSF -c {1} -s 1m",{2}\n'.format(timedict['Writers'], nstripe, timedict['Write'])
csvdump.write(csvstring)
curstriping = timedict['Striping']
timedict = resdict.copy()
timedict['Striping'] = curstriping
infile.close()
csvdump.close()
resframe = pd.DataFrame(resframe_proto)
print 'Number of valid results files read = ', len(resframe.index)
resframe = resframe[resframe.LocalSize == (usesize, usesize, usesize) ]
print "Summary of all results found:"
print resframe
labels = map(int, resframe['Writers'].unique())
labels.sort()
# Get copy of dataframe with only numeric values
resframe_num = resframe.drop(['File', 'GlobalSize', 'TotData'], 1)
# What stats are we computing on which columns
groupf = {'Write':['min','median','max','mean'], 'Count':'sum'}
# Compute the maximum read and write bandwidths from the data
stats = resframe_num.sort('Writers').groupby(['Writers', 'Striping', 'LocalSize']).agg(groupf)
print "Useful statistics:"
print stats
print stats.to_csv(float_format='%.3f')
fig, ax = plt.subplots()
sns.pointplot(x='Writers', y='Write', data=resframe, hue='Striping', estimator=np.median, scale=0.5)
# sns.stripplot(x='Writers', y='Write', data=resframe, hue='Striping', jitter=True)
ax.set_ylim(ymin=0)
plt.ylabel('Bandwidth / MiB/s')
plt.xlabel('Writers')
plt.legend()
plt.savefig('bandwidth_stats.png')
plt.clf()
sys.exit(0)
def get_filelist(dir, stem):
"""
Get list of date files in the specified directory
"""
files = []
if os.path.exists(dir):
files = glob(os.path.join(dir, stem + '*' ))
files.sort()
else:
sys.stderr.write("Directory does not exist: {1}".format(dir))
sys.exit(1)
return files
if __name__ == "__main__":
main(sys.argv[1:])
| ARCHER-CSE/parallel-io | benchmark/analysis/analyse_benchio_output.py | Python | gpl-3.0 | 4,610 | 0.014967 |
'''
Usage:
dbInputData.py
dbInputData.py -h | --help
dbInputData.py [--debug] [-i INPUT] [-o FILE]
Options:
-h, --help Shows this menu.
-d, --debug Print debug information. This is the most verbose
option.
-i INPUT Input file [default: test.csv]
-o FILE TinyDB database to output to. [default: netspark.json]
'''
import logging
import csv
from tinydb import TinyDB, Query
from docopt import docopt
arguments = docopt(__doc__)
# Set logging level https://www.digitalocean.com/community/tutorials/how-to-use-logging-in-python-3
if arguments['--debug'] == True:
logging.basicConfig(level=logging.DEBUG)
print("Arguments: \n" + str(arguments))
else:
logging.basicConfig(level=logging.INFO)
# Define the Database
DB = TinyDB(arguments['-o'])
logging.info("Set TinyDB database to: " + str(arguments['-o']))
logging.debug("Loaded database: " + str(DB))
# The test file
CSVFILE = arguments['-i']
logging.info("Set CSV input file to: " + str(CSVFILE))
# The Magic (read CSV as dict, form dict with data we care about, dump it into db)
with open(CSVFILE, mode='r') as csvfile:
logging.debug("Attempting load of CSVFILE into dictionary...")
READER = csv.DictReader(csvfile)
logging.debug("csv.DictReader load success")
# Now iterate through every row in the CSVfile and make dictionaries
for row in READER:
logging.debug("Iterating through csv dictionary rows...")
dbdict = {
'hostname': row['SysName'],
'device_type': row['device_type'],
'ipaddr': row['IP_Address'],
'department': row['Department']
}
logging.debug("Made the following dbdict: " + str(dbdict))
Find = Query()
logging.debug("Begin searching for IP Address using dbdict['ipaddr']")
ifexists = DB.contains(Find.ipaddr == dbdict['ipaddr'])
if ifexists is True:
logging.debug("Found match for IP. Updating values...")
DB.update({'hostname': dbdict['hostname']}, Find.ipaddr == dbdict['ipaddr'])
DB.update({'device_type': dbdict['device_type']}, Find.ipaddr == dbdict['ipaddr'])
DB.update({'department': dbdict['department']}, Find.ipaddr == dbdict['ipaddr'])
logging.debug("Updated DB with values: " + str(dbdict))
else:
logging.debug("No match found for IP. Adding new DB entry...")
DB.insert(dbdict)
logging.debug("Added new values: " + str(dbdict))
| admiralspark/NetSpark-Scripts | Example_Scripts/TinyDB/dbInputData.py | Python | gpl-3.0 | 2,573 | 0.002721 |
from scout.parse.variant.rank_score import parse_rank_score
from scout.parse.variant.variant import parse_variant
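# Rank scores arrive as "<family_id>:<score>" strings (genmod format); these tests
# cover parsing the string directly and via the RankScore INFO field during variant parsing.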
def test_parse_rank_score():
## GIVEN a rank score string on genmod format
rank_scores_info = "123:10"
variant_score = 10.0
family_id = "123"
## WHEN parsing the rank score
parsed_rank_score = parse_rank_score(rank_scores_info, family_id)
## THEN assert that the correct rank score is parsed
assert variant_score == parsed_rank_score
def test_parse_rank_score_no_score():
## GIVEN a empty rank score string
rank_scores_info = ""
family_id = "123"
## WHEN parsing the rank score
parsed_rank_score = parse_rank_score(rank_scores_info, family_id)
## THEN assert that None is returned
assert parsed_rank_score == None
def test_parse_rank_score_variant(cyvcf2_variant, case_obj, scout_config):
## GIVEN a variant
rank_score = 15
case_id = case_obj["_id"]
## WHEN adding a rank score string to the INFO field
rank_score_str = f"{case_id}:{rank_score}"
cyvcf2_variant.INFO["RankScore"] = rank_score_str
## WHEN parsing the variant
var_info = parse_variant(cyvcf2_variant, case_obj)
## THEN assert that the correct score is parsed
assert var_info["rank_score"] == rank_score
| Clinical-Genomics/scout | tests/parse/test_parse_rank_score.py | Python | bsd-3-clause | 1,288 | 0.00854 |
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler weights.
"""
from nova import context
from nova import exception
from nova.openstack.common.fixture import mockpatch
from nova.scheduler import weights
from nova import test
from nova.tests import matchers
from nova.tests.scheduler import fakes
class TestWeighedHost(test.NoDBTestCase):
def test_dict_conversion(self):
host_state = fakes.FakeHostState('somehost', None, {})
host = weights.WeighedHost(host_state, 'someweight')
expected = {'weight': 'someweight',
'host': 'somehost'}
self.assertThat(host.to_dict(), matchers.DictMatches(expected))
def test_all_weighers(self):
classes = weights.all_weighers()
class_names = [cls.__name__ for cls in classes]
self.assertEqual(len(classes), 2)
self.assertIn('RAMWeigher', class_names)
self.assertIn('MetricsWeigher', class_names)
class RamWeigherTestCase(test.NoDBTestCase):
def setUp(self):
super(RamWeigherTestCase, self).setUp()
self.useFixture(mockpatch.Patch(
'nova.db.compute_node_get_all',
return_value=fakes.COMPUTE_NODES))
self.host_manager = fakes.FakeHostManager()
self.weight_handler = weights.HostWeightHandler()
self.weight_classes = self.weight_handler.get_matching_classes(
['nova.scheduler.weights.ram.RAMWeigher'])
def _get_weighed_host(self, hosts, weight_properties=None):
if weight_properties is None:
weight_properties = {}
return self.weight_handler.get_weighed_objects(self.weight_classes,
hosts, weight_properties)[0]
def _get_all_hosts(self):
ctxt = context.get_admin_context()
return self.host_manager.get_all_host_states(ctxt)
def test_default_of_spreading_first(self):
hostinfo_list = self._get_all_hosts()
# host1: free_ram_mb=512
# host2: free_ram_mb=1024
# host3: free_ram_mb=3072
# host4: free_ram_mb=8192
# so, host4 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 1.0)
self.assertEqual(weighed_host.obj.host, 'host4')
def test_ram_filter_multiplier1(self):
self.flags(ram_weight_multiplier=0.0)
hostinfo_list = self._get_all_hosts()
# host1: free_ram_mb=512
# host2: free_ram_mb=1024
# host3: free_ram_mb=3072
# host4: free_ram_mb=8192
# We do not know the host, all have same weight.
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 0.0)
def test_ram_filter_multiplier2(self):
self.flags(ram_weight_multiplier=2.0)
hostinfo_list = self._get_all_hosts()
# host1: free_ram_mb=512
# host2: free_ram_mb=1024
# host3: free_ram_mb=3072
# host4: free_ram_mb=8192
# so, host4 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 1.0 * 2)
self.assertEqual(weighed_host.obj.host, 'host4')
def test_ram_filter_negative(self):
self.flags(ram_weight_multiplier=1.0)
hostinfo_list = self._get_all_hosts()
host_attr = {'id': 100, 'memory_mb': 8192, 'free_ram_mb': -512}
host_state = fakes.FakeHostState('negative', 'negative', host_attr)
hostinfo_list = list(hostinfo_list) + [host_state]
# host1: free_ram_mb=512
# host2: free_ram_mb=1024
# host3: free_ram_mb=3072
# host4: free_ram_mb=8192
# negativehost: free_ram_mb=-512
# so, host4 should win
weights = self.weight_handler.get_weighed_objects(self.weight_classes,
hostinfo_list, {})
weighed_host = weights[0]
self.assertEqual(weighed_host.weight, 1)
self.assertEqual(weighed_host.obj.host, "host4")
# and negativehost should lose
weighed_host = weights[-1]
self.assertEqual(weighed_host.weight, 0)
self.assertEqual(weighed_host.obj.host, "negative")
class MetricsWeigherTestCase(test.NoDBTestCase):
def setUp(self):
super(MetricsWeigherTestCase, self).setUp()
self.useFixture(mockpatch.Patch(
'nova.db.compute_node_get_all',
return_value=fakes.COMPUTE_NODES_METRICS))
self.host_manager = fakes.FakeHostManager()
self.weight_handler = weights.HostWeightHandler()
self.weight_classes = self.weight_handler.get_matching_classes(
['nova.scheduler.weights.metrics.MetricsWeigher'])
def _get_weighed_host(self, hosts, setting, weight_properties=None):
if not weight_properties:
weight_properties = {}
self.flags(weight_setting=setting, group='metrics')
return self.weight_handler.get_weighed_objects(self.weight_classes,
hosts, weight_properties)[0]
def _get_all_hosts(self):
ctxt = context.get_admin_context()
return self.host_manager.get_all_host_states(ctxt)
def _do_test(self, settings, expected_weight, expected_host):
hostinfo_list = self._get_all_hosts()
weighed_host = self._get_weighed_host(hostinfo_list, settings)
self.assertEqual(weighed_host.weight, expected_weight)
self.assertEqual(weighed_host.obj.host, expected_host)
def test_single_resource(self):
# host1: foo=512
# host2: foo=1024
# host3: foo=3072
# host4: foo=8192
# so, host4 should win:
setting = ['foo=1']
self._do_test(setting, 1.0, 'host4')
def test_multiple_resource(self):
# host1: foo=512, bar=1
# host2: foo=1024, bar=2
# host3: foo=3072, bar=1
# host4: foo=8192, bar=0
# so, host2 should win:
setting = ['foo=0.0001', 'bar=1']
self._do_test(setting, 1.0, 'host2')
def test_single_resource_negative_ratio(self):
# host1: foo=512
# host2: foo=1024
# host3: foo=3072
# host4: foo=8192
# so, host1 should win:
setting = ['foo=-1']
self._do_test(setting, 1.0, 'host1')
def test_multiple_resource_missing_ratio(self):
# host1: foo=512, bar=1
# host2: foo=1024, bar=2
# host3: foo=3072, bar=1
# host4: foo=8192, bar=0
# so, host4 should win:
setting = ['foo=0.0001', 'bar']
self._do_test(setting, 1.0, 'host4')
def test_multiple_resource_wrong_ratio(self):
# host1: foo=512, bar=1
# host2: foo=1024, bar=2
# host3: foo=3072, bar=1
# host4: foo=8192, bar=0
# so, host4 should win:
setting = ['foo=0.0001', 'bar = 2.0t']
self._do_test(setting, 1.0, 'host4')
def _check_parsing_result(self, weigher, setting, results):
self.flags(weight_setting=setting, group='metrics')
weigher._parse_setting()
self.assertEqual(len(weigher.setting), len(results))
for item in results:
self.assertIn(item, weigher.setting)
def test_parse_setting(self):
weigher = self.weight_classes[0]()
self._check_parsing_result(weigher,
['foo=1'],
[('foo', 1.0)])
self._check_parsing_result(weigher,
['foo=1', 'bar=-2.1'],
[('foo', 1.0), ('bar', -2.1)])
self._check_parsing_result(weigher,
['foo=a1', 'bar=-2.1'],
[('bar', -2.1)])
self._check_parsing_result(weigher,
['foo', 'bar=-2.1'],
[('bar', -2.1)])
self._check_parsing_result(weigher,
['=5', 'bar=-2.1'],
[('bar', -2.1)])
def test_metric_not_found_required(self):
setting = ['foo=1', 'zot=2']
self.assertRaises(exception.ComputeHostMetricNotFound,
self._do_test,
setting,
8192,
'host4')
def test_metric_not_found_non_required(self):
# host1: foo=512, bar=1
# host2: foo=1024, bar=2
# host3: foo=3072, bar=1
# host4: foo=8192, bar=0
# host5: foo=768, bar=0, zot=1
# host6: foo=2048, bar=0, zot=2
# so, host5 should win:
self.flags(required=False, group='metrics')
setting = ['foo=0.0001', 'zot=-1']
self._do_test(setting, 1.0, 'host5')
| jumpstarter-io/nova | nova/tests/scheduler/test_weights.py | Python | apache-2.0 | 9,359 | 0.000427 |
from __future__ import unicode_literals
from django.apps import AppConfig
class AnnonceConfig(AppConfig):
name = 'annonce'
| firasbenmakhlouf/JobLookup | annonce/apps.py | Python | mit | 130 | 0 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import math
import os
from datetime import date
# yapf: disable
class InPsight:
# POV-Ray defines
defines = {}
defines['Shadows'] = 'false'
defines['Background_Color'] = '<0.6,0.6,0.6>'
defines['Output_File_Type'] = 'N'
defines['Output_Alpha'] = 'true'
defines['Light_Color'] = '<1,1,1>'
defines['Filename'] = 'inpsight'
defines['Filepath'] = os.getcwd()
defines['Antialias'] = 'true'
defines['Antialias_Threshold'] = '0.1'
# Molecule geometry
atoms = [] # (Z,x,y,z,R,r,g,b,t) in bohr
bonds = [] # (x1,y1,z1,R1,x2,y2,z2,R2,r,g,b,t)
# Molecular geometry defines
colors = []
radii = []
radial_scale = 0.25
bond_width = 0.2 # bohr
bohr_per_ang = 1.8897161646320724
bonding_alpha = 0.65 # Used to select/reject bonds via sum of vDW radii
# View defines (high-level)
azimuth = 0.0
elevation = 0.0
zoom = 0.5
height = 900
width = 1200
# Camera positions (low-level)
location = [1.0,0.0,0.0]
up = [0.0,0.75,0.0]
right = [1.0,0.0,0.0]
sky = [0.0,-1.0,0.0]
look_at = [0.0,0.0,0.0]
light = [1.0,0.0,0.0]
light_color = [0.6,0.6,0.6]
# Standard Jmol colors, 256-based
colors.append([0,0,0])
colors.append([255,255,255])
colors.append([217,255,255])
colors.append([204,128,255])
colors.append([194,255,0])
colors.append([255,181,181])
colors.append([144,144,144])
colors.append([48,80,248])
colors.append([255,13,13])
colors.append([144,224,80])
colors.append([179,227,245])
colors.append([171,92,242])
colors.append([138,255,0])
colors.append([191,166,166])
colors.append([240,200,160])
colors.append([255,128,0])
colors.append([255,255,48])
colors.append([31,240,31])
colors.append([128,209,227])
colors.append([143,64,212])
colors.append([61,255,0])
colors.append([230,230,230])
colors.append([191,194,199])
colors.append([166,166,171])
colors.append([138,153,199])
colors.append([156,122,199])
colors.append([224,102,51])
colors.append([240,144,160])
colors.append([80,208,80])
colors.append([200,128,51])
colors.append([125,128,176])
colors.append([194,143,143])
colors.append([102,143,143])
colors.append([189,128,227])
colors.append([255,161,0])
colors.append([166,41,41])
colors.append([92,184,209])
colors.append([112,46,176])
colors.append([0,255,0])
colors.append([148,255,255])
colors.append([148,224,224])
colors.append([115,194,201])
colors.append([84,181,181])
colors.append([59,158,158])
colors.append([36,143,143])
colors.append([10,125,140])
colors.append([0,105,133])
colors.append([192,192,192])
colors.append([255,217,143])
colors.append([166,117,115])
colors.append([102,128,128])
colors.append([158,99,181])
colors.append([212,122,0])
colors.append([148,0,148])
colors.append([66,158,176])
colors.append([87,23,143])
colors.append([0,201,0])
colors.append([112,212,255])
colors.append([255,255,199])
colors.append([217,255,199])
colors.append([199,255,199])
colors.append([163,255,199])
colors.append([143,255,199])
colors.append([97,255,199])
colors.append([69,255,199])
colors.append([48,255,199])
colors.append([31,255,199])
colors.append([0,255,156])
colors.append([0,230,117])
colors.append([0,212,82])
colors.append([0,191,56])
colors.append([0,171,36])
colors.append([77,194,255])
colors.append([77,166,255])
colors.append([33,148,214])
colors.append([38,125,171])
colors.append([38,102,150])
colors.append([23,84,135])
colors.append([208,208,224])
colors.append([255,209,35])
colors.append([184,184,208])
colors.append([166,84,77])
colors.append([87,89,97])
colors.append([158,79,181])
colors.append([171,92,0])
colors.append([117,79,69])
colors.append([66,130,150])
colors.append([66,0,102])
colors.append([0,125,0])
colors.append([112,171,250])
colors.append([0,186,255])
colors.append([0,161,255])
colors.append([0,143,255])
colors.append([0,128,255])
colors.append([0,107,255])
colors.append([84,92,242])
colors.append([120,92,227])
colors.append([138,79,227])
colors.append([161,54,212])
colors.append([179,31,212])
colors.append([179,31,186])
colors.append([179,13,166])
colors.append([189,13,135])
colors.append([199,0,102])
colors.append([204,0,89])
colors.append([209,0,79])
colors.append([217,0,69])
colors.append([224,0,56])
colors.append([230,0,46])
colors.append([235,0,38])
# Approximate vDW radii in angstrom
radii.append(2.0)
radii.append(1.001)
radii.append(1.012)
radii.append(0.825)
radii.append(1.408)
radii.append(1.485)
radii.append(1.452)
radii.append(1.397)
radii.append(1.342)
radii.append(1.287)
radii.append(1.243)
radii.append(1.144)
radii.append(1.364)
radii.append(1.639)
radii.append(1.716)
radii.append(1.705)
radii.append(1.683)
radii.append(1.639)
radii.append(1.595)
radii.append(1.485)
radii.append(1.474)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.562)
radii.append(1.650)
radii.append(1.727)
radii.append(1.760)
radii.append(1.771)
radii.append(1.749)
radii.append(1.727)
radii.append(1.628)
radii.append(1.606)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.639)
radii.append(1.672)
radii.append(1.804)
radii.append(1.881)
radii.append(1.892)
radii.append(1.892)
radii.append(1.881)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
radii.append(2.0)
def __init__(self,molecule):
self.molecule = molecule
self.molecule.update_geometry()
self.update_geometry()
def update_geometry(self):
# Atoms
natom = self.molecule.natom()
self.atoms = []
for k in range(0,natom):
x = self.molecule.x(k)
y = self.molecule.y(k)
z = self.molecule.z(k)
Z = self.molecule.Z(k)
atom = Z, x, y, z, self.radial_scale * self.bohr_per_ang * self.radii[Z], self.colors[Z][0] / 256.0, \
self.colors[Z][1] / 256.0, self.colors[Z][2] / 256.0, 0.0
self.atoms.append(atom)
# Bonds
self.bonds = []
for k in range(1,natom):
for l in range (0, k):
Z1 = self.atoms[k][0]
Z2 = self.atoms[l][0]
R1 = self.bohr_per_ang*self.radii[Z1]
R2 = self.bohr_per_ang*self.radii[Z2]
x1 = self.atoms[k][1]
y1 = self.atoms[k][2]
z1 = self.atoms[k][3]
x2 = self.atoms[l][1]
y2 = self.atoms[l][2]
z2 = self.atoms[l][3]
r1 = self.atoms[k][5]
g1 = self.atoms[k][6]
b1 = self.atoms[k][7]
t1 = self.atoms[k][8]
r2 = self.atoms[l][5]
g2 = self.atoms[l][6]
b2 = self.atoms[l][7]
t2 = self.atoms[l][8]
R = math.sqrt((x1-x2)*(x1-x2) +
(y1-y2)*(y1-y2) +
(z1-z2)*(z1-z2))
if (R < self.bonding_alpha*(R1 + R2)):
omega = R2 / (R1 + R2)
xc = omega * (x1 - x2) + x2
yc = omega * (y1 - y2) + y2
zc = omega * (z1 - z2) + z2
bond1 = x1,y1,z1,self.bond_width, xc,yc,zc,self.bond_width,r1,g1,b1,t1
bond2 = x2,y2,z2,self.bond_width, xc,yc,zc,self.bond_width,r2,g2,b2,t2
self.bonds.append(bond1)
self.bonds.append(bond2)
def set_define(self, key, value):
self.defines[key] = value
def set_color(self, Z, color):
self.colors[Z] = color
def set_radius(self, Z, radius):
self.radii[Z] = radius
def position_camera(self):
xc = self.molecule.center_of_mass()
self.look_at = [xc[0], xc[1], xc[2]]
Rmax = 0.0
natom = self.molecule.natom()
for k in range(0,natom):
x = [self.molecule.x(k), self.molecule.y(k), self.molecule.z(k)]
R = math.sqrt((x[0] - xc[0])*(x[0] - xc[0]) +
(x[1] - xc[1])*(x[1] - xc[1]) +
(x[2] - xc[2])*(x[2] - xc[2]))
if R > Rmax:
Rmax = R
Rmax = Rmax / self.zoom
if (self.width < self.height):
self.right = [Rmax, 0.0, 0.0]
self.up = [0.0, self.right[0]*self.height/self.width, 0.0]
else:
self.up = [0.0, Rmax, 0.0]
self.right = [self.up[1]*self.width/self.height, 0.0, 0.0]
phi = math.pi*(-self.azimuth)/180.0
theta = math.pi*(90.0 - self.elevation)/180.0
delta = [Rmax*math.cos(phi)*math.sin(theta), Rmax*math.sin(phi)*math.sin(theta), Rmax*math.cos(theta)]
self.location = [xc[0] + delta[0], xc[1] + delta[1], xc[2] + delta[2]]
phi = math.pi*(-(self.azimuth + 30.0))/180.0
theta = math.pi*(90.0 - (self.elevation + 30.0))/180.0
delta = [Rmax*math.cos(phi)*math.sin(theta), Rmax*math.sin(phi)*math.sin(theta), Rmax*math.cos(theta)]
self.light = [xc[0] + delta[0], xc[1] + delta[1], xc[2] + delta[2]]
def set_view(self,azimuth, elevation, zoom = 0.7):
self.azimuth = azimuth
self.elevation = elevation
self.zoom = zoom
self.position_camera()
def set_size(self, width,height):
self.width = width
self.height = height
def set_camera(self, location, sky, up, right, look_at, light, light_color):
self.location = location
self.sky = sky
self.up = up
self.right = right
self.look_at = look_at
self.light = light
self.light_color = light_color
def save_molecule(self, filename):
if (filename != ''):
self.defines['Filename'] = filename
ini_filename = self.defines['Filepath'] + '/' + self.defines['Filename'] + '.pov.ini'
pov_filename = self.defines['Filepath'] + '/' + self.defines['Filename'] + '.pov'
png_filename = self.defines['Filepath'] + '/' + self.defines['Filename'] + '.png'
pov_file = self.defines['Filename'] + '.pov'
png_file = self.defines['Filename'] + '.png'
# Write the pov.ini file
fh = open(ini_filename,'w')
fh.write('; InPsight: visualization in Psi4\n')
fh.write('; by Rob Parrish\n')
fh.write('; .pov.ini file\n')
fh.write('; Created %s\n' % str(date.today()))
fh.write('\n')
fh.write('Input_File_Name=%s\n' % pov_file)
fh.write('Output_to_File=true\n')
fh.write('Output_File_Type=%s\n' % self.defines['Output_File_Type'])
fh.write('Output_File_Name=%s\n' % png_file)
fh.write('Height=%s\n' % str(self.height))
fh.write('Width=%s\n' % str(self.width))
fh.write('Output_Alpha=%s\n' % self.defines['Output_Alpha'])
fh.write('Antialias=%s\n' % self.defines['Antialias'])
fh.write('Antialias_Threshold=%s\n' % self.defines['Antialias_Threshold'])
fh.write('Display=true\n')
fh.write('Warning_Level=5\n')
fh.write('Verbose=false\n')
fh.close()
# Write the pov file
fh = open(pov_filename, 'w')
fh.write('// InPsight: visualization in Psi4\n')
fh.write('// by Rob Parrish\n')
fh.write('// .pov file (adopted from Jmol)\n')
fh.write('// Created %s\n' % str(date.today()))
fh.write('#declare Width = %s;\n' % str(self.width))
fh.write('#declare Height = %s;\n' % str(self.height))
fh.write('#declare Shadows = %s; \n' % self.defines['Shadows'])
fh.write('\n')
fh.write('camera{\n')
fh.write(' orthographic\n')
fh.write(' location < %s, %s, %s>\n' % (str(self.location[0]),str(self.location[1]),str(self.location[2]) ))
fh.write(' sky < %s, %s, %s>\n' % (str(self.sky[0]), str(self.sky[1]), str(self.sky[2]) ))
fh.write(' up < %s, %s, %s>\n' % (str(self.up[0]), str(self.up[1]), str(self.up[2]) ))
fh.write(' right < %s, %s, %s>\n' % (str(self.right[0]), str(self.right[1]), str(self.right[2]) ))
fh.write(' look_at < %s, %s, %s>\n' % (str(self.look_at[0]), str(self.look_at[1]), str(self.look_at[2]) ))
fh.write('}\n')
fh.write('\n')
fh.write('background { color rgb %s }\n' % self.defines['Background_Color'])
fh.write('light_source { <%s,%s,%s> rgb <%s,%s,%s> }\n'
% (str(self.light[0]),str(self.light[1]),str(self.light[2]),
str(self.light_color[0]),str(self.light_color[1]),str(self.light_color[2])))
fh.write('\n')
fh.write('// ***********************************************\n')
fh.write('// macros for atom/bond shapes\n')
fh.write('// ***********************************************\n')
fh.write('#macro check_shadow()\n')
fh.write(' #if (!Shadows)\n')
fh.write(' no_shadow \n')
fh.write(' #end\n')
fh.write('#end\n')
fh.write('\n')
fh.write('#macro translucentFinish(T)\n')
fh.write(' #local shineFactor = T;\n')
fh.write(' #if (T <= 0.25)\n')
fh.write(' #declare shineFactor = (1.0-4*T);\n')
fh.write(' #end\n')
fh.write(' #if (T > 0.25)\n')
fh.write(' #declare shineFactor = 0;\n')
fh.write(' #end\n')
fh.write(' finish {\n')
fh.write(' ambient 0.45\n')
fh.write(' diffuse 0.84\n')
fh.write(' specular 0.22\n')
fh.write(' roughness .00001\n')
fh.write(' metallic shineFactor\n')
fh.write(' phong 0.9*shineFactor\n')
fh.write(' phong_size 120*shineFactor\n')
fh.write('}#end\n')
fh.write('\n')
fh.write('#macro a(X,Y,Z,RADIUS,R,G,B,T)\n')
fh.write(' sphere{<X,Y,Z>,RADIUS\n')
fh.write(' pigment{rgbt<R,G,B,T>}\n')
fh.write(' translucentFinish(T)\n')
fh.write(' check_shadow()}\n')
fh.write('#end\n')
fh.write('\n')
fh.write('#macro b(X1,Y1,Z1,RADIUS1,X2,Y2,Z2,RADIUS2,R,G,B,T)\n')
fh.write(' cone{<X1,Y1,Z1>,RADIUS1,<X2,Y2,Z2>,RADIUS2\n')
fh.write(' pigment{rgbt<R,G,B,T>}\n')
fh.write(' translucentFinish(T)\n')
fh.write(' check_shadow()}\n')
fh.write('#end \n')
for bond in self.bonds:
fh.write('b(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\n' %
(str(bond[0]),str(bond[1]),str(bond[2]),str(bond[3]),
str(bond[4]),str(bond[5]),str(bond[6]),str(bond[7]),
str(bond[8]),str(bond[9]),str(bond[10]),str(bond[11])))
for atom in self.atoms:
fh.write('a(%s,%s,%s,%s,%s,%s,%s,%s)\n' %
(str(atom[1]),str(atom[2]),str(atom[3]),str(atom[4]),
str(atom[5]),str(atom[6]),str(atom[7]),str(atom[8])))
fh.close()
def save_density(self,filename='rho',overlap = 2.0,n = [40,40,40],caxis = [0.0,1.0]):
if (filename != ''):
self.defines['Filename'] = filename
# grid = GridProp()
# GridProp seems to have been retired
grid = None
grid.set_n(n[0],n[1],n[2])
grid.set_caxis(caxis[0],caxis[1])
grid.set_filename(self.defines['Filename'])
grid.add('RHO')
grid.compute()
df3_file = filename + '.RHO.df3'
l = [grid.get_l(0),grid.get_l(1),grid.get_l(2)]
o = [grid.get_o(0),grid.get_o(1),grid.get_o(2)]
ini_filename = self.defines['Filepath'] + '/' + self.defines['Filename'] + '.pov.ini'
pov_filename = self.defines['Filepath'] + '/' + self.defines['Filename'] + '.pov'
png_filename = self.defines['Filepath'] + '/' + self.defines['Filename'] + '.png'
pov_file = self.defines['Filename'] + '.pov'
png_file = self.defines['Filename'] + '.png'
# Write the pov.ini file
fh = open(ini_filename,'w')
fh.write('; InPsight: visualization in Psi4\n')
fh.write('; by Rob Parrish\n')
fh.write('; .pov.ini file\n')
fh.write('; Created %s\n' % str(date.today()))
fh.write('\n')
fh.write('Input_File_Name=%s\n' % pov_file)
fh.write('Output_to_File=true\n')
fh.write('Output_File_Type=%s\n' % self.defines['Output_File_Type'])
fh.write('Output_File_Name=%s\n' % png_file)
fh.write('Height=%s\n' % str(self.height))
fh.write('Width=%s\n' % str(self.width))
fh.write('Output_Alpha=%s\n' % self.defines['Output_Alpha'])
fh.write('Antialias=%s\n' % self.defines['Antialias'])
fh.write('Antialias_Threshold=%s\n' % self.defines['Antialias_Threshold'])
fh.write('Display=true\n')
fh.write('Warning_Level=5\n')
fh.write('Verbose=false\n')
fh.close()
# Write the pov file
fh = open(pov_filename, 'w')
fh.write('// InPsight: visualization in Psi4\n')
fh.write('// by Rob Parrish\n')
fh.write('// .pov file (adopted from Jmol)\n')
fh.write('// Created %s\n' % str(date.today()))
fh.write('#declare Shadows = %s; \n' % self.defines['Shadows'])
fh.write('\n')
fh.write('camera{\n')
fh.write(' orthographic\n')
fh.write(' location < %s, %s, %s>\n' % (str(self.location[0]),str(self.location[1]),str(self.location[2]) ))
fh.write(' sky < %s, %s, %s>\n' % (str(self.sky[0]), str(self.sky[1]), str(self.sky[2]) ))
fh.write(' up < %s, %s, %s>\n' % (str(self.up[0]), str(self.up[1]), str(self.up[2]) ))
fh.write(' right < %s, %s, %s>\n' % (str(self.right[0]), str(self.right[1]), str(self.right[2]) ))
fh.write(' look_at < %s, %s, %s>\n' % (str(self.look_at[0]), str(self.look_at[1]), str(self.look_at[2]) ))
fh.write('}\n')
fh.write('\n')
fh.write('background { color rgb %s }\n' % self.defines['Background_Color'])
fh.write('light_source { <%s,%s,%s> rgb <%s,%s,%s> }\n'
% (str(self.light[0]),str(self.light[1]),str(self.light[2]),
str(self.light_color[0]),str(self.light_color[1]),str(self.light_color[2])))
fh.write('\n')
fh.write('// ***********************************************\n')
fh.write('// macros for atom/bond shapes\n')
fh.write('// ***********************************************\n')
fh.write('#macro check_shadow()\n')
fh.write(' #if (!Shadows)\n')
fh.write(' no_shadow \n')
fh.write(' #end\n')
fh.write('#end\n')
fh.write('\n')
fh.write('#macro translucentFinish(T)\n')
fh.write(' #local shineFactor = T;\n')
fh.write(' #if (T <= 0.25)\n')
fh.write(' #declare shineFactor = (1.0-4*T);\n')
fh.write(' #end\n')
fh.write(' #if (T > 0.25)\n')
fh.write(' #declare shineFactor = 0;\n')
fh.write(' #end\n')
fh.write(' finish {\n')
fh.write(' ambient 0.45\n')
fh.write(' diffuse 0.84\n')
fh.write(' specular 0.22\n')
fh.write(' roughness .00001\n')
fh.write(' metallic shineFactor\n')
fh.write(' phong 0.9*shineFactor\n')
fh.write(' phong_size 120*shineFactor\n')
fh.write('}#end\n')
fh.write('\n')
fh.write('#macro a(X,Y,Z,RADIUS,R,G,B,T)\n')
fh.write(' sphere{<X,Y,Z>,RADIUS\n')
fh.write(' pigment{rgbt<R,G,B,T>}\n')
fh.write(' translucentFinish(T)\n')
fh.write(' check_shadow()}\n')
fh.write('#end\n')
fh.write('\n')
fh.write('#macro b(X1,Y1,Z1,RADIUS1,X2,Y2,Z2,RADIUS2,R,G,B,T)\n')
fh.write(' cone{<X1,Y1,Z1>,RADIUS1,<X2,Y2,Z2>,RADIUS2\n')
fh.write(' pigment{rgbt<R,G,B,T>}\n')
fh.write(' translucentFinish(T)\n')
fh.write(' check_shadow()}\n')
fh.write('#end \n')
for bond in self.bonds:
fh.write('b(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\n' %
(str(bond[0]),str(bond[1]),str(bond[2]),str(bond[3]),
str(bond[4]),str(bond[5]),str(bond[6]),str(bond[7]),
str(bond[8]),str(bond[9]),str(bond[10]),str(bond[11])))
for atom in self.atoms:
fh.write('a(%s,%s,%s,%s,%s,%s,%s,%s)\n' %
(str(atom[1]),str(atom[2]),str(atom[3]),str(atom[4]),
str(atom[5]),str(atom[6]),str(atom[7]),str(atom[8])))
fh.close()
# yapf: enable
| psi4/psi4 | psi4/driver/p4util/inpsight.py | Python | lgpl-3.0 | 23,279 | 0.016624 |
"""
This module models the host variant call object.
"""
import json
import logging
import os
import string
from cutlass.iHMPSession import iHMPSession
from cutlass.Base import Base
from cutlass.aspera import aspera
from cutlass.Util import *
# pylint: disable=W0703, C1801
# Create a module logger named after the module
module_logger = logging.getLogger(__name__)
# Add a NullHandler for the case if no logging is configured by the application
module_logger.addHandler(logging.NullHandler())
class HostVariantCall(Base):
"""
The class models host variant call data for the iHMP project. This class
contains all the fields required to save a HostVariantCall object to OSDF.
Attributes:
namespace (str): The namespace this class will use in the OSDF instance
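
    Example (a minimal sketch; assumes the usual Base class setters such as
    links, and uses placeholder values):

        call = HostVariantCall()
        call.study = "prediabetes"
        call.format = "vcf"
        call.reference = "Homo_sapiens assembly19"
        call.links = {"computed_from": ["<host_wgs_raw_seq_set id>"]}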
"""
namespace = "ihmp"
aspera_server = "aspera2.ihmpdcc.org"
def __init__(self, *args, **kwargs):
"""
Constructor for the HostVariantCall class. This initializes
the fields specific to the class, and inherits from the Base class.
Args:
None
"""
self.logger = logging.getLogger(self.__module__ + '.' + self.__class__.__name__)
self.logger.addHandler(logging.NullHandler())
# These are common to all objects
self._id = None
self._version = None
self._links = {}
self._tags = []
# These are particular to HostVariantCall objects
self._checksums = None
self._comment = None
self._date = None
self._format = None
self._local_file = None
self._reference = None
self._size = None
self._study = None
self._subtype = None
self._urls = ['']
self._variant_calling_process = None
# Optional properties
self._format_doc = None
self._private_files = None
self._sop = None
super(HostVariantCall, self).__init__(*args, **kwargs)
def validate(self):
"""
Validates the current object's data/JSON against the current schema
in the OSDF instance for that specific object. All required fields
for that specific object must be present.
Args:
None
Returns:
A list of strings, where each string is the error that the
validation raised during OSDF validation
"""
self.logger.debug("In validate.")
document = self._get_raw_doc()
session = iHMPSession.get_session()
self.logger.info("Got iHMP session.")
(valid, error_message) = session.get_osdf().validate_node(document)
problems = []
if not valid:
self.logger.info("Validation did not succeed for %s.", __name__)
problems.append(error_message)
if self._private_files:
self.logger.info("User specified the files are private.")
else:
self.logger.info("Data is NOT private, so check that local_file is set.")
if self._local_file is None:
problems.append("Local file is not yet set.")
elif not os.path.isfile(self._local_file):
problems.append("Local file does not point to an actual file.")
if 'computed_from' not in self._links.keys():
problems.append("Must add a 'computed_from' link to a host_wgs_raw_seq_set.")
self.logger.debug("Number of validation problems: %s.", len(problems))
return problems
def is_valid(self):
"""
Validates the current object's data/JSON against the current schema
in the OSDF instance for the specific object. However, unlike
validates(), this method does not provide exact error messages,
it states if the validation was successful or not.
Args:
None
Returns:
True if the data validates, False if the current state of
fields in the instance do not validate with the OSDF instance
"""
self.logger.debug("In is_valid.")
problems = self.validate()
valid = True
if len(problems):
self.logger.error("There were %s problems.", str(len(problems)))
valid = False
self.logger.debug("Valid? %s", str(valid))
return valid
@property
def checksums(self):
"""
str: One or more checksums used to ensure file integrity.
"""
self.logger.debug("In 'checksums' getter.")
return self._checksums
@checksums.setter
@enforce_dict
def checksums(self, checksums):
"""
The setter for the checksum data.
Args:
checksums (dict): The checksums for the data file.
Returns:
None
"""
self.logger.debug("In 'checksums' setter.")
self._checksums = checksums
@property
def comment(self):
"""
str: Free-text comment.
"""
self.logger.debug("In 'comment' getter.")
return self._comment
@comment.setter
@enforce_string
def comment(self, comment):
"""
The setter for the comment field. The comment must be a string,
and less than 512 characters.
Args:
comment (str): The new comment to add to the string.
Returns:
None
"""
self.logger.debug("In 'comment' setter.")
self._comment = comment
@property
def date(self):
"""
        str: Date on which the output was generated.
"""
self.logger.debug("In 'date' getter.")
return self._date
@date.setter
@enforce_string
@enforce_past_date
def date(self, date):
"""
        The date on which the output was generated. The date
must be in the past.
Args:
date (str): The date.
Returns:
None
"""
self.logger.debug("In 'date' setter.")
self._date = date
@property
def format(self):
"""
        str: The file format of the variant call file.
"""
self.logger.debug("In 'format' getter.")
return self._format
@format.setter
@enforce_string
def format(self, format_str):
"""
        The setter for the format. This must be either 'vcf' or 'txt'.
Args:
format_str (str): The new format string for the current object.
Returns:
None
"""
self.logger.debug("In 'format' setter.")
formats = ["vcf", "txt"]
if format_str in formats:
self._format = format_str
else:
raise Exception("Format must be either vcf or txt.")
@property
def format_doc(self):
"""
str: URL for documentation of file format.
"""
self.logger.debug("In 'format_doc' getter.")
return self._format_doc
@format_doc.setter
@enforce_string
def format_doc(self, format_doc):
"""
The setter for the file format documentation URL.
Args:
format_doc (str): The new format_doc for the current object.
Returns:
None
"""
self.logger.debug("In 'format_doc' setter.")
self._format_doc = format_doc
@property
def local_file(self):
"""
        str: Path to the local file to upload to the server.
"""
self.logger.debug("In 'local_file' getter.")
return self._local_file
@local_file.setter
@enforce_string
def local_file(self, local_file):
"""
The setter for the local file.
Args:
            local_file (str): The path to the local file that should
be uploaded to the server.
Returns:
None
"""
self.logger.debug("In 'local_file' setter.")
self._local_file = local_file
@property
def private_files(self):
"""
bool: Whether this object describes private data that should not
be uploaded to the DCC. Defaults to false.
"""
self.logger.debug("In 'private_files' getter.")
return self._private_files
@private_files.setter
@enforce_bool
def private_files(self, private_files):
"""
The setter for the private files flag to denote this object
describes data that should not be uploaded to the DCC.
Args:
private_files (bool):
Returns:
None
"""
self.logger.debug("In 'private_files' setter.")
self._private_files = private_files
@property
def reference(self):
"""
str: The reference used for variant calling, eg Homo_sapiens assembly19.
"""
self.logger.debug("In 'reference' getter.")
return self._reference
@reference.setter
@enforce_string
def reference(self, reference):
"""
The setter for the reference used for variant calling, eg
Homo_sapiens assembly19.
Args:
reference (str): The reference .
Returns:
None
"""
self.logger.debug("In 'reference' setter.")
self._reference = reference
@property
def size(self):
"""
int: The size of the file in bytes.
"""
self.logger.debug("In 'size' getter.")
return self._size
@size.setter
@enforce_int
def size(self, size):
"""
The setter for the file size in bytes.
Args:
            size (int): The size of the file in bytes.
Returns:
None
"""
self.logger.debug("In 'size' setter.")
if size < 0:
raise ValueError("The size must be non-negative.")
self._size = size
@property
def sop(self):
"""
str: The URL for documentation of procedures used in variant calling.
"""
self.logger.debug("In 'sop' getter.")
return self._sop
@sop.setter
@enforce_string
def sop(self, sop):
"""
Set the URL for documentation of procedures used in variant calling.
Args:
sop (str): The documentation URL.
Returns:
None
"""
self.logger.debug("In 'sop' setter.")
self._sop = sop
@property
def study(self):
"""
str: One of the 3 studies that are part of the iHMP.
"""
self.logger.debug("In 'study' getter.")
return self._study
@study.setter
@enforce_string
def study(self, study):
"""
        The setter for the variant call's study. This is restricted to be one
of preg_preterm, ibd, or prediabetes.
Args:
study (str): The study of the sequence set.
Returns:
None
"""
self.logger.debug("In 'study' setter.")
studies = ["preg_preterm", "ibd", "prediabetes"]
if study in studies:
self._study = study
else:
raise Exception("Not a valid study.")
@property
def urls(self):
"""
list: An array of URL from where the file can be obtained,
http, ftp, fasp, etc...
"""
self.logger.debug("In 'urls' getter.")
return self._urls
@property
def variant_calling_process(self):
"""
str: The software and version used to perform variant calling.
"""
self.logger.debug("In 'variant_calling_process' getter.")
return self._variant_calling_process
@variant_calling_process.setter
@enforce_string
def variant_calling_process(self, variant_calling_process):
"""
The software and version used to perform variant calling.
Args:
variant_calling_process (str): The software and version used to
perform variant calling.
Returns:
None
"""
self.logger.debug("In 'variant_calling_process' setter.")
self._variant_calling_process = variant_calling_process
@staticmethod
def required_fields():
"""
A static method. The required fields for the class.
Args:
None
Returns:
Tuple of strings of required properties.
"""
module_logger.debug("In required_fields.")
return ("checksums", "comment", "format", "local_file",
"reference", "size", "study", "tags", "urls",
"variant_calling_process")
def _get_raw_doc(self):
"""
Generates the raw JSON document for the current object. All required
fields are filled into the JSON document, regardless they are set or
not. Any remaining fields are included only if they are set. This
allows the user to visualize the JSON to ensure fields are set
appropriately before saving into the database.
Args:
None
Returns:
A dictionary representation of the JSON document.
"""
self.logger.debug("In _get_raw_doc.")
doc = {
'acl': {
'read': ['all'],
'write': [HostVariantCall.namespace]
},
'linkage': self._links,
'ns': HostVariantCall.namespace,
'node_type': 'host_variant_call',
'meta': {
"checksums": self._checksums,
"comment": self._comment,
"format": self._format,
"reference": self._reference,
"size": self._size,
"study": self._study,
"subtype": "host",
'tags': self._tags,
"urls": self._urls,
"variant_calling_process": self._variant_calling_process
}
}
if self._id is not None:
self.logger.debug("%s object has the OSDF id set.", __name__)
doc['id'] = self._id
if self._version is not None:
self.logger.debug("%s object has the OSDF version set.", __name__)
doc['ver'] = self._version
# Handle optional properties
if self._format_doc is not None:
self.logger.debug("Object has the 'format_doc' property set.")
doc['meta']['format_doc'] = self._format_doc
if self._private_files is not None:
self.logger.debug("Object has the 'private_files' property set.")
doc['meta']['private_files'] = self._private_files
if self._sop is not None:
self.logger.debug("Object has the 'sop' property set.")
doc['meta']['sop'] = self._sop
return doc
@staticmethod
def search(query="\"host_variant_call\"[node_type]"):
"""
Searches the OSDF database through all HostVariantCall nodes. Any
criteria the user wishes to add is provided by the user in the query
language specifications provided in the OSDF documentation. A general
format is (including the quotes and brackets):
"search criteria"[field to search]
        If there are any results, they are returned as HostVariantCall
        instances, otherwise an empty list will be returned.
Args:
query (str): The query for the OSDF framework. Defaults to the
HostVariantCall node type.
Returns:
Returns an array of HostVariantCall objects. It returns
an empty list if there are no results.
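
        Example (illustrative; the exact field specifier may differ per
        OSDF deployment):

            calls = HostVariantCall.search('"prediabetes"[study]')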
"""
module_logger.debug("In search.")
session = iHMPSession.get_session()
module_logger.info("Got iHMP session.")
if query != '"host_variant_call"[node_type]':
query = '({}) && "host_variant_call"[node_type]'.format(query)
module_logger.debug("Submitting OQL query: %s", query)
data = session.get_osdf().oql_query("ihmp", query)
all_results = data['results']
result_list = list()
if len(all_results) > 0:
for result in all_results:
loaded_result = HostVariantCall.load_host_variant_call(result)
result_list.append(loaded_result)
return result_list
@staticmethod
def load_host_variant_call(call_data):
"""
Takes the provided JSON string and converts it to a
HostVariantCall object.
Args:
            call_data (dict): The parsed JSON data to convert
Returns:
Returns a HostVariantCall instance.
"""
module_logger.info("Creating a template %s.", __name__)
call = HostVariantCall()
module_logger.debug("Filling in %s details.", __name__)
        # The attributes common to all iHMP nodes
call._set_id(call_data['id'])
call.version = call_data['ver']
call.links = call_data['linkage']
# The attributes that are required
call.checksums = call_data['meta']['checksums']
call.comment = call_data['meta']['comment']
call.format = call_data['meta']['format']
call.reference = call_data['meta']['reference']
call.size = call_data['meta']['size']
call.study = call_data['meta']['study']
call.tags = call_data['meta']['tags']
call.variant_calling_process = call_data['meta']['variant_calling_process']
# We need to use the private attribute here because there is no
# public setter.
call._urls = call_data['meta']['urls']
# Optional fields.
if 'format_doc' in call_data['meta']:
call.format_doc = call_data['meta']['format_doc']
if 'private_files' in call_data['meta']:
call.private_files = call_data['meta']['private_files']
if 'sop' in call_data['meta']:
call.sop = call_data['meta']['sop']
module_logger.debug("Returning loaded %s", __name__)
return call
@staticmethod
def load(call_id):
"""
Loads the data for the specified input ID from OSDF to this object. If
the provided ID does not exist, then an error message is provided
        stating the node does not exist.
Args:
call_id (str): The OSDF ID for the document to load.
Returns:
A HostVariantCall object with all the available OSDF data loaded
into it.
"""
module_logger.debug("In load. Specified ID: %s", call_id)
session = iHMPSession.get_session()
module_logger.info("Got iHMP session.")
data = session.get_osdf().get_node(call_id)
call = HostVariantCall.load_host_variant_call(data)
module_logger.debug("Returning loaded %s", __name__)
return call
def _upload_data(self):
self.logger.debug("In _upload_data.")
session = iHMPSession.get_session()
study = self._study
study2dir = {
"ibd": "ibd",
"preg_preterm": "ptb",
"prediabetes": "t2d"
}
if study not in study2dir:
raise ValueError("Invalid study. No directory mapping for %s" % study)
study_dir = study2dir[study]
remote_base = os.path.basename(self._local_file)
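        # Restrict the remote file name to a conservative character set before upload.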
valid_chars = "-_.%s%s" % (string.ascii_letters, string.digits)
remote_base = ''.join(c for c in remote_base if c in valid_chars)
remote_base = remote_base.replace(' ', '_') # No spaces in filenames
remote_path = "/".join(["/" + study_dir, "variant_calls", "host",
remote_base])
self.logger.debug("Remote path for this file will be %s.", remote_path)
# Upload the file to the iHMP aspera server
upload_result = aspera.upload_file(HostVariantCall.aspera_server,
session.username,
session.password,
self._local_file,
remote_path)
if not upload_result:
self.logger.error("Experienced an error uploading the data. "
"Aborting save.")
raise Exception("Unable to load host variant call.")
else:
self._urls = ["fasp://" + HostVariantCall.aspera_server + remote_path]
def save(self):
"""
Saves the data in OSDF. The JSON form of the current data for the
instance is validated in the save function. If the data is not valid,
then the data will not be saved. If the instance was saved previously,
then the node ID is assigned the alpha numeric found in the OSDF
instance. If not saved previously, then the node ID is 'None', and upon
a successful save, will be assigned to the alphanumeric ID found in
OSDF.
Args:
None
        Returns:
True if successful, False otherwise.
"""
self.logger.debug("In save.")
if not self.is_valid():
self.logger.error("Cannot save, data is invalid")
return False
session = iHMPSession.get_session()
self.logger.info("Got iHMP session.")
success = False
if self._private_files:
self._urls = ["<private>"]
else:
try:
self._upload_data()
except Exception as uploadException:
self.logger.exception(uploadException)
# Don't bother continuing...
return False
osdf = session.get_osdf()
if self.id is None:
self.logger.info("About to insert a new %s OSDF node.", __name__)
# Get the JSON form of the data and load it
self.logger.info("Converting %s to parsed JSON form.", __name__)
data = json.loads(self.to_json())
try:
self.logger.info("Attempting to save a new node.")
node_id = osdf.insert_node(data)
self._set_id(node_id)
self._version = 1
self.logger.info("Save for %s %s successful.", __name__, node_id)
self.logger.info("Setting ID for %s %s.", __name__, node_id)
success = True
except Exception as saveException:
self.logger.exception(saveException)
self.logger.error("An error occurred while saving %s. Reason: %s",
__name__,
saveException
)
else:
self.logger.info("%s already has an ID, so we do an update (not an insert).",
__name__
)
try:
seq_set_data = self._get_raw_doc()
seq_set_id = self._id
self.logger.info("Attempting to update %s with ID: %s.", __name__, seq_set_id)
osdf.edit_node(seq_set_data)
self.logger.info("Update for %s %s successful.", __name__, seq_set_id)
seq_set_data = osdf.get_node(seq_set_id)
latest_version = seq_set_data['ver']
self.logger.debug("The version of this %s is now %s",
__name__, str(latest_version)
)
self._version = latest_version
success = True
except Exception as update_exception:
self.logger.exception(update_exception)
self.logger.error("An error occurred while updating %s %s. "
"Reason: %s", __name__, self._id,
update_exception
)
self.logger.debug("Returning %s", str(success))
return success
| carze/cutlass | cutlass/HostVariantCall.py | Python | mit | 23,915 | 0.000794 |
def extractSpiritGodShura(item):
"""
# Sousetsuka
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if item['title'].startswith('Chapter') and item['tags'] == ['Chapters']:
if ':' in item['title'] and not postfix:
postfix = item['title'].split(':')[-1]
return buildReleaseMessageWithType(item, 'Spirit God Shura', vol, chp, postfix=postfix, tl_type='oel')
return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractSpiritGodShura.py | Python | bsd-3-clause | 484 | 0.024793 |
agenda = []
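# Each agenda entry is a [name, phone] pair.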
def pedenome():
return input("Nome: ").replace("#", "$")
def pedetelefone():
return input("Telefone: ").replace("#", "$")
def pedearquivo():
return input("Nome do arquivo: ")
def mostra(nome, telefone):
print(f"Nome: {nome} Telefone: {telefone}")
def pesquisa(nome):
mnome = nome.lower()
for p, e in enumerate(agenda):
if e[0].lower() == mnome:
return p
return None
def novo():
nome = pedenome()
telefone = pedetelefone()
agenda.append([nome, telefone])
def apaga():
nome = pedenome()
p = pesquisa(nome)
if p is not None:
m = "Certeza que quer excluir? (1 - Para confirmar / 0 - para sair): "
valor = faixa(m, 0, 1)
if valor == 1:
del agenda[p]
else:
print("Não foi apagado!")
else:
print("Nome não encontrado.")
def altera():
p = pesquisa(pedenome())
if p is not None:
print("Encontrado!")
nome = agenda[p][0]
telefone = agenda[p][1]
mostra(nome, telefone)
nome = pedenome()
telefone = pedetelefone()
m = "Certeza que quer alterar? (1 - Para confirmar / 0 - para sair): "
valor = faixa(m, 0, 1)
if valor == 1:
agenda[p] = [nome, telefone]
else:
print("Não alterado!")
else:
print("Não encontrado")
def lista():
print("\nAgenda\n")
print("-"*6)
for n, d in enumerate(agenda):
nome, telefone = d
print(n+1, end=' ')
mostra(nome, telefone)
print("-"*6)
def grava():
nomearquivo = pedearquivo()
arquivo = open(nomearquivo, "w")
for nome, telefone in agenda:
arquivo.write(f"{nome}#{telefone}\n")
arquivo.close()
def le():
global agenda
agenda = []
nomearquivo = pedearquivo()
arquivo = open(nomearquivo, "r")
for linha in arquivo.readlines():
        nome, telefone = linha.strip().split("#")
        agenda.append([nome, telefone])
arquivo.close()
def faixa(pergunta, i, f):
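    # Keep prompting until the user enters an integer within [i, f].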
while True:
try:
valor = int(input(pergunta))
if valor >= i and valor <= f:
return valor
except ValueError:
print(f"Valor inválido, favor digitar valor entre {i} e {f}")
def ordena():
global agenda
agenda.sort()
lista()
def menu():
print("""
    0 - Exit
    1 - New
    2 - Edit
    3 - Delete
    4 - List
    5 - Save
    6 - Load
    7 - Sort by Name
""")
la = len(agenda)
print(f"{la} contato(s) na agenda.")
return faixa("Escola uma opção: ", 0, 7)
while True:
opcao = menu()
if opcao == 0:
break
elif opcao == 1:
novo()
elif opcao == 2:
altera()
elif opcao == 3:
apaga()
elif opcao == 4:
lista()
elif opcao == 5:
grava()
elif opcao == 6:
le()
elif opcao == 7:
ordena()
| laenderoliveira/exerclivropy | cap09/exercicio-09-21.py | Python | mit | 2,962 | 0 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 12, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 0); | antoinecarme/pyaf | tests/artificial/transf_None/trend_Lag1Trend/cycle_12/ar_/test_artificial_32_None_Lag1Trend_12__20.py | Python | bsd-3-clause | 259 | 0.088803 |
# Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Tuple, Callable, List, Text, Type, Sequence
from typing import Union
from tensornetwork.backends import abstract_backend
from tensornetwork.backends.numpy import decompositions
import numpy as np
from tensornetwork.backends.jax import jitted_functions
from functools import partial
import warnings
Tensor = Any
# pylint: disable=abstract-method
_CACHED_MATVECS = {}
_CACHED_FUNCTIONS = {}
class JaxBackend(abstract_backend.AbstractBackend):
"""See abstract_backend.AbstractBackend for documentation."""
def __init__(self, dtype: Optional[np.dtype] = None,
precision: Optional[Text] = None) -> None:
# pylint: disable=global-variable-undefined
global libjax # Jax module
global jnp # jax.numpy module
global jsp # jax.scipy module
super().__init__()
try:
#pylint: disable=import-outside-toplevel
import jax
except ImportError as err:
raise ImportError("Jax not installed, please switch to a different "
"backend or install Jax.") from err
libjax = jax
jnp = libjax.numpy
jsp = libjax.scipy
self.name = "jax"
self._dtype = np.dtype(dtype) if dtype is not None else None
self.jax_precision = precision if precision is not None else libjax.lax.Precision.DEFAULT #pylint: disable=line-too-long
def tensordot(self, a: Tensor, b: Tensor,
axes: Union[int, Sequence[Sequence[int]]]) -> Tensor:
return jnp.tensordot(a, b, axes, precision=self.jax_precision)
def reshape(self, tensor: Tensor, shape: Tensor) -> Tensor:
return jnp.reshape(tensor, np.asarray(shape).astype(np.int32))
def transpose(self, tensor, perm=None) -> Tensor:
return jnp.transpose(tensor, perm)
def shape_concat(self, values: Tensor, axis: int) -> Tensor:
return np.concatenate(values, axis)
def slice(self, tensor: Tensor, start_indices: Tuple[int, ...],
slice_sizes: Tuple[int, ...]) -> Tensor:
if len(start_indices) != len(slice_sizes):
raise ValueError("Lengths of start_indices and slice_sizes must be"
"identical.")
return libjax.lax.dynamic_slice(tensor, start_indices, slice_sizes)
def svd(
self,
tensor: Tensor,
pivot_axis: int = -1,
max_singular_values: Optional[int] = None,
max_truncation_error: Optional[float] = None,
relative: Optional[bool] = False
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
return decompositions.svd(
jnp,
tensor,
pivot_axis,
max_singular_values,
max_truncation_error,
relative=relative)
def qr(
self,
tensor: Tensor,
pivot_axis: int = -1,
non_negative_diagonal: bool = False
) -> Tuple[Tensor, Tensor]:
return decompositions.qr(jnp, tensor, pivot_axis, non_negative_diagonal)
def rq(
self,
tensor: Tensor,
pivot_axis: int = -1,
non_negative_diagonal: bool = False
) -> Tuple[Tensor, Tensor]:
return decompositions.rq(jnp, tensor, pivot_axis, non_negative_diagonal)
def shape_tensor(self, tensor: Tensor) -> Tensor:
return tensor.shape
def shape_tuple(self, tensor: Tensor) -> Tuple[Optional[int], ...]:
return tensor.shape
def sparse_shape(self, tensor: Tensor) -> Tuple[Optional[int], ...]:
return self.shape_tuple(tensor)
def shape_prod(self, values: Tensor) -> Tensor:
return np.prod(values)
def sqrt(self, tensor: Tensor) -> Tensor:
return jnp.sqrt(tensor)
def convert_to_tensor(self, tensor: Tensor) -> Tensor:
if (not isinstance(tensor, (np.ndarray, jnp.ndarray))
and not jnp.isscalar(tensor)):
raise TypeError(("Expected a `jnp.array`, `np.array` or scalar. "
f"Got {type(tensor)}"))
result = jnp.asarray(tensor)
return result
def outer_product(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return jnp.tensordot(tensor1, tensor2, 0,
precision=self.jax_precision)
def einsum(self,
expression: str,
*tensors: Tensor,
optimize: bool = True) -> Tensor:
return jnp.einsum(expression, *tensors, optimize=optimize,
precision=self.jax_precision)
def norm(self, tensor: Tensor) -> Tensor:
return jnp.linalg.norm(tensor)
def eye(self,
N,
dtype: Optional[np.dtype] = None,
M: Optional[int] = None) -> Tensor:
dtype = dtype if dtype is not None else jnp.float64
return jnp.eye(N, M=M, dtype=dtype)
def ones(self,
shape: Tuple[int, ...],
dtype: Optional[np.dtype] = None) -> Tensor:
dtype = dtype if dtype is not None else jnp.float64
return jnp.ones(shape, dtype=dtype)
def zeros(self,
shape: Tuple[int, ...],
dtype: Optional[np.dtype] = None) -> Tensor:
dtype = dtype if dtype is not None else jnp.float64
return jnp.zeros(shape, dtype=dtype)
def randn(self,
shape: Tuple[int, ...],
dtype: Optional[np.dtype] = None,
seed: Optional[int] = None) -> Tensor:
if not seed:
seed = np.random.randint(0, 2**63)
key = libjax.random.PRNGKey(seed)
dtype = dtype if dtype is not None else np.dtype(np.float64)
def cmplx_randn(complex_dtype, real_dtype):
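      # Real and imaginary parts are drawn from two independent PRNG keys.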
real_dtype = np.dtype(real_dtype)
complex_dtype = np.dtype(complex_dtype)
key_2 = libjax.random.PRNGKey(seed + 1)
real_part = libjax.random.normal(key, shape, dtype=real_dtype)
complex_part = libjax.random.normal(key_2, shape, dtype=real_dtype)
unit = (
np.complex64(1j)
if complex_dtype == np.dtype(np.complex64) else np.complex128(1j))
return real_part + unit * complex_part
if np.dtype(dtype) is np.dtype(jnp.complex128):
return cmplx_randn(dtype, jnp.float64)
if np.dtype(dtype) is np.dtype(jnp.complex64):
return cmplx_randn(dtype, jnp.float32)
return libjax.random.normal(key, shape).astype(dtype)
def random_uniform(self,
shape: Tuple[int, ...],
boundaries: Optional[Tuple[float, float]] = (0.0, 1.0),
dtype: Optional[np.dtype] = None,
seed: Optional[int] = None) -> Tensor:
if not seed:
seed = np.random.randint(0, 2**63)
key = libjax.random.PRNGKey(seed)
dtype = dtype if dtype is not None else np.dtype(np.float64)
def cmplx_random_uniform(complex_dtype, real_dtype):
real_dtype = np.dtype(real_dtype)
complex_dtype = np.dtype(complex_dtype)
key_2 = libjax.random.PRNGKey(seed + 1)
real_part = libjax.random.uniform(
key,
shape,
dtype=real_dtype,
minval=boundaries[0],
maxval=boundaries[1])
complex_part = libjax.random.uniform(
key_2,
shape,
dtype=real_dtype,
minval=boundaries[0],
maxval=boundaries[1])
unit = (
np.complex64(1j)
if complex_dtype == np.dtype(np.complex64) else np.complex128(1j))
return real_part + unit * complex_part
if np.dtype(dtype) is np.dtype(jnp.complex128):
return cmplx_random_uniform(dtype, jnp.float64)
if np.dtype(dtype) is np.dtype(jnp.complex64):
return cmplx_random_uniform(dtype, jnp.float32)
return libjax.random.uniform(
key, shape, minval=boundaries[0], maxval=boundaries[1]).astype(dtype)
def eigs(self, #pylint: disable=arguments-differ
A: Callable,
args: Optional[List] = None,
initial_state: Optional[Tensor] = None,
shape: Optional[Tuple[int, ...]] = None,
dtype: Optional[Type[np.number]] = None,
num_krylov_vecs: int = 50,
numeig: int = 6,
tol: float = 1E-8,
which: Text = 'LR',
maxiter: int = 20) -> Tuple[Tensor, List]:
"""
Implicitly restarted Arnoldi method for finding the lowest
eigenvector-eigenvalue pairs of a linear operator `A`.
`A` is a function implementing the matrix-vector
product.
WARNING: This routine uses jax.jit to reduce runtimes. jitting is triggered
at the first invocation of `eigs`, and on any subsequent calls
if the python `id` of `A` changes, even if the formal definition of `A`
stays the same.
Example: the following will jit once at the beginning, and then never again:
```python
import jax
import numpy as np
def A(H,x):
return jax.np.dot(H,x)
for n in range(100):
H = jax.np.array(np.random.rand(10,10))
x = jax.np.array(np.random.rand(10,10))
      res = eigs(A, [H],x) #jitting is triggered only at `n=0`
```
The following code triggers jitting at every iteration, which
results in considerably reduced performance
```python
import jax
import numpy as np
for n in range(100):
def A(H,x):
return jax.np.dot(H,x)
H = jax.np.array(np.random.rand(10,10))
x = jax.np.array(np.random.rand(10,10))
      res = eigs(A, [H],x) #jitting is triggered at every step `n`
```
Args:
A: A (sparse) implementation of a linear operator.
Call signature of `A` is `res = A(vector, *args)`, where `vector`
can be an arbitrary `Tensor`, and `res.shape` has to be `vector.shape`.
args: A list of arguments to `A`. `A` will be called as
`res = A(initial_state, *args)`.
initial_state: An initial vector for the algorithm. If `None`,
a random initial `Tensor` is created using the `backend.randn` method
shape: The shape of the input-dimension of `A`.
dtype: The dtype of the input `A`. If no `initial_state` is provided,
a random initial state with shape `shape` and dtype `dtype` is created.
num_krylov_vecs: The number of iterations (number of krylov vectors).
numeig: The number of eigenvector-eigenvalue pairs to be computed.
tol: The desired precision of the eigenvalues. For the jax backend
this has currently no effect, and precision of eigenvalues is not
guaranteed. This feature may be added at a later point. To increase
precision the caller can either increase `maxiter` or `num_krylov_vecs`.
      which: Flag for targeting different types of eigenvalues. Currently
        supported are `which = 'LR'` (largest real part) and `which = 'LM'`
        (largest magnitude).
maxiter: Maximum number of restarts. For `maxiter=0` the routine becomes
equivalent to a simple Arnoldi method.
Returns:
(eigvals, eigvecs)
eigvals: A list of `numeig` eigenvalues
eigvecs: A list of `numeig` eigenvectors
"""
if args is None:
args = []
if which not in ('LR', 'LM'):
raise ValueError(f'which = {which} is currently not supported.')
if numeig > num_krylov_vecs:
raise ValueError('`num_krylov_vecs` >= `numeig` required!')
if initial_state is None:
if (shape is None) or (dtype is None):
raise ValueError("if no `initial_state` is passed, then `shape` and"
"`dtype` have to be provided")
initial_state = self.randn(shape, dtype)
if not isinstance(initial_state, (jnp.ndarray, np.ndarray)):
raise TypeError("Expected a `jax.array`. Got {}".format(
type(initial_state)))
if A not in _CACHED_MATVECS:
_CACHED_MATVECS[A] = libjax.tree_util.Partial(libjax.jit(A))
if "imp_arnoldi" not in _CACHED_FUNCTIONS:
imp_arnoldi = jitted_functions._implicitly_restarted_arnoldi(libjax)
_CACHED_FUNCTIONS["imp_arnoldi"] = imp_arnoldi
eta, U, numits = _CACHED_FUNCTIONS["imp_arnoldi"](_CACHED_MATVECS[A], args,
initial_state,
num_krylov_vecs, numeig,
which, tol, maxiter,
self.jax_precision)
if numeig > numits:
warnings.warn(
f"Arnoldi terminated early after numits = {numits}"
f" < numeig = {numeig} steps. For this value of `numeig `"
f"the routine will return spurious eigenvalues of value 0.0."
f"Use a smaller value of numeig, or a smaller value for `tol`")
return eta, U
def eigsh(
self, #pylint: disable=arguments-differ
A: Callable,
args: Optional[List] = None,
initial_state: Optional[Tensor] = None,
shape: Optional[Tuple[int, ...]] = None,
dtype: Optional[Type[np.number]] = None,
num_krylov_vecs: int = 50,
numeig: int = 6,
tol: float = 1E-8,
which: Text = 'SA',
maxiter: int = 20) -> Tuple[Tensor, List]:
"""
Implicitly restarted Lanczos method for finding the lowest
eigenvector-eigenvalue pairs of a symmetric (hermitian) linear operator `A`.
`A` is a function implementing the matrix-vector
product.
WARNING: This routine uses jax.jit to reduce runtimes. jitting is triggered
at the first invocation of `eigsh`, and on any subsequent calls
if the python `id` of `A` changes, even if the formal definition of `A`
stays the same.
Example: the following will jit once at the beginning, and then never again:
```python
import jax
import numpy as np
def A(H,x):
return jax.np.dot(H,x)
for n in range(100):
H = jax.np.array(np.random.rand(10,10))
x = jax.np.array(np.random.rand(10,10))
      res = eigsh(A, [H],x) #jitting is triggered only at `n=0`
```
The following code triggers jitting at every iteration, which
results in considerably reduced performance
```python
import jax
import numpy as np
for n in range(100):
def A(H,x):
return jax.np.dot(H,x)
H = jax.np.array(np.random.rand(10,10))
x = jax.np.array(np.random.rand(10,10))
      res = eigsh(A, [H],x) #jitting is triggered at every step `n`
```
Args:
A: A (sparse) implementation of a linear operator.
Call signature of `A` is `res = A(vector, *args)`, where `vector`
can be an arbitrary `Tensor`, and `res.shape` has to be `vector.shape`.
      args: A list of arguments to `A`. `A` will be called as
`res = A(initial_state, *args)`.
initial_state: An initial vector for the algorithm. If `None`,
a random initial `Tensor` is created using the `backend.randn` method
shape: The shape of the input-dimension of `A`.
dtype: The dtype of the input `A`. If no `initial_state` is provided,
a random initial state with shape `shape` and dtype `dtype` is created.
num_krylov_vecs: The number of iterations (number of krylov vectors).
numeig: The number of eigenvector-eigenvalue pairs to be computed.
tol: The desired precision of the eigenvalues. For the jax backend
this has currently no effect, and precision of eigenvalues is not
guaranteed. This feature may be added at a later point. To increase
precision the caller can either increase `maxiter` or `num_krylov_vecs`.
      which: Flag for targeting different types of eigenvalues. Currently
        supported are `which = 'SA'` (smallest algebraic), `which = 'LA'`
        (largest algebraic) and `which = 'LM'` (largest magnitude).
      maxiter: Maximum number of restarts. For `maxiter=0` the routine becomes
        equivalent to a simple Lanczos method.
Returns:
(eigvals, eigvecs)
eigvals: A list of `numeig` eigenvalues
eigvecs: A list of `numeig` eigenvectors
"""
if args is None:
args = []
if which not in ('SA', 'LA', 'LM'):
raise ValueError(f'which = {which} is currently not supported.')
if numeig > num_krylov_vecs:
raise ValueError('`num_krylov_vecs` >= `numeig` required!')
if initial_state is None:
if (shape is None) or (dtype is None):
raise ValueError("if no `initial_state` is passed, then `shape` and"
"`dtype` have to be provided")
initial_state = self.randn(shape, dtype)
if not isinstance(initial_state, (jnp.ndarray, np.ndarray)):
raise TypeError("Expected a `jax.array`. Got {}".format(
type(initial_state)))
if A not in _CACHED_MATVECS:
_CACHED_MATVECS[A] = libjax.tree_util.Partial(libjax.jit(A))
if "imp_lanczos" not in _CACHED_FUNCTIONS:
imp_lanczos = jitted_functions._implicitly_restarted_lanczos(libjax)
_CACHED_FUNCTIONS["imp_lanczos"] = imp_lanczos
eta, U, numits = _CACHED_FUNCTIONS["imp_lanczos"](_CACHED_MATVECS[A], args,
initial_state,
num_krylov_vecs, numeig,
which, tol, maxiter,
self.jax_precision)
if numeig > numits:
warnings.warn(
f"Arnoldi terminated early after numits = {numits}"
f" < numeig = {numeig} steps. For this value of `numeig `"
f"the routine will return spurious eigenvalues of value 0.0."
f"Use a smaller value of numeig, or a smaller value for `tol`")
return eta, U
def eigsh_lanczos(
self,
A: Callable,
args: Optional[List[Tensor]] = None,
initial_state: Optional[Tensor] = None,
shape: Optional[Tuple] = None,
dtype: Optional[Type[np.number]] = None,
num_krylov_vecs: int = 20,
numeig: int = 1,
tol: float = 1E-8,
delta: float = 1E-8,
ndiag: int = 10,
reorthogonalize: Optional[bool] = False) -> Tuple[Tensor, List]:
"""
Lanczos method for finding the lowest eigenvector-eigenvalue pairs
of a hermitian linear operator `A`. `A` is a function implementing
the matrix-vector product.
WARNING: This routine uses jax.jit to reduce runtimes. jitting is triggered
at the first invocation of `eigsh_lanczos`, and on any subsequent calls
if the python `id` of `A` changes, even if the formal definition of `A`
stays the same.
Example: the following will jit once at the beginning, and then never again:
```python
import jax
import numpy as np
def A(H,x):
return jax.np.dot(H,x)
for n in range(100):
H = jax.np.array(np.random.rand(10,10))
x = jax.np.array(np.random.rand(10,10))
      res = eigsh_lanczos(A, [H],x) #jitting is triggered only at `n=0`
```
The following code triggers jitting at every iteration, which
results in considerably reduced performance
```python
import jax
import numpy as np
for n in range(100):
def A(H,x):
return jax.np.dot(H,x)
H = jax.np.array(np.random.rand(10,10))
x = jax.np.array(np.random.rand(10,10))
      res = eigsh_lanczos(A, [H],x) #jitting is triggered at every step `n`
```
Args:
A: A (sparse) implementation of a linear operator.
Call signature of `A` is `res = A(vector, *args)`, where `vector`
can be an arbitrary `Tensor`, and `res.shape` has to be `vector.shape`.
      args: A list of arguments to `A`. `A` will be called as
`res = A(initial_state, *args)`.
initial_state: An initial vector for the Lanczos algorithm. If `None`,
a random initial `Tensor` is created using the `backend.randn` method
shape: The shape of the input-dimension of `A`.
dtype: The dtype of the input `A`. If no `initial_state` is provided,
a random initial state with shape `shape` and dtype `dtype` is created.
num_krylov_vecs: The number of iterations (number of krylov vectors).
numeig: The number of eigenvector-eigenvalue pairs to be computed.
If `numeig > 1`, `reorthogonalize` has to be `True`.
tol: The desired precision of the eigenvalues. For the jax backend
this has currently no effect, and precision of eigenvalues is not
guaranteed. This feature may be added at a later point.
To increase precision the caller can increase `num_krylov_vecs`.
delta: Stopping criterion for Lanczos iteration.
If a Krylov vector :math: `x_n` has an L2 norm
:math:`\\lVert x_n\\rVert < delta`, the iteration
is stopped. It means that an (approximate) invariant subspace has
been found.
ndiag: The tridiagonal Operator is diagonalized every `ndiag` iterations
to check convergence. This has currently no effect for the jax backend,
but may be added at a later point.
reorthogonalize: If `True`, Krylov vectors are kept orthogonal by
explicit orthogonalization (more costly than `reorthogonalize=False`)
Returns:
(eigvals, eigvecs)
eigvals: A jax-array containing `numeig` lowest eigenvalues
eigvecs: A list of `numeig` lowest eigenvectors
"""
if args is None:
args = []
if num_krylov_vecs < numeig:
raise ValueError('`num_krylov_vecs` >= `numeig` required!')
if numeig > 1 and not reorthogonalize:
raise ValueError(
"Got numeig = {} > 1 and `reorthogonalize = False`. "
"Use `reorthogonalize=True` for `numeig > 1`".format(numeig))
if initial_state is None:
if (shape is None) or (dtype is None):
raise ValueError("if no `initial_state` is passed, then `shape` and"
"`dtype` have to be provided")
initial_state = self.randn(shape, dtype)
if not isinstance(initial_state, (jnp.ndarray, np.ndarray)):
raise TypeError("Expected a `jax.array`. Got {}".format(
type(initial_state)))
if A not in _CACHED_MATVECS:
_CACHED_MATVECS[A] = libjax.tree_util.Partial(A)
if "eigsh_lanczos" not in _CACHED_FUNCTIONS:
eigsh_lanczos = jitted_functions._generate_jitted_eigsh_lanczos(libjax)
_CACHED_FUNCTIONS["eigsh_lanczos"] = eigsh_lanczos
eigsh_lanczos = _CACHED_FUNCTIONS["eigsh_lanczos"]
eta, U, numits = eigsh_lanczos(_CACHED_MATVECS[A], args, initial_state,
num_krylov_vecs, numeig, delta,
reorthogonalize, self.jax_precision)
if numeig > numits:
warnings.warn(
f"Lanczos terminated early after numits = {numits}"
f" < numeig = {numeig} steps. For this value of `numeig `"
f"the routine will return spurious eigenvalues of value 0.0."
f"Use a smaller value of numeig, or a smaller value for `tol`")
return eta, U
def _gmres(self,
A_mv: Callable,
b: Tensor,
A_args: List,
A_kwargs: dict,
x0: Tensor,
tol: float,
atol: float,
num_krylov_vectors: int,
maxiter: int,
M: Optional[Callable] = None) -> Tuple[Tensor, int]:
""" GMRES solves the linear system A @ x = b for x given a vector `b` and
a general (not necessarily symmetric/Hermitian) linear operator `A`.
As a Krylov method, GMRES does not require a concrete matrix representation
of the n by n `A`, but only a function
`vector1 = A_mv(vector0, *A_args, **A_kwargs)`
prescribing a one-to-one linear map from vector0 to vector1 (that is,
A must be square, and thus vector0 and vector1 the same size). If `A` is a
dense matrix, or if it is a symmetric/Hermitian operator, a different
linear solver will usually be preferable.
GMRES works by first constructing the Krylov basis
K = (x0, A_mv@x0, A_mv@A_mv@x0, ..., (A_mv^num_krylov_vectors)@x_0) and then
solving a certain dense linear system K @ q0 = q1 from whose solution x can
be approximated. For `num_krylov_vectors = n` the solution is provably exact
in infinite precision, but the expense is cubic in `num_krylov_vectors` so
one is typically interested in the `num_krylov_vectors << n` case.
The solution can in this case be repeatedly
improved, to a point, by restarting the Arnoldi iterations each time
`num_krylov_vectors` is reached. Unfortunately the optimal parameter choices
balancing expense and accuracy are difficult to predict in advance, so
applying this function requires a degree of experimentation.
In a tensor network code one is typically interested in A_mv implementing
some tensor contraction. This implementation thus allows `b` and `x0` to be
of whatever arbitrary, though identical, shape `b = A_mv(x0, ...)` expects.
Reshaping to and from a matrix problem is handled internally.
The Jax backend version of GMRES uses a homemade implementation that, for
now, is suboptimal for num_krylov_vecs ~ b.size.
For the same reason as described in eigsh_lancsoz, the function A_mv
should be Jittable (or already Jitted) and, if at all possible, defined
only once at the global scope. A new compilation will be triggered each
time an A_mv with a new function signature is passed in, even if the
'new' function is identical to the old one (function identity is
undecidable).
Args:
A_mv : A function `v0 = A_mv(v, *A_args, **A_kwargs)` where `v0` and
`v` have the same shape.
b : The `b` in `A @ x = b`; it should be of the shape `A_mv`
operates on.
A_args : Positional arguments to `A_mv`, supplied to this interface
as a list.
Default: None.
A_kwargs : In the other backends, keyword arguments to `A_mv`, supplied
as a dictionary. However, the Jax backend does not support
A_mv accepting
keyword arguments since this causes problems with Jit.
Therefore, an error is thrown if A_kwargs is specified.
Default: None.
x0 : An optional guess solution. Zeros are used by default.
If `x0` is supplied, its shape and dtype must match those of
`b`, or an
error will be thrown.
Default: zeros.
tol, atol: Solution tolerance to achieve,
norm(residual) <= max(tol*norm(b), atol).
Default: tol=1E-05
atol=tol
num_krylov_vectors
: Size of the Krylov space to build at each restart.
Expense is cubic in this parameter.
Default: 20.
maxiter : The Krylov space will be repeatedly rebuilt up to this many
times. Large values of this argument
should be used only with caution, since especially for nearly
symmetric matrices and small `num_krylov_vectors` convergence
might well freeze at a value significantly larger than `tol`.
Default: 1
M : Inverse of the preconditioner of A; see the docstring for
`scipy.sparse.linalg.gmres`. This is unsupported in the Jax
backend, and NotImplementedError will be raised if it is
supplied.
Default: None.
Raises:
ValueError: -if `x0` is supplied but its shape differs from that of `b`.
-if num_krylov_vectors <= 0.
-if tol or atol was negative.
NotImplementedError: - If M is supplied.
- If A_kwargs is supplied.
TypeError: -if the dtype of `x0` and `b` are mismatching.
Returns:
x : The converged solution. It has the same shape as `b`.
info : 0 if convergence was achieved, the number of restarts otherwise.
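
    Example (a minimal sketch; the operator, shapes and parameter values are
    purely illustrative):
    ```python
    import jax.numpy as jnp
    from tensornetwork.backends.jax.jax_backend import JaxBackend

    def A_mv(x, H):
      return H @ x

    backend = JaxBackend()
    H = jnp.eye(4)
    b = jnp.ones(4)
    x0 = jnp.zeros(4)
    x, info = backend._gmres(A_mv, b, [H], {}, x0, 1E-05, 1E-05, 4, 1)
    ```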
"""
if M is not None:
raise NotImplementedError("M is not supported by the Jax backend.")
if A_kwargs:
raise NotImplementedError("A_kwargs is not supported by the Jax backend.")
if A_mv not in _CACHED_MATVECS:
@libjax.tree_util.Partial
def matrix_matvec(x, *args):
x = x.reshape(b.shape)
result = A_mv(x, *args)
return result.ravel()
_CACHED_MATVECS[A_mv] = matrix_matvec
if "gmres" not in _CACHED_FUNCTIONS:
_CACHED_FUNCTIONS["gmres"] = jitted_functions.gmres_wrapper(libjax)
gmres_m = _CACHED_FUNCTIONS["gmres"].gmres_m
x, _, n_iter, converged = gmres_m(_CACHED_MATVECS[A_mv], A_args, b.ravel(),
x0, tol, atol, num_krylov_vectors,
maxiter, self.jax_precision)
if converged:
info = 0
else:
info = n_iter
x = self.reshape(x, b.shape)
return x, info
def conj(self, tensor: Tensor) -> Tensor:
return jnp.conj(tensor)
def eigh(self, matrix: Tensor) -> Tuple[Tensor, Tensor]:
return jnp.linalg.eigh(matrix)
def addition(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return tensor1 + tensor2
def subtraction(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return tensor1 - tensor2
def multiply(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return tensor1 * tensor2
def divide(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
return tensor1 / tensor2
def index_update(self, tensor: Tensor, mask: Tensor,
assignee: Tensor) -> Tensor:
return libjax.ops.index_update(tensor, mask, assignee)
def inv(self, matrix: Tensor) -> Tensor:
if len(matrix.shape) > 2:
raise ValueError("input to numpy backend method `inv` has shape {}."
" Only matrices are supported.".format(matrix.shape))
return jnp.linalg.inv(matrix)
def broadcast_right_multiplication(self, tensor1: Tensor,
tensor2: Tensor) -> Tensor:
if len(tensor2.shape) != 1:
raise ValueError("only order-1 tensors are allowed for `tensor2`,"
" found `tensor2.shape = {}`".format(tensor2.shape))
return tensor1 * tensor2
def broadcast_left_multiplication(self, tensor1: Tensor,
tensor2: Tensor) -> Tensor:
if len(tensor1.shape) != 1:
raise ValueError("only order-1 tensors are allowed for `tensor1`,"
" found `tensor1.shape = {}`".format(tensor1.shape))
t1_broadcast_shape = self.shape_concat(
[self.shape_tensor(tensor1), [1] * (len(tensor2.shape) - 1)], axis=-1)
return tensor2 * self.reshape(tensor1, t1_broadcast_shape)
def sin(self, tensor: Tensor) -> Tensor:
return jnp.sin(tensor)
def cos(self, tensor: Tensor) -> Tensor:
return jnp.cos(tensor)
def exp(self, tensor: Tensor) -> Tensor:
return jnp.exp(tensor)
def log(self, tensor: Tensor) -> Tensor:
return jnp.log(tensor)
def expm(self, matrix: Tensor) -> Tensor:
if len(matrix.shape) != 2:
raise ValueError("input to numpy backend method `expm` has shape {}."
" Only matrices are supported.".format(matrix.shape))
if matrix.shape[0] != matrix.shape[1]:
raise ValueError("input to numpy backend method `expm` only supports"
" N*N matrix, {x}*{y} matrix is given".format(
x=matrix.shape[0], y=matrix.shape[1]))
# pylint: disable=no-member
return jsp.linalg.expm(matrix)
def jit(self, fun: Callable, *args: List, **kwargs: dict) -> Callable:
return libjax.jit(fun, *args, **kwargs)
def sum(self,
tensor: Tensor,
axis: Optional[Sequence[int]] = None,
keepdims: bool = False) -> Tensor:
return jnp.sum(tensor, axis=axis, keepdims=keepdims)
def matmul(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
if (tensor1.ndim <= 1) or (tensor2.ndim <= 1):
raise ValueError("inputs to `matmul` have to be tensors of order > 1,")
return jnp.matmul(tensor1, tensor2, precision=self.jax_precision)
def diagonal(self, tensor: Tensor, offset: int = 0, axis1: int = -2,
axis2: int = -1) -> Tensor:
"""Return specified diagonals.
If tensor is 2-D, returns the diagonal of tensor with the given offset,
i.e., the collection of elements of the form a[i, i+offset].
If a has more than two dimensions, then the axes specified by
axis1 and axis2 are used to determine the 2-D sub-array whose diagonal is
returned. The shape of the resulting array can be determined by removing
axis1 and axis2 and appending an index to the right equal to the size of the
resulting diagonals.
This function only extracts diagonals. If you
wish to create diagonal matrices from vectors, use diagflat.
Args:
tensor: A tensor.
offset: Offset of the diagonal from the main diagonal.
axis1, axis2: Axis to be used as the first/second axis of the 2D
sub-arrays from which the diagonals should be taken.
Defaults to second last/last axis.
Returns:
array_of_diagonals: A dim = min(1, tensor.ndim - 2) tensor storing
the batched diagonals.
"""
if axis1 == axis2:
raise ValueError("axis1, axis2 cannot be equal.")
return jnp.diagonal(tensor, offset=offset, axis1=axis1, axis2=axis2)
def diagflat(self, tensor: Tensor, k: int = 0) -> Tensor:
""" Flattens tensor and creates a new matrix of zeros with its elements
on the k'th diagonal.
Args:
tensor: A tensor.
k : The diagonal upon which to place its elements.
Returns:
tensor: A new tensor with all zeros save the specified diagonal.
"""
return jnp.diag(jnp.ravel(tensor), k=k)
def trace(self, tensor: Tensor, offset: int = 0, axis1: int = -2,
axis2: int = -1) -> Tensor:
"""Return summed entries along diagonals.
If tensor is 2-D, the sum is over the
diagonal of tensor with the given offset,
i.e., the collection of elements of the form a[i, i+offset].
If a has more than two dimensions, then the axes specified by
axis1 and axis2 are used to determine the 2-D sub-array whose diagonal is
summed.
Args:
tensor: A tensor.
offset: Offset of the diagonal from the main diagonal.
axis1, axis2: Axis to be used as the first/second axis of the 2D
sub-arrays from which the diagonals should be taken.
Defaults to second last/last axis.
Returns:
array_of_diagonals: The batched summed diagonals.
"""
if axis1 == axis2:
raise ValueError("axis1, axis2 cannot be equal.")
return jnp.trace(tensor, offset=offset, axis1=axis1, axis2=axis2)
def abs(self, tensor: Tensor) -> Tensor:
"""
Returns the elementwise absolute value of tensor.
Args:
tensor: An input tensor.
Returns:
tensor: Its elementwise absolute value.
"""
return jnp.abs(tensor)
def sign(self, tensor: Tensor) -> Tensor:
"""
Returns an elementwise tensor with entries
y[i] = 1, 0, -1 where tensor[i] > 0, == 0, and < 0 respectively.
For complex input the behaviour of this function may depend on the backend.
The Jax backend version returns y[i] = x[i]/sqrt(x[i]^2).
Args:
tensor: The input tensor.
"""
return jnp.sign(tensor)
def item(self, tensor):
return tensor.item()
def power(self, a: Tensor, b: Union[Tensor, float]) -> Tensor:
"""
Returns the power of tensor a to the value of b.
In the case b is a tensor, then the power is by element
with a as the base and b as the exponent.
In the case b is a scalar, then the power of each value in a
is raised to the exponent of b.
Args:
a: The tensor that contains the base.
b: The tensor that contains the exponent or a single scalar.
"""
return jnp.power(a, b)
def eps(self, dtype: Type[np.number]) -> float:
"""
Return machine epsilon for given `dtype`
Args:
dtype: A dtype.
Returns:
float: Machine epsilon.
"""
return jnp.finfo(dtype).eps
| google/TensorNetwork | tensornetwork/backends/jax/jax_backend.py | Python | apache-2.0 | 36,454 | 0.004197 |
import numpy as np
import scipy.integrate as integ
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
import derivs
import gasproperties
import opacity
import loadinitial
import modelparameters
from multiprocessing import Pool
def deriv_wrapper(y, x, X, Y, Z, mu):
"""
Creates a system of 1st order ode's to be solved
Assumes:
mass, m as independent variable
Inputs:
y 1x4 float - consists of:
- r(m), radius [cm]
- l(m), luminosity [erg s^-1]
- P(m), total pressure [dyne cm^-2]
- T(m), Temperature [K]
x 1x1 float - consists of:
- m, mass [g]
params 1x4 float - consists of:
- X, hydrogen mass fraction
- Y, helium mass fraction
- Z, metals mass fraction
- mu, mean molecular weight
Outputs:
dy_dx 1x4 float - consists of:
- dr(m)/dm, radius derivative [cm g^-1]
- dl(m)/dm, luminosity derivative [erg s^-1 g^-1]
- dP(m)/dm, total pressure derivative [dyne cm^-2 g^-1]
- dT(m)/dm, Temperature derivative [K g^-1]
Warnings:
"""
m = x
r, l, P, T = y
beta = gasproperties.calculate_beta(P, T)
rho = gasproperties.calculate_density(P * beta, T, mu)
kappa = opacity.calculate_opacity(T, rho)
dr_dm = derivs.calculate_dr_dm(r, rho)
dl_dm = derivs.calculate_dl_dm(T, rho, X, Y, Z)
dP_dm = derivs.calculate_dP_dm(m, r)
dT_dm = derivs.calculate_dT_dm(m, r, l, P, T, kappa)
dy_dx = [dr_dm, dl_dm, dP_dm, dT_dm]
return dy_dx
def integrate_outwards(M_star, m_fitting_point, P_c, T_c, mu, X, Y, Z,
n_steps=1e4,
logspacing=True,
file_suffix="",
write=False):
m0 = 1e-8 * M_star
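    # Start the outward integration a tiny mass fraction off-center; the
    # stellar structure equations are singular at m = 0.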
beta = gasproperties.calculate_beta(P_c, T_c)
rho = gasproperties.calculate_density(P_c * beta, T_c, mu)
r0, l0, P0, T0 = loadinitial.load1(m0, P_c, T_c, mu, X, Y, Z)
y0 = [r0, l0, P0, T0]
mu = modelparameters.mu
params = (X, Y, Z, mu)
if logspacing is True:
m = np.logspace(np.log10(m0), np.log10(m_fitting_point), n_steps)
else:
m = np.linspace(m0, m_fitting_point, n_steps)
y, infodict = integ.odeint(deriv_wrapper, y0, m,
mxstep=500,
args=params, full_output=True)
r,l,P,T = y.transpose()
sol = np.column_stack((m, y))
if write is True:
np.savetxt('data/sol_outwards' + file_suffix + '.dat', sol,
header=" \t\t m [m]\t\t\t\t\t r [cm]\t\t\t\t\t\t l [erg s^-1]\t\t\t\t\t P [dyne cm^-2]\t\t\t\t\t\t T [K]")
plt.figure(1)
plt.subplot(221)
plt.plot(m / M_star, r)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$r(m)$")
plt.subplot(222)
plt.semilogy(m / M_star, l)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$\ell(m)$")
plt.subplot(223)
plt.semilogy(m / M_star, P)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$P(m)$")
plt.subplot(224)
plt.semilogy(m / M_star, T)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$T(m)$")
plt.savefig("plots/stellar_model_outwards" + file_suffix + ".eps")
plt.savefig("plots/stellar_model_outwards" + file_suffix + ".pdf")
# plt.show()
plt.close()
return m, y, infodict
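# Hedged note: this is set up as a shoot-to-a-fitting-point scheme --
# integrate_outwards stops at m_fitting_point and integrate_inwards (below)
# integrates from M_star down to the same point; test() concatenates the two
# branches without enforcing continuity at the fitting point.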
def integrate_inwards(M_star, m_fitting_point, R_star, L_star, mu, X, Y, Z,
n_steps=1e4,
logspacing=False,
file_suffix="",
write=False):
r0, l0, P0, T0 = loadinitial.load2(M_star, R_star, L_star, mu)
y0 = [r0, l0, P0, T0]
mu = modelparameters.mu
params = (X, Y, Z, mu)
if logspacing is True:
m = np.logspace(np.log10(m_fitting_point), np.log10(M_star), n_steps)
else:
m = np.linspace(m_fitting_point, M_star, n_steps)
m = np.flipud(m) #reverse direction of integration
y, infodict = integ.odeint(deriv_wrapper, y0, m,
mxstep=5000,
args=params, full_output=True)
r,l,P,T = y.transpose()
sol = np.column_stack((m, y))
if write is True:
np.savetxt('data/sol_inwards' + file_suffix + '.dat', sol,
header=" \t\t m [m]\t\t\t\t\t r [cm]\t\t\t\t\t\t l [erg s^-1]\t\t\t\t\t P [dyne cm^-2]\t\t\t\t\t\t T [K]")
plt.figure(1)
plt.subplot(221)
plt.plot(m / M_star, r)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$r(m)$")
plt.subplot(222)
plt.semilogy(m / M_star, l)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$\ell(m)$")
plt.subplot(223)
plt.semilogy(m / M_star, P)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$P(m)$")
plt.subplot(224)
plt.semilogy(m / M_star, T)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$T(m)$")
plt.savefig("plots/stellar_model_inwards" + file_suffix + ".pdf")
# plt.show()
plt.close()
return m, y, infodict
def test():
X = modelparameters.X
Y = modelparameters.Y
Z = modelparameters.Z
mu = modelparameters.mu
params = (X, Y, Z, mu)
P_c = modelparameters.P_c # core pressure, [dyne cm^-2]
T_c = modelparameters.T_c # core temperature, [K]
M_star = modelparameters.M_star
R_star = modelparameters.R_star
L_star = modelparameters.L_star
m_fitting_point = modelparameters.m_fitting_point
m_outward, y_outward, infodict_outward = integrate_outwards(M_star,
m_fitting_point, P_c, T_c, mu, X, Y, Z, n_steps = 5e1)
m_inward, y_inward, infodict_inward = integrate_inwards(M_star,
m_fitting_point, R_star, L_star, mu, X, Y, Z, n_steps = 5e1)
r_inward, l_inward, P_inward, T_inward = y_inward.transpose()
r_outward, l_outward, P_outward, T_outward = y_outward.transpose()
m_tot = np.concatenate((m_outward, np.flipud(m_inward)))
r_tot = np.concatenate((r_outward, np.flipud(r_inward)))
l_tot = np.concatenate((l_outward, np.flipud(l_inward)))
P_tot = np.concatenate((P_outward, np.flipud(P_inward)))
T_tot = np.concatenate((T_outward, np.flipud(T_inward)))
plt.figure(1)
plt.subplot(221)
plt.plot(m_tot / M_star, r_tot)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$r(m)$")
plt.subplot(222)
plt.semilogy(m_tot / M_star, l_tot)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$\ell(m)$")
plt.subplot(223)
plt.semilogy(m_tot / M_star, P_tot)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$P(m)$")
plt.subplot(224)
plt.semilogy(m_tot / M_star, T_tot)
plt.xlabel(r"$\frac{m}{M}$")
plt.ylabel(r"$T(m)$")
plt.savefig("plots/stellar_model_total.pdf")
# plt.show()
plt.close()
return (m_tot, r_tot, l_tot, P_tot, T_tot)
| egentry/stellarstructure | integrate.py | Python | mit | 6,257 | 0.042033 |
'''
Created on Jun 18, 2015
@author: Subhasis
'''
# Replace the following lines with client IDs obtained from the APIs
# Console or Cloud Console.
WEB_CLIENT_ID = '540614338141-drb3g1kcetlp4sbgaj7dfkjci6n5ove5.apps.googleusercontent.com'
ANDROID_CLIENT_ID = 'replace with Android client ID'
IOS_CLIENT_ID = 'replace with iOS client ID'
ANDROID_AUDIENCE = WEB_CLIENT_ID
| SubhasisDutta/NoteBook | settings.py | Python | mit | 370 | 0.002703 |
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
import ConfigParser
import calendar
import logging
import mimetypes
import os
import pprint
import requests
import urllib
from urlparse import urljoin
import json
from django.utils.dateformat import format
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger, InvalidPage
from django.core.urlresolvers import reverse
from django.db.models import Max
from django.http import HttpResponse, HttpResponseRedirect, StreamingHttpResponse
from django.core.servers.basehttp import FileWrapper
from django.shortcuts import render
from contrib import utils
from main import models
from mcpserver import Client as MCPServerClient
logger = logging.getLogger('archivematica.dashboard')
class AtomError(Exception):
pass
# Used for debugging
def pr(object):
return pprint.pformat(object)
# Used for raw SQL queries to return data in dictionaries instead of lists
def dictfetchall(cursor):
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
def keynat(string):
r'''A natural sort helper function for sort() and sorted()
without using regular expressions or exceptions.
>>> items = ('Z', 'a', '10th', '1st', '9')
>>> sorted(items)
['10th', '1st', '9', 'Z', 'a']
>>> sorted(items, key=keynat)
['1st', '9', '10th', 'a', 'Z']
'''
it = type(1)
r = []
for c in string:
if c.isdigit():
d = int(c)
if r and type( r[-1] ) == it:
r[-1] = r[-1] * 10 + d
else:
r.append(d)
else:
r.append(c.lower())
return r
def json_response(data, status_code=200):
return HttpResponse(
json.dumps(data),
content_type='application/json',
status=status_code,
)
def pager(objects, items_per_page, current_page_number):
"""
:param objects: Iterable of items to paginate
:param items_per_page: Number of items on each page
:param current_page_number: Page to return information for
:return: django.paginator.Page object (with additional attributes)
"""
if current_page_number is None:
current_page_number = 1
paginator = Paginator(objects, items_per_page)
try:
page = paginator.page(current_page_number)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
page = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
page = paginator.page(paginator.num_pages)
# For compatibility with old code, add the alternate names as attributes
# TODO replace all places that call this with the actual parameters
page.objects = page.object_list
page.current = page.number
try:
page.previous = page.previous_page_number()
except InvalidPage:
page.previous = None
try:
page.next = page.next_page_number()
except InvalidPage:
page.next = None
page.has_other = page.has_other_pages()
page.total_items = paginator.count
page.num_pages = paginator.num_pages
# Add lists of the (up to) 5 adjacent pages
num_neighbours = 5
if page.number > num_neighbours:
page.previous_pages = range(page.number - num_neighbours, page.number)
else:
page.previous_pages = range(1, page.number)
if page.number < (paginator.num_pages - num_neighbours):
page.next_pages = range(page.number + 1, page.number + num_neighbours + 1)
else:
page.next_pages = range(page.number + 1, paginator.num_pages + 1)
return page
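# Worked example (hedged; numbers are illustrative): with 95 objects,
# items_per_page=10 and current_page_number=3, the returned Page has
# page.current == 3, page.num_pages == 10, page.previous_pages == [1, 2]
# and page.next_pages == [4, 5, 6, 7, 8].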
def get_file_sip_uuid(fileuuid):
file = models.File.objects.get(uuid=fileuuid)
return file.sip.uuid
def task_duration_in_seconds(task):
if task.endtime != None:
duration = int(format(task.endtime, 'U')) - int(format(task.starttime, 'U'))
else:
duration = ''
if duration == 0:
duration = '< 1'
return duration
def get_jobs_by_sipuuid(uuid):
jobs = models.Job.objects.filter(sipuuid=uuid,subjobof='').order_by('-createdtime', 'subjobof')
priorities = {
'completedUnsuccessfully': 0,
'requiresAprroval': 1,
'requiresApproval': 1,
'exeCommand': 2,
'verificationCommand': 3,
'completedSuccessfully': 4,
'cleanupSuccessfulCommand': 5,
}
def get_priority(job):
try: return priorities[job.currentstep]
except Exception: return 0
return sorted(jobs, key = get_priority) # key = lambda job: priorities[job.currentstep]
def get_metadata_type_id_by_description(description):
return models.MetadataAppliesToType.objects.get(description=description)
def get_setting(setting, default=''):
try:
setting = models.DashboardSetting.objects.get(name=setting)
return setting.value
except:
return default
def get_boolean_setting(setting, default=''):
setting = get_setting(setting, default)
if setting == 'False':
return False
else:
return bool(setting)
def set_setting(setting, value=''):
try:
setting_data = models.DashboardSetting.objects.get(name=setting)
except:
setting_data = models.DashboardSetting.objects.create()
setting_data.name = setting
setting_data.value = value
setting_data.save()
def get_client_config_value(field):
clientConfigFilePath = '/etc/archivematica/MCPClient/clientConfig.conf'
config = ConfigParser.SafeConfigParser()
config.read(clientConfigFilePath)
try:
return config.get('MCPClient', field)
except:
return ''
def get_server_config_value(field):
clientConfigFilePath = '/etc/archivematica/MCPServer/serverConfig.conf'
config = ConfigParser.SafeConfigParser()
config.read(clientConfigFilePath)
try:
return config.get('MCPServer', field)
except:
return ''
def get_atom_levels_of_description(clear=True):
"""
Fetch levels of description from an AtoM instance and store them in the database.
The URL and authentication details for the AtoM instance must already be stored in the settings.
Note that only English levels of description are fetched at this point in time.
:param bool clear: When True, deletes all existing levels of description from the Archivematica database before fetching; otherwise, the fetched levels of description will be appended to the already-stored values.
:raises AtomError: if no AtoM URL or authentication credentials are defined in the settings, or if the levels of description cannot be fetched for another reason
"""
url = get_setting('dip_upload_atom_url')
if not url:
raise AtomError("AtoM URL not defined!")
auth = (
get_setting('dip_upload_atom_email'),
get_setting('dip_upload_atom_password'),
)
if not auth:
raise AtomError("AtoM authentication settings not defined!")
# taxonomy 34 is "level of description"
dest = urljoin(url, 'api/taxonomies/34')
response = requests.get(dest, params={'culture': 'en'}, auth=auth)
if response.status_code == 200:
base = 1
if clear:
models.LevelOfDescription.objects.all().delete()
else:
# Add after existing LoD
base = models.LevelOfDescription.objects.aggregate(max=Max('sortorder'))['max'] + 1
levels = response.json()
for idx, level in enumerate(levels):
lod = models.LevelOfDescription(name=level['name'], sortorder=base + idx)
lod.save()
else:
raise AtomError("Unable to fetch levels of description from AtoM!")
def redirect_with_get_params(url_name, *args, **kwargs):
url = reverse(url_name, args = args)
params = urllib.urlencode(kwargs)
return HttpResponseRedirect(url + "?%s" % params)
def send_file_or_return_error_response(request, filepath, content_type, verb='download'):
if os.path.exists(filepath):
return send_file(request, filepath)
else:
return render(request, 'not_found.html', {
'content_type': content_type,
'verb': verb
})
def send_file(request, filepath):
"""
Send a file through Django without loading the whole file into
memory at once. The FileWrapper will turn the file object into an
iterator for chunks of 8KB.
"""
filename = os.path.basename(filepath)
extension = os.path.splitext(filepath)[1].lower()
wrapper = FileWrapper(file(filepath))
response = HttpResponse(wrapper)
# force download for certain filetypes
extensions_to_download = ['.7z', '.zip']
try:
index = extensions_to_download.index(extension)
response['Content-Type'] = 'application/force-download'
response['Content-Disposition'] = 'attachment; filename="' + filename + '"'
except:
mimetype = mimetypes.guess_type(filename)[0]
response['Content-type'] = mimetype
response['Content-Length'] = os.path.getsize(filepath)
return response
def file_is_an_archive(file):
file = file.lower()
return file.endswith('.zip') or file.endswith('.tgz') or file.endswith('.tar.gz')
def feature_settings():
return {
'atom_dip_admin': 'dashboard_administration_atom_dip_enabled',
'dspace': 'dashboard_administration_dspace_enabled'
}
def hidden_features():
hide_features = {}
short_forms = feature_settings()
for short_form, long_form in short_forms.items():
# hide feature if setting isn't enabled
hide_features[short_form] = not get_boolean_setting(long_form)
return hide_features
def pad_destination_filepath_if_it_already_exists(filepath, original=None, attempt=0):
if original == None:
original = filepath
attempt = attempt + 1
if os.path.exists(filepath):
if os.path.isdir(filepath):
return pad_destination_filepath_if_it_already_exists(original + '_' + str(attempt), original, attempt)
else:
# need to work out basename
basedirectory = os.path.dirname(original)
basename = os.path.basename(original)
# do more complex padding to preserve file extension
period_position = basename.index('.')
non_extension = basename[0:period_position]
extension = basename[period_position:]
new_basename = non_extension + '_' + str(attempt) + extension
new_filepath = os.path.join(basedirectory, new_basename)
return pad_destination_filepath_if_it_already_exists(new_filepath, original, attempt)
return filepath
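# Behaviour sketch (paths are hypothetical): if "/tmp/report.csv" already
# exists, this returns "/tmp/report_1.csv" (then "_2", and so on); for an
# existing directory "/tmp/extract" it returns "/tmp/extract_1".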
def processing_config_path():
return os.path.join(
get_server_config_value('sharedDirectory'),
'sharedMicroServiceTasksConfigs/processingMCPConfigs'
)
def stream_file_from_storage_service(url, error_message='Remote URL returned {}'):
stream = requests.get(url, stream=True)
if stream.status_code == 200:
content_type = stream.headers.get('content-type', 'text/plain')
return StreamingHttpResponse(stream, content_type=content_type)
else:
response = {
'success': False,
'message': error_message.format(stream.status_code)
}
return json_response(response, status_code=400)
def units_status(objects, unit_type):
jobs_awaiting = MCPServerClient().list_jobs_awaiting_approval().jobs
data = {'mcp': True, 'objects': list()}
model = {'ingest': models.SIP, 'transfer': models.Transfer}
unit_model = model[unit_type]
for item in objects:
if unit_model.objects.is_hidden(item['sipuuid']):
continue
jobs = get_jobs_by_sipuuid(item['sipuuid'])
# Add new unit to list
unit = {
'directory': utils.get_directory_name_from_job(jobs),
'timestamp': calendar.timegm(item['timestamp'].timetuple()),
'uuid': item['sipuuid'],
'id': item['sipuuid'],
'jobs': list(),
}
data['objects'].append(unit)
# Include in the unit the list of jobs
for job in jobs:
new_job = {
'uuid': job.jobuuid,
'type': job.jobtype,
'microservicegroup': job.microservicegroup,
'subjobof': job.subjobof,
'currentstep': job.currentstep,
'timestamp': '%d.%s' % (calendar.timegm(job.createdtime.timetuple()), str(job.createdtimedec).split('.')[-1]),
}
unit['jobs'].append(new_job)
# We should find a better way to do this!
for job_awaiting in jobs_awaiting:
if job_awaiting.UUID != job.jobuuid:
continue
choices = dict()
for ch in job_awaiting.choices:
choices[ch.value] = ch.description
if choices:
new_job['choices'] = choices
return data
| sevein/archivematica | src/dashboard/src/components/helpers.py | Python | agpl-3.0 | 13,812 | 0.004634 |
#!/usr/bin/env python
# Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import os
import sys
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(THIS_DIR, '..', '..'))
from tools import run_coverage
if __name__ == '__main__':
sys.exit(run_coverage.main(
THIS_DIR,
[],
'PRESUBMIT.py,components,*test*,tool*'))
| luci/luci-py | appengine/components/components/config/run_coverage.py | Python | apache-2.0 | 491 | 0.004073 |
from django.conf.urls import *
urlpatterns = patterns('',
url(r'^debug/', include('tests.urls')),
url(r'^', include('sentry.web.urls')),
)
| primepix/django-sentry | example_project/urls.py | Python | bsd-3-clause | 148 | 0.006757 |
#!/usr/bin/python
import sys
assert len(sys.argv) > 2
with open(sys.argv[1], "w") as out:
for l in sys.argv[2:]:
out.write("# %s\n" % l)
| facebook/buck | test/com/facebook/buck/android/testdata/android_project/native/proguard_gen/generator.py | Python | apache-2.0 | 151 | 0.006623 |
from django.conf.urls import patterns, include, url
handler404 = 'jobs.views.FileNotFound'
urlpatterns = [
url(r'^', include('jobs.urls'))
] | marchchad/GatorApp | gator_dev/urls.py | Python | gpl-2.0 | 146 | 0.006849 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import get_user_model
from django.utils import timezone
from django.template import defaultfilters
from django.conf import settings
from ..core.utils.timezone import timezones
from .models import UserProfile
User = get_user_model()
username_max_length = User._meta.get_field('username').max_length
TIMEZONE_CHOICES = timezones()
class CleanEmailMixin(object):
def clean_email(self):
email = self.cleaned_data["email"]
if settings.ST_CASE_INSENSITIVE_EMAILS:
email = email.lower()
if not settings.ST_UNIQUE_EMAILS:
return email
is_taken = User.objects\
.filter(email=email)\
.exists()
if is_taken:
raise forms.ValidationError(_("The email is taken."))
return email
def get_email(self):
return self.cleaned_data["email"]
class EmailCheckForm(CleanEmailMixin, forms.Form):
email = forms.CharField(label=_("Email"), widget=forms.EmailInput, max_length=254)
class EmailChangeForm(CleanEmailMixin, forms.Form):
email = forms.CharField(label=_("Email"), widget=forms.EmailInput, max_length=254)
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
def __init__(self, user=None, *args, **kwargs):
self.user = user
super(EmailChangeForm, self).__init__(*args, **kwargs)
if not self.user.has_usable_password():
self.fields.pop('password')
def clean_password(self):
password = self.cleaned_data["password"]
if not self.user.check_password(password):
raise forms.ValidationError(_("The provided password is incorrect."))
return password
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ("first_name", "last_name")
class UserProfileForm(forms.ModelForm):
timezone = forms.ChoiceField(label=_("Time zone"), choices=TIMEZONE_CHOICES)
class Meta:
model = UserProfile
fields = ("location", "timezone", "hide_last_seen")
def __init__(self, *args, **kwargs):
super(UserProfileForm, self).__init__(*args, **kwargs)
now = timezone.localtime(timezone.now())
self.fields['timezone'].help_text = _('Current time is: %(date)s %(time)s') % {
'date': defaultfilters.date(now),
'time': defaultfilters.time(now)
}
class AvatarChangeForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ("avatar_chosen", "avatar")
widgets = {
'avatar_chosen': forms.RadioSelect
}
class UsernameChangeForm(forms.Form):
new_username = forms.CharField(label=_("New username"), max_length=username_max_length)
password = forms.CharField(label=_("Current password"), widget=forms.PasswordInput)
def __init__(self, user, *args, **kwargs):
self.user = user
super(UsernameChangeForm, self).__init__(*args, **kwargs)
def clean(self):
if not self.user.has_usable_password():
raise forms.ValidationError(_('You do not have a password set. Please use the set password form on your profile before trying to change your username.'))
if self.user.st.last_username_change_date:
raise forms.ValidationError(_('Sorry, you cannot change your username again!'))
def clean_new_username(self):
username = self.cleaned_data["new_username"]
if username.lower() in settings.ST_INVALID_USERNAMES:
raise forms.ValidationError(_("The username is invalid."))
if settings.ST_CASE_INSENSITIVE_EMAILS:
is_taken = User.objects.filter(username__iexact=username).exists()
else:
is_taken = User.objects.filter(username__exact=username).exists()
if is_taken:
raise forms.ValidationError(_("The username is taken."))
return username
def clean_password(self):
password = self.cleaned_data["password"]
if not self.user.check_password(password):
raise forms.ValidationError(_("The provided password is incorrect."))
return password
| alesdotio/Spirit | spirit/user/forms.py | Python | mit | 4,294 | 0.002329 |
from runtests.mpi import MPITest
from nbodykit.lab import *
from nbodykit import setup_logging
from numpy.testing import assert_allclose
import tempfile
import os
@MPITest([1])
def test_hdf(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8')])
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
grp = ff.create_group('Y')
grp.create_dataset('Position', data=dset['Position']) # column as dataset
grp.create_dataset('Mass', data=dset['Mass']) # column as dataset
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
assert_allclose(source['Position'], dset['Position'])
region = source.query_range(32, 64)
assert_allclose(region['Position'], dset['Position'][32:64])
os.unlink(tmpfile)
@MPITest([1, 4])
def test_query_range(comm):
import h5py
# fake structured array
dset = numpy.empty(1024, dtype=[('Position', ('f8', 3)), ('Mass', 'f8'), ('Index', 'i8')])
dset['Index'] = numpy.arange(1024)
dset['Position'] = numpy.random.random(size=(1024, 3))
dset['Mass'] = numpy.random.random(size=1024)
if comm.rank == 0:
tmpfile = tempfile.mkstemp()[1]
with h5py.File(tmpfile , 'w') as ff:
ds = ff.create_dataset('X', data=dset) # store structured array as dataset
ds.attrs['BoxSize'] = 1.0
tmpfile = comm.bcast(tmpfile)
else:
tmpfile = comm.bcast(None)
cosmo = cosmology.Planck15
source = HDFCatalog(tmpfile, dataset='X', attrs={"Nmesh":32}, comm=comm)
correct_region = source.gslice(32, 64)
region = source.query_range(32, 64)
assert_allclose(
numpy.concatenate(comm.allgather(region['Index'].compute())),
numpy.arange(32, 64)
)
if comm.rank == 0:
os.unlink(tmpfile)
@MPITest([1])
def test_csv(comm):
with tempfile.NamedTemporaryFile() as ff:
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(ff, data, fmt='%.7e'); ff.seek(0)
# read nrows
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(ff.name, names, blocksize=100, comm=comm)
# make sure data is the same
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(data[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
@MPITest([1])
def test_stack_glob(comm):
tmpfile1 = 'test-glob-1.dat'
tmpfile2 = 'test-glob-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog('test-glob-*', names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
@MPITest([1])
def test_stack_list(comm):
tmpfile1 = 'test-list-1.dat'
tmpfile2 = 'test-list-2.dat'
# generate data
data = numpy.random.random(size=(100,5))
numpy.savetxt(tmpfile1, data, fmt='%.7e')
numpy.savetxt(tmpfile2, data, fmt='%.7e')
# read using a glob
names =['a', 'b', 'c', 'd', 'e']
f = CSVCatalog(['test-list-1.dat', 'test-list-2.dat'], names, blocksize=100, comm=comm)
# make sure print works
print(f)
# make sure data is the same
fulldata = numpy.concatenate([data, data], axis=0)
for i, name in enumerate(names):
numpy.testing.assert_almost_equal(fulldata[:,i], f[name].compute(), decimal=7)
# make sure all the columns are there
assert all(col in f for col in names)
os.unlink(tmpfile1)
os.unlink(tmpfile2)
| nickhand/nbodykit | nbodykit/source/catalog/tests/test_file.py | Python | gpl-3.0 | 4,337 | 0.007378 |
# Copyright (c) 2013-2014, Cornell University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of HyperDex nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class MicrotransactionCall : pass
class AsyncCall: pass
class SyncCall: pass
class NoFailCall: pass
class Iterator: pass
class StructClient(object):
args = (('struct hyperdex_client*', 'client'),)
class StructAdmin(object):
args = (('struct hyperdex_admin*', 'admin'),)
class Microtransaction(object):
args = (('struct hyperdex_client_microtransaction*', 'microtransaction'),)
class SpaceName(object):
args = (('const char*', 'space'),)
class SpaceNameSource(object):
args = (('const char*', 'source'),)
class SpaceNameTarget(object):
args = (('const char*', 'target'),)
class Key(object):
args = (('const char*', 'key'), ('size_t', 'key_sz'))
class Predicates(object):
args = (('const struct hyperdex_client_attribute_check*', 'checks'),
('size_t', 'checks_sz'))
class Attributes(object):
args = (('const struct hyperdex_client_attribute*', 'attrs'),
('size_t', 'attrs_sz'))
class MapAttributes(object):
args = (('const struct hyperdex_client_map_attribute*', 'mapattrs'),
('size_t', 'mapattrs_sz'))
class AttributeNames(object):
args = (('const char**', 'attrnames'),
('size_t', 'attrnames_sz'))
class Status(object):
args = (('enum hyperdex_client_returncode', 'status'),)
class AdminStatus(object):
args = (('enum hyperdex_admin_returncode', 'status'),)
class Description(object):
args = (('const char*', 'description'),)
class SortBy(object):
args = (('const char*', 'sort_by'),)
class Limit(object):
args = (('uint64_t', 'limit'),)
class Count(object):
args = (('uint64_t', 'count'),)
class MaxMin(object):
args = (('int', 'maxmin'),)
class ReadOnly(object):
args = (('int', 'ro'),)
class FaultTolerance(object):
args = (('uint64_t', 'ft'),)
class SpaceDescription(object):
args = (('const char*', 'description'),)
class SpaceList(object):
args = (('const char*', 'spaces'),)
class IndexList(object):
args = (('const char*', 'indexes'),)
class SubspaceList(object):
args = (('const char*', 'subspaces'),)
class Token(object):
args = (('uint64_t', 'token'),)
class Address(object):
args = (('const char*', 'address'),)
class BackupName(object):
args = (('const char*', 'backup'),)
class BackupList(object):
args = (('const char*', 'backups'),)
class PerformanceCounters(object):
args = (('struct hyperdex_admin_perf_counter', 'pc'),)
class AttributeName(object):
args = (('const char*', 'attribute'),)
class IndexID(object):
args = (('uint64_t', 'idxid'),)
class Method(object):
def __init__(self, name, form, args_in, args_out):
self.name = name
self.form = form
self.args_in = args_in
self.args_out = args_out
# NOTE: The commas here aren't redundant, because the parser expects lists of arguments
Client = [
Method('get', AsyncCall, (SpaceName, Key), (Status, Attributes)),
Method('get_partial', AsyncCall, (SpaceName, Key, AttributeNames), (Status, Attributes)),
Method('put', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_put', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_put', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('cond_put_or_create', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_put', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('put_if_not_exist', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('del', AsyncCall, (SpaceName, Key), (Status,)),
Method('cond_del', AsyncCall, (SpaceName, Key, Predicates), (Status,)),
Method('group_del', AsyncCall, (SpaceName, Predicates), (Status, Count)),
Method('atomic_add', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_atomic_add', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_atomic_add', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_atomic_add', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('atomic_sub', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_atomic_sub', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_atomic_sub', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_atomic_sub', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('atomic_mul', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_atomic_mul', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_atomic_mul', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_atomic_mul', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('atomic_div', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_atomic_div', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_atomic_div', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_atomic_div', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('atomic_mod', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('cond_atomic_mod', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_atomic_mod', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('atomic_and', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_atomic_and', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_atomic_and', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_atomic_and', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('atomic_or', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_atomic_or', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_atomic_or', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_atomic_or', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('atomic_xor', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('cond_atomic_xor', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_atomic_xor', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('atomic_min', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('cond_atomic_min', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_atomic_min', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('atomic_max', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('cond_atomic_max', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_atomic_max', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('string_prepend', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_string_prepend', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_string_prepend', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_string_prepend', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('string_append', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_string_append', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_string_append', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_string_append', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('string_ltrim', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_string_ltrim', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_string_ltrim', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_string_ltrim', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('string_rtrim', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_string_rtrim', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_string_rtrim', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_string_rtrim', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('list_lpush', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_list_lpush', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_list_lpush', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_list_lpush', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('list_rpush', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_list_rpush', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_list_rpush', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_list_rpush', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('set_add', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('cond_set_add', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_set_add', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('set_remove', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('cond_set_remove', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_set_remove', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('set_intersect', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('cond_set_intersect', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_set_intersect', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('set_union', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('cond_set_union', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_set_union', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('document_rename', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_document_rename', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_document_rename', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_document_rename', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('document_unset', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('uxact_document_unset', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('cond_document_unset', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_document_unset', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('map_add', AsyncCall, (SpaceName, Key, MapAttributes), (Status,)),
Method('cond_map_add', AsyncCall, (SpaceName, Key, Predicates, MapAttributes), (Status,)),
Method('group_map_add', AsyncCall, (SpaceName, Predicates, MapAttributes), (Status, Count)),
Method('map_remove', AsyncCall, (SpaceName, Key, Attributes), (Status,)),
Method('cond_map_remove', AsyncCall, (SpaceName, Key, Predicates, Attributes), (Status,)),
Method('group_map_remove', AsyncCall, (SpaceName, Predicates, Attributes), (Status, Count)),
Method('map_atomic_add', AsyncCall, (SpaceName, Key, MapAttributes), (Status,)),
Method('cond_map_atomic_add', AsyncCall, (SpaceName, Key, Predicates, MapAttributes), (Status,)),
Method('group_map_atomic_add', AsyncCall, (SpaceName, Predicates, MapAttributes), (Status, Count)),
Method('map_atomic_sub', AsyncCall, (SpaceName, Key, MapAttributes), (Status,)),
Method('cond_map_atomic_sub', AsyncCall, (SpaceName, Key, Predicates, MapAttributes), (Status,)),
Method('group_map_atomic_sub', AsyncCall, (SpaceName, Predicates, MapAttributes), (Status, Count)),
Method('map_atomic_mul', AsyncCall, (SpaceName, Key, MapAttributes), (Status,)),
Method('cond_map_atomic_mul', AsyncCall, (SpaceName, Key, Predicates, MapAttributes), (Status,)),
Method('group_map_atomic_mul', AsyncCall, (SpaceName, Predicates, MapAttributes), (Status, Count)),
Method('map_atomic_div', AsyncCall, (SpaceName, Key, MapAttributes), (Status,)),
Method('cond_map_atomic_div', AsyncCall, (SpaceName, Key, Predicates, MapAttributes), (Status,)),
Method('group_map_atomic_div', AsyncCall, (SpaceName, Predicates, MapAttributes), (Status, Count)),
Method('map_atomic_mod', AsyncCall, (SpaceName, Key, MapAttributes), (Status,)),
Method('cond_map_atomic_mod', AsyncCall, (SpaceName, Key, Predicates, MapAttributes), (Status,)),
Method('group_map_atomic_mod', AsyncCall, (SpaceName, Predicates, MapAttributes), (Status, Count)),
Method('map_atomic_and', AsyncCall, (SpaceName, Key, MapAttributes), (Status,)),
Method('cond_map_atomic_and', AsyncCall, (SpaceName, Key, Predicates, MapAttributes), (Status,)),
Method('group_map_atomic_and', AsyncCall, (SpaceName, Predicates, MapAttributes), (Status, Count)),
Method('map_atomic_or', AsyncCall, (SpaceName, Key, MapAttributes), (Status,)),
Method('cond_map_atomic_or', AsyncCall, (SpaceName, Key, Predicates, MapAttributes), (Status,)),
Method('group_map_atomic_or', AsyncCall, (SpaceName, Predicates, MapAttributes), (Status, Count)),
Method('map_atomic_xor', AsyncCall, (SpaceName, Key, MapAttributes), (Status,)),
Method('cond_map_atomic_xor', AsyncCall, (SpaceName, Key, Predicates, MapAttributes), (Status,)),
Method('group_map_atomic_xor', AsyncCall, (SpaceName, Predicates, MapAttributes), (Status, Count)),
Method('map_string_prepend', AsyncCall, (SpaceName, Key, MapAttributes), (Status,)),
Method('cond_map_string_prepend', AsyncCall, (SpaceName, Key, Predicates, MapAttributes), (Status,)),
Method('group_map_string_prepend', AsyncCall, (SpaceName, Predicates, MapAttributes), (Status, Count)),
Method('map_string_append', AsyncCall, (SpaceName, Key, MapAttributes), (Status,)),
Method('cond_map_string_append', AsyncCall, (SpaceName, Key, Predicates, MapAttributes), (Status,)),
Method('group_map_string_append', AsyncCall, (SpaceName, Predicates, MapAttributes), (Status, Count)),
Method('map_atomic_min', AsyncCall, (SpaceName, Key, MapAttributes), (Status,)),
Method('cond_map_atomic_min', AsyncCall, (SpaceName, Key, Predicates, MapAttributes), (Status,)),
Method('group_map_atomic_min', AsyncCall, (SpaceName, Predicates, MapAttributes), (Status, Count)),
Method('uxact_atomic_min', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('map_atomic_max', AsyncCall, (SpaceName, Key, MapAttributes), (Status,)),
Method('cond_map_atomic_max', AsyncCall, (SpaceName, Key, Predicates, MapAttributes), (Status,)),
Method('group_map_atomic_max', AsyncCall, (SpaceName, Predicates, MapAttributes), (Status, Count)),
Method('uxact_atomic_max', MicrotransactionCall, (Microtransaction, Attributes), ()),
Method('search', Iterator, (SpaceName, Predicates), (Status, Attributes)),
Method('search_describe', AsyncCall, (SpaceName, Predicates), (Status, Description)),
Method('sorted_search', Iterator, (SpaceName, Predicates, SortBy, Limit, MaxMin), (Status, Attributes)),
Method('count', AsyncCall, (SpaceName, Predicates), (Status, Count)),
]
Admin = [
Method('read_only', AsyncCall, (ReadOnly,), (AdminStatus,)),
Method('wait_until_stable', AsyncCall, (), (AdminStatus,)),
Method('fault_tolerance', AsyncCall, (SpaceName, FaultTolerance), (AdminStatus,)),
Method('validate_space', SyncCall, (SpaceDescription,), (AdminStatus,)),
Method('add_space', AsyncCall, (SpaceDescription,), (AdminStatus,)),
Method('rm_space', AsyncCall, (SpaceName,), (AdminStatus,)),
Method('mv_space', AsyncCall, (SpaceNameSource, SpaceNameTarget), (AdminStatus,)),
Method('list_spaces', AsyncCall, (), (AdminStatus, SpaceList)),
Method('list_indices', AsyncCall, (SpaceName,), (AdminStatus, IndexList)),
Method('list_subspaces', AsyncCall, (SpaceName,), (AdminStatus, SubspaceList)),
Method('add_index', AsyncCall, (SpaceName, AttributeName), (AdminStatus,)),
Method('rm_index', AsyncCall, (IndexID,), (AdminStatus,)),
Method('server_register', AsyncCall, (Token, Address), (AdminStatus,)),
Method('server_online', AsyncCall, (Token,), (AdminStatus,)),
Method('server_offline', AsyncCall, (Token,), (AdminStatus,)),
Method('server_forget', AsyncCall, (Token,), (AdminStatus,)),
Method('server_kill', AsyncCall, (Token,), (AdminStatus,)),
Method('backup', AsyncCall, (BackupName,), (AdminStatus, BackupList)),
Method('enable_perf_counters', AsyncCall, (), (AdminStatus, PerformanceCounters)),
Method('disable_perf_counters', NoFailCall, (), ()),
]
DoNotDocument = ['search_describe']
def call_name(x):
call = x.form.__name__.lower()
call += '__'
call += '_'.join([arg.__name__.lower() for arg in x.args_in])
call += '__'
call += '_'.join([arg.__name__.lower() for arg in x.args_out])
return call
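# Worked example (derived from the Method definitions below): for
# Method('get', AsyncCall, (SpaceName, Key), (Status, Attributes)) this
# produces "asynccall__spacename_key__status_attributes".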
def copyright(style, date):
assert style in ('#', '*', '/', '%')
template = '''{comment} Copyright (c) {date}, Cornell University
{comment} All rights reserved.
{comment}
{comment} Redistribution and use in source and binary forms, with or without
{comment} modification, are permitted provided that the following conditions are met:
{comment}
{comment} * Redistributions of source code must retain the above copyright notice,
{comment} this list of conditions and the following disclaimer.
{comment} * Redistributions in binary form must reproduce the above copyright
{comment} notice, this list of conditions and the following disclaimer in the
{comment} documentation and/or other materials provided with the distribution.
{comment} * Neither the name of HyperDex nor the names of its contributors may be
{comment} used to endorse or promote products derived from this software without
{comment} specific prior written permission.
{comment}
{comment} THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
{comment} AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
{comment} IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
{comment} DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
{comment} FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
{comment} DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
{comment} SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
{comment} CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
{comment} OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
{comment} OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
if style == '#':
return template.format(comment='#', date=date)
if style == '/':
return template.format(comment='//', date=date)
if style == '*':
return '/' + template.format(comment=' *', date=date)[1:] + ' */\n'
if style == '%':
return template.format(comment='%', date=date)
def LaTeX(s):
return s.replace('_', '\\_')
def parameters_c_style(arg):
label = ', '.join(['\\code{' + LaTeX(a[1]) + '}' for a in arg.args])
return label
def parameters_script_style(arg):
label = '\\code{' + LaTeX(str(arg).lower()[17:-2]) + '}'
return label
def doc_parameter_list(form, args, fragments, label_maker):
if not args:
return 'Nothing'
block = '\\begin{itemize}[noitemsep]\n'
for arg in args:
label = label_maker(arg)
block += '\\item ' + label + '\\\\\n'
frag = fragments + '_' + form.__name__.lower() + '_' + arg.__name__.lower()
block += '\\input{\\topdir/' + frag +'}\n'
block += '\\end{itemize}\n'
return block
def substitute_generated(name, text, replacement, prefix='//', suffix='\n'):
if not replacement.endswith('\n'):
replacement += '\n'
START = '{0} Begin Automatically Generated {1}{2}'.format(prefix, name, suffix)
END = '{0} End Automatically Generated {1}{2}'.format(prefix, name, suffix)
head, tail = text.split(START)
body, tail = tail.split(END)
last_line = body.rsplit('\n')[-1]
return head + START + replacement + last_line + END + tail
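# Minimal sketch of the expected markers (content hypothetical): the input text
# must contain a pair of lines such as
#   // Begin Automatically Generated Foo
#   // End Automatically Generated Foo
# Everything between them is replaced by `replacement`, and the indentation in
# front of the End marker is preserved via last_line.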
| pombredanne/HyperDex | bindings/__init__.py | Python | bsd-3-clause | 22,251 | 0.008314 |
""" Code used for running the package directly from the Command Line """
import os.path
import logging
import argparse
import sys
import time
from .db import AvailabilityDB
from .core import AvailabilityInfo
from .reports import ALL_REPORTS
from .settings import get_settings
def run():
# -------------------------- Logging Settings ---------------------------
logger = logging.getLogger(__name__)
main_logger = logging.getLogger('dn_availability')
_handler = logging.StreamHandler()
_formatter = logging.Formatter('%(levelname)s(%(name)s): %(message)s')
_handler.setFormatter(_formatter)
main_logger.addHandler(_handler)
# -------------------------- Argument Parser -----------------------------
parser = argparse.ArgumentParser(
description='A utility for managing available numbers for a Cisco UCM system',
epilog='For more information, visit the project page at: https://github.com/supernathan23/dn_availability')
subparsers = parser.add_subparsers(title='Actions', dest='action',
metavar='<action>',
description='You can enter "<action> -h" for details '
'on that action',
help='Available actions: add_report backup example_settings export '
'gen_report import init_db list restore')
# global args
parser.add_argument('-f', dest='settings_file',
help='Settings File. See the example_settings.cfg file for details')
parser.add_argument('-c', '--confirm', action='store_true',
dest='confirm',
help='Prompt for comfirmation before doing anything. '
'Default is to only prompt when deleting data')
parser.add_argument('-q', '--quiet', action='store_true',
help='Do not prompt for confirmations')
parser.add_argument('-v', '--verbose', action='count',
help='Display log messages. (Will override -q | --quiet)')
# Example Settings
parser_settings = subparsers.add_parser('example_settings')
parser_settings.add_argument('-o', '--output_file',
help='Output filename (will be overwritten if it exists!!)')
# list subcommand
parser_list = subparsers.add_parser('list')
parser_list.add_argument('-t', '--table',
help='Table to list data from, if not provided will display a list of '
'tables')
# init subcommand
parser_init = subparsers.add_parser('init_db')
parser_init.add_argument('-D', '--drop', action='store_true', default=False,
help='Drops existing tables, erasing existing data, before initializing')
# import subcommand
parser_import = subparsers.add_parser('import')
parser_import.add_argument('table',
help='Table to store the imported data. (use the list command to get a '
'list of the available tables)')
parser_import.add_argument('filename',
help='CSV filename to import')
# export subcommand
parser_export = subparsers.add_parser('export')
parser_export.add_argument('table',
help='Table to export. (use the list command to get a list of the '
'available tables)')
parser_export.add_argument('filename',
help='Destination filename (will be overwritten if it exists!!)')
# backup subcommand
parser_backup = subparsers.add_parser('backup')
parser_backup.add_argument('filename',
help='Destination filename (will be overwritten if it exists!!)')
# restore subcommand
parser_restore = subparsers.add_parser('restore')
parser_restore.add_argument('filename',
help='Source filename')
parser_restore.add_argument('-D', '--drop', action='store_true', default=False,
help='Drops existing tables, erasing existing data, before restoring backup')
# add_report subcommand
parser_add_report = subparsers.add_parser('add_report')
group_add_report = parser_add_report.add_mutually_exclusive_group(required=True)
group_add_report.add_argument('-t', '--timestamp',
help='Timestamp of when the report was generated.')
group_add_report.add_argument('-a', '--auto_timestamp',
action='store_true', default=False,
help='Obtain the timestamp from the file\'s creation date. Will prompt to '
'confirm that the timestamp is correct.')
parser_add_report.add_argument('-c', '--confirm_timestamp',
action='store_true', default=False,
help='Prompts to confirm the timestamp is correct. Timestamp is shown '
'in the systems standard format to make things easier. (Enabled by '
'default when -a (--auto_timestamp) is used')
parser_add_report.add_argument('system_id',
help='Phone System ID (can be obtained by using "list -t PhoneSystem" '
'subcommand')
parser_add_report.add_argument('filename',
help='Device report filename to be added to the system')
# gen_report subcommand
parser_gen_report = subparsers.add_parser('gen_report')
parser_gen_report.add_argument('report_name',
choices=ALL_REPORTS.keys(), metavar='report_name',
help='Name of the report. Available Reports: {}'.format(
', '.join(ALL_REPORTS.keys()))
)
parser_gen_report.add_argument('-s', '--system_id', action='append',
help='System ID (use the list -t PhoneSystem" subcommand for a list of'
' systems)')
parser_gen_report.add_argument('-g', '--number_group', action='append',
help='Number Group ID (use the "list -t NumberGroup" subcommand for a '
'list of number groups')
parser_gen_report.add_argument('-o', '--output_filename',
help='Destination filename (will be overwritten if it exists!!)')
# ---------------------------Setup----------------------------------------
args = parser.parse_args()
if args.verbose:
log_level = logging.INFO
if args.verbose > 1:
log_level = logging.DEBUG
elif args.quiet:
log_level = logging.ERROR
else:
log_level = logging.WARNING
main_logger.setLevel(log_level)
logger.info('Log verbosity set to %s', log_level)
app_settings = get_settings(args.settings_file)
db = AvailabilityDB(app_settings['DEFAULT']['db_url'])
info = AvailabilityInfo(db)
# -------------------------- Actions -------------------------------------
if args.action == 'list':
if not args.table:
logger.info('Listing tables')
print('Active Tables:')
print('\n'.join(info.db.metadata.tables.keys()))
sys.exit()
logger.info('Listing records for table %s', args.table)
conn = info.db.connect()
table = info.db.get_table(args.table)
results = conn.execute(table.select())
for row in results:
print(row)
if args.action == 'example_settings':
from pkg_resources import resource_string
settings_data = resource_string('dn_availability', 'example_settings.cfg').decode()
if args.output_file:
if args.confirm:
print('About to export an example settings file to "{}". (If file '
'exists it will be overwritten)'.format(args.output_file))
if not confirmation():
logger.info('Operation cancelled')
sys.exit()
logger.info('Exporting example settings file to "%s"', args.output_file)
with open(args.output_file, 'w') as f:
f.write(settings_data)
else:
print(settings_data)
elif args.action == 'init_db':
if args.drop:
if not args.quiet:
print('You are about to re-initialize the DB, '
'ALL EXISTING DATA WILL BE ERASED!!!')
if not confirmation():
logger.info('Operation cancelled')
sys.exit()
info.db.teardown_db()
logger.info('DB torn down')
info.db.setup_db()
logger.info('DB initialized')
elif args.action == 'import':
if args.confirm:
print('About to import data from "{}" into table "{}"'.format(
args.filename, args.table))
if not confirmation():
logger.info('Operation cancelled')
sys.exit()
logger.info('Importing data from "%s" into table %s', args.table,
args.filename)
info.db.csv_import(args.table, args.filename)
logger.info('Import complete')
elif args.action == 'export':
if args.confirm:
print('About to export data from table {} to "{}". (If file exists it '
'will be overwritten)'.format(args.table, args.filename))
if not confirmation():
logger.info('Operation cancelled')
sys.exit()
logger.info('Exporting data from table %s to file "%s"', args.table,
args.filename)
info.db.csv_export(args.table, args.filename)
logger.info('Export complete')
elif args.action == 'backup':
if args.confirm:
print('About to backup data to file "{}". (If file exists it will be '
'overwritten)'.format(args.filename))
if not confirmation():
logger.info('Operation cancelled')
sys.exit()
logger.info('Backing up date to file %s', args.filename)
info.db.backup_data(args.filename)
logger.info('Backup complete')
elif args.action == 'restore':
if args.confirm or args.drop:
print('About to restore data from backup "{}"'.format(args.filename))
if args.drop:
print('Existing data will be ERASED!')
else:
print('Existing data will be maintained, but may interfere with '
'the backup data')
if not confirmation():
logger.info('Operation cancelled')
sys.exit()
logger.info('Restoring data from file %s')
if args.drop:
logger.info('Existing data will be ERASED')
info.db.restore_data(args.filename, args.drop)
logger.info('Restore complete.')
elif args.action == 'add_report':
timestamp = args.timestamp
if args.auto_timestamp:
timestamp = os.path.getctime(args.filename)
logger.debug('Received timestamp %s from file', timestamp)
print('About to add device report "{}" for system ID {} and timestamp '
'{}'.format(args.filename, args.system_id, timestamp))
if (args.auto_timestamp and not args.quiet) or args.confirm:
prompt = 'Is timestamp "{}" correct?'.format(time.ctime(timestamp))
if not confirmation(prompt):
logger.info('Next time, use the -t (--timestamp) option to manually '
'specify the timestamp')
logger.info('Operation cancelled')
sys.exit()
logger.info('Adding device report "%s" for system ID %s and timestamp '
'%s', args.filename, args.system_id, timestamp)
info.add_device_report(args.filename, args.system_id, timestamp)
logger.info('Report added.')
elif args.action == 'gen_report':
ReportDef = ALL_REPORTS[args.report_name]
if args.output_filename:
if args.confirm:
print('About to generate report {} to file "{}". (If file '
'exists it will be overwritten!)'.format(
args.report_name, args.output_filename))
if not confirmation():
logger.info('Operation cancelled')
sys.exit()
f = open(args.output_filename, 'w')
else:
f = sys.stdout
ReportDef(
avail_info_obj=info,
output_file=f,
args=args)
def confirmation(question='Are you sure?', default=None):
""" Prompts the user for a yes/no confirmation.
question is the text provided to the user.
default value is used when the user just presses enter.
If the value is None, will continue to prompt the user
for an acceptable answer.
(based on http://code.activestate.com/recipes/577058/)
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = ' [y/n] '
elif default:
prompt = ' [Y/n] '
else:
prompt = ' [y/N] '
while True:
response = input(question + prompt).lower()
if not response and default is not None:
return default
try:
return valid[response]
except KeyError:
print('YES OR NO!?')
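# Illustrative use of confirmation() (hypothetical prompt text, not taken from the CLI above):
#   if confirmation('Really drop the table?', default=False):
#       ...perform the destructive action...
# With default=False an empty reply returns False; typing 'y', 'ye' or 'yes' returns True.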
if __name__ == '__main__':
    run()
| supernathan23/dn_availability | dn_availability/cli.py | Python | mit | 13,300 | 0.008722 |
"""Base classes for channels."""
import asyncio
from enum import Enum
from functools import wraps
import logging
from typing import Any, Union
import zigpy.exceptions
from homeassistant.core import callback
from .. import typing as zha_typing
from ..const import (
ATTR_ARGS,
ATTR_ATTRIBUTE_ID,
ATTR_ATTRIBUTE_NAME,
ATTR_CLUSTER_ID,
ATTR_COMMAND,
ATTR_UNIQUE_ID,
ATTR_VALUE,
CHANNEL_ZDO,
SIGNAL_ATTR_UPDATED,
)
from ..helpers import LogMixin, safe_read
_LOGGER = logging.getLogger(__name__)
def parse_and_log_command(channel, tsn, command_id, args):
"""Parse and log a zigbee cluster command."""
cmd = channel.cluster.server_commands.get(command_id, [command_id])[0]
channel.debug(
"received '%s' command with %s args on cluster_id '%s' tsn '%s'",
cmd,
args,
channel.cluster.cluster_id,
tsn,
)
return cmd
def decorate_command(channel, command):
"""Wrap a cluster command to make it safe."""
@wraps(command)
async def wrapper(*args, **kwds):
try:
result = await command(*args, **kwds)
channel.debug(
"executed '%s' command with args: '%s' kwargs: '%s' result: %s",
command.__name__,
args,
kwds,
result,
)
return result
except (zigpy.exceptions.ZigbeeException, asyncio.TimeoutError) as ex:
channel.debug(
"command failed: '%s' args: '%s' kwargs '%s' exception: '%s'",
command.__name__,
args,
kwds,
str(ex),
)
return ex
return wrapper
class ChannelStatus(Enum):
"""Status of a channel."""
CREATED = 1
CONFIGURED = 2
INITIALIZED = 3
class ZigbeeChannel(LogMixin):
"""Base channel for a Zigbee cluster."""
REPORT_CONFIG = ()
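    # Subclasses override REPORT_CONFIG with entries of the form
    # {"attr": <attribute name or id>, "config": (min_interval, max_interval, reportable_change)};
    # configure_reporting() below unpacks exactly these two keys.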
def __init__(
self, cluster: zha_typing.ZigpyClusterType, ch_pool: zha_typing.ChannelPoolType
) -> None:
"""Initialize ZigbeeChannel."""
self._generic_id = f"channel_0x{cluster.cluster_id:04x}"
self._channel_name = getattr(cluster, "ep_attribute", self._generic_id)
self._ch_pool = ch_pool
self._cluster = cluster
self._id = f"{ch_pool.id}:0x{cluster.cluster_id:04x}"
unique_id = ch_pool.unique_id.replace("-", ":")
self._unique_id = f"{unique_id}:0x{cluster.cluster_id:04x}"
self._report_config = self.REPORT_CONFIG
if not hasattr(self, "_value_attribute") and len(self._report_config) > 0:
attr = self._report_config[0].get("attr")
if isinstance(attr, str):
self.value_attribute = self.cluster.attridx.get(attr)
else:
self.value_attribute = attr
self._status = ChannelStatus.CREATED
self._cluster.add_listener(self)
@property
def id(self) -> str:
"""Return channel id unique for this device only."""
return self._id
@property
def generic_id(self):
"""Return the generic id for this channel."""
return self._generic_id
@property
def unique_id(self):
"""Return the unique id for this channel."""
return self._unique_id
@property
def cluster(self):
"""Return the zigpy cluster for this channel."""
return self._cluster
@property
def name(self) -> str:
"""Return friendly name."""
return self._channel_name
@property
def status(self):
"""Return the status of the channel."""
return self._status
@callback
def async_send_signal(self, signal: str, *args: Any) -> None:
"""Send a signal through hass dispatcher."""
self._ch_pool.async_send_signal(signal, *args)
async def bind(self):
"""Bind a zigbee cluster.
This also swallows ZigbeeException exceptions that are thrown when
devices are unreachable.
"""
try:
res = await self.cluster.bind()
self.debug("bound '%s' cluster: %s", self.cluster.ep_attribute, res[0])
except (zigpy.exceptions.ZigbeeException, asyncio.TimeoutError) as ex:
self.debug(
"Failed to bind '%s' cluster: %s", self.cluster.ep_attribute, str(ex)
)
async def configure_reporting(self) -> None:
"""Configure attribute reporting for a cluster.
This also swallows ZigbeeException exceptions that are thrown when
devices are unreachable.
"""
kwargs = {}
if self.cluster.cluster_id >= 0xFC00 and self._ch_pool.manufacturer_code:
kwargs["manufacturer"] = self._ch_pool.manufacturer_code
for report in self._report_config:
attr = report["attr"]
attr_name = self.cluster.attributes.get(attr, [attr])[0]
min_report_int, max_report_int, reportable_change = report["config"]
try:
res = await self.cluster.configure_reporting(
attr, min_report_int, max_report_int, reportable_change, **kwargs
)
self.debug(
"reporting '%s' attr on '%s' cluster: %d/%d/%d: Result: '%s'",
attr_name,
self.cluster.ep_attribute,
min_report_int,
max_report_int,
reportable_change,
res,
)
except (zigpy.exceptions.ZigbeeException, asyncio.TimeoutError) as ex:
self.debug(
"failed to set reporting for '%s' attr on '%s' cluster: %s",
attr_name,
self.cluster.ep_attribute,
str(ex),
)
async def async_configure(self) -> None:
"""Set cluster binding and attribute reporting."""
if not self._ch_pool.skip_configuration:
await self.bind()
if self.cluster.is_server:
await self.configure_reporting()
ch_specific_cfg = getattr(self, "async_configure_channel_specific", None)
if ch_specific_cfg:
await ch_specific_cfg()
self.debug("finished channel configuration")
else:
self.debug("skipping channel configuration")
self._status = ChannelStatus.CONFIGURED
async def async_initialize(self, from_cache: bool) -> None:
"""Initialize channel."""
if not from_cache and self._ch_pool.skip_configuration:
self._status = ChannelStatus.INITIALIZED
return
self.debug("initializing channel: from_cache: %s", from_cache)
attributes = [cfg["attr"] for cfg in self._report_config]
if attributes:
await self.get_attributes(attributes, from_cache=from_cache)
ch_specific_init = getattr(self, "async_initialize_channel_specific", None)
if ch_specific_init:
await ch_specific_init(from_cache=from_cache)
self.debug("finished channel configuration")
self._status = ChannelStatus.INITIALIZED
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle commands received to this cluster."""
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute updates on this cluster."""
self.async_send_signal(
f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}",
attrid,
self.cluster.attributes.get(attrid, [attrid])[0],
value,
)
@callback
def zdo_command(self, *args, **kwargs):
"""Handle ZDO commands on this cluster."""
@callback
def zha_send_event(self, command: str, args: Union[int, dict]) -> None:
"""Relay events to hass."""
self._ch_pool.zha_send_event(
{
ATTR_UNIQUE_ID: self.unique_id,
ATTR_CLUSTER_ID: self.cluster.cluster_id,
ATTR_COMMAND: command,
ATTR_ARGS: args,
}
)
async def async_update(self):
"""Retrieve latest state from cluster."""
async def get_attribute_value(self, attribute, from_cache=True):
"""Get the value for an attribute."""
manufacturer = None
manufacturer_code = self._ch_pool.manufacturer_code
if self.cluster.cluster_id >= 0xFC00 and manufacturer_code:
manufacturer = manufacturer_code
result = await safe_read(
self._cluster,
[attribute],
allow_cache=from_cache,
only_cache=from_cache and not self._ch_pool.is_mains_powered,
manufacturer=manufacturer,
)
return result.get(attribute)
async def get_attributes(self, attributes, from_cache=True):
"""Get the values for a list of attributes."""
manufacturer = None
manufacturer_code = self._ch_pool.manufacturer_code
if self.cluster.cluster_id >= 0xFC00 and manufacturer_code:
manufacturer = manufacturer_code
try:
result, _ = await self.cluster.read_attributes(
attributes,
allow_cache=from_cache,
only_cache=from_cache and not self._ch_pool.is_mains_powered,
manufacturer=manufacturer,
)
return result
except (asyncio.TimeoutError, zigpy.exceptions.ZigbeeException) as ex:
self.debug(
"failed to get attributes '%s' on '%s' cluster: %s",
attributes,
self.cluster.ep_attribute,
str(ex),
)
return {}
def log(self, level, msg, *args):
"""Log a message."""
msg = f"[%s:%s]: {msg}"
args = (self._ch_pool.nwk, self._id) + args
_LOGGER.log(level, msg, *args)
def __getattr__(self, name):
"""Get attribute or a decorated cluster command."""
if hasattr(self._cluster, name) and callable(getattr(self._cluster, name)):
command = getattr(self._cluster, name)
command.__name__ = name
return decorate_command(self, command)
return self.__getattribute__(name)
class ZDOChannel(LogMixin):
"""Channel for ZDO events."""
def __init__(self, cluster, device):
"""Initialize ZDOChannel."""
self.name = CHANNEL_ZDO
self._cluster = cluster
self._zha_device = device
self._status = ChannelStatus.CREATED
self._unique_id = "{}:{}_ZDO".format(str(device.ieee), device.name)
self._cluster.add_listener(self)
@property
def unique_id(self):
"""Return the unique id for this channel."""
return self._unique_id
@property
def cluster(self):
"""Return the aigpy cluster for this channel."""
return self._cluster
@property
def status(self):
"""Return the status of the channel."""
return self._status
@callback
def device_announce(self, zigpy_device):
"""Device announce handler."""
@callback
def permit_duration(self, duration):
"""Permit handler."""
async def async_initialize(self, from_cache):
"""Initialize channel."""
self._status = ChannelStatus.INITIALIZED
async def async_configure(self):
"""Configure channel."""
self._status = ChannelStatus.CONFIGURED
def log(self, level, msg, *args):
"""Log a message."""
msg = f"[%s:ZDO](%s): {msg}"
args = (self._zha_device.nwk, self._zha_device.model) + args
_LOGGER.log(level, msg, *args)
class ClientChannel(ZigbeeChannel):
"""Channel listener for Zigbee client (output) clusters."""
@callback
def attribute_updated(self, attrid, value):
"""Handle an attribute updated on this cluster."""
self.zha_send_event(
SIGNAL_ATTR_UPDATED,
{
ATTR_ATTRIBUTE_ID: attrid,
ATTR_ATTRIBUTE_NAME: self._cluster.attributes.get(attrid, ["Unknown"])[
0
],
ATTR_VALUE: value,
},
)
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle a cluster command received on this cluster."""
if (
self._cluster.server_commands is not None
and self._cluster.server_commands.get(command_id) is not None
):
self.zha_send_event(self._cluster.server_commands.get(command_id)[0], args)
| tboyce021/home-assistant | homeassistant/components/zha/core/channels/base.py | Python | apache-2.0 | 12,609 | 0.001269 |
class Solution(object):
# Op1: time O(n*log(n)) space O(1)
def hIndex(self, citations):
"""
:type citations: List[int]
:rtype: int
"""
citations.sort(reverse=True)
for i, x in enumerate(citations):
# print(i, x)
if i >= x:
return i
return len(citations)
        # Op1.1: equivalent one-liner (unreachable above, kept for reference):
        # return sum(i < j for i, j in enumerate(sorted(citations, reverse=True)))
# Op2: time O(n) space O(n)
def hIndex2(self, citations):
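        # Idea: bucket papers by citation count (capping counts at n), then walk
        # the buckets from high to low; the first index i where the cumulative
        # number of papers is >= i is the h-index. O(n) time, O(n) extra space.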
n = len(citations)
citeCount = [0] * (n + 1)
for c in citations:
if c >= n:
citeCount[n] += 1
else:
citeCount[c] += 1
count = 0
for i in reversed(range(n + 1)):
count += citeCount[i]
if count >= i:
return i
return 0
citations = [3, 0, 6, 1, 5]
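# For citations = [3, 0, 6, 1, 5] both methods should print 3
# (three papers have at least 3 citations each).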
test = Solution()
print(test.hIndex(citations))
print(test.hIndex2(citations))
| rx2130/Leetcode | python/274 H-Index.py | Python | apache-2.0 | 993 | 0.001007 |
# -*- coding: utf-8 -*-
"""
author: ferris
update: 2015-12-08
function: query the Google Analytics API, using a loop to work around the quota of 10,000 records per query.
"""
import argparse
from googleapiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials
import httplib2
from oauth2client import client
from oauth2client import file
from oauth2client import tools
# api_name = 'analytics'
# api_version = 'v3'
# scope = ['https://www.googleapis.com/auth/analytics.readonly']
# service_account_email = 'account-1@still-sensor-115306.iam.gserviceaccount.com'
# key_file_location = 'client_secrets.p12'
class GA:
def __init__(self, api_name='analytics', api_version='v3',
scope=['https://www.googleapis.com/auth/analytics.readonly'],
service_account_email='account-1@still-sensor-115306.iam.gserviceaccount.com',
key_file_location='client_secrets.p12'):
self.service = self.get_service(api_name, api_version, scope, key_file_location, service_account_email)
@staticmethod
def get_service(api_name, api_version, scope, key_file_location, service_account_email):
"""
:param api_name: The name of the api to connect to.
:param api_version: The api version to connect to.
:param scope: A list auth scopes to authorize for the application.
:param key_file_location: The path to a valid service account p12 key file.
:param service_account_email: The service account email address.
"""
f = open(key_file_location, 'rb')
key = f.read()
f.close()
credentials = SignedJwtAssertionCredentials(service_account_email, key,
scope=scope)
http = credentials.authorize(httplib2.Http())
# Build the service object.
service = build(api_name, api_version, http=http)
return service
def get_list(self, conf):
"""
:param conf: Python dictionary containing these query parameters: profile_id,
start_date, end_date, dimensions, metrics, filters
"""
# get first 10,000 records
start_index = 1
max_results = 10000
api_query = self.service.data().ga().get(ids='ga:' + conf['profile_id'],
start_date=conf['start_date'],
end_date=conf['end_date'],
dimensions=conf['dimensions'],
metrics=conf['metrics'],
filters=conf['filters'],
start_index=start_index,
max_results=max_results,
samplingLevel='HIGHER_PRECISION' # minimize sampling effect
)
try:
temp_data = api_query.execute()
except TypeError:
print('There was an error in constructing your query')
# no results
num_results = temp_data.get('totalResults')
if num_results == 0:
print("no results from query")
return []
# print number of total results
print("total results from query: {0}".format(num_results))
# save results of the 1st query
result_list = []
result_list.extend(temp_data.get('rows'))
# save results of additional queries
if num_results > 10000:
rows_left = num_results - 10000
# loop queries
while rows_left > 0:
start_index += 10000
api_query = self.service.data().ga().get(ids='ga:' + conf['profile_id'],
start_date=conf['start_date'],
end_date=conf['end_date'],
dimensions=conf['dimensions'],
metrics=conf['metrics'],
filters=conf['filters'],
start_index=start_index,
max_results=max_results,
samplingLevel='HIGHER_PRECISION'
                                                         )
try:
temp_data = api_query.execute()
except TypeError:
print('There was an error in constructing your query')
result_list.extend(temp_data.get('rows'))
print('appended more records')
rows_left -= 10000
print("export to list success")
return result_list
# demo
if __name__ == "__main__":
# sample configuration
sample_conf = {'dimensions': 'ga:pagePath,ga:eventCategory,ga:eventAction,ga:eventLabel',
'metrics': 'ga:totalEvents,ga:users', 'filters': 'ga:hostname=~imike\.com',
'profile_id': '112805419', 'start_date': '2015-12-08', 'end_date': '2015-12-08'}
# query
G1 = GA()
results = G1.get_list(sample_conf)
# output
with open('test.out', 'w+') as f_out:
for r in results:
line = '\t'.join(r) + '\n'
f_out.write(line)
print("output to file success")
| ferris-wufei/toolbox | dw/api_ga/api_ga.py | Python | gpl-2.0 | 5,603 | 0.003926 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import io
import json
import os
import socket
import sys
import time
import traceback
from system_test import Logger
from system_test import main_module
from system_test import Process
from system_test import Qdrouterd
from system_test import TestCase
from system_test import TIMEOUT
from system_test import unittest
from subprocess import PIPE
from subprocess import STDOUT
# Tests in this file are organized by classes that inherit TestCase.
# The first instance is TcpAdaptor(TestCase).
# The tests emit files that are named starting with 'TcpAdaptor'. This includes
# logs and shell scripts.
# Subsequent TestCase subclasses must follow this pattern and emit files named
# with the test class name at the beginning of the emitted files.
try:
from TCP_echo_client import TcpEchoClient
from TCP_echo_server import TcpEchoServer
except ImportError:
    class TcpEchoClient(object):
        pass
    class TcpEchoServer(object):
        pass
DISABLE_SELECTOR_TESTS = False
DISABLE_SELECTOR_REASON = ''
try:
import selectors # noqa F401: imported but unused (echo server and echo client import this, they run as subprocesses)
except ImportError:
DISABLE_SELECTOR_TESTS = True
DISABLE_SELECTOR_REASON = "Python selectors module is not available on this platform."
# This code takes a wild guess how long an echo server must stall
# receiving input data before Q2 holdoff triggers in the host router
# on all the various CI systems out there.
Q2_DELAY_SECONDS = 1.0
# This code needs to know the size in bytes of the holdoff trigger threshold.
# Whitebox testing knows that the holdoff is specified in some number of
# buffers. What whitebox testing does not know how big the buffers are
# or the number of buffers or how many bytes are actually in each buffer.
# Today the holdoff is probably 128K bytes so use something bigger than that
# in the test to get the trigger to kick in.
# On top of that the echo server is undermined by having TCP window or python
# read the server socket in advance of the echo server asking it to.
# In a test case the adaptor logged writing almost 3MBytes
# 2021-02-26 19:11:20.831826 PN_RAW_CONNECTION_WRITTEN Wrote 8192 bytes. Total written 2777007 bytes
# well before the server started reading from the socket.
# 2021-02-26 19:11:21.534246 J0#206 TCP_TEST [] [] ECHO_SERVER TcpAdaptor NS_EC2_CONN_STALL Connection from 127.0.0.1:54410 stall end
# 2021-02-26 19:11:21.534801 J0#207 TCP_TEST [] [] ECHO_SERVER TcpAdaptor NS_EC2_CONN_STALL read from: 127.0.0.1:54410 len:1024:
# Giving the stalled server 10Mbytes seems to run the TCP window out of capacity
# so that it stops reading from the TcpConnector and Q2 finally kicks in.
Q2_TEST_MESSAGE_SIZE = 10000000
# local timeout in seconds to wait for one echo client to finish
echo_timeout = 30
def ncat_available():
popen_args = ['ncat', '--version']
try:
process = Process(popen_args,
name='ncat_check',
stdout=PIPE,
expect=None,
universal_newlines=True)
out = process.communicate()[0]
return True
    except Exception:
return False
#
# Test concurrent clients
#
class EchoClientRunner():
"""
Launch an echo client upon construction.
Provide poll interface for checking done/error.
Provide wait/join to shut down.
"""
def __init__(self, test_name, client_n, logger, client, server, size,
count,
print_client_logs=True,
timeout=TIMEOUT,
port_override=None):
"""
Launch an echo client upon construction.
:param test_name: Unique name for log file prefix
:param client_n: Client number for differentiating otherwise identical clients
:param logger: parent logger for logging test activity vs. client activity
:param client: router name to which the client connects
:param server: name whose address the client is targeting
:param size: length of messages in bytes
:param count: number of messages to be sent/verified
:param print_client_logs: verbosity switch
:return Null if success else string describing error
"""
self.test_name = test_name
self.client_n = str(client_n)
self.logger = logger
self.client = client
self.server = server
self.size = size
self.count = count
self.timeout = timeout
self.print_client_logs = print_client_logs
self.client_final = False
# Each router has a listener for the echo server attached to every router
self.listener_port = TcpAdaptor.tcp_client_listener_ports[self.client][self.server] if port_override is None else port_override
self.name = "%s_%s_%s_%s" % \
(self.test_name, self.client_n, self.size, self.count)
self.client_prefix = "ECHO_CLIENT %s" % self.name
parent_path = os.path.dirname(os.getcwd())
self.client_logger = Logger(title=self.client_prefix,
print_to_console=self.print_client_logs,
save_for_dump=False,
ofilename=os.path.join(parent_path, "setUpClass/TcpAdaptor_echo_client_%s.log" % self.name))
try:
self.e_client = TcpEchoClient(prefix=self.client_prefix,
host='localhost',
port=self.listener_port,
size=self.size,
count=self.count,
timeout=self.timeout,
logger=self.client_logger)
except Exception as exc:
self.e_client.error = "TCP_TEST TcpAdaptor_runner_%s failed. Exception: %s" % \
(self.name, traceback.format_exc())
self.logger.log(self.e_client.error)
raise Exception(self.e_client.error)
def client_error(self):
return self.e_client.error
def client_exit_status(self):
return self.e_client.exit_status
def client_running(self):
return self.e_client.is_running
def wait(self):
# wait for client to exit
# Return None if successful wait/join/exit/close else error message
result = None
try:
self.e_client.wait()
except Exception as exc:
self.e_client.error = "TCP_TEST EchoClient %s failed. Exception: %s" % \
(self.name, traceback.format_exc())
self.logger.log(self.e_client.error)
result = self.e_client.error
return result
class TcpAdaptor(TestCase):
"""
6 edge routers connected via 3 interior routers.
9 echo servers are connected via tcpConnector, one to each router.
Each router has 10 listeners, one for each server and
another for which there is no server.
"""
# +-------+ +---------+ +---------+ +---------+ +-------+
# | EA1 |<-->| INTA |<==>| INTB |<==>| INTC |<-->| EC1 |
# +-------+ | | | | | | +-------+
# +-------+ | | | | | | +-------+
# | EA2 |<-->| | | | | |<-->| EC2 |
# +-------+ +---------+ +---------+ +---------+ +-------+
# ^ ^
# | |
# +-------+ +-------+
# | EB1 | | EB2 |
# +-------+ +-------+
#
# Each router tcp-connects to a like-named echo server.
# Each router has tcp-listeners for every echo server
#
# +----+ +----+ +----+ +----+ +----+ +----+ +----+ +----+ +----+
# +--|tcp |-|tcp |-|tcp |-|tcp |-|tcp |-|tcp |-|tcp |-|tcp |-|tcp |--+
# | |lsnr| |lsnr| |lsnr| |lsnr| |lsnr| |lsnr| |lsnr| |lsnr| |lsnr| |
# | |EA1 | |EA2 | |INTA| |EB1 | |EB2 | |INTB| |EC1 | |EC2 | |INTC| |
# | +----+ +----+ +----+ +----+ +----+ +----+ +----+ +----+ +----+ |
# | +---------+ +------+
# | Router | tcp | | echo |
# | EA1 |connector|->|server|
# | +---------+ | EA1 |
# | | +------+
# +------------------------------------------------------------------+
#
# Router EC2 has naughty, misbehaving echo servers:
# * conn_stall - delays before reading socket to force triggering Q2 holdoff
#
# Routers EC2 has a TCP listener for conn_stall echo server.
# * Sending "large" messages through this listener should trigger Q2 holdoff
# on router EC1.
# * A similar listener on INTA does *not* trigger Q2 holdoff on EA1.
# Allocate routers in this order
router_order = ['INTA', 'INTB', 'INTC', 'EA1', 'EA2', 'EB1', 'EB2', 'EC1', 'EC2']
# List indexed in router_order
# First listener in each router is normal AMQP for test setup and mgmt.
amqp_listener_ports = {}
# Each router listens for TCP where the tcp-address is the router name.
# Each router has N listeners, one for the echo server connected to each router.
tcp_client_listener_ports = {}
# Each router connects to an echo server
tcp_server_listener_ports = {}
# Each router has a TCP listener that has no associated server
nodest_listener_ports = {}
# Each router has a console listener
# http_listener_ports = {}
# TCP siteId for listeners and connectors
site = "mySite"
# Each router has an echo server to which it connects
echo_servers = {}
# Special echo servers
echo_server_NS_CONN_STALL = None
@classmethod
def setUpClass(cls):
"""Start a router"""
super(TcpAdaptor, cls).setUpClass()
if DISABLE_SELECTOR_TESTS:
return
def router(name, mode, connection, extra=None):
"""
Launch a router through the system_test framework.
For each router:
* normal listener first
#* http listener for console connections
* tcp listener for 'nodest', which will never exist
* tcp connector to echo server whose address is the same as this router's name
* six tcp listeners, one for each server on each router on the network
:param name: router name
:param mode: router mode: interior or edge
:param connection: list of router-level connection/listener tuples
            :param extra: yet more configuration tuples. unused for now
:return:
"""
config = [
('router', {'mode': mode, 'id': name}),
('listener', {'port': cls.amqp_listener_ports[name]}),
# ('listener', {'port': cls.http_listener_ports[name], 'http': 'yes'}),
('tcpListener', {'host': "0.0.0.0",
'port': cls.nodest_listener_ports[name],
'address': 'nodest',
'siteId': cls.site}),
('tcpConnector', {'host': "127.0.0.1",
'port': cls.tcp_server_listener_ports[name],
'address': 'ES_' + name,
'siteId': cls.site})
]
if connection:
config.extend(connection)
listeners = []
for rtr in cls.router_order:
listener = {'host': "0.0.0.0",
'port': cls.tcp_client_listener_ports[name][rtr],
'address': 'ES_' + rtr,
'siteId': cls.site}
                tup = [('tcpListener', listener)]
listeners.extend(tup)
config.extend(listeners)
if extra:
config.extend(extra)
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
cls.routers = []
# define logging levels
cls.print_logs_server = False
cls.print_logs_client = False
parent_path = os.path.dirname(os.getcwd())
cls.logger = Logger(title="TcpAdaptor-testClass",
print_to_console=True,
save_for_dump=False,
ofilename=os.path.join(parent_path, "setUpClass/TcpAdaptor.log"))
# Write a dummy log line for scraper.
cls.logger.log("SERVER (info) Container Name: TCP_TEST")
# Start echo servers first, store their listening port numbers
parent_path = os.path.dirname(os.getcwd())
for rtr in cls.router_order:
test_name = "TcpAdaptor"
server_prefix = "ECHO_SERVER %s ES_%s" % (test_name, rtr)
server_logger = Logger(title=test_name,
print_to_console=cls.print_logs_server,
save_for_dump=False,
ofilename=os.path.join(parent_path, "setUpClass/TcpAdaptor_echo_server_%s.log" % rtr))
cls.logger.log("TCP_TEST Launching echo server '%s'" % server_prefix)
server = TcpEchoServer(prefix=server_prefix,
port=0,
logger=server_logger)
assert server.is_running
cls.tcp_server_listener_ports[rtr] = server.port
cls.echo_servers[rtr] = server
# start special naughty servers that misbehave on purpose
server_prefix = "ECHO_SERVER TcpAdaptor NS_EC2_CONN_STALL"
server_logger = Logger(title="TcpAdaptor",
print_to_console=cls.print_logs_server,
save_for_dump=False,
ofilename=os.path.join(parent_path, "setUpClass/TcpAdaptor_echo_server_NS_CONN_STALL.log"))
cls.logger.log("TCP_TEST Launching echo server '%s'" % server_prefix)
server = TcpEchoServer(prefix=server_prefix,
port=0,
logger=server_logger,
conn_stall=Q2_DELAY_SECONDS)
assert server.is_running
cls.EC2_conn_stall_connector_port = server.port
cls.echo_server_NS_CONN_STALL = server
# Allocate a sea of router ports
for rtr in cls.router_order:
cls.amqp_listener_ports[rtr] = cls.tester.get_port()
tl_ports = {}
for tcp_listener in cls.router_order:
tl_ports[tcp_listener] = cls.tester.get_port()
cls.tcp_client_listener_ports[rtr] = tl_ports
cls.nodest_listener_ports[rtr] = cls.tester.get_port()
inter_router_port_AB = cls.tester.get_port()
cls.INTA_edge_port = cls.tester.get_port()
cls.INTA_conn_stall_listener_port = cls.tester.get_port()
# Launch the routers using the sea of router ports
router('INTA', 'interior',
[('listener', {'role': 'inter-router', 'port': inter_router_port_AB}),
('listener', {'name': 'uplink', 'role': 'edge', 'port': cls.INTA_edge_port}),
('tcpListener', {'host': "0.0.0.0", 'port': cls.INTA_conn_stall_listener_port,
'address': 'NS_EC2_CONN_STALL', 'siteId': cls.site})])
inter_router_port_BC = cls.tester.get_port()
cls.INTB_edge_port = cls.tester.get_port()
router('INTB', 'interior',
[('connector', {'role': 'inter-router', 'port': inter_router_port_AB}),
('listener', {'role': 'inter-router', 'port': inter_router_port_BC}),
('listener', {'name': 'uplink', 'role': 'edge', 'port': cls.INTB_edge_port})])
cls.INTC_edge_port = cls.tester.get_port()
router('INTC', 'interior',
[('connector', {'role': 'inter-router', 'port': inter_router_port_BC}),
('listener', {'name': 'uplink', 'role': 'edge', 'port': cls.INTC_edge_port})])
router('EA1', 'edge',
[('connector', {'name': 'uplink', 'role': 'edge', 'port': cls.INTA_edge_port})])
router('EA2', 'edge',
[('connector', {'name': 'uplink', 'role': 'edge', 'port': cls.INTA_edge_port})])
router('EB1', 'edge',
[('connector', {'name': 'uplink', 'role': 'edge', 'port': cls.INTB_edge_port})])
router('EB2', 'edge',
[('connector', {'name': 'uplink', 'role': 'edge', 'port': cls.INTB_edge_port})])
router('EC1', 'edge',
[('connector', {'name': 'uplink', 'role': 'edge', 'port': cls.INTC_edge_port})])
cls.EC2_conn_stall_listener_port = cls.tester.get_port()
router('EC2', 'edge',
[('connector', {'name': 'uplink', 'role': 'edge', 'port': cls.INTC_edge_port}),
('tcpConnector', {'host': "127.0.0.1", 'port': cls.EC2_conn_stall_connector_port,
'address': 'NS_EC2_CONN_STALL', 'siteId': cls.site}),
('tcpListener', {'host': "0.0.0.0", 'port': cls.EC2_conn_stall_listener_port,
'address': 'NS_EC2_CONN_STALL', 'siteId': cls.site})])
cls.INTA = cls.routers[0]
cls.INTB = cls.routers[1]
cls.INTC = cls.routers[2]
cls.EA1 = cls.routers[3]
cls.EA2 = cls.routers[4]
cls.EB1 = cls.routers[5]
cls.EB2 = cls.routers[6]
cls.EC1 = cls.routers[7]
cls.EC2 = cls.routers[8]
cls.router_dict = {}
cls.router_dict['INTA'] = cls.INTA
cls.router_dict['INTB'] = cls.INTB
cls.router_dict['INTC'] = cls.INTC
cls.router_dict['EA1'] = cls.EA1
cls.router_dict['EA2'] = cls.EA2
cls.router_dict['EB1'] = cls.EB1
cls.router_dict['EB2'] = cls.EB2
cls.router_dict['EC1'] = cls.EC1
cls.router_dict['EC2'] = cls.EC2
cls.logger.log("TCP_TEST INTA waiting for connection to INTB")
cls.INTA.wait_router_connected('INTB')
cls.logger.log("TCP_TEST INTB waiting for connection to INTA")
cls.INTB.wait_router_connected('INTA')
cls.logger.log("TCP_TEST INTB waiting for connection to INTC")
cls.INTB.wait_router_connected('INTC')
cls.logger.log("TCP_TEST INTC waiting for connection to INTB")
cls.INTC.wait_router_connected('INTB')
# Create a scoreboard for the ports
p_out = []
for rtr in cls.router_order:
p_out.append("%s_amqp=%d" %
(rtr, cls.amqp_listener_ports[rtr]))
p_out.append("%s_echo_server=%d" %
(rtr, cls.tcp_server_listener_ports[rtr]))
for tcp_listener in cls.router_order:
p_out.append("%s_echo_listener_for_%s=%d" %
(rtr, tcp_listener, cls.tcp_client_listener_ports[rtr][tcp_listener]))
p_out.append("%s_nodest_listener=%d" %
(rtr, cls.nodest_listener_ports[rtr]))
# p_out.append("%s_http_listener=%d" %
# (rtr, cls.http_listener_ports[rtr]))
p_out.append("inter_router_port_AB=%d" % inter_router_port_AB)
p_out.append("inter_router_port_BC=%d" % inter_router_port_BC)
p_out.append("INTA_edge_port=%d" % cls.INTA_edge_port)
p_out.append("INTB_edge_port=%d" % cls.INTB_edge_port)
p_out.append("INTC_edge_port=%d" % cls.INTC_edge_port)
p_out.append("EC2_conn_stall_connector_port%d" % cls.EC2_conn_stall_connector_port)
p_out.append("INTA_conn_stall_listener_port%d" % cls.INTA_conn_stall_listener_port)
p_out.append("EC2_conn_stall_listener_port%d" % cls.EC2_conn_stall_listener_port)
# write to log
for line in p_out:
cls.logger.log("TCP_TEST %s" % line)
# write to shell script
parent_path = os.path.dirname(os.getcwd())
file_name = os.path.join(parent_path, "setUpClass/TcpAdaptor-ports.sh")
with open(file_name, 'w') as o_file:
for line in p_out:
o_file.write("set %s\n" % line)
# Write a script to run scraper on this test's log files
scraper_abspath = os.path.join(os.environ.get('BUILD_DIR'), 'tests', 'scraper', 'scraper.py')
logs_dir = os.path.join(parent_path, "setUpClass")
main_log = "TcpAdaptor.log"
echo_logs = "TcpAdaptor_echo*"
big_test_log = "TcpAdaptor_all.log"
int_logs = "I*.log"
edge_logs = "E*.log"
log_modules_spec = "--log-modules TCP_ADAPTOR,TCP_TEST,ECHO_SERVER,ECHO_CLIENT"
html_output = "TcpAdaptor.html"
with open(os.path.join(parent_path, "setUpClass/TcpAdaptor-run-scraper.sh"), 'w') as o_file:
o_file.write("#!/bin/bash\n\n")
o_file.write("# Script to run scraper on test class TcpAdaptor test result\n")
o_file.write("# cd into logs directory\n")
o_file.write("cd %s\n\n" % logs_dir)
o_file.write("# Concatenate test class logs into single file\n")
o_file.write("cat %s %s > %s\n\n" % (main_log, echo_logs, big_test_log))
o_file.write("# run scraper\n")
o_file.write("python %s %s -f %s %s %s > %s\n\n" %
(scraper_abspath, log_modules_spec, int_logs, edge_logs, big_test_log, html_output))
o_file.write("echo View the results by opening the html file\n")
o_file.write("echo firefox %s" % (os.path.join(logs_dir, html_output)))
# wait for server addresses (mobile ES_<rtr>) to propagate to all interior routers
interior_rtrs = [rtr for rtr in cls.router_order if rtr.startswith('I')]
poll_loops = 100
poll_loop_delay = 0.5 # seconds
found_all = False
while not found_all:
found_all = True
cls.logger.log("TCP_TEST Poll wait for echo server addresses to propagate")
for rtr in interior_rtrs:
# query each interior for addresses
p = Process(
['qdstat', '-b', str(cls.router_dict[rtr].addresses[0]), '-a'],
name='qdstat-snap1', stdout=PIPE, expect=None,
universal_newlines=True)
out = p.communicate()[0]
# examine what this router can see; signal poll loop to continue or not
lines = out.split("\n")
server_lines = [line for line in lines if "mobile" in line and "ES_" in line]
if not len(server_lines) == len(cls.router_order):
found_all = False
seen = []
for line in server_lines:
flds = line.split()
seen.extend([fld for fld in flds if fld.startswith("ES_")])
unseen = [srv for srv in cls.router_order if "ES_" + srv not in seen]
cls.logger.log("TCP_TEST Router %s sees only %d of %d addresses. Waiting for %s" %
(rtr, len(server_lines), len(cls.router_order), unseen))
if poll_loops == 1:
# last poll loop
for line in lines:
cls.logger.log("TCP_TEST Router %s : %s" % (rtr, line))
poll_loops -= 1
if poll_loops == 0:
assert False, "TCP_TEST TCP_Adaptor test setup failed. Echo tests never executed."
else:
time.sleep(poll_loop_delay)
cls.logger.log("TCP_TEST Done poll wait")
@classmethod
def tearDownClass(cls):
# stop echo servers
for rtr in cls.router_order:
server = cls.echo_servers.get(rtr)
if server is not None:
cls.logger.log("TCP_TEST Stopping echo server ES_%s" % rtr)
server.wait()
if cls.echo_server_NS_CONN_STALL is not None:
cls.logger.log("TCP_TEST Stopping echo server NS_EC2_CONN_STALL")
cls.echo_server_NS_CONN_STALL.wait()
super(TcpAdaptor, cls).tearDownClass()
def run_qdmanage(self, cmd, input=None, expect=Process.EXIT_OK, address=None):
p = self.popen(
['qdmanage'] + cmd.split(' ') + ['--bus', address or str(self.router_dict['INTA'].addresses[0]),
'--indent=-1', '--timeout', str(TIMEOUT)],
stdin=PIPE, stdout=PIPE, stderr=STDOUT, expect=expect,
universal_newlines=True)
out = p.communicate(input)[0]
try:
p.teardown()
except Exception as e:
raise Exception(out if out else str(e))
return out
class EchoPair():
"""
For the concurrent tcp tests this class describes one of the client-
server echo pairs and the traffic pattern between them.
"""
def __init__(self, client_rtr, server_rtr, sizes=None, counts=None):
self.client_rtr = client_rtr
self.server_rtr = server_rtr
self.sizes = [1] if sizes is None else sizes
self.counts = [1] if counts is None else counts
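        # Illustrative (hypothetical) pair: self.EchoPair(self.INTA, self.INTB, sizes=[100, 1000], counts=[1, 5])
        # would make do_tcp_echo_n_routers() launch one echo client per size/count
        # combination (4 clients) from INTA's listener toward INTB's echo server.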
def do_tcp_echo_n_routers(self, test_name, echo_pair_list):
"""
Launch all the echo pairs defined in the list
Wait for completion.
:param test_name test name
:param echo_pair_list list of EchoPair objects describing the test
:return: None if success else error message for ctest
"""
self.logger.log("TCP_TEST %s Start do_tcp_echo_n_routers" % (test_name))
result = None
runners = []
client_num = 0
start_time = time.time()
try:
# Launch the runners
for echo_pair in echo_pair_list:
client = echo_pair.client_rtr.name
server = echo_pair.server_rtr.name
for size in echo_pair.sizes:
for count in echo_pair.counts:
log_msg = "TCP_TEST %s Running pair %d %s->%s size=%d count=%d" % \
(test_name, client_num, client, server, size, count)
self.logger.log(log_msg)
runner = EchoClientRunner(test_name, client_num,
self.logger,
client, server, size, count,
self.print_logs_client)
runners.append(runner)
client_num += 1
# Loop until timeout, error, or completion
while result is None:
# Check for timeout
time.sleep(0.1)
elapsed = time.time() - start_time
if elapsed > echo_timeout:
result = "TCP_TEST TIMEOUT - local wait time exceeded"
break
# Make sure servers are still up
for rtr in TcpAdaptor.router_order:
es = TcpAdaptor.echo_servers[rtr]
if es.error is not None:
self.logger.log("TCP_TEST %s Server %s stopped with error: %s" %
(test_name, es.prefix, es.error))
result = es.error
break
if es.exit_status is not None:
self.logger.log("TCP_TEST %s Server %s stopped with status: %s" %
(test_name, es.prefix, es.exit_status))
result = es.exit_status
break
if result is not None:
break
# Check for completion or runner error
complete = True
for runner in runners:
if not runner.client_final:
error = runner.client_error()
if error is not None:
self.logger.log("TCP_TEST %s Client %s stopped with error: %s" %
(test_name, runner.name, error))
result = error
runner.client_final = True
break
status = runner.client_exit_status()
if status is not None:
self.logger.log("TCP_TEST %s Client %s stopped with status: %s" %
(test_name, runner.name, status))
result = status
runner.client_final = True
break
running = runner.client_running()
if running:
complete = False
else:
self.logger.log("TCP_TEST %s Client %s exited normally" %
(test_name, runner.name))
runner.client_final = True
if complete and result is None:
self.logger.log("TCP_TEST %s SUCCESS" %
test_name)
break
# Wait/join all the runners
for runner in runners:
runner.wait()
if result is not None:
self.logger.log("TCP_TEST %s failed: %s" % (test_name, result))
except Exception as exc:
result = "TCP_TEST %s failed. Exception: %s" % \
(test_name, traceback.format_exc())
return result
def do_tcp_echo_singleton(self, test_name, client, server, size, count, echo_port):
"""
Launch a single echo client to the echo_port
Wait for completion.
Note that client and server do not define the port that the echo client
must connect to. That is overridden by echo_port. Still client and server
are passed to the EchoClientRunner
:param test_name test name
:param client router to which echo client attaches
:param server router that has the connector to the echo server
:param size size of message to be echoed
:param count number of messages to be echoed
:param echo_port the router network listener port
:return: None if success else error message for ctest
"""
self.logger.log("TCP_TEST %s Start do_tcp_echo_singleton" % test_name)
result = None
runners = []
client_num = 0
start_time = time.time()
try:
# Launch the runner
log_msg = "TCP_TEST %s Running singleton %d %s->%s port %d, size=%d count=%d" % \
(test_name, client_num, client.name, server.name, echo_port, size, count)
self.logger.log(log_msg)
runner = EchoClientRunner(test_name, client_num, self.logger,
client.name, server.name, size, count,
self.print_logs_client,
port_override=echo_port)
runners.append(runner)
client_num += 1
# Loop until timeout, error, or completion
while result is None:
# Check for timeout
time.sleep(0.1)
elapsed = time.time() - start_time
if elapsed > echo_timeout:
result = "TCP_TEST TIMEOUT - local wait time exceeded"
break
# Make sure servers are still up
for rtr in TcpAdaptor.router_order:
es = TcpAdaptor.echo_servers[rtr]
if es.error is not None:
self.logger.log("TCP_TEST %s Server %s stopped with error: %s" %
(test_name, es.prefix, es.error))
result = es.error
break
if es.exit_status is not None:
self.logger.log("TCP_TEST %s Server %s stopped with status: %s" %
(test_name, es.prefix, es.exit_status))
result = es.exit_status
break
if result is not None:
break
# Check for completion or runner error
complete = True
for runner in runners:
if not runner.client_final:
error = runner.client_error()
if error is not None:
self.logger.log("TCP_TEST %s Client %s stopped with error: %s" %
(test_name, runner.name, error))
result = error
runner.client_final = True
break
status = runner.client_exit_status()
if status is not None:
self.logger.log("TCP_TEST %s Client %s stopped with status: %s" %
(test_name, runner.name, status))
result = status
runner.client_final = True
break
running = runner.client_running()
if running:
complete = False
else:
self.logger.log("TCP_TEST %s Client %s exited normally" %
(test_name, runner.name))
runner.client_final = True
if complete and result is None:
self.logger.log("TCP_TEST %s SUCCESS" %
test_name)
break
# Wait/join all the runners
for runner in runners:
runner.wait()
if result is not None:
self.logger.log("TCP_TEST %s failed: %s" % (test_name, result))
except Exception as exc:
result = "TCP_TEST %s failed. Exception: %s" % \
(test_name, traceback.format_exc())
return result
#
# Tests run by ctest
#
@unittest.skipIf(DISABLE_SELECTOR_TESTS, DISABLE_SELECTOR_REASON)
def test_01_tcp_basic_connectivity(self):
"""
Echo a series of 1-byte messages, one at a time, to prove general connectivity.
Every listener is tried. Proves every router can forward to servers on
every other router.
"""
for l_rtr in self.router_order:
for s_rtr in self.router_order:
name = "test_01_tcp_%s_%s" % (l_rtr, s_rtr)
self.logger.log("TCP_TEST test_01_tcp_basic_connectivity Start %s" % name)
pairs = [self.EchoPair(self.router_dict[l_rtr], self.router_dict[s_rtr])]
result = self.do_tcp_echo_n_routers(name, pairs)
if result is not None:
print(result)
sys.stdout.flush()
assert result is None, "TCP_TEST test_01_tcp_basic_connectivity Stop %s FAIL: %s" % (name, result)
self.logger.log("TCP_TEST test_01_tcp_basic_connectivity Stop %s SUCCESS" % name)
# larger messages
@unittest.skipIf(DISABLE_SELECTOR_TESTS, DISABLE_SELECTOR_REASON)
def test_10_tcp_INTA_INTA_100(self):
name = "test_10_tcp_INTA_INTA_100"
self.logger.log("TCP_TEST Start %s" % name)
pairs = [self.EchoPair(self.INTA, self.INTA, sizes=[100])]
result = self.do_tcp_echo_n_routers(name, pairs)
if result is not None:
print(result)
sys.stdout.flush()
assert result is None, "TCP_TEST Stop %s FAIL: %s" % (name, result)
self.logger.log("TCP_TEST Stop %s SUCCESS" % name)
@unittest.skipIf(DISABLE_SELECTOR_TESTS, DISABLE_SELECTOR_REASON)
def test_11_tcp_INTA_INTA_1000(self):
name = "test_11_tcp_INTA_INTA_1000"
self.logger.log("TCP_TEST Start %s" % name)
pairs = [self.EchoPair(self.INTA, self.INTA, sizes=[1000])]
result = self.do_tcp_echo_n_routers(name, pairs)
if result is not None:
print(result)
sys.stdout.flush()
assert result is None, "TCP_TEST Stop %s FAIL: %s" % (name, result)
self.logger.log("TCP_TEST Stop %s SUCCESS" % name)
@unittest.skipIf(DISABLE_SELECTOR_TESTS, DISABLE_SELECTOR_REASON)
def test_12_tcp_INTA_INTA_500000(self):
name = "test_12_tcp_INTA_INTA_500000"
self.logger.log("TCP_TEST Start %s" % name)
pairs = [self.EchoPair(self.INTA, self.INTA, sizes=[500000])]
result = self.do_tcp_echo_n_routers(name, pairs)
if result is not None:
print(result)
sys.stdout.flush()
assert result is None, "TCP_TEST Stop %s FAIL: %s" % (name, result)
self.logger.log("TCP_TEST Stop %s SUCCESS" % name)
@unittest.skipIf(DISABLE_SELECTOR_TESTS, DISABLE_SELECTOR_REASON)
def test_13_tcp_EA1_EC2_500000(self):
name = "test_12_tcp_EA1_EC2_500000"
self.logger.log("TCP_TEST Start %s" % name)
pairs = [self.EchoPair(self.INTA, self.INTA, sizes=[500000])]
result = self.do_tcp_echo_n_routers(name, pairs)
if result is not None:
print(result)
sys.stdout.flush()
assert result is None, "TCP_TEST Stop %s FAIL: %s" % (name, result)
@unittest.skipIf(DISABLE_SELECTOR_TESTS, DISABLE_SELECTOR_REASON)
def test_20_tcp_connect_disconnect(self):
name = "test_20_tcp_connect_disconnect"
self.logger.log("TCP_TEST Start %s" % name)
pairs = [self.EchoPair(self.INTA, self.INTA, sizes=[0])]
result = self.do_tcp_echo_n_routers(name, pairs)
if result is not None:
print(result)
sys.stdout.flush()
assert result is None, "TCP_TEST Stop %s FAIL: %s" % (name, result)
# TODO: This test passes but in passing router INTA crashes undetected.
self.logger.log("TCP_TEST Stop %s SUCCESS" % name)
# concurrent messages
@unittest.skipIf(DISABLE_SELECTOR_TESTS, DISABLE_SELECTOR_REASON)
def test_50_concurrent(self):
name = "test_50_concurrent_AtoA_BtoB"
self.logger.log("TCP_TEST Start %s" % name)
pairs = [self.EchoPair(self.INTA, self.INTA),
self.EchoPair(self.INTB, self.INTB)]
result = self.do_tcp_echo_n_routers(name, pairs)
if result is not None:
print(result)
sys.stdout.flush()
assert result is None, "TCP_TEST Stop %s FAIL: %s" % (name, result)
self.logger.log("TCP_TEST Stop %s SUCCESS" % name)
# Q2 holdoff
@unittest.skipIf(DISABLE_SELECTOR_TESTS, DISABLE_SELECTOR_REASON)
def test_60_q2_holdoff(self):
# for now, Q2 is disabled to avoid stalling TCP backpressure
self.skipTest("Q2 is disabled on TCP adaptor")
name = "test_60_q2_holdoff"
self.logger.log("TCP_TEST Start %s" % name)
# Verify going to EC2
result = self.do_tcp_echo_singleton(name, self.EC2, self.EC2, Q2_TEST_MESSAGE_SIZE,
1, self.EC2_conn_stall_listener_port)
if result is not None:
print(result)
sys.stdout.flush()
assert result is None, "TCP_TEST Stop %s FAIL: %s" % (name, result)
# search the router log file to verify Q2 was hit
for attempt in range(10):
block_ct = 0
unblock_ct = 0
lines = 0
with io.open(self.EC2.logfile_path) as f:
for line in f:
lines += 1
if 'client link blocked on Q2 limit' in line:
block_ct += 1
if 'client link unblocked from Q2 limit' in line:
unblock_ct += 1
if block_ct > 0 and block_ct == unblock_ct:
break
self.logger.log("Q2 holdoff from EC2 not detected. Wait for log file to update...")
time.sleep(0.1)
result = "failed" if block_ct == 0 or not block_ct == unblock_ct else "passed"
self.logger.log("TCP_TEST %s EC2 log scrape %s. block_ct=%d, unblock_ct=%d, lines=%d" %
(name, result, block_ct, unblock_ct, lines))
self.assertTrue(block_ct > 0)
self.assertEqual(block_ct, unblock_ct)
# Declare success
self.logger.log("TCP_TEST Stop %s SUCCESS" % name)
def run_ncat(self, port, logger, expect=Process.EXIT_OK, timeout=2, data=b'abcd'):
ncat_cmd = ['ncat', '127.0.0.1', str(port)]
logger.log("Starting ncat '%s' and input '%s'" % (ncat_cmd, str(data)))
p = self.popen(
ncat_cmd,
stdin=PIPE, stdout=PIPE, stderr=PIPE, expect=expect,
universal_newlines=True)
        out = p.communicate(input=data.decode() if isinstance(data, bytes) else data,
                            timeout=timeout)[0]
try:
p.teardown()
except Exception as e:
raise Exception(out if out else str(e))
return out
def ncat_runner(self, tname, client, server, logger):
name = "%s_%s_%s" % (tname, client, server)
logger.log(name + " Start")
out = self.run_ncat(TcpAdaptor.tcp_client_listener_ports[client][server], logger, data=b'abcd')
logger.log("run_ncat returns: '%s'" % out)
assert(len(out) > 0)
assert("abcd" in out)
logger.log(tname + " Stop")
# half-closed handling
def test_70_half_closed(self):
if DISABLE_SELECTOR_TESTS:
self.skipTest(DISABLE_SELECTOR_REASON)
if not ncat_available():
self.skipTest("Ncat utility is not available")
name = "test_70_half_closed"
self.logger.log("TCP_TEST Start %s" % name)
self.ncat_runner(name, "INTA", "INTA", self.logger)
self.ncat_runner(name, "INTA", "INTB", self.logger)
self.ncat_runner(name, "INTA", "INTC", self.logger)
self.ncat_runner(name, "EA1", "EA1", self.logger)
self.ncat_runner(name, "EA1", "EB1", self.logger)
self.ncat_runner(name, "EA1", "EC2", self.logger)
self.logger.log("TCP_TEST Stop %s SUCCESS" % name)
# connector/listener stats
def test_80_stats(self):
tname = "test_80 check stats in qdmanage"
self.logger.log(tname + " START")
# Verify listener stats
query_command = 'QUERY --type=tcpListener'
outputs = json.loads(self.run_qdmanage(query_command))
for output in outputs:
if output['name'].startswith("ES"):
# Check only echo server listeners
assert("connectionsOpened" in output)
assert(output["connectionsOpened"] > 0)
assert(output["connectionsOpened"] == output["connectionsClosed"])
assert(output["bytesIn"] == output["bytesOut"])
# Verify connector stats
query_command = 'QUERY --type=tcpConnector'
outputs = json.loads(self.run_qdmanage(query_command))
for output in outputs:
assert(output['address'].startswith("ES"))
assert("connectionsOpened" in output)
assert(output["connectionsOpened"] > 0)
# egress_dispatcher connection opens and should never close
assert(output["connectionsOpened"] == output["connectionsClosed"] + 1)
assert(output["bytesIn"] == output["bytesOut"])
self.logger.log(tname + " SUCCESS")
class TcpAdaptorManagementTest(TestCase):
"""
Test Creation and deletion of TCP management entities
"""
@classmethod
def setUpClass(cls):
super(TcpAdaptorManagementTest, cls).setUpClass()
if DISABLE_SELECTOR_TESTS:
return
cls.test_name = 'TCPMgmtTest'
# create edge and interior routers. The listener/connector will be on
# the edge router. It is expected that the edge will create proxy
# links to the interior and remove them when the test is done.
cls.interior_edge_port = cls.tester.get_port()
cls.interior_mgmt_port = cls.tester.get_port()
cls.edge_mgmt_port = cls.tester.get_port()
cls.tcp_server_port = cls.tester.get_port()
cls.tcp_listener_port = cls.tester.get_port()
i_config = [
('router', {'mode': 'interior',
'id': 'TCPMgmtTestInterior'}),
('listener', {'role': 'normal',
'port': cls.interior_mgmt_port}),
('listener', {'role': 'edge', 'port': cls.interior_edge_port}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
]
config = Qdrouterd.Config(i_config)
cls.i_router = cls.tester.qdrouterd('TCPMgmtTestInterior', config, wait=False)
e_config = [
('router', {'mode': 'edge',
'id': 'TCPMgmtTestEdge'}),
('listener', {'role': 'normal',
'port': cls.edge_mgmt_port}),
('connector', {'name': 'edge', 'role': 'edge',
'port': cls.interior_edge_port}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
]
config = Qdrouterd.Config(e_config)
cls.e_router = cls.tester.qdrouterd('TCPMgmtTestEdge', config,
wait=False)
cls.i_router.wait_ready()
cls.e_router.wait_ready()
def _query_links_by_addr(self, router_mgmt, owning_addr):
oid = 'org.apache.qpid.dispatch.router.link'
attrs = ['owningAddr', 'linkDir']
links = []
rc = router_mgmt.query(type=oid, attribute_names=attrs).results
for link in rc:
if link[0] is not None and link[0].endswith(owning_addr):
links.append(link)
return links
@unittest.skipIf(DISABLE_SELECTOR_TESTS, DISABLE_SELECTOR_REASON)
def test_01_mgmt(self):
"""
Create and delete TCP connectors and listeners. Ensure that the service
address is properly removed on the interior router.
"""
LISTENER_TYPE = 'org.apache.qpid.dispatch.tcpListener'
CONNECTOR_TYPE = 'org.apache.qpid.dispatch.tcpConnector'
mgmt = self.e_router.management
# When starting out, there should be no tcpListeners or tcpConnectors.
self.assertEqual(0, len(mgmt.query(type=LISTENER_TYPE).results))
self.assertEqual(0, len(mgmt.query(type=CONNECTOR_TYPE).results))
connector_name = "ServerConnector"
listener_name = "ClientListener"
mgmt.create(type=LISTENER_TYPE,
name=listener_name,
attributes={'address': self.test_name,
'port': self.tcp_listener_port,
'host': '127.0.0.1'})
mgmt.create(type=CONNECTOR_TYPE,
name=connector_name,
attributes={'address': self.test_name,
'port': self.tcp_server_port,
'host': '127.0.0.1'})
# verify the entities have been created and tcp traffic works
self.assertEqual(1, len(mgmt.query(type=LISTENER_TYPE).results))
self.assertEqual(1, len(mgmt.query(type=CONNECTOR_TYPE).results))
# now verify that the interior router sees the service address
# and two proxy links are created
self.i_router.wait_address(self.test_name, subscribers=1)
while True:
links = self._query_links_by_addr(self.i_router.management,
self.test_name)
if links:
# expect a single consumer link that represents
# the connector
self.assertEqual(1, len(links))
self.assertEqual("out", links[0][1])
break
time.sleep(0.25)
# Delete the connector and listener
out = mgmt.delete(type=CONNECTOR_TYPE, name=connector_name)
self.assertIsNone(out)
self.assertEqual(0, len(mgmt.query(type=CONNECTOR_TYPE).results))
out = mgmt.delete(type=LISTENER_TYPE, name=listener_name)
self.assertIsNone(out)
self.assertEqual(0, len(mgmt.query(type=LISTENER_TYPE).results))
# verify the service address and proxy links are no longer active on
# the interior router
self.i_router.wait_address_unsubscribed(self.test_name)
while True:
links = self._query_links_by_addr(self.i_router.management,
self.test_name)
if len(links) == 0:
break
time.sleep(0.25)
# verify that clients can no longer connect to the listener
client_conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_conn.setblocking(True)
client_conn.settimeout(5)
with self.assertRaises(ConnectionRefusedError):
client_conn.connect(('127.0.0.1', self.tcp_listener_port))
client_conn.close()
if __name__ == '__main__':
unittest.main(main_module())
| apache/dispatch | tests/system_tests_tcp_adaptor.py | Python | apache-2.0 | 50,963 | 0.002178 |
# -*- coding: utf-8 -*-
"""
Adapter to use Python str.format() to render a template
"""
from __future__ import absolute_import, unicode_literals, print_function
from string import Formatter
# handle py2 and py3 strings without relying on six lib since we don't use it for anything else.
try:
basestring
except NameError:
# if it's good enough for Kenneth Reitz, it's good enough for me
# https://github.com/kennethreitz/requests/blob/5c4549493b35f5dbb084d029eaf12b6c7ce22579/requests/compat.py#L66
basestring = (str, bytes)
class DefaultValueFormatter(Formatter):
"""
String formatter which replaces keys found in the string but not in the replacement parameters
with a default value.
    If no default is given, the empty string `''` is used.
"""
def __init__(self, default=''):
Formatter.__init__(self)
self.default = default
def get_value(self, key, args, kwds):
if isinstance(key, basestring):
try:
return kwds[key]
except KeyError:
return self.default
        return Formatter.get_value(self, key, args, kwds)
class StringFormatAdapter(object):
"""
Adapter for NoteBlockPreprocessor to render templates using standard python string substitution
using named arguments.
"""
def render(self, template='', context=None, *args, **kwargs):
if context is None:
context = {}
formatter = DefaultValueFormatter()
return formatter.format(template, **context)
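# Illustrative usage (not part of the original module):
#   StringFormatAdapter().render('Hi {name}{missing}!', context={'name': 'Ada'})
# returns 'Hi Ada!' -- keys absent from the context are substituted with the
# formatter's default, the empty string.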
| livio/DocDown-Python | docdown/template_adapters/string_format.py | Python | bsd-3-clause | 1,530 | 0.001961 |
#!/usr/bin/env python
# Remove .egg-info directory if it exists, to avoid dependency problems with
# partially-installed packages (20160119/dphiffer)
import os, sys
from shutil import rmtree
cwd = os.path.dirname(os.path.realpath(sys.argv[0]))
egg_info = cwd + "/mapzen.whosonfirst.pip.utils.egg-info"
if os.path.exists(egg_info):
rmtree(egg_info)
from setuptools import setup, find_packages
packages = find_packages()
desc = open("README.md").read(),
version = open("VERSION").read()
setup(
name='mapzen.whosonfirst.pip.utils',
namespace_packages=['mapzen', 'mapzen.whosonfirst'],
version=version,
description='Python utility methods for making Who\'s On First documents play nicely with the go-whosonfirst-pip server',
author='Mapzen',
url='https://github.com/mapzen/py-mapzen-whosonfirst-pip-utils',
install_requires=[
'mapzen.whosonfirst.pip>=0.04',
'mapzen.whosonfirst.placetypes>=0.11',
'shapely',
],
dependency_links=[
'https://github.com/whosonfirst/py-mapzen-whosonfirst-pip/tarball/master#egg=mapzen.whosonfirst.pip-0.04',
'https://github.com/whosonfirst/py-mapzen-whosonfirst-placetypes/tarball/master#egg=mapzen.whosonfirst.placetypes-0.11',
],
packages=packages,
scripts=[
],
download_url='https://github.com/mapzen/py-mapzen-whosonfirst-pip-utils/releases/tag/' + version,
license='BSD')
| whosonfirst/py-mapzen-whosonfirst-pip-utils | setup.py | Python | bsd-3-clause | 1,676 | 0.004773 |
#!/usr/bin/env python
import cv2
import cv2.cv as cv
class Display:
def setup(self, fullscreen):
cv2.namedWindow('proj_0', cv2.WINDOW_OPENGL)
if fullscreen:
cv2.setWindowProperty('proj_0', cv2.WND_PROP_FULLSCREEN, cv.CV_WINDOW_FULLSCREEN)
def draw(self, image):
cv2.imshow('proj_0', image)
cv2.waitKey(1)
| light-swarm/lightswarm_render | scripts/display.py | Python | mit | 385 | 0.012987 |
from collections import namedtuple
def game():
# 元组拆包
a, b, *rest = range(5)
print(a, b, rest)
a, b, *rest = range(2)
print(a, b, rest)
a, *body, c, d = range(5)
print(a, body, c, d)
*head, b, c, d = range(5)
print(head, b, c, d)
    # Named tuples (page 26)
City = namedtuple('City', 'name country population coordinates')
tokyo = City('Tokyo', 'JP', '36.933', (35.689722, 139.691667))
print(tokyo)
print("population is %s" % tokyo.population)
print("coordinates is {}".format(tokyo.coordinates))
print("index one in tokyo is %s" % tokyo[1])
print("all fields is {}".format(City._fields))
LatLong = namedtuple('LatLong', 'lat long')
delhi_data = ('Delhi NCR', 'IN', 21.935, LatLong(28.613889, 77.208889))
delhi = City._make(delhi_data)
print(delhi._asdict()) # collections.OrderedDict
if __name__ == "__main__":
game()
| hugoxia/Python | FluentPython/chapter_2/sequence.py | Python | mit | 919 | 0 |
#!/usr/bin/env python
from select import poll, POLLIN, POLLHUP
from subprocess import Popen, PIPE
from phpsh import PhpshConfig
import xml.dom.minidom
import signal
import socket
import shlex
import time
import sys
import re
import os
"""This is a DBGp xdebug protocol proxy started by phpsh. It accepts a
connection from xdebug, connects to an IDE debug client and
communicates with its parent phpsh over a pair of pipes."""
__version__ = "1.0"
__author__ = "march@facebook.com"
__date__ = "Nov 05, 2008"
usage = "dbgp.py <4-pipe-fds>"
client_init_error_msg = """
Timed out while waiting for debug client for %ds. Make sure the client is
configured for PHP debugging and expects xdebug connections on port
%d. Client command was: %s"""
logfile = None
def debug_log(s):
global tracing_enabled
global logfile
if not tracing_enabled:
return
if not logfile:
logfile = open("dbgp.log", "a", 1)
logfile.write('\n>>>>>>>>>>>>>>>>>>>>>>>\n\n')
logfile.write(s+'\n\n')
logfile.flush()
def dbgp_get_filename(dbgp_response):
"""If dbgp_response is a dbgp <response> message with status='break' and
'filename' attribute set, return the value of filename. Otherwise
return None"""
doc = xml.dom.minidom.parseString(dbgp_response)
res = doc.getElementsByTagName("response")
if res and res[0].getAttribute('status') == "break":
msg = doc.getElementsByTagName("xdebug:message")
if msg and msg[0].hasAttribute('filename'):
return msg[0].getAttribute('filename')
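# For reference, a rough sketch of the kind of xdebug <response> this function
# handles (attribute values here are invented for illustration):
#   <response xmlns="urn:debugger_protocol_v1"
#             xmlns:xdebug="http://xdebug.org/dbgp/xdebug"
#             command="run" transaction_id="7" status="break" reason="ok">
#     <xdebug:message filename="file:///var/www/index.php" lineno="42"/>
#   </response>
# dbgp_get_filename() would return "file:///var/www/index.php" for this message.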
def dbgp_get_txid(dbgp_response):
doc = xml.dom.minidom.parseString(dbgp_response)
res = doc.getElementsByTagName("response")
if res:
return res[0].getAttribute('transaction_id')
def dbgp_get_bpid(dbgp_response):
"""If dbgp_response is a response to 'breakpoint_set' with
transaction_id=txid, return the value of id attribute as a string.
Otherwise return None"""
doc = xml.dom.minidom.parseString(dbgp_response)
res = doc.getElementsByTagName("response")
if res and res[0].getAttribute('command') == 'breakpoint_set':
return res[0].getAttribute('id')
def xdebug_is_stopping(dbgp_response):
doc = xml.dom.minidom.parseString(dbgp_response)
res = doc.getElementsByTagName("response")
return res and res[0].getAttribute("status") == "stopping"
def parse_port(portstr):
if not portstr:
return None
try:
port = int(portstr)
if port < 0:
raise ValueError, "Invalid port: " + portstr
elif port == 0:
port = None
except ValueError:
raise ValueError, "Invalid port: " + portstr
return port
def parse_timeout(timeoutstr):
if not timeoutstr:
return None
try:
timeout = int(timeoutstr)
if timeout <= 0:
return None
except ValueError:
raise ValueError, "Invalid timeout: " + timeoutstr
return timeout
def get_emacs_version():
vline = Popen("emacs --version | head -n 1", shell=True,
stdout=PIPE, stderr=PIPE).communicate()[0]
if not vline:
raise OSError, "emacs not found. Make sure it's in your PATH."
m = re.compile("GNU Emacs ([0-9.]+)").match(vline)
if not m:
raise ValueError, "could not parse emacs version: " + vline +\
"\nexpected GNU Emacs [0-9.]+"
try:
return [int(s) for s in m.group(1).strip('.').split('.')]
except ValueError:
raise ValueError, "invalid Emacs version format: " + m.group(1)
def get_debugclient_version(debugclient_path):
vline = Popen(debugclient_path + " -v | head -n 1", shell=True,
stdout=PIPE, stderr=PIPE).communicate()[0]
if not vline:
raise OSError, "debugclient not found\nThis is a simple xdebug "\
"protocol client distributed with xdebug\n"\
"Make sure it's in your PATH."
m = re.compile("Xdebug Simple DBGp client \(([0-9.]+)\)").match(vline)
if not m:
raise ValueError, "could not parse debugclient version: " + vline +\
"\nexpected Xdebug Simple DBGp client ([0-9.]+)"
try:
return [int(s) for s in m.group(1).strip('.').split('.')]
except ValueError:
raise ValueError, "invalid debugclient version format: " + m.group(1)
class DebugClient:
"""Objects of this class are interfaces to debug IDE clients. A DebugClient object may exist even if the underlying IDE process is no longer running."""
def __init__(self, config, port):
self.p_client = None # Popen to client
self.conn = None # DBGpConn to client
self.lasttxid = None # last txid seen from this client
self.lastdbgpcmd = None # name of last command read from client
self.stopped = True # never sent anything to this client, or
# last message was "stopped"
self.config = config # RawConfigParser
self.port = port
self.host = config.get_option("Debugging", "ClientHost")
self.timeout = parse_timeout(config.get_option("Debugging",
"ClientTimeout"))
self.auto_close = False # client exits after each debugging session
# self.emacs_command() may set this to True
debug_log("creating DebugClient object")
if config.get_option("Debugging", "X11").startswith("require") \
and not os.getenv('DISPLAY'):
debug_log("X11 is required and DISPLAY is not set")
raise Exception, "X11 is required and DISPLAY is not set"
cmd = config.get_option("Debugging", "DebugClient")
if cmd.startswith("emacs"):
emacs_version = get_emacs_version()
if emacs_version < [22, 1]:
raise Exception, "emacs version " + str(emacs_version) +\
" is too low, 22.1 or above required"
debugclient_path = config.get_option("Emacs", "XdebugClientPath")
debugclient_version = get_debugclient_version(debugclient_path)
if debugclient_version < [0, 10, 0]:
raise Exception, "debugclient (xdebug client) version " +\
str(debugclient_version) + " is too low. 0.10.0 or "\
"above required"
self.cmd = self.emacs_command(config)
else:
self.cmd = shlex.split(cmd)
def connect(self):
"""Try to connect to self.host:self.port (if host is an empty string,
connect to localhost). If can't connect and host is localhost,
execute cmd and try to connect again until timeout. Raises
socket.timeout if client is not up until timeout, OSError if
client could not be started"""
global config
if self.conn:
if self.conn.isconnected():
# check if the client is still connected by reading
# everything that the client sent us since the end of
# last session (if any) and checking for HUP
client_set = poll()
client_set.register(self.conn.get_sockfd(), POLLIN)
events = client_set.poll(0)
try:
while events:
fd, e = events[0]
if e&POLLHUP:
self.conn.close()
self.conn = None
raise EOFError
else:
self.recv_cmd()
events = client_set.poll(0)
return # still connected
except (socket.error, EOFError):
pass
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((self.host, self.port))
except socket.error, msg:
if self.host != '' and self.host != 'localhost' \
and self.host != '127.0.0.1' or not self.cmd:
# could not connect and client is not local or no command
# to start a client. Propagate exception.
raise socket.error, msg
# client is local, X display is set, try to start it
debug_log("Starting client: " + " ".join(self.cmd))
self.p_client = Popen(self.cmd, close_fds=True)
sock.settimeout(self.timeout)
tstart = time.time()
while True:
try:
debug_log("Connecting to client")
sock.connect(('', self.port))
sock.settimeout(None)
debug_log("Connected to client")
break
except socket.error:
# failed to connect, likely client is not up yet
# keep trying
if self.timeout and time.time() - tstart > self.timeout:
debug_log("Timed out while waiting "\
"for debug client to come up")
self.close()
raise socket.timeout, "Timed out while waiting "\
"for debug client to come up"
time.sleep(1)
self.conn = DBGpConn(sock)
def send_msg(self, msg):
self.conn.send_msg(msg)
self.stopped = False
def recv_cmd(self):
cmd = self.conn.recv_cmd()
try:
lcmd = cmd.split()
i = lcmd.index("-i")
self.lasttxid = lcmd[i+1]
self.lastdbgpcmd = lcmd[0]
except Exception, msg:
debug_log("ERROR: did not find txid in command read from client: "\
+ cmd)
return cmd
def get_sockfd(self):
return self.conn.get_sockfd()
def stop(self):
if self.stopped or not self.lasttxid or not self.lastdbgpcmd or \
not self.conn.isconnected():
# client is already stopped or hasn't run a debug session
return False
stopped = '<?xml version="1.0" encoding="iso-8859-1"?>\n'\
'<response xmlns="urn:debugger_protocol_v1" '\
'xmlns:xdebug="http://xdebug.org/dbgp/xdebug" '\
'command="'+self.lastdbgpcmd+'" transaction_id="'\
+self.lasttxid+'" status="stopped" reason="ok"></response>'
self.send_msg(stopped)
self.stopped = True
# If our client is emacs that we started and it is
# running without X11, phpsh.el will make it exit at the
# end of debug session. We must wait for that event before
# allowing php to run and phpsh to try to print to terminal.
# Emacs uses tcsetpgrp() to effectively place all other
# processes in its pgroup (including phpsh) in the background.
# Allowing phpsh to print to terminal before emacs reverts the
# terminal pgroup will result in SIGTTOU sent to phpsh
        # and dbgp, suspending them. Even if we ignored the signals,
# anything phpsh prints to terminal before emacs resets it
# will be lost or unreadable.
if self.auto_close:
debug_log("waiting for client to exit")
self.wait()
def wait(self):
if self.p_client:
os.waitpid(self.p_client.pid, 0)
def close(self):
"""Close connection to debug client and kill it if we started it"""
if self.conn and self.conn.isconnected():
self.stop()
self.conn.close()
if self.p_client:
try:
os.kill(self.p_client.pid, signal.SIGKILL)
self.wait() # collect zombie
except OSError:
pass
self.p_client = None
def emacs_command(self, config):
"""Returns a list containing a shell command to start and
configure emacs according to the settings in phpsh config file"""
phpsh_root = os.path.dirname(os.path.realpath(__file__))
elisp_root = os.path.join(phpsh_root, "xdebug-clients/geben")
geben_elc = os.path.join(elisp_root, "geben.elc")
phpsh_el = os.path.join(phpsh_root, "phpsh.el")
help = os.path.join(elisp_root, "help")
debugclient_path = config.get_option("Emacs", "XdebugClientPath")
use_x = os.getenv('DISPLAY') and \
config.get_option("Debugging", "X11") != "no"
if use_x:
fg = config.get_option("Emacs", "ForegroundColor")
bg = config.get_option("Emacs", "BackgroundColor")
ina = config.get_option("Emacs", "InactiveColor")
family = config.get_option("Emacs", "FontFamily")
size = config.get_option("Emacs", "FontSize")
elisp = "(progn (set-face-foreground 'default \""+fg+"\") "+\
"(setq active-bg \""+bg+"\") "+\
"(setq inactive-bg \""+ina+"\") "\
"(setq geben-dbgp-command-line \""+debugclient_path+\
" -p "+str(self.port)+"\") "
if family or size:
elisp += "(set-face-attribute 'default nil"
if family:
elisp += " :family \""+family+"\""
if size:
elisp += " :height "+size
elisp += ") "
if config.get_option("Emacs", "InactiveMinimize") == "yes":
elisp +="(add-hook 'geben-session-finished-hook "\
"'iconify-frame) "\
"(add-hook 'geben-session-starting-hook "\
"'make-all-frames-visible) "
else:
# no X
self.auto_close = True
elisp = "(progn (setq geben-dbgp-command-line \""+debugclient_path+\
" -p "+str(self.port)+"\") "\
"(setq geben-dbgp-redirect-stdout-current :intercept) "\
"(setq geben-dbgp-redirect-stderr-current :intercept) "
# in terminal mode we set php stdout/err redirection mode
# to "intercept" in geben. this will make xdebug forward
# php stdout/err to geben over the TCP connection instead
# of writing to the parent pipe. This prevents phpsh from
# printing the output to terminal while emacs is running,
# which could mess up display and generate a SIGTTOU.
if config.get_option("Debugging", "Help") == "yes":
elisp += "(split-window) "\
"(find-file-read-only \""+help+"\") "\
"(other-window 1) "
else:
elisp += "(find-file-read-only \""+help+"\") "\
"(switch-to-buffer \"*scratch*\") "
elisp += ")"
if use_x:
return ["emacs", "--name", "phpsh-emacs", "-Q", "-l", geben_elc,
"-l", phpsh_el, "--eval", elisp, "-f", "geben"]
else:
return ["emacs", "-nw", "-Q", "-l", geben_elc,
"-l", phpsh_el, "--eval", elisp, "-f", "geben"]
class XDebug:
"""Encapsulates XDebug connection and communication"""
def __init__(self, s_accept):
sock, addr = s_accept.accept()
self.conn = DBGpConn(sock) # xdebug DBGp connection
self.dbgp_init = self.conn.recv_msg()
self.txid = 1000000 # use high txids to avoid clashes with clients
self.run()
def isconnected(self):
return self.conn and self.conn.isconnected()
def get_dbgp_init(self):
return self.dbgp_init
def set_breakpoint(self, function):
"""Attempt to set a breakpoint at the beginning of _function_.
Return breakpoint id on success, None if breakpoint could not be set"""
self.txid+=1
cmd = "breakpoint_set -i " + str(self.txid) + " -t call -m " + function
self.send_cmd(cmd)
reply = self.recv_reply()
bpid = dbgp_get_bpid(reply)
return bpid
def remove_breakpoint(self, bpid):
self.txid+=1
cmd = "breakpoint_remove -i " + str(self.txid) + " -d " + bpid
self.send_cmd(cmd)
self.recv_reply() # discard reply
def remove_all_breakpoints(self):
self.txid+=1
cmd = "breakpoint_list -i " + str(self.txid)
self.send_cmd(cmd)
response = self.recv_reply()
doc = xml.dom.minidom.parseString(response)
bps = doc.getElementsByTagName("breakpoint")
for bp in bps:
if bp.hasAttribute("id"):
self.remove_breakpoint(bp.getAttribute("id"))
def run(self):
self.txid+=1
cmd = "run -i " + str(self.txid)
self.send_cmd(cmd)
def stop(self):
self.txid+=1
cmd = "stop -i " + str(self.txid)
self.send_cmd(cmd)
def recv_msg(self):
msg = self.conn.recv_msg()
if xdebug_is_stopping(msg):
self.stop()
self.disconnect()
raise EOFError, "xdebug is stopping"
else:
return msg
def recv_reply(self, txid=None):
if not txid:
txid = self.txid
while True:
msg = self.recv_msg()
rtxid = dbgp_get_txid(msg)
if rtxid and str(txid) == str(rtxid):
return msg
def send_cmd(self, cmd):
return self.conn.send_cmd(cmd)
def get_sockfd(self):
return self.conn.get_sockfd()
def disconnect(self):
self.conn.close()
class DebugSession:
"""This class encapsulates the process of debugging a single
function"""
def __init__(self, function, client, xdebug, p_in):
self.client = client
self.xdebug = xdebug
self.function = function
self.p_in = p_in # file encapsulating "from parent" pipe end
def setup(self):
"""This function must be called when php just executed the initial xdebug_break() call that starts a new debugging session and the initial <break> message has been received from xdebug. setup() verifies that the function call being debugged is valid. If it is, a debug client is started and its view is set to display the first line of function. If the function call is invalid, for example, the function name cannot be found, setup() throws an Exception"""
# set a breakpoint on the function being debugged and on
# ___phpsh___eval_completed(), which phpsh.php will execute
# right after the eval(), continue PHP execution
debug_log("setting up debug session")
funbp = self.xdebug.set_breakpoint(self.function)
donebp = self.xdebug.set_breakpoint('___phpsh___eval_completed')
self.xdebug.run()
filename = None
while not filename:
reply = self.xdebug.recv_msg()
filename = dbgp_get_filename(reply)
if not filename.startswith("file://") or \
filename.endswith("/phpsh.php"):
# Execution stopped at ___phpsh___eval_completed() or in the
# eval(). Abort the session.
self.client = None
raise Exception, "Invalid PHP function call"
# at this point reply and filename are initialized. Delete
# breakpoint at self.function, then send <init> to client with
# fileuri attr set to filename (followed by the break message
# itself?)
self.xdebug.remove_breakpoint(funbp)
r = re.compile(' fileuri="([^"]*)"', re.M)
client_init = r.sub(' fileuri="' + filename + '"',
self.xdebug.get_dbgp_init())
self.client.connect()
self.client.send_msg(client_init)
#self.client.send_msg(reply) --this breaks geben, so disabling for now
# but vim seems to need id
def run(self):
"""forward messages between client and xdebug until xdebug stops in
phpsh.php, client closes connection or parent closes stdin"""
debug_log("running debug session")
session_set = poll()
session_set.register(self.client.get_sockfd(), POLLIN)
session_set.register(self.xdebug.get_sockfd(), POLLIN)
session_set.register(self.p_in.fileno(), POLLIN|POLLHUP)
while True:
events = session_set.poll()
for fd, e in events:
if fd == self.p_in.fileno():
phpsh_cmd = self.p_in.readline().strip()
if phpsh_cmd == 'run php':
# phpsh wants to restart php
# if php is blocked in xdebug, send a run command
raise EOFError # TODO: handle this gracefully
else:
raise EOFError
elif fd == self.client.get_sockfd():
dbgp_cmd = self.client.recv_cmd()
self.xdebug.send_cmd(dbgp_cmd)
elif fd == self.xdebug.get_sockfd():
reply = self.xdebug.recv_msg()
filename = dbgp_get_filename(reply)
if filename and filename.endswith("/phpsh.php"):
return
self.client.send_msg(reply)
def stop(self):
"""Do our best to clean up if we got to the end of a session
or if client or xdebug connection had an exception. This
function does not throw any IO exceptions."""
if self.client:
try:
# Send client a stop message at end of session.
self.client.stop()
except (socket.error, EOFError):
pass
if self.xdebug and self.xdebug.isconnected():
try:
# remove all bps we set in xdebug so that php will not
# get stuck on them when phpsh issues non-debug evals
debug_log("removing all breakpoints")
self.xdebug.remove_all_breakpoints()
debug_log("unblocking script")
self.xdebug.run()
except (socket.error, EOFError):
pass
class PhpshDebugProxy:
"""This is a DBGp filtering proxy for phpsh. It sits between xdebug.so
extension in PHP interpreter and a GUI debug client such as
Geben/Emacs and alters the conversation in a way that makes the
debugger and client begin debugging on a function specified through
commands that the proxy reads from g_in"""
# if doing dynamic port assignment, pick ports from this range:
minport = 9002
maxport = 9998
def __init__(self, config, p_in, p_out):
self.config = config # RawConfigParser
self.cmd = None # Popen command list to start client if local
self.client = None # DebugClient
self.xdebug = None # XDebug
self.session = None # DebugSession
self.s_accept = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.p_in = p_in # file encapsulating a pipe from parent
self.p_out = p_out # file encapsulating a pipe to parent
# host on which client runs:
clienthost = config.get_option("Debugging", "ClientHost")
# client listens on this port
clientport = parse_port(config.get_option("Debugging", "ClientPort"))
if not clientport and clienthost and clienthost != "localhost" and \
not clienthost.startswith("127.0.0."):
raise Exception, "configuration error: remote ClientHost with "\
"no ClientPort"
listenport = parse_port(config.get_option("Debugging", "ProxyPort"))
if listenport:
self.s_accept.bind(('', listenport))
else:
listenport = self.bind_to_port()
if not clientport:
clientport = listenport+1
try:
self.client = DebugClient(config, clientport)
self.s_accept.listen(1)
self.s_accept.settimeout(1)
except Exception:
self.s_accept.close()
raise
# tell parent we have initialized
debug_log("initialized, bound to port " + str(listenport))
self.tell_parent('initialized port='+ str(listenport))
def parent_closed(self):
"""Return True if 'from parent' pipe has HUP condition"""
evset = poll()
evset.register(self.p_in.fileno(), POLLIN|POLLHUP)
events = evset.poll(0)
if events:
fd, e = events[0]
if e & POLLHUP:
debug_log("parent_closed(): detected HUP")
return True
return False
def tell_parent(self, str):
self.p_out.write(str+'\n')
self.p_out.flush()
def bind_to_port(self):
"""Find an unused pair of adjacent ports (n,n+1) where n is
between PhpshDebugProxy.minport and maxport. Bind .s_accept to
n and return n. Throw socket.exception if suitable pair was
available."""
for n in xrange(PhpshDebugProxy.minport, PhpshDebugProxy.maxport+2, 2):
try:
self.s_accept.bind(('', n))
except socket.error:
continue
# check if client port is also available
trysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
trysock.bind(('', n+1))
trysock.close()
return n
except socket.error:
trysock.close()
self.s_accept.close()
self.s_accept = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
raise socket.error, "No ports available"
def run(self):
"""main loop of dbgp proxy"""
while True:
if self.xdebug == None or not self.xdebug.isconnected():
# if we do not have an xdebug connection, accept one
# keep an eye on phpsh, exit if it closes our stdin
self.xdebug = None
while not self.xdebug:
try:
debug_log("creating XDebug object")
self.xdebug = XDebug(self.s_accept)
except (socket.error, socket.timeout, EOFError):
if self.parent_closed():
debug_log("parent closed its end of pipe")
return
time.sleep(1)
# at this point self.xdebug is initialized
# block waiting for a command from phpsh, if xdebug disconnects
# because PHP was restarted, start over
phpsh_cmd = None
try:
cmd_pollset = poll()
cmd_pollset.register(self.p_in.fileno(), POLLIN|POLLHUP)
cmd_pollset.register(self.xdebug.get_sockfd(), POLLIN|POLLHUP)
while not phpsh_cmd:
# wait for a command from phpsh
# keep an eye on PHP, handle restarts
events = cmd_pollset.poll()
for fd, e in events:
if fd == self.p_in.fileno():
phpsh_cmd = self.p_in.readline().strip()
debug_log("Got command: >>" + phpsh_cmd + "<<")
break
elif fd == self.xdebug.get_sockfd():
# xdebug disconnected or sent us something
# after <init> or after previous client
# session ended. This cannot be the
# initial <break> of next session because
# phpsh only evals xdebug_break() after it
# gets a "ready" from dbgp, and we haven't
# sent it yet.
res = self.xdebug.recv_msg()
filename = dbgp_get_filename(res)
if filename and filename.endswith("/phpsh.php"):
# there is a bug in xdebug where it breaks
# on ___phpsh___eval_completed() after the
# breakpoint on that function was removed. This
# happens only if we reach the function via
# a "step out" command. Compensate by sending
# a run command.
debug_log("ERROR: xdebug stopped in phpsh.php "\
"after breakpoint was removed.")
self.xdebug.run()
except (socket.error, EOFError), msg:
debug_log("Exception while waiting for command: " + str(msg))
if self.parent_closed():
return # phpsh went away
if not self.xdebug.isconnected():
continue # xdebug disconnected
else:
debug_log("Error: unexpected exception " + str(msg))
# at this point phpsh_cmd has a new command from phpsh
if phpsh_cmd.startswith("x "):
function = phpsh_cmd[2:].strip()
session = None
# in the future we will be checking here if debugging
# can be started, for now we create a session object
# unconditionally and tell phpsh to go on. If later we
# fail to start a debug client, we just continue
# execution without debugging.
session = DebugSession(function, self.client,
self.xdebug, self.p_in)
debug_log("sending 'ready' to parent")
self.tell_parent("ready")
try:
# wait for an initial break message issued by
# xdebug_break() that phpsh is going to execute
# right before the target function
debug_log("waiting for inital 'break'")
self.xdebug.recv_msg()
# php is stopped in xdebug after executing xdebug_break()
session.setup()
session.run()
except Exception, msg:
# client, PHP, or phpsh exited, session.stop() will
# clean up
debug_log("Exception while setting up or running debug "
"session: " + str(msg))
pass
session.stop()
if self.parent_closed():
return
elif phpsh_cmd == "run php":
# phpsh sends this when it needs to restart PHP
# Since PHP is not blocked in debugger there
# is nothing to do. Ignore.
debug_log("got 'run php' from phpsh while waiting "
"for x command")
else:
self.tell_parent("ERROR: invalid command: " + phpsh_cmd)
# Based on debugger.py for Vim. Closes underlying socket on all exceptions.
#
# Authors:
# Seung Woo Shin <segv <at> sayclub.com>
# Sam Ghods <sam <at> box.net>
class DBGpConn:
""" DBGp Connection class """
def __init__(self, sock):
self.sock = sock
def isconnected(self):
return self.sock != None
def close(self):
if self.sock != None:
self.sock.close()
self.sock = None
def _recv_length(self):
length = ''
while 1:
c = self.sock.recv(1)
if c == '':
self.close()
raise EOFError, 'Socket Closed'
if c == '\0':
return int(length)
if c.isdigit():
length = length + c
def _recv_null(self):
while 1:
c = self.sock.recv(1)
if c == '':
self.close()
raise EOFError, 'Socket Closed'
if c == '\0':
return
def _recv_body(self, to_recv):
body = ''
while to_recv > 0:
buf = self.sock.recv(to_recv)
if buf == '':
self.close()
raise EOFError, 'Socket Closed'
to_recv -= len(buf)
body = body + buf
return body
def recv_msg(self):
try:
length = self._recv_length()
body = self._recv_body(length)
self._recv_null()
debug_log("received from " + str(self.sock.fileno()) + ": " + body)
return body
except:
self.close()
raise
def recv_cmd(self):
try:
cmd = ''
while True:
c = self.sock.recv(1)
if c == '':
self.close()
raise EOFError, 'Socket Closed'
elif c == '\0':
debug_log("received from " + str(self.sock.fileno()) + ": " +
cmd)
return cmd
else:
cmd += c
except:
self.close()
raise
def send_cmd(self, cmd):
try:
self.sock.send(cmd + '\0')
debug_log("sent to " + str(self.sock.fileno()) + ": " + cmd)
except:
self.close()
raise
def send_msg(self, msg):
try:
self.sock.send(str(len(msg))+'\0'+msg+'\0')
debug_log("sent to " + str(self.sock.fileno()) + ": " + msg)
except:
self.close()
raise
def get_sockfd(self):
if self.sock:
return self.sock.fileno()
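# Wire format handled by DBGpConn, as implemented above: messages from xdebug
# arrive as "<decimal length>\0<xml body>\0", while commands are sent as a
# single NUL-terminated string, e.g. "run -i 1\0" (the example command is
# illustrative).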
config = PhpshConfig()
try:
config.read()
except Exception, msg:
pass
tracing_enabled = (config.get_option("Debugging", "LogDBGp") == "yes")
if len(sys.argv) < 5:
debug_log("dbgp called with %d arguments, 4 required, exiting..." %
(len(sys.argv)-1))
sys.exit(1)
try:
p_in = os.fdopen(int(sys.argv[1]), "r", 0) # read end of pipe from parent
try:
os.close(int(sys.argv[2])) # write end of "in" pipe
except OSError:
pass
try:
os.close(int(sys.argv[3])) # read end of "out" pipe
except OSError:
pass
p_out = os.fdopen(int(sys.argv[4]), "w", 0) # write end of pipe to parent
except Exception, msg:
debug_log("Caught an exception while parsing arguments, exiting: " +
str(msg))
sys.exit(1)
# do not die on SIGINT, SIGPIPE
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
try:
proxy = PhpshDebugProxy(config, p_in, p_out)
except Exception, msg:
debug_log("failed to initialize: " + str(msg))
p_out.write("failed to initialize: " + str(msg))
sys.exit(1)
proxy.run()
if proxy.client:
debug_log("Closing client")
proxy.client.close()
else:
debug_log("No client to close")
| joshbedo/phpsh | src/dbgp-phpsh.py | Python | bsd-3-clause | 34,783 | 0.004226 |
#!/usr/bin/env python3
import logging
import socket
import sys
from zeroconf import ServiceInfo, Zeroconf, __version__
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
if len(sys.argv) > 1:
assert sys.argv[1:] == ['--debug']
logging.getLogger('zeroconf').setLevel(logging.DEBUG)
# Test a few module features, including service registration, service
# query (for Zoe), and service unregistration.
print(f"Multicast DNS Service Discovery for Python, version {__version__}")
r = Zeroconf()
print("1. Testing registration of a service...")
desc = {'version': '0.10', 'a': 'test value', 'b': 'another value'}
addresses = [socket.inet_aton("127.0.0.1")]
expected = {'127.0.0.1'}
if socket.has_ipv6:
addresses.append(socket.inet_pton(socket.AF_INET6, '::1'))
expected.add('::1')
info = ServiceInfo(
"_http._tcp.local.",
"My Service Name._http._tcp.local.",
addresses=addresses,
port=1234,
properties=desc,
)
print(" Registering service...")
r.register_service(info)
print(" Registration done.")
print("2. Testing query of service information...")
print(" Getting ZOE service: %s" % (r.get_service_info("_http._tcp.local.", "ZOE._http._tcp.local.")))
print(" Query done.")
print("3. Testing query of own service...")
queried_info = r.get_service_info("_http._tcp.local.", "My Service Name._http._tcp.local.")
assert queried_info
assert set(queried_info.parsed_addresses()) == expected
print(f" Getting self: {queried_info}")
print(" Query done.")
print("4. Testing unregister of service information...")
r.unregister_service(info)
print(" Unregister done.")
r.close()
| jstasiak/python-zeroconf | examples/self_test.py | Python | lgpl-2.1 | 1,785 | 0.00112 |
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import division, absolute_import, print_function
"""Gets genres for imported music based on Last.fm tags.
Uses a provided whitelist file to determine which tags are valid genres.
The included (default) genre list was originally produced by scraping Wikipedia
and has been edited to remove some questionable entries.
The scraper script used is available here:
https://gist.github.com/1241307
"""
import pylast
import os
import yaml
import traceback
from beets import plugins
from beets import ui
from beets import config
from beets.util import normpath, plurality
from beets import library
LASTFM = pylast.LastFMNetwork(api_key=plugins.LASTFM_KEY)
PYLAST_EXCEPTIONS = (
pylast.WSError,
pylast.MalformedResponseError,
pylast.NetworkError,
)
REPLACE = {
u'\u2010': '-',
}
def deduplicate(seq):
"""Remove duplicates from sequence wile preserving order.
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
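# Example (illustrative): deduplicate(['rock', 'pop', 'rock']) -> ['rock', 'pop'].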
# Canonicalization tree processing.
def flatten_tree(elem, path, branches):
"""Flatten nested lists/dictionaries into lists of strings
(branches).
"""
if not path:
path = []
if isinstance(elem, dict):
for (k, v) in elem.items():
flatten_tree(v, path + [k], branches)
elif isinstance(elem, list):
for sub in elem:
flatten_tree(sub, path, branches)
else:
branches.append(path + [unicode(elem)])
def find_parents(candidate, branches):
"""Find parents genre of a given genre, ordered from the closest to
the further parent.
"""
for branch in branches:
try:
idx = branch.index(candidate.lower())
return list(reversed(branch[:idx + 1]))
except ValueError:
continue
return [candidate]
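# Illustrative example of the two helpers above (the tree contents are made up):
# flatten_tree({'rock': ['alternative rock', {'metal': ['black metal']}]}, [], branches)
# fills branches with ['rock', 'alternative rock'] and ['rock', 'metal', 'black metal'];
# find_parents('black metal', branches) then returns
# ['black metal', 'metal', 'rock'], i.e. the genre itself followed by its parents
# from closest to furthest.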
# Main plugin logic.
WHITELIST = os.path.join(os.path.dirname(__file__), 'genres.txt')
C14N_TREE = os.path.join(os.path.dirname(__file__), 'genres-tree.yaml')
class LastGenrePlugin(plugins.BeetsPlugin):
def __init__(self):
super(LastGenrePlugin, self).__init__()
self.config.add({
'whitelist': True,
'min_weight': 10,
'count': 1,
'fallback': None,
'canonical': False,
'source': 'album',
'force': True,
'auto': True,
'separator': u', ',
})
self.setup()
def setup(self):
"""Setup plugin from config options
"""
if self.config['auto']:
self.import_stages = [self.imported]
self._genre_cache = {}
# Read the whitelist file if enabled.
self.whitelist = set()
wl_filename = self.config['whitelist'].get()
if wl_filename in (True, ''): # Indicates the default whitelist.
wl_filename = WHITELIST
if wl_filename:
wl_filename = normpath(wl_filename)
with open(wl_filename, 'r') as f:
for line in f:
line = line.decode('utf8').strip().lower()
if line and not line.startswith(u'#'):
self.whitelist.add(line)
# Read the genres tree for canonicalization if enabled.
self.c14n_branches = []
c14n_filename = self.config['canonical'].get()
if c14n_filename in (True, ''): # Default tree.
c14n_filename = C14N_TREE
if c14n_filename:
c14n_filename = normpath(c14n_filename)
genres_tree = yaml.load(open(c14n_filename, 'r'))
flatten_tree(genres_tree, [], self.c14n_branches)
@property
def sources(self):
"""A tuple of allowed genre sources. May contain 'track',
'album', or 'artist.'
"""
source = self.config['source'].as_choice(('track', 'album', 'artist'))
if source == 'track':
return 'track', 'album', 'artist'
elif source == 'album':
return 'album', 'artist'
elif source == 'artist':
return 'artist',
def _resolve_genres(self, tags):
"""Given a list of strings, return a genre by joining them into a
single string and (optionally) canonicalizing each.
"""
if not tags:
return None
count = self.config['count'].get(int)
if self.c14n_branches:
# Extend the list to consider tags parents in the c14n tree
tags_all = []
for tag in tags:
# Add parents that are in the whitelist, or add the oldest
# ancestor if no whitelist
if self.whitelist:
parents = [x for x in find_parents(tag, self.c14n_branches)
if self._is_allowed(x)]
else:
parents = [find_parents(tag, self.c14n_branches)[-1]]
tags_all += parents
if len(tags_all) >= count:
break
tags = tags_all
tags = deduplicate(tags)
# c14n only adds allowed genres but we may have had forbidden genres in
# the original tags list
tags = [x.title() for x in tags if self._is_allowed(x)]
return self.config['separator'].get(unicode).join(
tags[:self.config['count'].get(int)]
)
def fetch_genre(self, lastfm_obj):
"""Return the genre for a pylast entity or None if no suitable genre
can be found. Ex. 'Electronic, House, Dance'
"""
min_weight = self.config['min_weight'].get(int)
return self._resolve_genres(self._tags_for(lastfm_obj, min_weight))
def _is_allowed(self, genre):
"""Determine whether the genre is present in the whitelist,
returning a boolean.
"""
if genre is None:
return False
if not self.whitelist or genre in self.whitelist:
return True
return False
# Cached entity lookups.
def _last_lookup(self, entity, method, *args):
"""Get a genre based on the named entity using the callable `method`
whose arguments are given in the sequence `args`. The genre lookup
is cached based on the entity name and the arguments. Before the
        lookup, each argument has some Unicode characters replaced with
rough ASCII equivalents in order to return better results from the
Last.fm database.
"""
# Shortcut if we're missing metadata.
if any(not s for s in args):
return None
key = u'{0}.{1}'.format(entity, u'-'.join(unicode(a) for a in args))
if key in self._genre_cache:
return self._genre_cache[key]
else:
args_replaced = []
for arg in args:
for k, v in REPLACE.items():
arg = arg.replace(k, v)
args_replaced.append(arg)
genre = self.fetch_genre(method(*args_replaced))
self._genre_cache[key] = genre
return genre
def fetch_album_genre(self, obj):
"""Return the album genre for this Item or Album.
"""
return self._last_lookup(
u'album', LASTFM.get_album, obj.albumartist, obj.album
)
def fetch_album_artist_genre(self, obj):
"""Return the album artist genre for this Item or Album.
"""
return self._last_lookup(
u'artist', LASTFM.get_artist, obj.albumartist
)
def fetch_artist_genre(self, item):
"""Returns the track artist genre for this Item.
"""
return self._last_lookup(
u'artist', LASTFM.get_artist, item.artist
)
def fetch_track_genre(self, obj):
"""Returns the track genre for this Item.
"""
return self._last_lookup(
u'track', LASTFM.get_track, obj.artist, obj.title
)
def _get_genre(self, obj):
"""Get the genre string for an Album or Item object based on
self.sources. Return a `(genre, source)` pair. The
prioritization order is:
- track (for Items only)
- album
- artist
- original
- fallback
- None
"""
# Shortcut to existing genre if not forcing.
if not self.config['force'] and self._is_allowed(obj.genre):
return obj.genre, 'keep'
# Track genre (for Items only).
if isinstance(obj, library.Item):
if 'track' in self.sources:
result = self.fetch_track_genre(obj)
if result:
return result, 'track'
# Album genre.
if 'album' in self.sources:
result = self.fetch_album_genre(obj)
if result:
return result, 'album'
# Artist (or album artist) genre.
if 'artist' in self.sources:
result = None
if isinstance(obj, library.Item):
result = self.fetch_artist_genre(obj)
elif obj.albumartist != config['va_name'].get(unicode):
result = self.fetch_album_artist_genre(obj)
else:
# For "Various Artists", pick the most popular track genre.
item_genres = []
for item in obj.items():
item_genre = None
if 'track' in self.sources:
item_genre = self.fetch_track_genre(item)
if not item_genre:
item_genre = self.fetch_artist_genre(item)
if item_genre:
item_genres.append(item_genre)
if item_genres:
result, _ = plurality(item_genres)
if result:
return result, 'artist'
# Filter the existing genre.
if obj.genre:
result = self._resolve_genres([obj.genre])
if result:
return result, 'original'
# Fallback string.
fallback = self.config['fallback'].get()
if fallback:
return fallback, 'fallback'
return None, None
def commands(self):
lastgenre_cmd = ui.Subcommand('lastgenre', help=u'fetch genres')
lastgenre_cmd.parser.add_option(
u'-f', u'--force', dest='force',
action='store_true', default=False,
help=u're-download genre when already present'
)
lastgenre_cmd.parser.add_option(
u'-s', u'--source', dest='source', type='string',
help=u'genre source: artist, album, or track'
)
def lastgenre_func(lib, opts, args):
write = ui.should_write()
self.config.set_args(opts)
for album in lib.albums(ui.decargs(args)):
album.genre, src = self._get_genre(album)
self._log.info(u'genre for album {0} ({1}): {0.genre}',
album, src)
album.store()
for item in album.items():
# If we're using track-level sources, also look up each
# track on the album.
if 'track' in self.sources:
item.genre, src = self._get_genre(item)
item.store()
self._log.info(u'genre for track {0} ({1}): {0.genre}',
item, src)
if write:
item.try_write()
lastgenre_cmd.func = lastgenre_func
return [lastgenre_cmd]
def imported(self, session, task):
"""Event hook called when an import task finishes."""
if task.is_album:
album = task.album
album.genre, src = self._get_genre(album)
self._log.debug(u'added last.fm album genre ({0}): {1}',
src, album.genre)
album.store()
if 'track' in self.sources:
for item in album.items():
item.genre, src = self._get_genre(item)
self._log.debug(u'added last.fm item genre ({0}): {1}',
src, item.genre)
item.store()
else:
item = task.item
item.genre, src = self._get_genre(item)
self._log.debug(u'added last.fm item genre ({0}): {1}',
src, item.genre)
item.store()
def _tags_for(self, obj, min_weight=None):
"""Core genre identification routine.
Given a pylast entity (album or track), return a list of
tag names for that entity. Return an empty list if the entity is
not found or another error occurs.
If `min_weight` is specified, tags are filtered by weight.
"""
# Work around an inconsistency in pylast where
# Album.get_top_tags() does not return TopItem instances.
# https://code.google.com/p/pylast/issues/detail?id=85
if isinstance(obj, pylast.Album):
obj = super(pylast.Album, obj)
try:
res = obj.get_top_tags()
except PYLAST_EXCEPTIONS as exc:
self._log.debug(u'last.fm error: {0}', exc)
return []
except Exception as exc:
# Isolate bugs in pylast.
self._log.debug(u'{}', traceback.format_exc())
self._log.error(u'error in pylast library: {0}', exc)
return []
# Filter by weight (optionally).
if min_weight:
res = [el for el in res if (int(el.weight or 0)) >= min_weight]
# Get strings from tags.
res = [el.item.get_name().lower() for el in res]
return res
| bbsan2k/nzbToMedia | libs/beetsplug/lastgenre/__init__.py | Python | gpl-3.0 | 14,453 | 0 |
from django.conf.urls import url
from . import views
from django.views.decorators.cache import cache_page
app_name = 'webinter'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^logout/$', views.logout_view, name='logout'),
]
| ipriver/0x71aBot-Web-Interface | webinter/urls.py | Python | mit | 262 | 0 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class ScroogeGenTest(PantsRunIntegrationTest):
@classmethod
def hermetic(cls):
return True
def run_pants(self, command, config=None, stdin_data=None, extra_env=None, **kwargs):
full_config = {
'GLOBAL': {
'pythonpath': ["%(buildroot)s/contrib/scrooge/src/python"],
'backend_packages': ["pants.backend.codegen", "pants.backend.jvm", "pants.contrib.scrooge"]
},
}
if config:
for scope, scoped_cfgs in config.items():
updated = full_config.get(scope, {})
updated.update(scoped_cfgs)
full_config[scope] = updated
return super(ScroogeGenTest, self).run_pants(command, full_config, stdin_data, extra_env,
**kwargs)
@staticmethod
def thrift_test_target(name):
return 'contrib/scrooge/tests/thrift/org/pantsbuild/contrib/scrooge/scrooge_gen:' + name
def test_good(self):
# scrooge_gen should pass with correct thrift files.
cmd = ['gen', self.thrift_test_target('good-thrift')]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
def test_namespace_map(self):
# scrooge_gen should pass with namespace_map specified
cmd = ['gen', self.thrift_test_target('namespace-map-thrift')]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
def test_default_java_namespace(self):
# scrooge_gen should pass with default_java_namespace specified
cmd = ['gen', self.thrift_test_target('default-java-namespace-thrift')]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
def test_include_paths(self):
# scrooge_gen should pass with include_paths specified
cmd = ['gen', self.thrift_test_target('include-paths-thrift')]
pants_run = self.run_pants(cmd)
self.assert_success(pants_run)
| pombredanne/pants | contrib/scrooge/tests/python/pants_test/contrib/scrooge/tasks/test_scrooge_gen_integration.py | Python | apache-2.0 | 2,191 | 0.007303 |
import torch as t
from torch.autograd import Variable as V
from torch import FloatTensor as FT
import numpy as np
from bayestorch.hmc import HMCSampler
class SimpleTrainer:
def __init__(self, env,critic,hallucinator,policy_buffer,policy_c, noise_dim):
self.env = env
self.hallucinator = hallucinator
self.critic = critic
self.policy_buffer = policy_buffer
self.policy_c = policy_c
self.noise_dim = noise_dim
def train(self, train_steps,sample_steps,opt_steps):
in_dim=self.env.obs_size
out_dim=self.env.action_size
cur_policy = self.policy_c(in_dim,out_dim)
for i in range(train_steps):
reward = self.sample_episode(cur_policy)
self.policy_buffer.put(cur_policy.state_dict(),reward)
self.train_critic_hallucinator(sample_steps)
self.train_policy(opt_steps)
def sample_episode(self, policy,n=1,skip = 3):
done = False
total_reward = 0
for i in range(n):
cur_obs = self.env.new_episode()
t = 0
while not done:
cur_obs = V(FT(cur_obs)).unsqueeze(0)
display = (t % skip == 0)
cur_action = policy.forward(cur_obs).data.cpu().numpy()
cur_obs,cur_reward,done = self.env.next_obs(cur_action.squeeze(0), render = display)
total_reward += cur_reward
t += 1
avg_episode_reward = total_reward / n
return avg_episode_reward
def train_critic_hallucinator(self,sample_steps):
def closure_gen():
yield (lambda: self.critic.get_prior_llh())
for state_dict,reward in self.policy_buffer:
policy = self.policy_c(self.env.obs_size, self.env.action_size)
policy.load_state_dict(state_dict)
def closure():
                    noise = V(FT(np.random.randn(self.noise_dim)))
                    states = self.hallucinator.forward(noise.unsqueeze(0))
                    # Merge the batch dimension (currently 1) and the
                    # hallucinator's n states into a single dimension.
                    states = states.view(states.size(0)*self.hallucinator.n, -1)
                    actions = policy.forward(states)
                    actions = actions.view(1, -1)
                    states = states.view(1, -1)
                    mean = self.critic(states, actions)[0]
                    lsd = self.critic(states, actions)[0]
                    llh = gaussian_llh(mean, lsd, reward)
                    return llh
yield closure
params = self.critic.parameter_list() \
+ self.hallucinator.parameter_list()
sampler = HMCSampler(params)
for i in range(sample_steps):
sampler.step(closure_gen)
def train_policy(self,opt_steps):
state_dict, _ = self.policy_buffer.peek()
policy = self.policy_c(self.env.obs_size, self.env.action_size)
policy.load_state_dict(state_dict)
opt = t.optim.SGD(policy.parameters(), lr=0.001)
# This is bad just have one goddamnit
def closure():
noise=V(FT(np.random.randn(self.noise_dim)))
states = self.hallucinator.forward(noise.unsqueeze(0))
            # Merge the batch dimension (currently 1) and the hallucinator's
            # n states into a single dimension.
states = states.view(states.size(0)*self.hallucinator.n, -1)
actions = policy.forward(states)
actions = actions.view(1,-1)
states = states.view(1,-1)
reward = self.critic(states,actions)[0]
return reward
for i in range(opt_steps):
opt.zero_grad()
opt.step(closure)
return policy.state_dict()
def gaussian_llh(mean,log_std_dev,reward):
llh = -(mean-reward)**2 - 2*log_std_dev
return llh
| fizz-ml/policybandit | trainer.py | Python | mit | 3,859 | 0.011143 |
import hashlib
import struct
class DNSparam:
"""Class to encapsulate some DNS parameter types (type, class etc)"""
def __init__(self, prefix, name2val):
self.name2val = name2val
self.val2name = dict([(y,x) for (x,y) in name2val.items()])
self.prefix = prefix
self.prefix_offset = len(prefix)
def get_name(self, val):
"""given code (value), return text name of dns parameter"""
if self.prefix:
return self.val2name.get(val, "%s%d" % (self.prefix, val))
else:
return self.val2name[val]
def get_val(self, name):
"""given text name, return code (value) of a dns parameter"""
if self.prefix and name.startswith(self.prefix):
return int(name[self.prefix_offset:])
else:
return self.name2val[name]
# DNS Resource Record Types
DICT_RRTYPE = {
"A": 1,
"NS": 2,
"MD": 3,
"MF": 4,
"CNAME": 5,
"SOA": 6,
"MB": 7,
"MG": 8,
"MR": 9,
"NULL": 10,
"WKS": 11,
"PTR": 12,
"HINFO": 13,
"MINFO": 14,
"MX": 15,
"TXT": 16,
"RP": 17,
"AFSDB": 18,
"X25": 19,
"ISDN": 20,
"RT": 21,
"NSAP": 22,
"NSAP-PTR": 23,
"SIG": 24,
"KEY": 25,
"PX": 26,
"GPOS": 27,
"AAAA": 28,
"LOC": 29,
"NXT": 30,
"EID": 31,
"NIMLOC": 32,
"SRV": 33,
"ATMA": 34,
"NAPTR": 35,
"KX": 36,
"CERT": 37,
"A6": 38,
"DNAME": 39,
"SINK": 40,
"OPT": 41,
"APL": 42,
"DS": 43,
"SSHFP": 44,
"IPSECKEY": 45,
"RRSIG": 46,
"NSEC": 47,
"DNSKEY": 48,
"DHCID": 49,
"NSEC3": 50,
"NSEC3PARAM": 51,
"TLSA": 52,
"HIP": 55,
"NINFO": 56,
"RKEY": 57,
"TALINK": 58,
"CDS": 59,
"CDNSKEY": 60,
"OPENPGPKEY": 61,
"SPF": 99,
"UINFO": 100,
"UID": 101,
"GID": 102,
"UNSPEC": 103,
"NID": 104,
"L32": 105,
"L64": 106,
"LP": 107,
"EUI48": 108,
"EUI64": 109,
"TKEY": 249,
"TSIG": 250,
"IXFR": 251,
"AXFR": 252,
"MAILB": 253,
"MAILA": 254,
"ANY": 255,
"URI": 256,
"CAA": 257,
"TA": 32768,
"DLV": 32769,
}
DICT_RRCLASS = {
"IN": 1,
"CH": 3,
"HS": 4,
"ANY": 255,
}
# DNS Response Codes
DICT_RCODE = {
"NOERROR": 0,
"FORMERR": 1,
"SERVFAIL": 2,
"NXDOMAIN": 3,
"NOTIMPL": 4,
"REFUSED": 5,
"NOTAUTH": 9,
"BADVERS": 16,
"BADKEY": 17,
"BADTIME": 18,
"BADMODE": 19,
"BADNAME": 20,
"BADALG": 21,
"BADTRUNC": 22,
}
# Instantiate the DNS parameter classes at the module level, since they
# are used by a variety of module routines.
qt = DNSparam("TYPE", DICT_RRTYPE)
qc = DNSparam("CLASS", DICT_RRCLASS)
rc = DNSparam("RCODE", DICT_RCODE)
# DNSSEC Protocol Numbers
dnssec_proto = {
0: "Reserved",
1: "TLS",
2: "Email",
3: "DNSSEC",
4: "IPSEC",
}
# DNSSEC Algorithms
dnssec_alg = {
0: "Reserved",
1: "RSAMD5",
2: "DH",
3: "DSA",
4: "Reserved",
5: "RSASHA1",
6: "DSA-NSEC3-SHA1",
7: "RSASHA1-NSEC3-SHA1",
8: "RSASHA256",
10: "RSASHA512",
12:"ECC-GOST",
13:"ECDSAP256SHA256",
14:"ECDSAP384SHA384",
}
# DNSSEC Digest algorithms (see RFC 4509 and RFC 6605)
dnssec_digest = {
1: "SHA-1",
2: "SHA-256",
4: "SHA-384",
}
# SSH Fingerprint algorithms (see RFC 4255)
sshfp_alg = {
1: "RSA",
2: "DSA",
3: "ECDSA",
4: "ED25519",
}
# SSHFP fingerprint types (see RFC 4255)
sshfp_fptype = {
1: "SHA-1",
2: "SHA-256",
}
| thomasleveil/pydig | pydiglib/dnsparam.py | Python | gpl-2.0 | 3,623 | 0.008556 |
#!/usr/bin/env python
#shallow linguistic kernel
import sys, os
import os.path
import xml.etree.ElementTree as ET
import logging
from optparse import OptionParser
import pickle
import operator
from subprocess import Popen, PIPE
from time import time
#from pandas import DataFrame
import numpy as np
from scipy.stats import mode
import platform
import re
import nltk
import nltk.data
from nltk.tree import Tree
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import wordnet
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import KFold
from sklearn import svm
from sklearn.feature_extraction.text import CountVectorizer
import sklearn.metrics as skm
from sklearn.preprocessing import normalize
from sklearn.preprocessing import MinMaxScaler
import relations
basedir = "models/ddi_models/"
temp_dir = "temp/"
def reparse_tree(line):
ptree = Tree.fromstring(line)
leaves = ptree.leaves()
def get_pair_instances(pair, pairtext):
pairinstances = []
#if the first candidate has more than one mention, each one is an instance
if len(pair[relations.PAIR_E1TOKENS]) > 1:
#logging.debug("%s e1 tokens", len(pairdic[ddi.PAIR_E1TOKENS]))
#create to instances for this pair
#print "a", [pairtext[t] for t in pairs[pair][ddi.PAIR_E1TOKENS]]
#print "b", [pairtext[t] for t in pairs[pair][ddi.PAIR_E2TOKENS]]
for idx in pair[relations.PAIR_E1TOKENS]:
temptokens = pairtext[:]
#for tidx in range(len(pairtext)):
# if tidx != idx and pairtext[tidx] == "#drug-candidatea#":
# temptokens.append("#drug-entity#")
# else:
# temptokens.append(pairtext[tidx])
for index, item in enumerate(temptokens):
if index != idx and item == "#drug-candidatea#":
temptokens[index] = "#drug-entity#"
pairinstances.append(temptokens[:])
else: # otherwise, consider just one instance for now
pairinstances.append(pairtext[:])
# if the second candidate has more than one mention, for each one of candidate1 mention,
# add another instance for each candidate 2 mention
    if len(pair[relations.PAIR_E2TOKENS]) > 1:
#logging.debug("%s e2 tokens", len(pairdic[ddi.PAIR_E2TOKENS]))
totalinstances = len(pairinstances)
#logging.debug("duplicating %s sentences", totalinstances)
        for idx in pair[relations.PAIR_E2TOKENS]:
for isent in range(totalinstances):
#logging.debug(' '.join(sent))
temptokens = pairinstances[isent][:]
for index, item in enumerate(temptokens):
if index != idx and item == "#drug-candidateb#":
temptokens[index] = "#drug-entity#"
#for tidx in range(len(sent)):
# if tidx != idx and pairtext[tidx] == "#drug-candidateb#":
# temptokens.append("#drug-entity#")
# else:
# temptokens.append(pairtext[tidx])
pairinstances.append(temptokens[:])
#print pairinstances
#originallen = len(pairinstances)
#duplicate number of instances for this pair, switching roles
#for i in range(originallen):
# inverted = pairinstances[i][:]
# for index, t in enumerate(inverted):
# if t == "#drug-candidatea#":
# inverted[i] = "#drug-candidateb#"
# elif t == "#drug-candidateb#":
# inverted[i] = "#drug-candidatea#"
# pairinstances.append(inverted[:])
return pairinstances
def generatejSRE_line(pairtext, pos, lemmas, ner):
candidates = [0,0]
body = ''
for it in range(len(pairtext)):
#for it in range(len(pairtext)):
if pairtext[it] == "#drug-candidatea#":
#print pairtext[i],
tokentype = 'DRUG'
#tokentype = etypes[0]
tokenlabel = 'A'
candidates[0] += 1
tokentext = "#candidate#"
#tokentext = entitytext[0]
#tokentext = pairtext[it].lstrip()
lemma = tokentext
elif pairtext[it] == "#drug-candidateb#":
#print pairtext[i]
tokentype = 'DRUG'
#tokentype = etypes[0]
tokenlabel = 'T'
tokentext = "#candidate#"
#tokentext = pairtext[it].lstrip()
#tokentext = entitytext[1]
lemma = tokentext
candidates[1] += 1
elif pairtext[it] == "#drug-entity#":
tokentype = 'DRUG'
tokenlabel = 'O'
tokentext = pairtext[it].lstrip()
lemma = tokentext
else:
# logging.debug("{}".format(pairtext[it].lstrip()))
tokentype = ner[it]
tokenlabel = 'O'
tokentext = pairtext[it].lstrip()
lemma = lemmas[it]
if tokentext == '-RRB-':
tokentext = ')'
lemma = ')'
elif tokentext == '-LRB-':
tokentext = '('
lemma = '('
#if ' ' in pairtext[it][0].lstrip() or '\n' in pairtext[it][0].lstrip():
# print "token with spaces!"
# print pairs[pair][ddi.PAIR_TOKENS][it][0].lstrip()
# sys.exit()
body += "&&".join([str(it), tokentext,
lemma,
pos[it],
tokentype, tokenlabel])
body += ' '
#logging.debug("%s\t%s\t%s", str(trueddi), pair, body)
if candidates[0] == 0:
logging.debug("missing first candidate on pair ")
body = "0&&#candidate#&&#candidate#&&-None-&&drug&&T " + body
#print body
elif candidates[1] == 0:
logging.debug("missing second candidate on pair")
#print body
body += " " + str(it+1) + "&&#candidate#&&#candidate#&&-None-&&drug&&T "
return body
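# Rough sketch of the jSRE example body produced above (the token text is
# invented for illustration): each token is encoded as
#   index&&token&&lemma&&POS&&entity-type&&role
# where role is A/T for the two candidate entities and O otherwise, e.g.
#   0&&#candidate#&&#candidate#&&NN&&DRUG&&A 1&&inhibits&&inhibit&&VBZ&&O&&O ...
# generatejSREdata() below prefixes each body with "<label>\t<pair id>.i0\t".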
def generatejSREdata(pairs, sentence, basemodel, savefile, train=False):
examplelines = []
for pair in pairs:
#logging.debug(pair)
e1id = pair.eids[0]
e2id = pair.eids[1]
sid = sentence.sid
sentence_tokens = [t.text for t in sentence.tokens]
#print pairtext,
if not pair.relation:
trueddi = 0
else:
trueddi = 1
#print pairtext
pos = [t.pos for t in sentence.tokens]
lemmas = [t.lemma for t in sentence.tokens]
ner = [t.tag for t in sentence.tokens]
logging.debug("{} {} {} {}".format(len(sentence_tokens), len(pos), len(lemmas), len(ner)))
pair_text, pos, lemmas, ner = blind_all_entities(sentence_tokens, sentence.entities.elist[basemodel],
[e1id, e2id], pos, lemmas, ner)
logging.debug("{} {} {} {}".format(len(pair_text), len(pos), len(lemmas), len(ner)))
#logging.debug("generating jsre lines...")
#for i in range(len(pairinstances)):
#body = generatejSRE_line(pairinstances[i], pos, stems, ner)
body = generatejSRE_line(pair_text, pos, lemmas, ner)
examplelines.append(str(trueddi) + '\t' + pair.pid + '.i' + '0\t' + body + '\n')
#print body
#elif candidates[0] > 1 or candidates[1] > 1:
# print "multiple candidates!!", pairtext
# logging.debug("writing to file...")
with open(temp_dir + savefile, 'w') as trainfile:
for l in examplelines:
#print l
trainfile.write(l)
# logging.info("wrote " + temp_dir + savefile)
def compact_id(eid):
return eid.replace('.', '').replace('-', '')
def blind_all_entities(tokens, entities, eids, pos, lemmas, ner):
# logging.info(eids)
ogtokens = tokens[:]
found1 = 0
found2 = 0
for e in entities:
if e.eid == eids[0]:
first_token = e.tokens[0].order + found1 + found2
# logging.debug("{} {} {} {}".format(tokens[first_token], pos[first_token], lemmas[first_token], ner[first_token]))
tokens = tokens[:first_token] + ["#drug-candidatea#"] + tokens[first_token:]
            pos = pos[:first_token] + [pos[first_token]] + pos[first_token:]
            lemmas = lemmas[:first_token] + [lemmas[first_token]] + lemmas[first_token:]
            ner = ner[:first_token] + [ner[first_token]] + ner[first_token:]
# logging.debug("found e1 {} {} {} {}".format(len(tokens), len(pos), len(lemmas), len(ner)))
found1 += 1
elif e.eid == eids[1]:
first_token = e.tokens[0].order + found1 + found2
# logging.debug("{} {} {} {}".format(tokens[first_token], pos[first_token], lemmas[first_token], ner[first_token]))
tokens = tokens[:first_token] + ["#drug-candidateb#"] + tokens[first_token:]
pos = pos[:first_token] + [pos[first_token]] + pos[first_token:]
lemmas = lemmas[:first_token] + [lemmas[first_token]] + lemmas[first_token:]
ner = ner[:first_token] + [ner[first_token]] + ner[first_token:]
# logging.debug("found e2 {} {} {} {}".format(len(tokens), len(pos), len(lemmas), len(ner)))
found2 += 1
else:
tokens[e.tokens[0].order] = "#drug-entity#"
#print "found other drug"
if (not found1 or not found2):
logging.warning("ddi_preprocess: could not find one of the pairs here!")
logging.warning(tokens)
logging.warning(ogtokens)
logging.info([(e.text, e.eid) for e in entities if e.eid in eids])
sys.exit()
# logging.debug("{} {} {} {}".format(len(tokens), len(pos), len(lemmas), len(ner)))
return tokens, pos, lemmas, ner
def trainjSRE(inputfile, model="slk_classifier.model"):
if os.path.isfile("ddi_models/" + model):
print "removed old model"
os.remove("ddi_models/" + model)
if not os.path.isfile(temp_dir + inputfile):
print "could not find training file " + basedir + inputfile
sys.exit()
if platform.system() == "Windows":
sep = ";"
else:
sep = ":"
libs = ["libsvm-2.8.jar", "log4j-1.2.8.jar", "commons-digester.jar", "commons-beanutils.jar", "commons-logging.jar", "commons-collections.jar"]
classpath = 'jsre/jsre-1.1/bin/' + sep + sep.join(["jsre/jsre-1.1/lib/" + l for l in libs])
jsrecall = ['java', '-mx8g', '-classpath', classpath, "org.itc.irst.tcc.sre.Train",
"-k", "SL", "-n", "4", "-w", "3", "-m", "4098", "-c", "2",
temp_dir + inputfile, basedir + model]
#print " ".join(jsrecall)
jsrecall = Popen(jsrecall, stdout = PIPE, stderr = PIPE)
res = jsrecall.communicate()
if not os.path.isfile("ddi_models/" + model):
print "error with jsre!"
print res[1]
sys.exit()
else:
statinfo = os.stat("ddi_models/" + model)
if statinfo.st_size == 0:
print "error with jsre! model has 0 bytes"
print res[0]
print res[1]
sys.exit()
#logging.debug(res)
def testjSRE(inputfile, outputfile, model="slk_classifier.model"):
if os.path.isfile(temp_dir + outputfile):
os.remove(temp_dir + outputfile)
if not os.path.isfile(basedir + model):
print "model", basedir + model, "not found"
sys.exit()
if platform.system() == "Windows":
sep = ";"
else:
sep = ":"
#logging.debug("testing %s with %s to %s", temp_dir + inputfile,
# basedir + model, temp_dir + outputfile)
libs = ["libsvm-2.8.jar", "log4j-1.2.8.jar", "commons-digester.jar", "commons-beanutils.jar", "commons-logging.jar", "commons-collections.jar"]
classpath = 'bin/jsre/jsre-1.1/bin/' + sep + sep.join(["bin/jsre/jsre-1.1/lib/" + l for l in libs])
jsrecommand = ['java', '-mx4g', '-classpath', classpath, "org.itc.irst.tcc.sre.Predict",
temp_dir + inputfile, basedir + model, temp_dir + outputfile]
#print ' '.join(jsrecommand)
jsrecall = Popen(jsrecommand, stdout = PIPE, stderr = PIPE)
res = jsrecall.communicate()
#logging.debug(res[0].strip().split('\n')[-2:])
#os.system(' '.join(jsrecommand))
if not os.path.isfile(temp_dir + outputfile):
print "something went wrong with JSRE!"
print res
sys.exit()
#logging.debug("done.")
def getjSREPredicitons(examplesfile, resultfile, pairs):
#pred_y = []
with open(temp_dir + resultfile, 'r') as resfile:
pred = resfile.readlines()
with open(temp_dir + examplesfile, 'r') as trainfile:
original = trainfile.readlines()
if len(pred) != len(original):
print "different number of predictions!"
sys.exit()
temppreds = {}
for i in range(len(pred)):
original_tsv = original[i].split('\t')
# logging.debug(original_tsv)
pid = '.'.join(original_tsv[1].split('.')[:-1])
if pid not in pairs:
print "pair not in pairs!"
print pid
print pairs
sys.exit()
#confirm that everything makes sense
# true = float(original_tsv[0])
# if true == 0:
# true = -1
p = float(pred[i].strip())
if p == 0:
p = -1
if p == 2:
print "p=2!"
p = 1
logging.debug("{} - {} SLK: {}".format(pairs[pid].entities[0], pairs[pid].entities[1], p))
#if pair not in temppreds:
# temppreds[pair] = []
#temppreds[pair].append(p)
pairs[pid].recognized_by[relations.SLK_PRED] = p
'''for pair in temppreds:
if relations.SLK_PRED not in pairs[pair]:
pairs[pair][relations.SLK_PRED] = {}
p = mode(temppreds[pair])[0][0]
if len(set(temppreds[pair])) > 1:
print temppreds[pair], p
pairs[pair][relations.SLK_PRED][dditype] = p
#if pairs[pair][ddi.SLK_PRED][dditype] and not pairs[pair][ddi.SLK_PRED]["all"]:
# logging.info("type classifier %s found a new true pair: %s", dditype, pair)
for pair in pairs:
if relations.SLK_PRED not in pairs[pair]:
pairs[pair][relations.SLK_PRED] = {}
if dditype not in pairs[pair][relations.SLK_PRED]:
pairs[pair][relations.SLK_PRED][dditype] = -1'''
return pairs
def get_wordnet_pos(treebank_tag):
if treebank_tag.startswith('J'):
return wordnet.ADJ
elif treebank_tag.startswith('V'):
return wordnet.VERB
elif treebank_tag.startswith('N'):
return wordnet.NOUN
elif treebank_tag.startswith('R'):
return wordnet.ADV
else:
return wordnet.NOUN
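# Illustration (added comment): Penn Treebank tags are matched only on their first
# letter, so get_wordnet_pos('VBD') -> wordnet.VERB, get_wordnet_pos('JJ') ->
# wordnet.ADJ, and anything unrecognised (e.g. 'IN') falls back to wordnet.NOUN.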
def get_svm_train_line(tree, pair, sid):
lmtzr = WordNetLemmatizer()
e1id = compact_id(pair.eids[0])
e2id = compact_id(pair.eids[1])
tree = tree.replace(pair.entities[0].tokens[0].text, 'candidatedrug')
tree = tree.replace(pair.entities[1].tokens[0].text, 'candidatedrug')
#tree = tree.replace(sid.replace('.', '').replace('-', '') + 'e', 'otherdrug')
sid2 = compact_id(sid) + 'e'
# TODO: replace other entities
#tree = rext.sub(sid2 + r'\d+', 'otherdrug', tree)
#print "tree2:", tree
if tree[0] != '(':
tree = '(S (' + tree + ' NN))'
#this depends on the version of nlkt
ptree = Tree.fromstring(tree)
#ptree = Tree.parse(tree)
leaves = list(ptree.pos())
lemmaleaves = []
for t in leaves:
pos = get_wordnet_pos(t[1])
lemma = lmtzr.lemmatize(t[0].lower(), pos)
lemmaleaves.append(lemma)
#lemmaleaves = [ for t in leaves)]
logging.debug("tree:" + tree)
line = '1 '
line += '|BT|' + tree
#bowline = '(BOW (' + ' *)('.join(lemmaleaves) + ' *)) '
#ptree = Tree.parse(bowline)
#ptree = ptree.pprint(indent=-1000)
#bowline = ptree.replace('\n', ' ')
#bowline = '|BT| ' + bowline
#if not bowline.count("otherdrug") > 8:
# line += bowline
#else:
#print "problem with BOW!"
#line += bowline
line += '|ET| '
#i = 1
#for m in docsp[ddi.PAIR_SSM_VECTOR]:
# line += " %s:%s" % (i, m)
# i += 1
#line += " 2:" + str()
#line += " |EV|"
line += '\n'
return line
def trainSVMTK(docs, pairs, dditype, model="svm_tk_classifier.model", excludesentences=[]):
if os.path.isfile("ddi_models/" + model):
os.remove("ddi_models/" + model)
if os.path.isfile("ddi_models/" + model + ".txt"):
os.remove("ddi_models/" + model + ".txt")
#docs = use_external_data(docs, excludesentences, dditype)
xerrors = 0
with open("ddi_models/" + model + ".txt", 'w') as train:
#print pairs
for p in pairs:
if dditype != "all" and pairs[p][relations.PAIR_DDI] and pairs[p][relations.PAIR_TYPE] != dditype:
continue
sid = relations.getSentenceID(p)
if sid not in excludesentences:
tree = pairs[p][relations.PAIR_DEP_TREE][:]
#print "tree1:", tree
#if len(docs[sid][ddi.SENTENCE_ENTITIES]) > 20:
#print line
# line = "1 |BT| (ROOT (NP (NN candidatedrug) (, ,) (NN candidatedrug))) |ET|"
# xerrors += 1
#else:
                line = get_svm_train_line(tree, pairs[p], sid)
if not pairs[p][relations.PAIR_DDI]:
line = '-' + line
elif pairs[p][relations.PAIR_TYPE] != dditype and dditype != "all":
line = '-' + line
train.write(line)
#print "tree errors:", xerrors
svmlightcall = Popen(["./svm-light-TK-1.2/svm-light-TK-1.2.1/svm_learn", "-t", "5",
"-L", "0.4", "-T", "2", "-S", "2", "-g", "10",
"-D", "0", "-C", "T", basedir + model + ".txt", basedir + model],
stdout = PIPE, stderr = PIPE)
res = svmlightcall.communicate()
if not os.path.isfile("ddi_models/" + model):
print "failed training model " + basedir + model
print res
sys.exit()
def testSVMTK(sentence, pairs, pairs_list, model="svm_tk_classifier.model", tag=""):
if os.path.isfile(basedir + tag + "svm_test_data.txt"):
os.remove(basedir + tag + "svm_test_data.txt")
if os.path.isfile(basedir + tag + "svm_test_output.txt"):
os.remove(basedir + tag + "svm_test_output.txt")
#docs = use_external_data(docs, excludesentences, dditype)
#pidlist = pairs.keys()
total = 0
with open(temp_dir + tag + "svm_test_data.txt", 'w') as test:
for pid in pairs:
sid = pairs[pid].sid
tree = sentence.parsetree
#if len(docs[sid][ddi.SENTENCE_ENTITIES]) > 30:
#print line
#line = reparse_tree(line)
# line = "1 |BT| (ROOT (NP (NN candidatedrug) (, ,) (NN candidatedrug))) |ET|\n"
# xerrors += 1
#else:
line = get_svm_train_line(tree, pairs[pid], sid)
line = '-' + line
test.write(line)
total += 1
#print "tree errors:", xerrors, "total:", total
svmtklightargs = ["./bin/svm-light-TK-1.2/svm-light-TK-1.2.1/svm_classify",
temp_dir + tag + "svm_test_data.txt", basedir + model,
temp_dir + tag + "svm_test_output.txt"]
svmlightcall = Popen(svmtklightargs, stdout=PIPE, stderr=PIPE)
res = svmlightcall.communicate()
# logging.debug(res[0].split('\n')[-3:])
#os.system(' '.join(svmtklightargs))
if not os.path.isfile(temp_dir + tag + "svm_test_output.txt"):
print "something went wrong with SVM-light-TK"
print res
sys.exit()
with open(temp_dir + tag + "svm_test_output.txt", 'r') as out:
lines = out.readlines()
if len(lines) != len(pairs_list):
print "check " + tag + "svm_test_output.txt! something is wrong"
print res
sys.exit()
for p, pid in enumerate(pairs):
score = float(lines[p])
if float(score) < 0:
pairs[pid].recognized_by[relations.SST_PRED] = -1
else:
pairs[pid].recognized_by[relations.SST_PRED] = 1
            logging.info("{} - {} SST: {}".format(pairs[pid].entities[0], pairs[pid].entities[1], score))
return pairs
def main():
parser = OptionParser(usage='train and evaluate ML model for DDI classification based on the DDI corpus')
parser.add_option("-f", "--file", dest="file", action="store", default="pairs.pickle",
help="Pickle file to load/store the data")
parser.add_option("-d", "--dir", action="store", dest="dir", type = "string", default="DDICorpus/Test/DDIextraction/MedLine/",
help="Corpus directory with XML files")
parser.add_option("--reload", action="store_true", default=False, dest="reload",
help="Reload corpus")
parser.add_option("--log", action="store", dest="loglevel", type = "string", default="WARNING",
help="Log level")
parser.add_option("--logfile", action="store", dest="logfile", type="string", default="kernel.log",
help="Log file")
parser.add_option("--nfolds", action="store", dest="nfolds", type="int", default=10,
help="Number of cross-validation folds")
parser.add_option("--action", action="store", dest="action", type="string", default="cv",
help="cv, train, test, or classify")
parser.add_option("--kernel", action="store", dest="kernel", type="string", default="slk",
help="slk, svmtk")
(options, args) = parser.parse_args()
numeric_level = getattr(logging, options.loglevel.upper(), None)
while len(logging.root.handlers) > 0:
logging.root.removeHandler(logging.root.handlers[-1])
logging.basicConfig(level=numeric_level, format='%(asctime)s %(levelname)s %(message)s')
#logging.getLogger().setLevel(numeric_level)
logging.debug("debug test")
logging.info("info test")
logging.warning("warning test")
if options.file in os.listdir(os.getcwd()) and not options.reload:
print "loading corpus pickle", options.file
docs = pickle.load(open(options.file, 'rb'))
else:
print "loading corpus", options.dir
docs = relations.loadCorpus(options.dir)
pickle.dump(docs, open(options.file, 'wb'))
#build_data_frame(docs)
#if 'parsetree' not in docs['info']:
# for doc in docs:
# for s in docs[doc]:
# docs[doc][s]['parsetree'] = gettree(docs[doc][s]['tokens'])
# docs['info'].append('parsetree')
#trainEvaluatePairs(docs, nfolds=options.nfolds)
if options.kernel == 'slk':
generatejSREdata(docs, options.action + '_pairs.txt')
if options.action == 'train':
trainjSRE(options.kernel + '_' + options.action + '_pairs.txt')
elif options.action == 'test':
testjSRE(options.kernel + '_' +options.action + '_pairs.txt', options.kernel + '_' + "test_results.txt")
elif options.kernel == 'svmtk':
generateSVMTKdata(docs)
if options.action == 'train':
trainSVMTK(options.kernel + '_' +options.action + '_pairs.txt')
elif options.action == 'test':
testSVMTK(options.kernel + '_' +options.action + '_pairs.txt', options.kernel + '_' + "test_results.txt")
generateSVMTKdata(docs)
#tokenslist = tokens.strip().replace('\r', '').split('\n')
if __name__ == "__main__":
main()
| AndreLamurias/IBEnt | src/classification/rext/ddi_kernels.py | Python | mit | 23,772 | 0.008287 |
#!/usr/bin/env python
import click
import json
import re
from icinga2_api.api import Api
from icinga2_api import defaults
VALID_ACTIONS = ['create', 'read', 'update', 'delete']
def validate_uri(ctx, param, value):
if not value.startswith('/'):
raise click.BadParameter('should begin with single /')
return value
def validate_action(ctx, param, value):
if value not in VALID_ACTIONS:
raise click.BadParameter('should be in %s' % VALID_ACTIONS)
return value
def validate_data(ctx, param, value):
if value is None:
return value
try:
return json.loads(value)
except ValueError as e:
raise click.BadParameter('should be valid json')
@click.command()
@click.option('-c', '--configfile',
help='icinga2 API config file. Default: %s' % defaults.CONFIGFILE,
default=defaults.CONFIGFILE)
@click.option('-p', '--profile',
help='icinga2 profile to load. Default: %s' % defaults.PROFILE,
default=defaults.PROFILE)
@click.option('-a', '--action', help='|'.join(VALID_ACTIONS) + ' Default: read',
callback=validate_action,
default='read')
@click.option('-H', '--host', help='icinga2 api host - not required if profile specified',
default=None)
@click.option('--port', help='icinga2 api port - not required if profile specified',
default=None)
@click.option('-u', '--uri', help='icinga2 api uri. Default: ' + defaults.READ_ACTION_URI,
callback=validate_uri,
default=defaults.READ_ACTION_URI)
@click.option('-U', '--user', help='icinga2 api user - not required if profile specified',
default=None)
@click.option('--password', help='icinga2 api password - not required if profile specified',
default=None)
@click.option('-t', '--timeout', help='icinga2 api timeout - not required if profile specified',
default=None)
@click.option('-V', '--verify', help='verify certificate. Default: false',
default=False)
@click.option('-C', '--cert-path', help='verify certificate path - not required if profile specified',
default=None)
@click.option('-v', '--verbose/--no-verbose', help='verbose. Default: false',
default=False)
@click.option('-d', '--data', help='json data to pass',
callback=validate_data,
default=None)
@click.pass_context
def icinga2_api(ctx, **kwargs):
"""
https://github.com/saurabh-hirani/icinga2_api/blob/master/README.md
"""
if kwargs['verbose']:
print 'args: %s' % kwargs
obj = Api(**kwargs)
kwargs['uri'] = re.sub("/{2,}", "/", kwargs['uri'])
method_ref = getattr(obj, kwargs['action'])
output_ds = method_ref(kwargs['uri'], kwargs['data'])
exit_code = 0
if output_ds['status'] != 'success':
click.echo(click.style('CRITICAL: %s action failed' % kwargs['action'], fg='red'))
exit_code = 2
else:
click.echo(click.style('OK: %s action succeeded' % kwargs['action'], fg='green'))
click.echo(json.dumps(output_ds, indent=2))
ctx.exit(exit_code)
if __name__ == '__main__':
icinga2_api()
| saurabh-hirani/icinga2_api | icinga2_api/cmdline.py | Python | isc | 3,123 | 0.012168 |
#This python script is modified from oneyear_data_layer_subset_good.pro
#This routine opens the one-year files defined in the file lists, stacks these files, subsets, and fills bad data with -2000
#input arguments are flist_ndvi, flist_bq, ul_lon,ul_lat,lr_lon,lr_lat
#;inputs: yyyy_flist_ndvi----file list for one year *ndvi.tif,
#; yyyy_flist_bq -----file list fro one year *nvdi_bq.tif
#; ul-----upper left coordinate in unit of degree in geographic coordinates,WGS84
#; lr-----lower right cordinate in unit of degree in geographic coordinates,WGS84
#; data_ver_flg------, 0-old version data,1-new version data
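# Example invocation (added comment; the list file names are illustrative and the
# bounding box matches the test coordinates quoted below):
# python oneyear_data_layer_subset_good.py 2008_flist_ndvi 2008_flist_bq -173.0 72.0 -127.999 54.0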
import sys
import os
import platform
from read_ndvi import *
import raster_process as rp
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'Argument List:', str(sys.argv)
if len(sys.argv) != 7:
print "input arguments are: flist_ndvi, flist_bq, ulx,uly,lrx,lry"
sys.exit(1)
flist_ndvi=sys.argv[1]
flist_bq=sys.argv[2]
ulx=float(sys.argv[3])
uly=float(sys.argv[4])
lrx=float(sys.argv[5])
lry=float(sys.argv[6])
#;test
#;ul in deg, minute, secons= 173d 0' 0.00"W, 72d 0' 0.00"N
#;lr in deg, minute, second= 127d59'56.82"W, 54d 0' 0.07"N
#;if do not want subsize the data, just input 0,0,0,0 for ul_lon,ul_lat,lr_lon,lr_lat, respectively.
#;wrkdir='/home/jiang/nps/cesu/modis_ndvi_250m/wrkdir/'
#;flist_ndvi='/mnt/jzhu_scratch/EMODIS-NDVI-DATA/wrk/ver_new_201107/2008/flist_ndvi'
#;flist_bq = '/mnt/jzhu_scratch/EMODIS-NDVI-DATA/wrk/ver_new_201107/2008/flist_bq'
#;flist_ndvi='/raid/scratch/cesu/eMODIS/ver_old/2008/flist_ndvi'
#;flist_bq='/raid/scratch/cesu/eMODIS/ver_old/2008/flist_bq'
#;flist_ndvi='/home/jiang/nps/cesu/modis_ndvi_250m/wrkdir/2010/2010_flist_ndvi'
#;flist_bq = '/home/jiang/nps/cesu/modis_ndvi_250m/wrkdir/2010/2010_flist_bq'
#;ul=[-173.0d,72.0d]
#;lr=[-127.999116667d,54.000019444d]
#;set path and start envi
#;ENVI, /RESTORE_BASE_SAVE_FILES
#;PREF_SET, 'IDL_PATH', '<IDL_DEFAULT>:+~/nps/cesu/modis_ndvi_250m/bin', /COMMIT
#;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
if platform.system() == 'Windows':
sign='\\'
else:
sign='/'
#---- read these two lists into flist and flist_bq
u1=open(flist_ndvi,'r')
u2=open(flist_bq ,'r')
#---- count the number of lines in the flist files
#total_line_count = sum(1 for line in open("filename.txt"))
#---- get the file names into the list
flist=u1.readlines()
flist=[x.rstrip('\n') for x in flist]
flistbq=u2.readlines()
flistbq=[x.rstrip('\n') for x in flistbq]
num=len(flist)
#---- get workdir and year from mid-year file
#p =strpos(flist(1),sign,/reverse_search)
#len=strlen(flist(1))
wrkdir=os.path.dirname(flist[0])
filen =os.path.basename(flist[0])
#;-----use file header to determine the
if filen.startswith('MT3RG_'):
data_ver_flg=0
else:
data_ver_flg=1
if data_ver_flg == 0:
    year=filen[6:10] #MT3RG_2008_141-147_250m_composite_ndvi.tif
else:
year=filen[13:17] #AK_eMTH_NDVI.2008.036-042.QKM.VI_NDVI.005.2011202142526.tif
#;---- define a struc to save info of each file
#;p={flists,fn:'abc',sn:0,dims:lonarr(5),bn:0L}
#;x=create_struct(name=flist,fn,'abc',fid,0L,dims,lonarr(5),bn,0L)
#x={flist,fn:'abc',bname:'abc',fid:0L,dims:lonarr(5),pos:0L}
#flista=replicate(x,num) ;save ndvi data files
#flistq=replicate(x,num) ; save ndvi_bq data files
#;---- go through one year ndvi and ndvi_bq data files
First_Flag=True
for j in range(0L, num):
fn_ndvi = flist[j]
#;---- for old data name
if data_ver_flg == 0:
str1='composite_ndvi'
str2='composite_ndvi_bq'
        p1=fn_ndvi.rfind(sign)
tmpbname=fn_ndvi[p1+7:p1+19] # for old data, its name looks like:MT3RG_2008_253-259_250m_composite_ndvi.tif
else:
#;---- for new data name
str1='.VI_NDVI.'
str2='.VI_QUAL.'
p1=fn_ndvi.rfind(sign)
tmpbname=fn_ndvi[p1+14:p1+26] #for new data, its name looks like:eMTH_NDVI.2008.029-035.QKM.VI_NDVI.005.2011202084157.tif
p=fn_ndvi.find(str1)
length=len(fn_ndvi)
file_hdr=fn_ndvi[0:p]
file_end =fn_ndvi[p+len(str1):length]
fn_bq=file_hdr+str2+file_end
idx = fn_bq in flistbq
if idx == True:
#---- read ndvi and bq to cut off no-sense points
print('process the '+ str(j+1) + ' th file: ' +fn_ndvi)
(rt_t, rt_d)=read_ndvi(fn_ndvi,fn_bq,ulx,uly,lrx,lry,tmpbname)
if First_Flag == True:
First_Flag=False
tot_t=wrkdir+'/'+year+'_stack_ndvi.tif'
tot_d=wrkdir+'/'+year+'_stack_bq.tif'
os.system('cp '+ rt_t +' '+ tot_t)
os.system('rm -f '+rt_t)
os.system('cp '+ rt_d +' '+ tot_d)
os.system('rm -f '+rt_d)
else:
tot_t=rp.raster_comb(tot_t,rt_t)
tot_d=rp.raster_comb(tot_d,rt_d)
| gina-alaska/emodis_ndvi_python-docker | emodis_ndvi_python/pycodes/oneyear_data_layer_subset_good.py | Python | mit | 5,054 | 0.033241 |
# -*- coding: utf-8 -*-
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
from __future__ import print_function
from datetime import datetime
from importlib import import_module
from itertools import chain, islice
import sys
import traceback
import six
# ----------------------------------------------------------------------
def ichunked(seq, chunksize):
"""Yields items from an iterator in iterable chunks.
http://stackoverflow.com/a/1335572
"""
    iterable = iter(seq)
    while True:
        try:
            yield list(chain([next(iterable)], islice(iterable, chunksize - 1)))
        except StopIteration:
            return  # exhausted; avoid leaking StopIteration out of the generator (PEP 479)
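# Usage sketch (added comment, not part of the original module):
#   list(ichunked(range(7), 3)) -> [[0, 1, 2], [3, 4, 5], [6]]
# i.e. the final chunk is simply shorter when the length is not a multiple of chunksize.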
# ----------------------------------------------------------------------
def safe_log_via_print(log_level, message, *args, **kwargs):
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
log_message = u'{}: {}: {}'.format(timestamp, log_level, message)
print(log_message % args, file=sys.stderr)
# print stack trace if available
exc_info = kwargs.get('exc_info', None)
if exc_info or log_level == 'exception':
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
stack_trace = ''.join(traceback.format_exception(*exc_info))
print(stack_trace, file=sys.stderr)
# ----------------------------------------------------------------------
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
(stolen from Django)
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
msg = "{} doesn't look like a module path".format(dotted_path)
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError:
msg = 'Module "{}" does not define a "{}" attribute/class'.format(module_path, class_name)
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
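# Usage sketch (added comment, not part of the original module):
#   handler_cls = import_string('logging.StreamHandler')
# returns the StreamHandler class, while a path without a dot ('logging') or with a
# missing attribute ('logging.NoSuchThing') raises ImportError.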
| koendeschacht/python-logstash-async | logstash_async/utils.py | Python | mit | 2,122 | 0.000943 |
# coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import *
def build_scenario(builder):
builder.config().game_duration = 30
builder.config().deterministic = False
if builder.EpisodeNumber() % 2 == 0:
builder.SetBallPosition(0.9, 0.3)
else:
builder.SetBallPosition(-0.9, -0.3)
builder.SetTeam(Team.e_Left)
builder.AddPlayer(-1.00, 0.00, e_PlayerRole_GK, True)
builder.AddPlayer(0.85, 0.30, e_PlayerRole_RM, True)
builder.AddPlayer(0.00, 0.00, e_PlayerRole_RM, True)
builder.SetTeam(Team.e_Right)
builder.AddPlayer(-1.00, 0.00, e_PlayerRole_GK, True)
builder.AddPlayer(0.85, 0.30, e_PlayerRole_RM, True)
| google-research/football | gfootball/scenarios/tests/keeper_test.py | Python | apache-2.0 | 1,184 | 0.010135 |
from neolib.plots.Step import Step
from neolib.NST import NST
import time
class HealPetPet(Step):
_paths = {
'links': '//*[@id="content"]/table/tr/td[2]//a/@href',
'img': '//*[@id="content"]/table/tr/td[2]/div/img/@src',
'cert': '//area/@href',
}
_HEALS = {
'http://images.neopets.com/altador/misc/petpet_act_b_ffabe6bc57.gif': 0,
'http://images.neopets.com/altador/misc/petpet_act_a_2a605ae262.gif': 1,
'http://images.neopets.com/altador/misc/petpet_act_c_5f4438778c.gif': 2,
'http://images.neopets.com/altador/misc/petpet_act_d_42b934a33b.gif': 3,
}
def __init__(self, usr):
super().__init__(usr, '', '', False)
# Setup link
self.link = ['http://www.neopets.com/altador/petpet.phtml?ppheal=1',
'http://www.neopets.com/altador/petpet.phtml?ppheal=1&sthv=%s']
# Setup checks
self._checks = ['']
def execute(self, last_pg=None):
# Heal the PetPet 10 times to get the certificate
check = ''
for i in range(0, 11):
if check:
pg = self._usr.get_page(check)
else:
pg = self._usr.get_page(self.link[0])
f = open('test.html', 'w', encoding='utf-8')
f.write(pg.content)
f.close()
if len(self._xpath('cert', pg)) > 0:
print('Found certificate!')
url = self._base_url + self._xpath('cert', pg)[0]
pg = self._usr.get_page(url)
f = open('test.html', 'w', encoding='utf-8')
f.write(pg.content)
f.close()
print('Saved page')
exit()
links = self._xpath('links', pg)
action = self._HEALS[self._xpath('img', pg)[0]]
url = self._base_url + links[action]
print('URL: ' + url)
pg = self._usr.get_page(url)
links = self._xpath('links', pg)
check = self._base_url + links[4]
f = open('test.html', 'w', encoding='utf-8')
f.write(pg.content)
f.close()
if len(self._xpath('cert', pg)) > 0:
print('Found certificate!')
url = self._base_url + self._xpath('cert', pg)[0]
pg = self._usr.get_page(url)
f = open('test.html', 'w', encoding='utf-8')
f.write(pg.content)
f.close()
print('Saved page')
exit()
# Wait till the next minute to check on the petpet
wait = (60 - NST.sec) + 1
print('Waiting ' + str(wait) + ' seconds')
time.sleep(wait)
| jmgilman/neolib2 | neolib/plots/altador/steps/HealPetPet.py | Python | gpl-2.0 | 2,736 | 0.001827 |
from django.db import models
from phonenumber_field.modelfields import PhoneNumberField
class PhoneNumber(models.Model):
provider = models.ForeignKey(
to='providers.Provider'
)
phone_number = PhoneNumberField()
| aniruddha-adhikary/bookit | bookit/providers/models/phone_number.py | Python | mit | 235 | 0 |
#!/usr/bin/env python2
from taptaptap.proc import plan, ok, not_ok, out
plan(first=1, last=13)
ok('Starting the program')
ok('Starting the engine')
ok('Find the object')
ok('Grab it', todo=True)
ok('Use it', todo=True)
2 * 2 == 4 and ok('2 * 2 == 4') or not_ok('2 * 2 != 4')
out()
## validity: -1
## ok testcases: 6 / 13
## bailout: no
## stderr: 2 * 2 == 4
## stderr: TODO
## stderr: ~TRUE
## stderr: ~True
## stderr: ~true
| meisterluk/taptaptap | tests/proc_005.py | Python | bsd-3-clause | 472 | 0.019068 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import code
import cpp_util
from model import Platforms
from schema_util import CapitalizeFirstLetter
from schema_util import JsFunctionNameToClassName
import json
import os
import re
def _RemoveDescriptions(node):
"""Returns a copy of |schema| with "description" fields removed.
"""
if isinstance(node, dict):
result = {}
for key, value in node.items():
# Some schemas actually have properties called "description", so only
# remove descriptions that have string values.
if key == 'description' and isinstance(value, basestring):
continue
result[key] = _RemoveDescriptions(value)
return result
if isinstance(node, list):
return [_RemoveDescriptions(v) for v in node]
return node
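# Illustration (added comment): only string-valued "description" keys are stripped,
# recursively, so
#   {'name': 'x', 'description': 'docs', 'properties': {'description': {'type': 'string'}}}
# becomes
#   {'name': 'x', 'properties': {'description': {'type': 'string'}}}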
class CppBundleGenerator(object):
"""This class contains methods to generate code based on multiple schemas.
"""
def __init__(self,
root,
model,
api_defs,
cpp_type_generator,
cpp_namespace,
source_file_dir,
impl_dir):
self._root = root
self._model = model
self._api_defs = api_defs
self._cpp_type_generator = cpp_type_generator
self._cpp_namespace = cpp_namespace
self._source_file_dir = source_file_dir
self._impl_dir = impl_dir
self.api_cc_generator = _APICCGenerator(self)
self.api_h_generator = _APIHGenerator(self)
self.schemas_cc_generator = _SchemasCCGenerator(self)
self.schemas_h_generator = _SchemasHGenerator(self)
def _GenerateHeader(self, file_base, body_code):
"""Generates a code.Code object for a header file
Parameters:
- |file_base| - the base of the filename, e.g. 'foo' (for 'foo.h')
- |body_code| - the code to put in between the multiple inclusion guards"""
c = code.Code()
c.Append(cpp_util.CHROMIUM_LICENSE)
c.Append()
c.Append(cpp_util.GENERATED_BUNDLE_FILE_MESSAGE % self._source_file_dir)
ifndef_name = cpp_util.GenerateIfndefName(self._source_file_dir, file_base)
c.Append()
c.Append('#ifndef %s' % ifndef_name)
c.Append('#define %s' % ifndef_name)
c.Append()
c.Concat(body_code)
c.Append()
c.Append('#endif // %s' % ifndef_name)
c.Append()
return c
def _GetPlatformIfdefs(self, model_object):
"""Generates the "defined" conditional for an #if check if |model_object|
has platform restrictions. Returns None if there are no restrictions.
"""
if model_object.platforms is None:
return None
ifdefs = []
for platform in model_object.platforms:
if platform == Platforms.CHROMEOS:
ifdefs.append('defined(OS_CHROMEOS)')
elif platform == Platforms.LINUX:
ifdefs.append('defined(OS_LINUX)')
elif platform == Platforms.MAC:
ifdefs.append('defined(OS_MACOSX)')
elif platform == Platforms.WIN:
ifdefs.append('defined(OS_WIN)')
else:
raise ValueError("Unsupported platform ifdef: %s" % platform.name)
return ' || '.join(ifdefs)
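  # Example (added comment): for a model object restricted to Platforms.CHROMEOS
  # and Platforms.WIN this returns 'defined(OS_CHROMEOS) || defined(OS_WIN)', and
  # None when there is no platform restriction.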
def _GenerateRegisterFunctions(self, namespace_name, function):
c = code.Code()
function_ifdefs = self._GetPlatformIfdefs(function)
if function_ifdefs is not None:
c.Append("#if %s" % function_ifdefs, indent_level=0)
function_name = JsFunctionNameToClassName(namespace_name, function.name)
c.Append("registry->RegisterFunction<%sFunction>();" % (
function_name))
if function_ifdefs is not None:
c.Append("#endif // %s" % function_ifdefs, indent_level=0)
return c
def _GenerateFunctionRegistryRegisterAll(self):
c = code.Code()
c.Append('// static')
c.Sblock('void GeneratedFunctionRegistry::RegisterAll('
'ExtensionFunctionRegistry* registry) {')
for namespace in self._model.namespaces.values():
namespace_ifdefs = self._GetPlatformIfdefs(namespace)
if namespace_ifdefs is not None:
c.Append("#if %s" % namespace_ifdefs, indent_level=0)
namespace_name = CapitalizeFirstLetter(namespace.name.replace(
"experimental.", ""))
for function in namespace.functions.values():
if function.nocompile:
continue
c.Concat(self._GenerateRegisterFunctions(namespace.name, function))
for type_ in namespace.types.values():
for function in type_.functions.values():
if function.nocompile:
continue
namespace_types_name = JsFunctionNameToClassName(
namespace.name, type_.name)
c.Concat(self._GenerateRegisterFunctions(namespace_types_name,
function))
if namespace_ifdefs is not None:
c.Append("#endif // %s" % namespace_ifdefs, indent_level=0)
c.Eblock("}")
return c
class _APIHGenerator(object):
"""Generates the header for API registration / declaration"""
def __init__(self, cpp_bundle):
self._bundle = cpp_bundle
def Generate(self, namespace):
c = code.Code()
c.Append('#include <string>')
c.Append()
c.Append('#include "base/basictypes.h"')
c.Append()
c.Append("class ExtensionFunctionRegistry;")
c.Append()
c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
c.Append()
c.Append('class GeneratedFunctionRegistry {')
c.Sblock(' public:')
c.Append('static void RegisterAll('
'ExtensionFunctionRegistry* registry);')
c.Eblock('};')
c.Append()
c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
return self._bundle._GenerateHeader('generated_api', c)
class _APICCGenerator(object):
"""Generates a code.Code object for the generated API .cc file"""
def __init__(self, cpp_bundle):
self._bundle = cpp_bundle
def Generate(self, namespace):
c = code.Code()
c.Append(cpp_util.CHROMIUM_LICENSE)
c.Append()
c.Append('#include "%s"' % (os.path.join(self._bundle._source_file_dir,
'generated_api.h')))
c.Append()
for namespace in self._bundle._model.namespaces.values():
namespace_name = namespace.unix_name.replace("experimental_", "")
implementation_header = namespace.compiler_options.get(
"implemented_in",
"%s/%s/%s_api.h" % (self._bundle._impl_dir,
namespace_name,
namespace_name))
if not os.path.exists(
os.path.join(self._bundle._root,
os.path.normpath(implementation_header))):
if "implemented_in" in namespace.compiler_options:
raise ValueError('Header file for namespace "%s" specified in '
'compiler_options not found: %s' %
(namespace.unix_name, implementation_header))
continue
ifdefs = self._bundle._GetPlatformIfdefs(namespace)
if ifdefs is not None:
c.Append("#if %s" % ifdefs, indent_level=0)
c.Append('#include "%s"' % implementation_header)
if ifdefs is not None:
c.Append("#endif // %s" % ifdefs, indent_level=0)
c.Append()
c.Append('#include '
'"extensions/browser/extension_function_registry.h"')
c.Append()
c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
c.Append()
c.Concat(self._bundle._GenerateFunctionRegistryRegisterAll())
c.Append()
c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
c.Append()
return c
class _SchemasHGenerator(object):
"""Generates a code.Code object for the generated schemas .h file"""
def __init__(self, cpp_bundle):
self._bundle = cpp_bundle
def Generate(self, namespace):
c = code.Code()
c.Append('#include <map>')
c.Append('#include <string>')
c.Append()
c.Append('#include "base/strings/string_piece.h"')
c.Append()
c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
c.Append()
c.Append('class GeneratedSchemas {')
c.Sblock(' public:')
c.Append('// Determines if schema named |name| is generated.')
c.Append('static bool IsGenerated(std::string name);')
c.Append()
c.Append('// Gets the API schema named |name|.')
c.Append('static base::StringPiece Get(const std::string& name);')
c.Eblock('};')
c.Append()
c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
return self._bundle._GenerateHeader('generated_schemas', c)
def _FormatNameAsConstant(name):
"""Formats a name to be a C++ constant of the form kConstantName"""
name = '%s%s' % (name[0].upper(), name[1:])
return 'k%s' % re.sub('_[a-z]',
lambda m: m.group(0)[1].upper(),
name.replace('.', '_'))
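# Examples (added comment): _FormatNameAsConstant('font_settings') returns
# 'kFontSettings' and _FormatNameAsConstant('system.cpu') returns 'kSystemCpu';
# dots are turned into underscores first and each '_<lowercase>' pair is then
# collapsed to the upper-cased letter.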
class _SchemasCCGenerator(object):
"""Generates a code.Code object for the generated schemas .cc file"""
def __init__(self, cpp_bundle):
self._bundle = cpp_bundle
def Generate(self, namespace):
c = code.Code()
c.Append(cpp_util.CHROMIUM_LICENSE)
c.Append()
c.Append('#include "%s"' % (os.path.join(self._bundle._source_file_dir,
'generated_schemas.h')))
c.Append()
c.Append('#include "base/lazy_instance.h"')
c.Append()
c.Append('namespace {')
for api in self._bundle._api_defs:
namespace = self._bundle._model.namespaces[api.get('namespace')]
# JSON parsing code expects lists of schemas, so dump a singleton list.
json_content = json.dumps([_RemoveDescriptions(api)],
separators=(',', ':'))
# Escape all double-quotes and backslashes. For this to output a valid
# JSON C string, we need to escape \ and ". Note that some schemas are
# too large to compile on windows. Split the JSON up into several
# strings, since apparently that helps.
max_length = 8192
segments = [json_content[i:i + max_length].replace('\\', '\\\\')
.replace('"', '\\"')
for i in xrange(0, len(json_content), max_length)]
c.Append('const char %s[] = "%s";' %
(_FormatNameAsConstant(namespace.name), '" "'.join(segments)))
c.Append('}')
c.Concat(cpp_util.OpenNamespace(self._bundle._cpp_namespace))
c.Append()
c.Sblock('struct Static {')
c.Sblock('Static() {')
for api in self._bundle._api_defs:
namespace = self._bundle._model.namespaces[api.get('namespace')]
c.Append('schemas["%s"] = %s;' % (namespace.name,
_FormatNameAsConstant(namespace.name)))
c.Eblock('}')
c.Append()
c.Append('std::map<std::string, const char*> schemas;')
c.Eblock('};')
c.Append()
c.Append('base::LazyInstance<Static> g_lazy_instance;')
c.Append()
c.Append('// static')
c.Sblock('base::StringPiece GeneratedSchemas::Get('
'const std::string& name) {')
c.Append('return IsGenerated(name) ? '
'g_lazy_instance.Get().schemas[name] : "";')
c.Eblock('}')
c.Append()
c.Append('// static')
c.Sblock('bool GeneratedSchemas::IsGenerated(std::string name) {')
c.Append('return g_lazy_instance.Get().schemas.count(name) > 0;')
c.Eblock('}')
c.Append()
c.Concat(cpp_util.CloseNamespace(self._bundle._cpp_namespace))
c.Append()
return c
| boundarydevices/android_external_chromium_org | tools/json_schema_compiler/cpp_bundle_generator.py | Python | bsd-3-clause | 11,513 | 0.006428 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Law-to-Code -- Extract formulas & parameters from laws
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2013 OpenFisca Team
# https://github.com/openfisca/LawToCode
#
# This file is part of Law-to-Code.
#
# Law-to-Code is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Law-to-Code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Extract parameters from IPP's "Barèmes des prélèvements sociaux" and upload them to Law-to-Code.
IPP = Institut des politiques publiques
http://www.ipp.eu/fr/outils/baremes-prelevements-sociaux/
http://www.ipp.eu/fr/outils/taxipp-simulation/
"""
import argparse
import collections
import ConfigParser
import itertools
import json
import logging
import os
import sys
import urlparse
from biryani1 import baseconv, custom_conv, datetimeconv, states
import requests
import xlrd
app_name = os.path.splitext(os.path.basename(__file__))[0]
conv = custom_conv(baseconv, datetimeconv, states)
log = logging.getLogger(app_name)
N_ = lambda message: message
parameters = []
currency_converter = conv.first_match(
conv.pipe(
conv.test_isinstance(basestring),
conv.cleanup_line,
conv.test_none(),
),
conv.pipe(
conv.test_isinstance(tuple),
conv.test(lambda couple: len(couple) == 2, error = N_(u"Invalid couple length")),
conv.struct(
(
conv.pipe(
conv.test_isinstance((float, int)),
conv.not_none,
),
conv.pipe(
conv.test_isinstance(basestring),
conv.test_in([
u'EUR',
u'FRF',
]),
),
),
),
),
)
pss_converters = collections.OrderedDict((
(u"Date d'effet", conv.pipe(
conv.test_isinstance(basestring),
conv.iso8601_input_to_date,
conv.date_to_iso8601_str,
conv.not_none,
)),
(u'Plafond de la Sécurité sociale (mensuel)', currency_converter),
(u'Plafond de la Sécurité sociale (annuel)', currency_converter),
(u'Référence législative', conv.pipe(
conv.test_isinstance(basestring),
conv.cleanup_line,
)),
(u'Parution au JO', conv.pipe(
conv.test_isinstance(basestring),
conv.iso8601_input_to_date,
conv.date_to_iso8601_str,
)),
(u'Notes', conv.pipe(
conv.test_isinstance(basestring),
conv.cleanup_line,
)),
(None, conv.pipe(
conv.test_isinstance(basestring),
conv.cleanup_line,
conv.test_none(),
)),
))
def main():
parser = argparse.ArgumentParser(description = __doc__)
parser.add_argument('config', help = 'path of configuration file')
parser.add_argument('-v', '--verbose', action = 'store_true', default = False, help = "increase output verbosity")
args = parser.parse_args()
logging.basicConfig(level = logging.DEBUG if args.verbose else logging.WARNING, stream = sys.stdout)
config_parser = ConfigParser.SafeConfigParser(dict(
here = os.path.dirname(os.path.abspath(os.path.normpath(args.config))),
))
config_parser.read(args.config)
conf = conv.check(conv.pipe(
conv.test_isinstance(dict),
conv.struct(
{
'law_to_code.api_key': conv.pipe(
conv.cleanup_line,
conv.not_none,
),
'law_to_code.site_url': conv.pipe(
conv.make_input_to_url(error_if_fragment = True, error_if_path = True, error_if_query = True,
full = True),
conv.not_none,
),
'user_agent': conv.pipe(
conv.cleanup_line,
conv.not_none,
),
},
default = 'drop',
),
conv.not_none,
))(dict(config_parser.items('Law-to-Code-TAXIPP-Harvester')), conv.default_state)
response = requests.get('http://www.ipp.eu/wp-content/uploads/2012/01/IPP-prelevements-sociaux-avril2012.xls')
book = xlrd.open_workbook(file_contents = response.content, formatting_info = True)
sheet_names = book.sheet_names()
assert sheet_names == [
u'Sommaire',
u'PSS',
u'SMIG',
u'SMIC',
u'GMR',
u'CSG-1',
u'CSG-2',
u'CRDS',
u'SS',
u'MMID',
u'MMID-AM',
u'CNAV',
u'VEUVAGE',
u'CSA',
u'FAMILLE',
u'CSS_RED',
u'CHOMAGE',
u'ASF',
u'AGFF',
u'AGS',
u'ARRCO',
u'AGIRC',
u'APEC',
u'CET',
u'DECES_CADRES',
u'ASSIETTE PU',
u'MMID-Etat',
u'MMID-CL',
u'RP',
u'CI',
u'RAFP',
u'CNRACL',
u'IRCANTEC',
u'FDS',
u'TAXSAL',
u'CONSTRUCTION',
u'FNAL',
u'ACCIDENTS',
u'FORMATION',
u'APPRENTISSAGE',
u'VT',
u'PREVOYANCE',
u'AUBRY I',
u'ALLEG_GEN',
u'AUBRYII',
u'SFT',
u'INDICE_FP',
], str((sheet_names,))
sheet = book.sheet_by_name(u'PSS')
sheet_data = [
[
transform_xls_cell_to_json(book, cell_type, cell_value, sheet.cell_xf_index(row_index, column_index))
for column_index, (cell_type, cell_value) in enumerate(itertools.izip(sheet.row_types(row_index),
sheet.row_values(row_index)))
]
for row_index in range(sheet.nrows)
]
taxipp_names = sheet_data[0]
labels = sheet_data[1]
assert labels == pss_converters.keys(), str((labels,))
taxipp_name_by_label = dict(zip(labels, taxipp_names))
description_lines = []
entries = []
state = None
for row_index, row in enumerate(itertools.islice(sheet_data, 2, None)):
if all(cell in (None, u'') for cell in row):
state = 'description'
if state is None:
entry = conv.check(conv.struct(pss_converters))(dict(zip(labels, row)), state = conv.default_state)
entries.append(entry)
else:
description_line = u' '.join(
cell.strip()
for cell in row
if cell is not None
)
description_lines.append(description_line)
description = u'\n'.join(description_lines) or None
parameters = []
for entry in entries:
value_label = u'Plafond de la Sécurité sociale (mensuel)'
parameters.append(dict(
comment = entry[u"Notes"],
description = description,
format = u'float',
legislative_reference = entry[u'Référence législative'],
official_publication_date = entry[u'Parution au JO'],
start_date = entry[u"Date d'effet"],
taxipp_code = taxipp_name_by_label[value_label],
title = value_label,
unit = entry[value_label][1]
if entry[value_label] is not None
else None,
value = entry[value_label][0]
if entry[value_label] is not None
else None,
))
value_label = u'Plafond de la Sécurité sociale (annuel)'
parameters.append(dict(
comment = entry[u"Notes"],
description = description,
format = u'float',
legislative_reference = entry[u'Référence législative'],
official_publication_date = entry[u'Parution au JO'],
start_date = entry[u"Date d'effet"],
taxipp_code = taxipp_name_by_label[value_label],
title = value_label,
unit = entry[value_label][1] if entry[value_label] is not None else None,
value = entry[value_label][0] if entry[value_label] is not None else None,
))
parameter_upsert_url = urlparse.urljoin(conf['law_to_code.site_url'], 'api/1/parameters/upsert')
for parameter in parameters:
response = requests.post(parameter_upsert_url,
data = unicode(json.dumps(dict(
api_key = conf['law_to_code.api_key'],
value = parameter,
), ensure_ascii = False, indent = 2)).encode('utf-8'),
headers = {
'Content-Type': 'application/json; charset=utf-8',
'User-Agent': conf['user_agent']
}
)
if not response.ok:
print response.json()
response.raise_for_status()
return 0
def transform_xls_cell_to_json(book, type, value, xf_index):
"""Convert an XLS cell (type & value) to an unicode string.
Code taken from http://code.activestate.com/recipes/546518-simple-conversion-of-excel-files-into-csv-and-yaml/
Type Codes:
EMPTY 0
TEXT 1 a Unicode string
NUMBER 2 float
DATE 3 float
BOOLEAN 4 int; 1 means TRUE, 0 means FALSE
ERROR 5
"""
if type == 0:
value = None
elif type == 1:
if not value:
value = None
elif type == 2:
# NUMBER
value_int = int(value)
if value_int == value:
value = value_int
xf = book.xf_list[xf_index] # gets an XF object
format_key = xf.format_key
format = book.format_map[format_key] # gets a Format object
format_str = format.format_str # this is the "number format string"
if format_str.endswith(ur'\ "€"'):
return (value, u'EUR')
if format_str.endswith(ur'\ [$FRF]'):
return (value, u'FRF')
print value, format_str
            raise NotImplementedError(u'unhandled currency number format: {0}'.format(format_str))
elif type == 3:
# DATE
y, m, d, hh, mm, ss = xlrd.xldate_as_tuple(value, book.datemode)
date = u'{0:04d}-{1:02d}-{2:02d}'.format(y, m, d) if any(n != 0 for n in (y, m, d)) else None
value = u'T'.join(
fragment
for fragment in (
date,
u'{0:02d}:{1:02d}:{2:02d}'.format(hh, mm, ss)
if any(n != 0 for n in (hh, mm, ss)) or date is None
else None,
)
if fragment is not None
)
elif type == 4:
value = bool(value)
elif type == 5:
# ERROR
value = xlrd.error_text_from_code[value]
return value
if __name__ == "__main__":
sys.exit(main())
| openfisca/LawToCode | lawtocode/scripts/harvest_ipp_prelevements_sociaux.py | Python | agpl-3.0 | 11,104 | 0.010558 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: net_l3_interface
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage L3 interfaces on network devices
description:
- This module provides declarative management of L3 interfaces
on network devices.
options:
name:
description:
- Name of the L3 interface.
ipv4:
description:
- IPv4 of the L3 interface.
ipv6:
description:
- IPv6 of the L3 interface.
aggregate:
    description: List of L3 interface definitions
purge:
description:
- Purge L3 interfaces not defined in the I(aggregate) parameter.
default: no
state:
description:
- State of the L3 interface configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: Set eth0 IPv4 address
net_l3_interface:
name: eth0
ipv4: 192.168.0.1/24
- name: Remove eth0 IPv4 address
net_l3_interface:
name: eth0
state: absent
- name: Set IP addresses on aggregate
net_l3_interface:
aggregate:
- { name: eth1, ipv4: 192.168.2.10/24 }
- { name: eth2, ipv4: 192.168.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" }
- name: Remove IP addresses on aggregate
net_l3_interface:
aggregate:
- { name: eth1, ipv4: 192.168.2.10/24 }
- { name: eth2, ipv4: 192.168.3.10/24, ipv6: "fd5d:12c9:2201:1::1/64" }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- set interfaces ethernet eth0 address '192.168.0.1/24'
"""
| e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/layer3/net_l3_interface.py | Python | bsd-3-clause | 2,074 | 0.000964 |
from __future__ import absolute_import, print_function, division
from .. import decorators
def multiply(elems, units, inplace_units=False, unitdict=None, key=None):
"""
Multiply elements considering their units
"""
return operate(elems, units, inplace_units=inplace_units, unitdict=unitdict, key=key, operation='*')
def add(elems, units, inplace_units=False, unitdict=None, key=None):
"""
Add elements considering their units
"""
return operate(elems, units, inplace_units=inplace_units, unitdict=unitdict, key=key, operation='+')
def divide(elems, units, inplace_units=False, unitdict=None, key=None):
"""
Divide elements considering their units
"""
return operate(elems, units, inplace_units=inplace_units, unitdict=unitdict, key=key, operation='/')
def operate(elems, units, inplace_units=False, unitdict=None, key=None, operation='+'):
"""
Operate on elements considering their units
Parameters
-----------
elems: list, tuple
list of pandas.Series
units: list, tuple
list of pint.units ordered as the elems list
    inplace_units: bool
        if True, store the resulting unit in unitdict under key instead of returning it
    unitdict: dict
        units dictionary to be updated in place (required if inplace_units is True)
    key: str
        name of the resulting variable, used as the key when updating unitdict
    operation: str
        one of '+', '*' or '/'
"""
import pandas as pd
import numpy as np
idx = elems[0].index
if operation=='+':
result = elems[0].values*units[0]
for elem, unit in zip(elems[1:], units[1:]):
if type(elem) == pd.Series:
elem = elem.reindex(idx)
result += elem.values*unit
else:
result += elem*unit
if operation=='*':
result = elems[0].values*units[0]
for elem, unit in zip(elems[1:], units[1:]):
if type(elem) == pd.Series:
elem = elem.reindex(idx)
result *= elem.values*unit
else:
result *= elem*unit
if operation=='/':
result = elems[0].values*units[0]
for elem, unit in zip(elems[1:], units[1:]):
if type(elem) == pd.Series:
elem = elem.reindex(idx)
result /= elem.values*unit
else:
result /= elem*unit
out = pd.Series(result.magnitude, index=idx)
funit = result.units
if inplace_units==True:
unitdict.update({key : funit})
return out
else:
return out, funit
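# Usage sketch (added comment, not part of the original module). Assuming ureg is the
# package's pint UnitRegistry and u, v are pandas Series sharing an index:
#   flux, unit = multiply([u, v], [ureg('m/s').u, ureg('kelvin').u])
# returns the element-wise product as a plain Series plus the combined pint unit
# (kelvin * meter / second); calling with inplace_units=True, unitdict=units,
# key='flux' stores that unit in the dictionary instead of returning it.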
def parseUnits(unitstr):
"""
Gets unit from string, list of strings, or dict's values, using the UnitRegistry
defined in __init__.py
"""
from .. import ureg
if isinstance(unitstr, str):
return ureg(unitstr).u
elif isinstance(unitstr, list):
return [ ureg(el).u for el in unitstr ]
elif isinstance(unitstr, dict):
return { key: ureg(el).u for key, el in unitstr.items() }
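# Examples (added comment): parseUnits('m/s') returns a single pint unit,
# parseUnits(['m/s', 'kg']) a list of units, and
# parseUnits({'u': 'm/s', 'theta': 'kelvin'}) a dict keyed like its input.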
def convert_to(data, inunit, outunit, inplace_units=False, key=None):
"""
Converts data from one unit to the other
Parameters
-----------
    data: pandas.Series
        data to be converted from one unit to the other
inunit: pint.quantity or dict
unit(s) that the data is in
outunit: str
convert to this unit
    inplace_units: bool
        if inunit is a dict, the dict is updated in place; the "key" keyword must be provided
key: str
if inunit is a dict, it is the name of the variable to be changed
"""
from .. import Q_
if key:
Q = inunit[key].to(outunit)
else:
Q = inunit.to(outunit)
coef = Q.magnitude
outunit = Q.units
if inplace_units:
inunit.update({key : outunit})
return data*coef
else:
return data*coef, outunit
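# Usage sketch (added comment, not part of the original module). With Q_ being the
# package's pint Quantity constructor:
#   wind_ms, unit = convert_to(wind_kmh, Q_(1.0, 'km/hour'), 'm/s')
# multiplies the Series by the conversion factor (~0.2778) and returns the new unit;
# the key/inplace_units path performs the same lookup inside a units dictionary and
# rewrites that entry.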
def convert_cols(data, guide, units, inplace_units=False):
"""
Converts data from one unit to the other
Parameters
-----------
data: pandas.DataFrame
        data to be converted from one unit to the other
guide: dict
{names of columns : units to converted to}
units: dict
units dictionary
inplace_units: bool
if inunit is a dict, the dict is update in place. "key" keyword must be provided
"""
from .. import algs
from .. import Q_
data = data.copy()
#-------
# An attempt to make it work with Series
if len(data.columns)==1 and (type(guide) != dict):
guide = { data.columns[0] : guide }
guide = algs.parseUnits(guide)
#-------
#-------
# We first turn it into a numpy array to make the conversion using pint natively
for col, outunit in guide.items():
aux = Q_(data[ col ].values, units[ col ])
aux = aux.to(outunit)
data.loc[:, col] = aux
#-------
if inplace_units:
units.update(guide)
return data
else:
return data, guide
def convert_indexes(data, guide, units, inplace_units=False):
"""
Converts data from one unit to the other
Parameters
-----------
data: pandas.Series
to be chanhed from one unit to the other
guide: dict
{names of columns : units to converted to}
units: dict
units dictionary
    inplace_units: bool
        if True, the units dictionary is updated in place with the entries in guide
"""
from .. import algs
data = data.copy()
guide = algs.parseUnits(guide)
#-------
# We first turn it into a numpy array to make the conversion using pint natively
for idx, outunit in guide.items():
aux = data[ idx ] * units[ idx ]
aux = aux.to(outunit)
data.loc[ idx ] = aux.magnitude
#-------
if inplace_units:
units.update(guide)
return data
else:
return data, guide
def with_units(data, units):
"""
Wrapper around toUnitsCsv to create a method to print the contents of
a dataframe plus its units into a unitsCsv file.
Parameters
-----------
self: pandas.DataFrame, pandas.Series
dataframe or series to which units belong
units: dict
dictionary with the names of each column and their unit
"""
import pandas as pd
data = data.copy()
if isinstance(data, pd.DataFrame):
cols = data.columns
#-------------
# A series can be a column of a main DataFrame, or separate elements
elif isinstance(data, pd.Series):
#---------
# We check if it's a column by the name of the Series
if data.name in units.keys() or isinstance(data.index, pd.DatetimeIndex):
data = pd.DataFrame(data, columns=[ data.name ])
cols = data.columns
#---------
#---------
# If the name is None or it's not in the list of units, then it's different variables
else:
cols = data.index
#---------
#-------------
unts = [ '<{}>'.format(units[c]) if c in units.keys() else '<?>' for c in cols ]
columns = pd.MultiIndex.from_tuples(tuple(zip(cols, unts)))
if isinstance(data, pd.DataFrame):
data.columns = columns
elif isinstance(data, pd.Series):
data.index = columns
return data
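# Illustration (added comment): with units = {'u': 'm/s'} (or the equivalent pint
# unit), a DataFrame column 'u' is relabelled with the MultiIndex header
# ('u', '<m/s>'), while any column missing from the dict is labelled '<?>'.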
| tomchor/pymicra | pymicra/algs/units.py | Python | gpl-3.0 | 7,214 | 0.016912 |
from flask import Blueprint, jsonify, request
from urbansearch.utils import db_utils
relations_api = Blueprint('relations_api', __name__)
@relations_api.route('/document_info', methods=['GET'])
def document_info():
if 'city_a' not in request.args or 'city_b' not in request.args:
return jsonify(status=400, error='No city pair given')
city_a = request.args.get('city_a')
city_b = request.args.get('city_b')
documents = db_utils.get_related_documents(city_a, city_b, int(request.args.get('limit', 300)))
return jsonify(status=200, documents=documents)
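# Example request (added comment; the URL prefix depends on where this blueprint is
# registered by the application):
#   GET /document_info?city_a=Amsterdam&city_b=Utrecht&limit=50
# returns {"status": 200, "documents": [...]}; omitting either city parameter yields
# the status-400 error payload above. The city names here are illustrative only.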
@relations_api.route('/all', methods=['GET'])
def all():
threshold = int(request.args.get('threshold', 125))
relations = db_utils.get_ic_rels(None, threshold)
return jsonify(status=200, relations=relations)
| urbansearchTUD/UrbanSearch | urbansearch/server/relations.py | Python | gpl-3.0 | 807 | 0.001239 |
#!/usr/bin/env python2
import numpy as np
import pdb
from random import sample
from time import time
import heapq
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import sys, os
from eft_calculator import EFT_calculator, Water
import tools
def load_coordinates(name):
lines = open('test.dat/random/'+name).readlines()[-7:-1]
coors = [[float(item) for item in line.split()[2:5]] for line in lines]
return np.array(coors)
class Classical_calculator:
def __init__(self):
self.eps = [0.12, 0.046, 0.046]
self.sigma = [1.7, 0.2245, 0.2245]
self.charge = [-0.834, 0.417, 0.417]
def eval(self, coors):
mol = Water()
coor0 = coors[:3]
coor1 = coors[3:]
e = 0.
f = np.zeros(3)
t = np.zeros(3)
com1 = mol.getCOM(coor1)
eps, sigma, charge = self.eps, self.sigma, self.charge
for i in range(3):
for j in range(3):
ener, force = self.atomicEF(coor0[i], eps[i], sigma[i], charge[i], coor1[j], eps[j], sigma[j], charge[j])
e += ener
f += force
t += np.cross(coor1[j]-com1, force)
#if e>100.0:
# e = 100.0
# f = f/np.linalg.norm(f) * 100.0
# t = t/np.linalg.norm(t) * 100.0
return np.array([e, f[0], f[1], f[2], t[0], t[1], t[2]])
def atomicEF(self, x0, e0, s0, q0, x1, e1, s1, q1):
k = 138.935456
e = np.sqrt(e0 * e1)
s = s0 + s1
r = np.linalg.norm(x0 - x1)
if r <0.1 : return 100.0, np.array([100., 100.,100.,])
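        # (added comment) The pair energy computed below is a 12-6 Lennard-Jones
        # term, e*((s/r)**12 - 2*(s/r)**6), plus a Coulomb term k*q0*q1/r; the
        # fvdw and felec lines are the force contributions on atom 1 as
        # implemented here.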
sor6 = (s/r) ** 6
evdw = e * (sor6**2 - 2 * sor6)
fvdw = e / r**2 * sor6 * (sor6 - 1) * (x1 - x0)
eelec = k * q0 * q1 / r
felec = k * q0 * q1 / r**3 * (x1 - x0)
ener = evdw + eelec
force = fvdw + felec
return ener, force
def test_random_set():
e0 = []
e1 = []
fce0 = []
fce1 = []
trq0 = []
trq1 = []
all = []
t1 = time()
for i in range(1, 2000):
# load atomic coor
name = 'test.dat/random/test%04d.inp.log' % i
#if i == 1693: pdb.set_trace()
eft, coors = calculator._parseQMlog(name)
# evaluate with analytical function
eft = cc.eval(coors)
e0.append(eft[0])
fce0 += list(eft[1:4])
trq0 += list(eft[4:7])
# convert atomic coor to r, phi, theta...
X0, q0 = calculator.mol.atomic2Xq(coors[:3])
X1, q1 = calculator.mol.atomic2Xq(coors[3:])
# evaluate with calculator
eft = calculator.eval(X0, q0, X1, q1)
e1.append(eft[0])
#if eft[0] > 15:
# print(coors, name)
# print(np.dtype(q1[0]))
fce1 += list(eft[1:4])
trq1 += list(eft[4:7])
#all.append((-np.abs(e0[-1]-e1[-1]), name))
all.append((-np.linalg.norm(np.array(fce0) - np.array(fce1)), name))
t2 = time()
print('took %.1f s to evaluate the random set' % (t2 - t1))
heapq.heapify(all)
#for i in range(3):
# de, name = heapq.heappop(all)
# print -de, name
"""
for i in range(len(e0)):
if e1[i]> 100.0:
e0[i] = e1[i] = 0.0
for j in range(3):
fce0[i*3 +j ] = fce1[i*3+j] = trq0[i*3+j] = trq1[i*3+j] = 0.0
"""
# make a plot
_, axarr = plt.subplots(1, 3)
p = np.corrcoef(e0, e1)[0, 1]
print("Energy: p =", p)
axarr[0].scatter(e0, e1)
axarr[0].text(0, 0, 'p=%.4f'%p)
p = np.corrcoef(fce0, fce1)[0, 1]
print("Force: p =", p)
axarr[1].scatter(fce0, fce1)
axarr[1].text(0, 0, 'p=%.4f'%p)
p = np.corrcoef(trq0, trq1)[0, 1]
print("Torque: p =", p)
axarr[2].scatter(trq0, trq1)
axarr[2].text(0, 0, 'p=%.4f'%p)
plt.savefig(figname)
def randomSample():
root = 'golden.dat'
if not os.path.exists(root):os.mkdir(root)
def mol2mol_init(ele):
mol = [[i,0.0,0.0,0.0] for i in ele]
return mol
size = 200
folder_id = 0
file_count = 0
confs = calculator.grid._iter_conf()
confs = list(confs)
if len(confs) > 2000:
confs = sample(list(confs), 2000)
for idx, coors in calculator.gen_PDB(confs):
#for id, coors in calculator.gen_atomic_coors(0,10):
#print(idx, coors)
if file_count%size == 0:
folder = os.path.join(root,"EFT_%04d"%(folder_id))
if not os.path.exists(folder):os.mkdir(folder)
folder_id += 1
pdb = open("%s/eft.%s.pdb"%(folder,idx),"w")
pdb.write(coors)
pdb.close()
file_count += 1
def grids_conf():
root = 'grids.dat'
if not os.path.exists(root):os.mkdir(root)
def mol2mol_init(ele):
mol = [[i,0.0,0.0,0.0] for i in ele]
return mol
size = 200
folder_id = 0
file_count = 0
confs = calculator.grid._grid_conf()
for idx, coors in calculator.gen_PDB(confs):
#for id, coors in calculator.gen_atomic_coors(0,10):
#print(idx, coors)
if file_count%size == 0:
folder = os.path.join(root,"EFT_%04d"%(folder_id))
if not os.path.exists(folder):os.mkdir(folder)
folder_id += 1
pdb = open("%s/eft.%s.pdb"%(folder,idx),"w")
pdb.write(coors)
pdb.close()
file_count += 1
if __name__ == '__main__':
    if len(sys.argv) < 3:
        print("\n Usage: %s figname.png datfilename.dat [err_cutoff]\n" % sys.argv[0])
sys.exit()
    figname = sys.argv[1] # an output fig name
databaseName = sys.argv[2]
t0 = time()
cc = Classical_calculator()
if os.path.exists(databaseName):
print("loaded a old database")
calculator = EFT_calculator(databaseName)
else:
print("created a new mesh")
calculator = EFT_calculator()
if len(sys.argv) == 4:
error_cutoff = float(sys.argv[3])
print("set cutoff as %f"%(error_cutoff))
calculator.fill_grid(cc, databaseName, error_cutoff)
t1 = time()
print('took %.1f s to fill the grid' % (t1 - t0))
test_random_set()
#randomSample()
grids_conf()
| yangjincai/Xq2EFT | testAndOutputGrids.py | Python | apache-2.0 | 6,139 | 0.012543 |
# -*- coding: utf-8 -*-
__license__ = "GNU Affero General Public License, Ver.3"
__author__ = "Pablo Alvarez de Sotomayor Posadillo"
# This file is part of Kirinki.
#
# Kirinki is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Kirinki is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with kirinki. If not, see <http://www.gnu.org/licenses/>.
from django.core.cache import cache
from django.contrib.sessions.models import Session
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.template.loader import render_to_string
from recaptcha.client import captcha
from datetime import datetime, timedelta
import logging
class MainViewer:
def __init__(self, req):
logging.basicConfig(filename='/var/log/kirinki.log',level=logging.DEBUG)
self.request = req
self.session_data = req.session
def getLeftCol(self, blocks = []):
return render_to_string('kirinki/left.html', {'blocks' : blocks})
def getCenterCol(self, blocks = []):
return render_to_string('kirinki/center.html', {'blocks' : blocks})
def getRightCol(self, blocks = []):
return render_to_string('kirinki/right.html', {'blocks' : blocks})
def render(self, leftBlocks, centerBlocks, rightBlocks):
self.page = render_to_response('kirinki/index.html', {'copy' : '© Pablo Alvarez de Sotomayor Posadillo',
'session' : self.session_data,
'leftCol' : self.getLeftCol(leftBlocks),
'centerCol' : self.getCenterCol(centerBlocks),
'rightCol' : self.getRightCol(rightBlocks)}, context_instance=RequestContext(self.request))
return self.page
| i02sopop/Kirinki | kirinki/mainviewer.py | Python | agpl-3.0 | 2,339 | 0.013254 |
from typing import Text
from zerver.lib.test_classes import WebhookTestCase
class HomeAssistantHookTests(WebhookTestCase):
STREAM_NAME = 'homeassistant'
URL_TEMPLATE = "/api/v1/external/homeassistant?&api_key={api_key}"
FIXTURE_DIR_NAME = 'homeassistant'
def test_simplereq(self) -> None:
expected_subject = "homeassistant"
expected_message = "The sun will be shining today!"
self.send_and_test_stream_message('simplereq', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def test_req_with_title(self) -> None:
expected_subject = "Weather forecast"
expected_message = "It will be 30 degrees Celsius out there today!"
self.send_and_test_stream_message('reqwithtitle', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def get_body(self, fixture_name: Text) -> Text:
return self.fixture_data("homeassistant", fixture_name, file_type="json")
| mahim97/zulip | zerver/webhooks/homeassistant/tests.py | Python | apache-2.0 | 1,083 | 0.00554 |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
import os
import time
import struct
import sys
import re
import base64
import math
import random
import binascii
from datetime import datetime, timedelta
from pprint import pformat
from array import array
import six
import txaio
__all__ = ("xor",
"utcnow",
"utcstr",
"id",
"rid",
"newid",
"rtime",
"Stopwatch",
"Tracker",
"EqualityMixin",
"ObservableMixin",
"IdGenerator",
"generate_token",
"generate_activation_code",
"generate_serial_number",
"generate_user_password")
def encode_truncate(text, limit, encoding='utf8', return_encoded=True):
"""
Given a string, return a truncated version of the string such that
the UTF8 encoding of the string is smaller than the given limit.
This function correctly truncates even in the presence of Unicode code
points that encode to multi-byte encodings which must not be truncated
in the middle.
:param text: The Unicode string to truncate.
:type text: unicode
:param limit: The number of bytes to limit the UTF8 encoding to.
:type limit: int
:returns: The truncated Unicode string.
:rtype: unicode
"""
assert(text is None or type(text) == six.text_type)
assert(type(limit) in six.integer_types)
assert(limit >= 0)
if text is None:
return
# encode the given string in the specified encoding
s = text.encode(encoding)
# when the resulting byte string is longer than the given limit ..
if len(s) > limit:
# .. truncate, and
s = s[:limit]
# decode back, ignoring errors that result from truncation
# in the middle of multi-byte encodings
text = s.decode(encoding, 'ignore')
if return_encoded:
s = text.encode(encoding)
if return_encoded:
return s
else:
return text
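# Editor's illustrative sketch (not part of the original module): shows how
# encode_truncate() avoids cutting a multi-byte UTF8 sequence in half. The
# sample string and byte limit are arbitrary.
def _example_encode_truncate():
    text = u"d\xe9j\xe0 vu"  # "deja vu" with accented e/a; each accent is 2 bytes in UTF8
    truncated = encode_truncate(text, 5, return_encoded=False)
    # The 5-byte limit falls inside the second accented character, so the
    # whole character is dropped instead of leaving a broken byte behind.
    return truncated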
def xor(d1, d2):
"""
XOR two binary strings of arbitrary (equal) length.
:param d1: The first binary string.
:type d1: binary
:param d2: The second binary string.
:type d2: binary
:returns: XOR(d1, d2)
:rtype: binary
"""
if type(d1) != six.binary_type:
raise Exception("invalid type {} for d1 - must be binary".format(type(d1)))
if type(d2) != six.binary_type:
raise Exception("invalid type {} for d2 - must be binary".format(type(d2)))
if len(d1) != len(d2):
raise Exception("cannot XOR binary string of differing length ({} != {})".format(len(d1), len(d2)))
d1 = array('B', d1)
d2 = array('B', d2)
for i in range(len(d1)):
d1[i] ^= d2[i]
if six.PY3:
return d1.tobytes()
else:
return d1.tostring()
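# Editor's illustrative sketch (not part of the original module): xor() is its
# own inverse, so applying the same mask twice recovers the original bytes.
def _example_xor():
    plain = os.urandom(8)
    mask = os.urandom(8)
    masked = xor(plain, mask)
    assert xor(masked, mask) == plain
    return masked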
def utcstr(ts=None):
"""
Format UTC timestamp in ISO 8601 format.
Note: to parse an ISO 8601 formatted string, use the **iso8601**
module instead (e.g. ``iso8601.parse_date("2014-05-23T13:03:44.123Z")``).
:param ts: The timestamp to format.
:type ts: instance of :py:class:`datetime.datetime` or None
:returns: Timestamp formatted in ISO 8601 format.
:rtype: unicode
"""
assert(ts is None or isinstance(ts, datetime))
if ts is None:
ts = datetime.utcnow()
return u"{0}Z".format(ts.strftime(u"%Y-%m-%dT%H:%M:%S.%f")[:-3])
def utcnow():
"""
Get current time in UTC as ISO 8601 string.
:returns: Current time as string in ISO 8601 format.
:rtype: unicode
"""
return utcstr()
class IdGenerator(object):
"""
ID generator for WAMP request IDs.
WAMP request IDs are sequential per WAMP session, starting at 1 and
wrapping around at 2**53 (both value are inclusive [1, 2**53]).
The upper bound **2**53** is chosen since it is the maximum integer that can be
represented as a IEEE double such that all smaller integers are representable as well.
Hence, IDs can be safely used with languages that use IEEE double as their
main (or only) number type (JavaScript, Lua, etc).
See https://github.com/wamp-proto/wamp-proto/blob/master/spec/basic.md#ids
"""
def __init__(self):
self._next = 0 # starts at 1; next() pre-increments
def next(self):
"""
Returns next ID.
:returns: The next ID.
:rtype: int
"""
self._next += 1
if self._next > 9007199254740992:
self._next = 1
return self._next
# generator protocol
def __next__(self):
return self.next()
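# Editor's illustrative sketch (not part of the original module): a session
# would normally hold one IdGenerator and draw sequential request IDs from it.
def _example_id_generator():
    gen = IdGenerator()
    first = gen.next()   # 1
    second = next(gen)   # 2, via the generator protocol
    assert (first, second) == (1, 2)
    return gen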
#
# Performance comparison of IdGenerator.next(), id() and rid().
#
# All tests were performed on:
#
# - Ubuntu 14.04 LTS x86-64
# - Intel Core i7 920 @ 3.3GHz
#
# The tests generated 100 mio. IDs and run-time was measured
# as wallclock from Unix "time" command. In each run, a single CPU
# core was essentially at 100% load all the time (though the sys/usr
# ratio was different).
#
# PyPy 2.6.1:
#
# IdGenerator.next() 0.5s
# id() 29.4s
# rid() 106.1s
#
# CPython 2.7.10:
#
# IdGenerator.next() 49.0s
# id() 370.5s
# rid() 196.4s
#
#
# Note on the ID range [0, 2**53]. We once reduced the range to [0, 2**31].
# This lead to extremely hard to track down issues due to ID collisions!
# Here: https://github.com/crossbario/autobahn-python/issues/419#issue-90483337
#
# 8 byte mask with 53 LSBs set (WAMP requires IDs from [0, 2**53]
_WAMP_ID_MASK = struct.unpack(">Q", b"\x00\x1f\xff\xff\xff\xff\xff\xff")[0]
def rid():
"""
Generate a new random integer ID from range **[0, 2**53]**.
The generated ID is uniformly distributed over the whole range, doesn't have
a period (no pseudo-random generator is used) and cryptographically strong.
The upper bound **2**53** is chosen since it is the maximum integer that can be
represented as a IEEE double such that all smaller integers are representable as well.
Hence, IDs can be safely used with languages that use IEEE double as their
main (or only) number type (JavaScript, Lua, etc).
:returns: A random integer ID.
:rtype: int
"""
return struct.unpack("@Q", os.urandom(8))[0] & _WAMP_ID_MASK
# noinspection PyShadowingBuiltins
def id():
"""
Generate a new random integer ID from range **[0, 2**53]**.
The generated ID is based on a pseudo-random number generator (Mersenne Twister,
which has a period of 2**19937-1). It is NOT cryptographically strong, and
hence NOT suitable to generate e.g. secret keys or access tokens.
The upper bound **2**53** is chosen since it is the maximum integer that can be
represented as a IEEE double such that all smaller integers are representable as well.
Hence, IDs can be safely used with languages that use IEEE double as their
main (or only) number type (JavaScript, Lua, etc).
:returns: A random integer ID.
:rtype: int
"""
return random.randint(0, 9007199254740992)
def newid(length=16):
"""
Generate a new random string ID.
The generated ID is uniformly distributed and cryptographically strong. It is
hence usable for things like secret keys and access tokens.
:param length: The length (in chars) of the ID to generate.
:type length: int
:returns: A random string ID.
:rtype: unicode
"""
l = int(math.ceil(float(length) * 6. / 8.))
return base64.b64encode(os.urandom(l))[:length].decode('ascii')
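# Editor's illustrative sketch (not part of the original module): the three ID
# helpers trade off speed and strength -- rid() is cryptographically strong,
# id() is fast but only pseudo-random, newid() returns a random string.
def _example_ids():
    request_id = rid()   # crypto-strong integer in [0, 2**53]
    weak_id = id()       # Mersenne-Twister based integer in [0, 2**53]
    secret = newid(24)   # 24-character random string, usable as a token
    return request_id, weak_id, secret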
# a standard base36 character set
# DEFAULT_TOKEN_CHARS = string.digits + string.ascii_uppercase
# we take out the following 9 chars (leaving 27), because there
# is visual ambiguity: 0/O/D, 1/I, 8/B, 2/Z
DEFAULT_TOKEN_CHARS = u'345679ACEFGHJKLMNPQRSTUVWXY'
"""
Default set of characters to create rtokens from.
"""
DEFAULT_ZBASE32_CHARS = u'13456789abcdefghijkmnopqrstuwxyz'
"""
http://philzimmermann.com/docs/human-oriented-base-32-encoding.txt
Our choice of confusing characters to eliminate is: `0', `l', `v', and `2'. Our
reasoning is that `0' is potentially mistaken for `o', that `l' is potentially
mistaken for `1' or `i', that `v' is potentially mistaken for `u' or `r'
(especially in handwriting) and that `2' is potentially mistaken for `z'
(especially in handwriting).
Note that we choose to focus on typed and written transcription more than on
vocal, since humans already have a well-established system of disambiguating
spoken alphanumerics, such as the United States military's "Alpha Bravo Charlie
Delta" and telephone operators' "Is that 'd' as in 'dog'?".
"""
def generate_token(char_groups, chars_per_group, chars=None, sep=None, lower_case=False):
"""
Generate cryptographically strong tokens, which are strings like `M6X5-YO5W-T5IK`.
These can be used e.g. for used-only-once activation tokens or the like.
The returned token has an entropy of:
math.log(len(chars), 2.) * chars_per_group * char_groups
    bits. With the default charset and 4 characters per group, generate_token() produces
tokens with the following entropy:
character groups entropy (at least) recommended use
2 38 bits
3 57 bits one-time activation or pairing code
4 76 bits secure user password
5 95 bits
6 114 bits globally unique serial / product code
7 133 bits
Here are 3 examples:
* token(3): 9QXT-UXJW-7R4H
* token(4): LPNN-JMET-KWEP-YK45
* token(6): NXW9-74LU-6NUH-VLPV-X6AG-QUE3
:param char_groups: Number of character groups (or characters if chars_per_group == 1).
:type char_groups: int
:param chars_per_group: Number of characters per character group (or 1 to return a token with no grouping).
:type chars_per_group: int
:param chars: Characters to choose from. Default is 27 character subset
of the ISO basic Latin alphabet (see: DEFAULT_TOKEN_CHARS).
:type chars: unicode or None
:param sep: When separating groups in the token, the separater string.
:type sep: unicode
:returns: The generated token.
:rtype: unicode
"""
assert(type(char_groups) in six.integer_types)
assert(type(chars_per_group) in six.integer_types)
assert(chars is None or type(chars) == six.text_type)
chars = chars or DEFAULT_TOKEN_CHARS
if lower_case:
chars = chars.lower()
sep = sep or u'-'
rng = random.SystemRandom()
token_value = u''.join(rng.choice(chars) for _ in range(char_groups * chars_per_group))
if chars_per_group > 1:
return sep.join(map(u''.join, zip(*[iter(token_value)] * chars_per_group)))
else:
return token_value
def generate_activation_code():
return generate_token(char_groups=3, chars_per_group=4, chars=DEFAULT_TOKEN_CHARS, sep=u'-', lower_case=False)
def generate_user_password():
return generate_token(char_groups=16, chars_per_group=1, chars=DEFAULT_ZBASE32_CHARS, sep=u'-', lower_case=True)
def generate_serial_number():
return generate_token(char_groups=6, chars_per_group=4, chars=DEFAULT_TOKEN_CHARS, sep=u'-', lower_case=False)
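# Editor's illustrative sketch (not part of the original module): typical
# shapes of the tokens produced by the helpers above; the concrete characters
# are random on every call.
def _example_tokens():
    activation = generate_activation_code()  # e.g. u'9QXT-UXJW-7R4H'
    password = generate_user_password()      # 16 lower-case zbase32 chars, no separator
    serial = generate_serial_number()        # 6 groups of 4 characters
    return activation, password, serial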
# Select the most precise walltime measurement function available
# on the platform
#
if sys.platform.startswith('win'):
# On Windows, this function returns wall-clock seconds elapsed since the
# first call to this function, as a floating point number, based on the
# Win32 function QueryPerformanceCounter(). The resolution is typically
# better than one microsecond
_rtime = time.clock
_ = _rtime() # this starts wallclock
else:
# On Unix-like platforms, this used the first available from this list:
# (1) gettimeofday() -- resolution in microseconds
# (2) ftime() -- resolution in milliseconds
# (3) time() -- resolution in seconds
_rtime = time.time
rtime = _rtime
"""
Precise wallclock time.
:returns: The current wallclock in seconds. Returned values are only guaranteed
to be meaningful relative to each other.
:rtype: float
"""
class Stopwatch(object):
"""
Stopwatch based on walltime.
This can be used to do code timing and uses the most precise walltime measurement
available on the platform. This is a very light-weight object,
so create/dispose is very cheap.
"""
def __init__(self, start=True):
"""
:param start: If ``True``, immediately start the stopwatch.
:type start: bool
"""
self._elapsed = 0
if start:
self._started = rtime()
self._running = True
else:
self._started = None
self._running = False
def elapsed(self):
"""
Return total time elapsed in seconds during which the stopwatch was running.
:returns: The elapsed time in seconds.
:rtype: float
"""
if self._running:
now = rtime()
return self._elapsed + (now - self._started)
else:
return self._elapsed
def pause(self):
"""
Pauses the stopwatch and returns total time elapsed in seconds during which
the stopwatch was running.
:returns: The elapsed time in seconds.
:rtype: float
"""
if self._running:
now = rtime()
self._elapsed += now - self._started
self._running = False
return self._elapsed
else:
return self._elapsed
def resume(self):
"""
Resumes a paused stopwatch and returns total elapsed time in seconds
during which the stopwatch was running.
:returns: The elapsed time in seconds.
:rtype: float
"""
if not self._running:
self._started = rtime()
self._running = True
return self._elapsed
else:
now = rtime()
return self._elapsed + (now - self._started)
def stop(self):
"""
Stops the stopwatch and returns total time elapsed in seconds during which
the stopwatch was (previously) running.
:returns: The elapsed time in seconds.
:rtype: float
"""
elapsed = self.pause()
self._elapsed = 0
self._started = None
self._running = False
return elapsed
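# Editor's illustrative sketch (not part of the original module): timing a
# piece of work with Stopwatch while excluding a section from the measurement.
def _example_stopwatch():
    sw = Stopwatch()      # starts running immediately
    time.sleep(0.01)      # timed work
    sw.pause()
    time.sleep(0.01)      # not counted while paused
    sw.resume()
    return sw.stop()      # elapsed seconds while the stopwatch was running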
class Tracker(object):
"""
A key-based statistics tracker.
"""
def __init__(self, tracker, tracked):
"""
"""
self.tracker = tracker
self.tracked = tracked
self._timings = {}
self._offset = rtime()
self._dt_offset = datetime.utcnow()
def track(self, key):
"""
Track elapsed for key.
:param key: Key under which to track the timing.
:type key: str
"""
self._timings[key] = rtime()
def diff(self, start_key, end_key, formatted=True):
"""
Get elapsed difference between two previously tracked keys.
:param start_key: First key for interval (older timestamp).
:type start_key: str
:param end_key: Second key for interval (younger timestamp).
:type end_key: str
:param formatted: If ``True``, format computed time period and return string.
:type formatted: bool
:returns: Computed time period in seconds (or formatted string).
:rtype: float or str
"""
if end_key in self._timings and start_key in self._timings:
d = self._timings[end_key] - self._timings[start_key]
if formatted:
if d < 0.00001: # 10us
s = "%d ns" % round(d * 1000000000.)
elif d < 0.01: # 10ms
s = "%d us" % round(d * 1000000.)
elif d < 10: # 10s
s = "%d ms" % round(d * 1000.)
else:
s = "%d s" % round(d)
return s.rjust(8)
else:
return d
else:
if formatted:
return "n.a.".rjust(8)
else:
return None
def absolute(self, key):
"""
Return the UTC wall-clock time at which a tracked event occurred.
:param key: The key
:type key: str
:returns: Timezone-naive datetime.
:rtype: instance of :py:class:`datetime.datetime`
"""
elapsed = self[key]
if elapsed is None:
            raise KeyError("No such key \"%s\"." % key)
return self._dt_offset + timedelta(seconds=elapsed)
def __getitem__(self, key):
if key in self._timings:
return self._timings[key] - self._offset
else:
return None
def __iter__(self):
return self._timings.__iter__()
def __str__(self):
return pformat(self._timings)
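# Editor's illustrative sketch (not part of the original module): recording
# two checkpoints with Tracker and formatting the interval between them. The
# constructor arguments are just opaque labels here.
def _example_tracker():
    tracker = Tracker(tracker=None, tracked=None)
    tracker.track("start")
    time.sleep(0.01)
    tracker.track("end")
    return tracker.diff("start", "end")  # e.g. '   10 ms'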
class EqualityMixin(object):
"""
Mixing to add equality comparison operators to a class.
Two objects are identical under this mixin, if and only if:
1. both object have the same class
2. all non-private object attributes are equal
"""
def __eq__(self, other):
"""
Compare this object to another object for equality.
:param other: The other object to compare with.
:type other: obj
:returns: ``True`` iff the objects are equal.
:rtype: bool
"""
if not isinstance(other, self.__class__):
return False
# we only want the actual message data attributes (not eg _serialize)
for k in self.__dict__:
if not k.startswith('_'):
if not self.__dict__[k] == other.__dict__[k]:
return False
return True
# return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__)
def __ne__(self, other):
"""
Compare this object to another object for inequality.
:param other: The other object to compare with.
:type other: obj
:returns: ``True`` iff the objects are not equal.
:rtype: bool
"""
return not self.__eq__(other)
def wildcards2patterns(wildcards):
"""
Compute a list of regular expression patterns from a list of
wildcard strings. A wildcard string uses '*' as a wildcard character
matching anything.
:param wildcards: List of wildcard strings to compute regular expression patterns for.
:type wildcards: list of str
:returns: Computed regular expressions.
:rtype: list of obj
"""
    return [re.compile(wc.replace('.', r'\.').replace('*', '.*')) for wc in wildcards]
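# Editor's illustrative sketch (not part of the original module): turning
# wildcard origin strings into regular expressions and matching against them.
def _example_wildcards():
    patterns = wildcards2patterns(["*.example.com", "api.example.*"])
    return any(p.match("www.example.com") for p in patterns)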
class ObservableMixin(object):
"""
Internal utility for enabling event-listeners on particular objects
"""
# A "helper" style composable class (as opposed to a mix-in) might
# be a lot easier to deal with here. Having an __init__ method
# with a "mix in" style class can be fragile and error-prone,
# especially if it takes arguments. Since we don't use the
# "parent" beavior anywhere, I didn't add a .set_parent() (yet?)
# these are class-level globals; individual instances are
# initialized as-needed (e.g. the first .on() call adds a
# _listeners dict). Thus, subclasses don't have to call super()
# properly etc.
_parent = None
_valid_events = None
_listeners = None
def set_valid_events(self, valid_events=None):
"""
:param valid_events: if non-None, .on() or .fire() with an event
not listed in valid_events raises an exception.
"""
self._valid_events = list(valid_events)
def _check_event(self, event):
"""
Internal helper. Throws RuntimeError if we have a valid_events
        list, and the given event isn't in it. Does nothing otherwise.
"""
if self._valid_events and event not in self._valid_events:
            raise RuntimeError(
                "Invalid event '{}'. Expected one of: {}".format(
                    event,
                    ', '.join(self._valid_events),
                )
            )
def on(self, event, handler):
"""
Add a handler for an event.
:param event: the name of the event
        :param handler: a callable that's invoked when .fire() is
            called for this event. Arguments will be whatever are given
to .fire()
"""
# print("adding '{}' to '{}': {}".format(event, hash(self), handler))
self._check_event(event)
if self._listeners is None:
self._listeners = dict()
if event not in self._listeners:
self._listeners[event] = set()
self._listeners[event].add(handler)
def off(self, event=None, handler=None):
"""
Stop listening for a single event, or all events.
:param event: if None, remove all listeners. Otherwise, remove
listeners for the single named event.
:param handler: if None, remove all handlers for the named
event; otherwise remove just the given handler.
"""
if event is None:
if handler is not None:
# maybe this should mean "remove the given handler
# from any event at all that contains it"...?
raise RuntimeError(
"Can't specificy a specific handler without an event"
)
self._listeners = dict()
else:
if self._listeners is None:
return
self._check_event(event)
if event in self._listeners:
if handler is None:
del self._listeners[event]
else:
self._listeners[event].discard(handler)
def fire(self, event, *args, **kwargs):
"""
Fire a particular event.
:param event: the event to fire. All other args and kwargs are
passed on to the handler(s) for the event.
:return: a Deferred/Future gathering all async results from
all handlers and/or parent handlers.
"""
# print("firing '{}' from '{}'".format(event, hash(self)))
if self._listeners is None:
return txaio.create_future(result=[])
self._check_event(event)
res = []
for handler in self._listeners.get(event, set()):
future = txaio.as_future(handler, *args, **kwargs)
res.append(future)
if self._parent is not None:
res.append(self._parent.fire(event, *args, **kwargs))
return txaio.gather(res, consume_exceptions=False)
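# Editor's illustrative sketch (not part of the original module, and it assumes
# txaio has already been configured via txaio.use_twisted() or
# txaio.use_asyncio()): a minimal observable object with a whitelist of events.
def _example_observable():
    class _Session(ObservableMixin):
        pass
    session = _Session()
    session.set_valid_events(["join", "leave"])
    seen = []
    session.on("join", seen.append)
    d = session.fire("join", "alice")  # Deferred/Future gathering all results
    return seen, d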
class _LazyHexFormatter(object):
"""
This is used to avoid calling binascii.hexlify() on data given to
log.debug() calls unless debug is active (for example). Like::
self.log.debug(
"Some data: {octets}",
octets=_LazyHexFormatter(os.urandom(32)),
)
"""
__slots__ = ('obj',)
def __init__(self, obj):
self.obj = obj
def __str__(self):
        return binascii.hexlify(self.obj).decode('ascii')
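# Editor's illustrative sketch (not part of the original module):
# _LazyHexFormatter defers the hexlify() call until the object is actually
# rendered, e.g. when a debug log line gets formatted.
def _example_lazy_hex():
    payload = os.urandom(4)
    lazy = _LazyHexFormatter(payload)
    # hexlify() only runs here, when the placeholder is turned into text:
    return "octets={}".format(lazy)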
| meejah/AutobahnPython | autobahn/util.py | Python | mit | 24,461 | 0.001431 |
#!/usr/bin/env python3
# imports go here
import threading
#
# Free Coding session for 2015-04-09
# Written by Matt Warren
#
data = threading.local()
def message():
print(data.name)
class Foo(threading.Thread):
def run(self):
data.name = self.getName()
message()
if __name__ == '__main__':
f = Foo()
f2 = Foo()
f.start()
f2.start()
| mfwarren/FreeCoding | 2015/04/fc_2015_04_09.py | Python | mit | 378 | 0.002646 |
"""Mock helpers for Z-Wave component."""
from pydispatch import dispatcher
from tests.async_mock import MagicMock
def value_changed(value):
"""Fire a value changed."""
dispatcher.send(
MockNetwork.SIGNAL_VALUE_CHANGED,
value=value,
node=value.node,
network=value.node._network,
)
def node_changed(node):
"""Fire a node changed."""
dispatcher.send(MockNetwork.SIGNAL_NODE, node=node, network=node._network)
def notification(node_id, network=None):
"""Fire a notification."""
dispatcher.send(
MockNetwork.SIGNAL_NOTIFICATION, args={"nodeId": node_id}, network=network
)
class MockOption(MagicMock):
"""Mock Z-Wave options."""
def __init__(self, device=None, config_path=None, user_path=None, cmd_line=None):
"""Initialize a Z-Wave mock options."""
super().__init__()
self.device = device
self.config_path = config_path
self.user_path = user_path
self.cmd_line = cmd_line
def _get_child_mock(self, **kw):
"""Create child mocks with right MagicMock class."""
return MagicMock(**kw)
class MockNetwork(MagicMock):
"""Mock Z-Wave network."""
SIGNAL_NETWORK_FAILED = "mock_NetworkFailed"
SIGNAL_NETWORK_STARTED = "mock_NetworkStarted"
SIGNAL_NETWORK_READY = "mock_NetworkReady"
SIGNAL_NETWORK_STOPPED = "mock_NetworkStopped"
SIGNAL_NETWORK_RESETTED = "mock_DriverResetted"
SIGNAL_NETWORK_AWAKED = "mock_DriverAwaked"
SIGNAL_DRIVER_FAILED = "mock_DriverFailed"
SIGNAL_DRIVER_READY = "mock_DriverReady"
SIGNAL_DRIVER_RESET = "mock_DriverReset"
SIGNAL_DRIVER_REMOVED = "mock_DriverRemoved"
SIGNAL_GROUP = "mock_Group"
SIGNAL_NODE = "mock_Node"
SIGNAL_NODE_ADDED = "mock_NodeAdded"
SIGNAL_NODE_EVENT = "mock_NodeEvent"
SIGNAL_NODE_NAMING = "mock_NodeNaming"
SIGNAL_NODE_NEW = "mock_NodeNew"
SIGNAL_NODE_PROTOCOL_INFO = "mock_NodeProtocolInfo"
SIGNAL_NODE_READY = "mock_NodeReady"
SIGNAL_NODE_REMOVED = "mock_NodeRemoved"
SIGNAL_SCENE_EVENT = "mock_SceneEvent"
SIGNAL_VALUE = "mock_Value"
SIGNAL_VALUE_ADDED = "mock_ValueAdded"
SIGNAL_VALUE_CHANGED = "mock_ValueChanged"
SIGNAL_VALUE_REFRESHED = "mock_ValueRefreshed"
SIGNAL_VALUE_REMOVED = "mock_ValueRemoved"
SIGNAL_POLLING_ENABLED = "mock_PollingEnabled"
SIGNAL_POLLING_DISABLED = "mock_PollingDisabled"
SIGNAL_CREATE_BUTTON = "mock_CreateButton"
SIGNAL_DELETE_BUTTON = "mock_DeleteButton"
SIGNAL_BUTTON_ON = "mock_ButtonOn"
SIGNAL_BUTTON_OFF = "mock_ButtonOff"
SIGNAL_ESSENTIAL_NODE_QUERIES_COMPLETE = "mock_EssentialNodeQueriesComplete"
SIGNAL_NODE_QUERIES_COMPLETE = "mock_NodeQueriesComplete"
SIGNAL_AWAKE_NODES_QUERIED = "mock_AwakeNodesQueried"
SIGNAL_ALL_NODES_QUERIED = "mock_AllNodesQueried"
SIGNAL_ALL_NODES_QUERIED_SOME_DEAD = "mock_AllNodesQueriedSomeDead"
SIGNAL_MSG_COMPLETE = "mock_MsgComplete"
SIGNAL_NOTIFICATION = "mock_Notification"
SIGNAL_CONTROLLER_COMMAND = "mock_ControllerCommand"
SIGNAL_CONTROLLER_WAITING = "mock_ControllerWaiting"
STATE_STOPPED = 0
STATE_FAILED = 1
STATE_RESETTED = 3
STATE_STARTED = 5
STATE_AWAKED = 7
STATE_READY = 10
def __init__(self, options=None, *args, **kwargs):
"""Initialize a Z-Wave mock network."""
super().__init__()
self.options = options
self.state = MockNetwork.STATE_STOPPED
class MockNode(MagicMock):
"""Mock Z-Wave node."""
def __init__(
self,
*,
node_id=567,
name="Mock Node",
manufacturer_id="ABCD",
product_id="123",
product_type="678",
command_classes=None,
can_wake_up_value=True,
manufacturer_name="Test Manufacturer",
product_name="Test Product",
network=None,
**kwargs,
):
"""Initialize a Z-Wave mock node."""
super().__init__()
self.node_id = node_id
self.name = name
self.manufacturer_id = manufacturer_id
self.product_id = product_id
self.product_type = product_type
self.manufacturer_name = manufacturer_name
self.product_name = product_name
self.can_wake_up_value = can_wake_up_value
self._command_classes = command_classes or []
if network is not None:
self._network = network
for attr_name in kwargs:
setattr(self, attr_name, kwargs[attr_name])
def has_command_class(self, command_class):
"""Test if mock has a command class."""
return command_class in self._command_classes
def get_battery_level(self):
"""Return mock battery level."""
return 42
def can_wake_up(self):
"""Return whether the node can wake up."""
return self.can_wake_up_value
def _get_child_mock(self, **kw):
"""Create child mocks with right MagicMock class."""
return MagicMock(**kw)
class MockValue(MagicMock):
"""Mock Z-Wave value."""
_mock_value_id = 1234
def __init__(
self,
*,
label="Mock Value",
node=None,
instance=0,
index=0,
value_id=None,
**kwargs,
):
"""Initialize a Z-Wave mock value."""
super().__init__()
self.label = label
self.node = node
self.instance = instance
self.index = index
if value_id is None:
MockValue._mock_value_id += 1
value_id = MockValue._mock_value_id
self.value_id = value_id
self.object_id = value_id
for attr_name in kwargs:
setattr(self, attr_name, kwargs[attr_name])
def _get_child_mock(self, **kw):
"""Create child mocks with right MagicMock class."""
return MagicMock(**kw)
def refresh(self):
"""Mock refresh of node value."""
value_changed(self)
class MockEntityValues:
"""Mock Z-Wave entity values."""
def __init__(self, **kwargs):
"""Initialize the mock zwave values."""
self.primary = None
self.wakeup = None
self.battery = None
self.power = None
for name in kwargs:
setattr(self, name, kwargs[name])
def __iter__(self):
"""Allow iteration over all values."""
return iter(self.__dict__.values())
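# Editor's illustrative sketch (not part of the original module; the wiring is
# hypothetical): building a mock network, node and value, then firing the
# value-changed signal the way the helpers above expect.
def _example_mock_value_changed():
    network = MockNetwork(MockOption())
    node = MockNode(node_id=7, network=network)
    value = MockValue(label="Temperature", node=node, data=21.5)
    value_changed(value)  # dispatches MockNetwork.SIGNAL_VALUE_CHANGED
    return MockEntityValues(primary=value)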
| sdague/home-assistant | tests/mock/zwave.py | Python | apache-2.0 | 6,380 | 0.00047 |
import re, os
def cmd_install(ensoapi):
seldict = ensoapi.get_selection()
text = seldict.get("text", "").strip()
lines = text.split("\n")
if len(lines) < 3:
msg = "There was no command to install!"
ensoapi.display_message(msg)
ensoapi.set_selection({
"text":"Enso: %s" % msg
})
return
while lines[0].strip() == "":
lines.pop(0)
if lines[0].strip() != "# Enso command file":
msg = "There was no command to install!"
ensoapi.display_message(msg)
ensoapi.set_selection({
"text":"Enso: %s" % msg
})
return
    command_file_name = re.sub(r"^\s*#\s*", "", lines[1].strip())
if not command_file_name.endswith(".py"):
msg = "Couldn't install this command %s" % command_file_name
ensoapi.display_message(msg)
ensoapi.set_selection({
"text":"Enso: %s" % msg
})
return
cmd_folder = ensoapi.get_enso_commands_folder()
command_file_path = os.path.join(cmd_folder, command_file_name)
shortname = os.path.splitext(command_file_name)[0]
if os.path.exists(command_file_path):
msg = "You already have a command named %s" % shortname
ensoapi.display_message(msg)
ensoapi.set_selection({
"text":"Enso: %s" % msg
})
return
installed_commands = [x['cmdName'] for x in ensoapi.get_commands_from_text(text)]
if len(installed_commands) == 1:
install_message = "%s is now a command" % installed_commands[0]
else:
install_message = "%s are now commands" % ", ".join(installed_commands)
fp = open(command_file_path, "w")
fp.write(text)
fp.close()
ensoapi.display_message(install_message)
ensoapi.set_selection({
"text":"Enso: %s" % install_message
})
def cmd_footnote(ensoapi):
"Wrap text in my in-HTML footnote style"
seldict = ensoapi.get_selection()
text = seldict.get("text", "")
html = seldict.get("html", text)
if not text:
ensoapi.display_message("No selection!")
else:
result = '<span style="color:red" title="%s">*</span>' % html
ensoapi.set_selection({
"text":result
})
def cmd_echo(ensoapi):
"Displays the current selection dictionary"
sel = ensoapi.get_selection()
ensoapi.display_message(str(sel))
def cmd_learn_as(ensoapi, new_command):
"Remember current selection as a command"
sel = ensoapi.get_selection().get("text", "")
if not sel:
ensoapi.display_message("No selection!")
return
cmd_folder = ensoapi.get_enso_commands_folder()
learned_commands = os.path.join(cmd_folder, "learned_commands.py")
write_os = False
if not os.path.exists(learned_commands): write_os = True
fp = open(learned_commands,"a")
if write_os: fp.write("import os\n")
fp.write("def cmd_%s(ensoapi): os.system('gnome-open %s')\n" % (new_command.replace(" ","_"),sel))
fp.close()
ensoapi.display_message("%s is now a command" % new_command)
| curaloucura/Enso-Ubuntu | ensocommands/random.py | Python | bsd-3-clause | 2,893 | 0.023159 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from .dos_plotter import DOSPlotter
__author__ = "Yuji Ikeda"
class TotalDOSPlotter(DOSPlotter):
def load_data(self, data_file='total_dos.dat'):
super(TotalDOSPlotter, self).load_data(data_file)
return self
def run(self):
variables = self._variables
primitive = self.create_primitive()
natoms = primitive.get_number_of_atoms()
symbols = primitive.get_chemical_symbols()
print("natoms:", natoms)
print("symbols:", symbols)
self.set_figure_name_prefix("total_dos")
self.set_plot_symbol(False)
self.set_plot_atom(False)
self.load_data(variables["data_file"])
variables.update({
"freq_unit": "THz",
"unit": 1.0,
"natoms": natoms,
"symbols": symbols,
})
self.update_variables(variables)
# self.set_is_horizontal(True)
# self.plot_dos()
self.set_is_horizontal(False)
self.create_figure()
return
from scipy.constants import eV, Planck
THz2meV = Planck / eV * 1e+15 # 4.135667662340164
# meV
variables.update({
"freq_unit": "meV",
"unit": THz2meV,
})
scale = 4.0
variables["f_min"] *= scale
variables["f_max"] *= scale
variables["d_freq"] *= scale
variables["dos_min"] /= scale
variables["dos_max"] /= scale
variables["dos_ticks"] /= scale
self.update_variables(variables)
# self.set_is_horizontal(True)
# self.plot_dos()
self.set_is_horizontal(False)
self.create_figure()
| yuzie007/ph_plotter | ph_plotter/total_dos_plotter.py | Python | mit | 1,815 | 0.002204 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is an Astropy affiliated package.
"""
# Affiliated packages may add whatever they like to this file, but
# should keep this content at the top.
# ----------------------------------------------------------------------------
from ._astropy_init import *
# ----------------------------------------------------------------------------
# For egg_info test builds to pass, put package imports here.
if not _ASTROPY_SETUP_:
from example_mod import *
from astrotoyz import tasks
from astrotoyz import viewer
from astrotoyz import detect_sources
from astrotoyz import io
from astrotoyz import data_types
    from astrotoyz import config
| fred3m/astro-toyz | astrotoyz/__init__.py | Python | bsd-3-clause | 727 | 0.001376 |
class Solution(object):
def wiggleMaxLength(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) < 2: return len(nums)
prev_diff = nums[1] - nums[0]
if prev_diff != 0:
longest = 2
else:
longest = 1
for i in range(2, len(nums)):
curr_diff = (nums[i] - nums[i-1])
if (curr_diff > 0 and prev_diff <= 0) or (curr_diff < 0 and prev_diff >= 0):
longest += 1
prev_diff = curr_diff
        return longest
| Mlieou/leetcode_python | leetcode/python/ex_376.py | Python | mit | 570 | 0.007018 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-19 16:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Beer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='Brewery',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
('location', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Style',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Venue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField()),
('venue_type', models.CharField(choices=[('bar', 'Bar'), ('brew', 'Brewery'), ('truck', 'Food Truck')], max_length=5)),
('beers', models.ManyToManyField(related_name='venues', to='tracker.Beer')),
],
),
migrations.AddField(
model_name='beer',
name='brewery',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tracker.Brewery'),
),
migrations.AddField(
model_name='beer',
name='style',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='tracker.Style'),
),
]
| djangophx/beer-tracker | tracker/migrations/0001_initial.py | Python | mit | 2,278 | 0.003512 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper classes for tensor shape inference."""
from . import compat, dtypes
from tensorboard.compat.proto import tensor_shape_pb2
# @tf_export("Dimension")
class Dimension(object):
"""Represents the value of one dimension in a TensorShape."""
def __init__(self, value):
"""Creates a new Dimension with the given value."""
if value is None:
self._value = None
elif isinstance(value, dtypes.DType):
raise TypeError("Cannot convert %s to Dimension" % value)
else:
self._value = int(value)
if (
not isinstance(value, compat.bytes_or_text_types)
and self._value != value
):
raise ValueError("Ambiguous dimension: %s" % value)
if self._value < 0:
raise ValueError("Dimension %d must be >= 0" % self._value)
def __repr__(self):
return "Dimension(%s)" % repr(self._value)
def __str__(self):
value = self._value
return "?" if value is None else str(value)
def __eq__(self, other):
"""Returns true if `other` has the same known value as this
Dimension."""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value == other.value
def __ne__(self, other):
"""Returns true if `other` has a different known value from `self`."""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value != other.value
def __int__(self):
return self._value
# This is needed for Windows.
# See https://github.com/tensorflow/tensorflow/pull/9780
def __long__(self):
return self._value
def __index__(self):
# Allow use in Python 3 range
return self._value
@property
def value(self):
"""The value of this dimension, or None if it is unknown."""
return self._value
def is_convertible_with(self, other):
"""Returns true if `other` is convertible with this Dimension.
Two known Dimensions are convertible if they have the same value.
An unknown Dimension is convertible with all other Dimensions.
Args:
other: Another Dimension.
Returns:
True if this Dimension and `other` are convertible.
"""
other = as_dimension(other)
return (
self._value is None
or other.value is None
or self._value == other.value
)
def assert_is_convertible_with(self, other):
"""Raises an exception if `other` is not convertible with this
Dimension.
Args:
other: Another Dimension.
Raises:
ValueError: If `self` and `other` are not convertible (see
is_convertible_with).
"""
if not self.is_convertible_with(other):
raise ValueError(
"Dimensions %s and %s are not convertible" % (self, other)
)
def merge_with(self, other):
"""Returns a Dimension that combines the information in `self` and
`other`.
Dimensions are combined as follows:
```python
tf.Dimension(n) .merge_with(tf.Dimension(n)) == tf.Dimension(n)
tf.Dimension(n) .merge_with(tf.Dimension(None)) == tf.Dimension(n)
tf.Dimension(None).merge_with(tf.Dimension(n)) == tf.Dimension(n)
tf.Dimension(None).merge_with(tf.Dimension(None)) == tf.Dimension(None)
tf.Dimension(n) .merge_with(tf.Dimension(m)) # raises ValueError for n != m
```
Args:
other: Another Dimension.
Returns:
A Dimension containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not convertible (see
is_convertible_with).
"""
other = as_dimension(other)
self.assert_is_convertible_with(other)
if self._value is None:
return Dimension(other.value)
else:
return Dimension(self._value)
def __add__(self, other):
"""Returns the sum of `self` and `other`.
Dimensions are summed as follows:
```python
tf.Dimension(m) + tf.Dimension(n) == tf.Dimension(m + n)
tf.Dimension(m) + tf.Dimension(None) == tf.Dimension(None)
tf.Dimension(None) + tf.Dimension(n) == tf.Dimension(None)
tf.Dimension(None) + tf.Dimension(None) == tf.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the sum of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value + other.value)
def __radd__(self, other):
"""Returns the sum of `other` and `self`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the sum of `self` and `other`.
"""
return self + other
def __sub__(self, other):
"""Returns the subtraction of `other` from `self`.
Dimensions are subtracted as follows:
```python
tf.Dimension(m) - tf.Dimension(n) == tf.Dimension(m - n)
tf.Dimension(m) - tf.Dimension(None) == tf.Dimension(None)
tf.Dimension(None) - tf.Dimension(n) == tf.Dimension(None)
tf.Dimension(None) - tf.Dimension(None) == tf.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the subtraction of `other` from `self`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value - other.value)
def __rsub__(self, other):
"""Returns the subtraction of `self` from `other`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the subtraction of `self` from `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(other.value - self._value)
def __mul__(self, other):
"""Returns the product of `self` and `other`.
        Dimensions are multiplied as follows:
```python
tf.Dimension(m) * tf.Dimension(n) == tf.Dimension(m * n)
tf.Dimension(m) * tf.Dimension(None) == tf.Dimension(None)
tf.Dimension(None) * tf.Dimension(n) == tf.Dimension(None)
tf.Dimension(None) * tf.Dimension(None) == tf.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the product of `self` and `other`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value * other.value)
def __rmul__(self, other):
"""Returns the product of `self` and `other`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the product of `self` and `other`.
"""
return self * other
def __floordiv__(self, other):
"""Returns the quotient of `self` and `other` rounded down.
Dimensions are divided as follows:
```python
tf.Dimension(m) // tf.Dimension(n) == tf.Dimension(m // n)
tf.Dimension(m) // tf.Dimension(None) == tf.Dimension(None)
tf.Dimension(None) // tf.Dimension(n) == tf.Dimension(None)
tf.Dimension(None) // tf.Dimension(None) == tf.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value // other.value)
def __rfloordiv__(self, other):
"""Returns the quotient of `other` and `self` rounded down.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(other.value // self._value)
def __div__(self, other):
"""DEPRECATED: Use `__floordiv__` via `x // y` instead.
        This function exists only for backwards compatibility purposes; new code
        should use `__floordiv__` via the syntax `x // y`. Using `x // y`
        communicates clearly that the result rounds down, and is forward compatible
        with Python 3.
Args:
other: Another `Dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
return self // other
def __mod__(self, other):
"""Returns `self` modulo `other`.
Dimension moduli are computed as follows:
```python
tf.Dimension(m) % tf.Dimension(n) == tf.Dimension(m % n)
tf.Dimension(m) % tf.Dimension(None) == tf.Dimension(None)
tf.Dimension(None) % tf.Dimension(n) == tf.Dimension(None)
tf.Dimension(None) % tf.Dimension(None) == tf.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is `self` modulo `other`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value % other.value)
def __rmod__(self, other):
"""Returns `other` modulo `self`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is `other` modulo `self`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
return other % self
def __lt__(self, other):
"""Returns True if `self` is known to be less than `other`.
Dimensions are compared as follows:
```python
(tf.Dimension(m) < tf.Dimension(n)) == (m < n)
(tf.Dimension(m) < tf.Dimension(None)) == None
(tf.Dimension(None) < tf.Dimension(n)) == None
(tf.Dimension(None) < tf.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value < other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value < other.value
def __le__(self, other):
"""Returns True if `self` is known to be less than or equal to `other`.
Dimensions are compared as follows:
```python
(tf.Dimension(m) <= tf.Dimension(n)) == (m <= n)
(tf.Dimension(m) <= tf.Dimension(None)) == None
(tf.Dimension(None) <= tf.Dimension(n)) == None
(tf.Dimension(None) <= tf.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value <= other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value <= other.value
def __gt__(self, other):
"""Returns True if `self` is known to be greater than `other`.
Dimensions are compared as follows:
```python
(tf.Dimension(m) > tf.Dimension(n)) == (m > n)
(tf.Dimension(m) > tf.Dimension(None)) == None
(tf.Dimension(None) > tf.Dimension(n)) == None
(tf.Dimension(None) > tf.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value > other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value > other.value
def __ge__(self, other):
"""Returns True if `self` is known to be greater than or equal to
`other`.
Dimensions are compared as follows:
```python
(tf.Dimension(m) >= tf.Dimension(n)) == (m >= n)
(tf.Dimension(m) >= tf.Dimension(None)) == None
(tf.Dimension(None) >= tf.Dimension(n)) == None
(tf.Dimension(None) >= tf.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value >= other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value >= other.value
def __reduce__(self):
return Dimension, (self._value,)
def as_dimension(value):
"""Converts the given value to a Dimension.
A Dimension input will be returned unmodified.
An input of `None` will be converted to an unknown Dimension.
An integer input will be converted to a Dimension with that value.
Args:
value: The value to be converted.
Returns:
A Dimension corresponding to the given value.
"""
if isinstance(value, Dimension):
return value
else:
return Dimension(value)
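# Editor's illustrative sketch (not part of the original module): Dimension
# arithmetic propagates unknown (None) values, and merge_with() combines
# partial knowledge from two dimensions.
def _example_dimension_arithmetic():
    known = Dimension(32)
    unknown = Dimension(None)
    assert (known + Dimension(16)).value == 48
    assert (known * unknown).value is None
    assert unknown.merge_with(known).value == 32
    return as_dimension(64) // known  # Dimension(2)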
# @tf_export("TensorShape")
class TensorShape(object):
"""Represents the shape of a `Tensor`.
A `TensorShape` represents a possibly-partial shape specification for a
`Tensor`. It may be one of the following:
* *Fully-known shape:* has a known number of dimensions and a known size
for each dimension. e.g. `TensorShape([16, 256])`
* *Partially-known shape:* has a known number of dimensions, and an unknown
size for one or more dimension. e.g. `TensorShape([None, 256])`
* *Unknown shape:* has an unknown number of dimensions, and an unknown
size in all dimensions. e.g. `TensorShape(None)`
If a tensor is produced by an operation of type `"Foo"`, its shape
may be inferred if there is a registered shape function for
`"Foo"`. See @{$adding_an_op#shape-functions-in-c$`Shape functions in C++`}
for details of shape functions and how to register them. Alternatively,
the shape may be set explicitly using @{tf.Tensor.set_shape}.
"""
def __init__(self, dims):
"""Creates a new TensorShape with the given dimensions.
Args:
dims: A list of Dimensions, or None if the shape is unspecified.
DEPRECATED: A single integer is treated as a singleton list.
Raises:
TypeError: If dims cannot be converted to a list of dimensions.
"""
# TODO(irving): Eliminate the single integer special case.
if dims is None:
self._dims = None
elif isinstance(dims, compat.bytes_or_text_types):
raise TypeError(
"A string has ambiguous TensorShape, please wrap in a "
"list or convert to an int: %s" % dims
)
elif isinstance(dims, tensor_shape_pb2.TensorShapeProto):
if dims.unknown_rank:
self._dims = None
else:
self._dims = [
# Protos store variable-size dimensions as -1
as_dimension(dim.size if dim.size != -1 else None)
for dim in dims.dim
]
elif isinstance(dims, TensorShape):
self._dims = dims.dims
else:
try:
dims_iter = iter(dims)
except TypeError:
# Treat as a singleton dimension
self._dims = [as_dimension(dims)]
else:
# Got a list of dimensions
self._dims = [as_dimension(d) for d in dims_iter]
self._ndims = None
def __repr__(self):
return "TensorShape(%r)" % self._dims
def __str__(self):
if self.ndims is None:
return "<unknown>"
elif self.ndims == 1:
return "(%s,)" % self._dims[0]
else:
return "(%s)" % ", ".join(str(d) for d in self._dims)
@property
def dims(self):
"""Returns a list of Dimensions, or None if the shape is
unspecified."""
return self._dims
@dims.setter
def dims(self, dims):
self._dims = dims
self._ndims = None
@property
def ndims(self):
"""Returns the rank of this shape, or None if it is unspecified."""
if self._dims is None:
return None
else:
if self._ndims is None:
self._ndims = len(self._dims)
return self._ndims
def __len__(self):
"""Returns the rank of this shape, or raises ValueError if
unspecified."""
if self._dims is None:
raise ValueError(
"Cannot take the length of Shape with unknown rank."
)
return self.ndims
def __bool__(self):
"""Returns True if this shape contains non-zero information."""
return self._dims is not None
# Python 3 wants __bool__, Python 2.7 wants __nonzero__
__nonzero__ = __bool__
def __iter__(self):
"""Returns `self.dims` if the rank is known, otherwise raises
ValueError."""
if self._dims is None:
raise ValueError("Cannot iterate over a shape with unknown rank.")
else:
return iter(self._dims)
def __getitem__(self, key):
"""Returns the value of a dimension or a shape, depending on the key.
Args:
key: If `key` is an integer, returns the dimension at that index;
otherwise if `key` is a slice, returns a TensorShape whose
dimensions are those selected by the slice from `self`.
Returns:
A dimension if `key` is an integer, or a `TensorShape` if `key` is a
slice.
Raises:
ValueError: If `key` is a slice, and any of its elements are negative, or
if `self` is completely unknown and the step is set.
"""
if self._dims is not None:
if isinstance(key, slice):
return TensorShape(self._dims[key])
else:
return self._dims[key]
else:
if isinstance(key, slice):
start = key.start if key.start is not None else 0
stop = key.stop
if key.step is not None:
# TODO(mrry): Handle these maybe.
raise ValueError("Steps are not yet handled")
if stop is None:
# NOTE(mrry): This implies that TensorShape(None) is convertible with
# TensorShape(None)[1:], which is obviously not true. It would be
# possible to track the number of dimensions symbolically,
# and perhaps we should do that.
return unknown_shape()
elif start < 0 or stop < 0:
# TODO(mrry): Handle this better, as it will be useful for handling
# suffixes of otherwise unknown shapes.
return unknown_shape()
else:
return unknown_shape(ndims=stop - start)
else:
return Dimension(None)
def num_elements(self):
"""Returns the total number of elements, or none for incomplete
shapes."""
if self.is_fully_defined():
size = 1
for dim in self._dims:
size *= dim.value
return size
else:
return None
def merge_with(self, other):
"""Returns a `TensorShape` combining the information in `self` and
`other`.
The dimensions in `self` and `other` are merged elementwise,
according to the rules defined for `Dimension.merge_with()`.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not convertible.
"""
other = as_shape(other)
if self._dims is None:
return other
else:
try:
self.assert_same_rank(other)
new_dims = []
for i, dim in enumerate(self._dims):
new_dims.append(dim.merge_with(other[i]))
return TensorShape(new_dims)
except ValueError:
raise ValueError(
"Shapes %s and %s are not convertible" % (self, other)
)
def concatenate(self, other):
"""Returns the concatenation of the dimension in `self` and `other`.
*N.B.* If either `self` or `other` is completely unknown,
concatenation will discard information about the other shape. In
future, we might support concatenation that preserves this
information for use with slicing.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` whose dimensions are the concatenation of the
dimensions in `self` and `other`.
"""
# TODO(mrry): Handle the case where we concatenate a known shape with a
# completely unknown shape, so that we can use the partial information.
other = as_shape(other)
if self._dims is None or other.dims is None:
return unknown_shape()
else:
return TensorShape(self._dims + other.dims)
def assert_same_rank(self, other):
"""Raises an exception if `self` and `other` do not have convertible
ranks.
Args:
other: Another `TensorShape`.
Raises:
ValueError: If `self` and `other` do not represent shapes with the
same rank.
"""
other = as_shape(other)
if self.ndims is not None and other.ndims is not None:
if self.ndims != other.ndims:
raise ValueError(
"Shapes %s and %s must have the same rank" % (self, other)
)
def assert_has_rank(self, rank):
"""Raises an exception if `self` is not convertible with the given
`rank`.
Args:
rank: An integer.
Raises:
ValueError: If `self` does not represent a shape with the given `rank`.
"""
if self.ndims not in (None, rank):
raise ValueError("Shape %s must have rank %d" % (self, rank))
def with_rank(self, rank):
"""Returns a shape based on `self` with the given rank.
This method promotes a completely unknown shape to one with a
known rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with the given rank.
Raises:
ValueError: If `self` does not represent a shape with the given `rank`.
"""
try:
return self.merge_with(unknown_shape(ndims=rank))
except ValueError:
raise ValueError("Shape %s must have rank %d" % (self, rank))
def with_rank_at_least(self, rank):
"""Returns a shape based on `self` with at least the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at least the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at least the given
`rank`.
"""
if self.ndims is not None and self.ndims < rank:
raise ValueError(
"Shape %s must have rank at least %d" % (self, rank)
)
else:
return self
def with_rank_at_most(self, rank):
"""Returns a shape based on `self` with at most the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at most the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at most the given
`rank`.
"""
if self.ndims is not None and self.ndims > rank:
raise ValueError(
"Shape %s must have rank at most %d" % (self, rank)
)
else:
return self
def is_convertible_with(self, other):
"""Returns True iff `self` is convertible with `other`.
Two possibly-partially-defined shapes are convertible if there
exists a fully-defined shape that both shapes can represent. Thus,
convertibility allows the shape inference code to reason about
partially-defined shapes. For example:
* TensorShape(None) is convertible with all shapes.
* TensorShape([None, None]) is convertible with all two-dimensional
shapes, such as TensorShape([32, 784]), and also TensorShape(None). It is
not convertible with, for example, TensorShape([None]) or
TensorShape([None, None, None]).
* TensorShape([32, None]) is convertible with all two-dimensional shapes
with size 32 in the 0th dimension, and also TensorShape([None, None])
and TensorShape(None). It is not convertible with, for example,
TensorShape([32]), TensorShape([32, None, 1]) or TensorShape([64, None]).
* TensorShape([32, 784]) is convertible with itself, and also
TensorShape([32, None]), TensorShape([None, 784]), TensorShape([None,
None]) and TensorShape(None). It is not convertible with, for example,
TensorShape([32, 1, 784]) or TensorShape([None]).
The convertibility relation is reflexive and symmetric, but not
transitive. For example, TensorShape([32, 784]) is convertible with
TensorShape(None), and TensorShape(None) is convertible with
TensorShape([4, 4]), but TensorShape([32, 784]) is not convertible with
TensorShape([4, 4]).
Args:
other: Another TensorShape.
Returns:
True iff `self` is convertible with `other`.
"""
other = as_shape(other)
if self._dims is not None and other.dims is not None:
if self.ndims != other.ndims:
return False
for x_dim, y_dim in zip(self._dims, other.dims):
if not x_dim.is_convertible_with(y_dim):
return False
return True
def assert_is_convertible_with(self, other):
"""Raises exception if `self` and `other` do not represent the same
shape.
This method can be used to assert that there exists a shape that both
`self` and `other` represent.
Args:
other: Another TensorShape.
Raises:
ValueError: If `self` and `other` do not represent the same shape.
"""
if not self.is_convertible_with(other):
raise ValueError(
"Shapes %s and %s are inconvertible" % (self, other)
)
def most_specific_convertible_shape(self, other):
"""Returns the most specific TensorShape convertible with `self` and
`other`.
        * TensorShape([None, 1]) is the most specific TensorShape convertible with
          both TensorShape([2, 1]) and TensorShape([5, 1]). Note that
          TensorShape(None) is also convertible with the above-mentioned TensorShapes.
        * TensorShape([1, 2, 3]) is the most specific TensorShape convertible with
          both TensorShape([1, 2, 3]) and TensorShape([1, 2, 3]). There are other,
          less specific TensorShapes convertible with the above-mentioned TensorShapes,
          e.g. TensorShape([1, 2, None]) and TensorShape(None).
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` which is the most specific convertible shape of `self`
and `other`.
"""
other = as_shape(other)
if (
self._dims is None
or other.dims is None
or self.ndims != other.ndims
):
return unknown_shape()
dims = [(Dimension(None))] * self.ndims
for i, (d1, d2) in enumerate(zip(self._dims, other.dims)):
if d1 is not None and d2 is not None and d1 == d2:
dims[i] = d1
return TensorShape(dims)
def is_fully_defined(self):
"""Returns True iff `self` is fully defined in every dimension."""
return self._dims is not None and all(
dim.value is not None for dim in self._dims
)
def assert_is_fully_defined(self):
"""Raises an exception if `self` is not fully defined in every
dimension.
Raises:
ValueError: If `self` does not have a known value for every dimension.
"""
if not self.is_fully_defined():
raise ValueError("Shape %s is not fully defined" % self)
def as_list(self):
"""Returns a list of integers or `None` for each dimension.
Returns:
A list of integers or `None` for each dimension.
Raises:
ValueError: If `self` is an unknown shape with an unknown rank.
"""
if self._dims is None:
raise ValueError(
"as_list() is not defined on an unknown TensorShape."
)
return [dim.value for dim in self._dims]
def as_proto(self):
"""Returns this shape as a `TensorShapeProto`."""
if self._dims is None:
return tensor_shape_pb2.TensorShapeProto(unknown_rank=True)
else:
return tensor_shape_pb2.TensorShapeProto(
dim=[
tensor_shape_pb2.TensorShapeProto.Dim(
size=-1 if d.value is None else d.value
)
for d in self._dims
]
)
def __eq__(self, other):
"""Returns True if `self` is equivalent to `other`."""
try:
other = as_shape(other)
except TypeError:
return NotImplemented
return self._dims == other.dims
def __ne__(self, other):
"""Returns True if `self` is known to be different from `other`."""
try:
other = as_shape(other)
except TypeError:
return NotImplemented
if self.ndims is None or other.ndims is None:
raise ValueError(
"The inequality of unknown TensorShapes is undefined."
)
if self.ndims != other.ndims:
return True
return self._dims != other.dims
def __reduce__(self):
return TensorShape, (self._dims,)
def as_shape(shape):
"""Converts the given object to a TensorShape."""
if isinstance(shape, TensorShape):
return shape
else:
return TensorShape(shape)
def unknown_shape(ndims=None):
"""Returns an unknown TensorShape, optionally with a known rank.
Args:
ndims: (Optional) If specified, the number of dimensions in the shape.
Returns:
An unknown TensorShape.
"""
if ndims is None:
return TensorShape(None)
else:
return TensorShape([Dimension(None)] * ndims)
_SCALAR_SHAPE = TensorShape([])
def scalar():
"""Returns a shape representing a scalar."""
return _SCALAR_SHAPE
def vector(length):
"""Returns a shape representing a vector.
Args:
length: The length of the vector, which may be None if unknown.
Returns:
A TensorShape representing a vector of the given length.
"""
return TensorShape([length])
def matrix(rows, cols):
"""Returns a shape representing a matrix.
Args:
rows: The number of rows in the matrix, which may be None if unknown.
cols: The number of columns in the matrix, which may be None if unknown.
Returns:
A TensorShape representing a matrix of the given size.
"""
return TensorShape([rows, cols])
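# Illustrative usage sketch (not part of the original stub): it exercises only the
# helpers defined above (`TensorShape`, `unknown_shape`, `vector`) and runs when
# the file is executed directly, so importing the module is unaffected.
if __name__ == "__main__":
    a = TensorShape([32, None])                # partially-defined 2-D shape
    b = TensorShape([None, 784])
    merged = a.merge_with(b)                   # element-wise merge -> [32, 784]
    print(merged.as_list())                    # [32, 784]
    print(a.is_convertible_with(b))            # True: [32, 784] satisfies both
    print(a.most_specific_convertible_shape(TensorShape([32, 10])).as_list())  # [32, None]
    print(merged.concatenate(vector(3)).as_list())  # [32, 784, 3]
    print(unknown_shape(ndims=2).ndims)        # 2, with both dimensions unknown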
| tensorflow/tensorboard | tensorboard/compat/tensorflow_stub/tensor_shape.py | Python | apache-2.0 | 34,472 | 0.000754 |
# -*- coding: utf-8 -*-
import fauxfactory
import pytest
from selenium.common.exceptions import NoSuchElementException
import cfme.tests.configure.test_access_control as tac
from cfme.base.login import BaseLoggedInPage
from cfme import test_requirements
from cfme.utils import error
from cfme.utils.blockers import BZ
from cfme.utils.log import logger
from cfme.utils.update import update
pytestmark = [test_requirements.service, pytest.mark.tier(3), pytest.mark.ignore_stream("upstream")]
@pytest.yield_fixture(scope="function")
def catalog_item(appliance, dialog, catalog):
cat_item = appliance.collections.catalog_items.create(
appliance.collections.catalog_items.GENERIC,
name='test_item_{}'.format(fauxfactory.gen_alphanumeric()),
description="my catalog item", display_in=True,
catalog=catalog, dialog=dialog
)
yield cat_item
# fixture cleanup
try:
cat_item.delete()
except NoSuchElementException:
logger.warning('test_catalog_item: catalog_item yield fixture cleanup, catalog item "{}" '
'not found'.format(cat_item.name))
@pytest.yield_fixture(scope="function")
def catalog_bundle(appliance, catalog_item):
""" Create catalog bundle
Args:
        catalog_item: catalog item used as the resource for bundle creation
"""
bundle_name = "bundle" + fauxfactory.gen_alphanumeric()
catalog_bundle = appliance.collections.catalog_bundles.create(
bundle_name, description="catalog_bundle",
display_in=True, catalog=catalog_item.catalog,
dialog=catalog_item.dialog,
catalog_items=[catalog_item.name])
yield catalog_bundle
# fixture cleanup
try:
catalog_bundle.delete()
except NoSuchElementException:
        logger.warning('test_catalog_item: catalog_bundle yield fixture cleanup, catalog bundle "{}" '
                       'not found'.format(catalog_bundle.name))
@pytest.fixture(scope="function")
def check_catalog_visibility(request, user_restricted, tag):
def _check_catalog_visibility(test_item_object):
"""
Args:
test_item_object: object for visibility check
"""
category_name = ' '.join((tag.category.display_name, '*'))
test_item_object.add_tag(category_name, tag.display_name)
with user_restricted:
assert test_item_object.exists
test_item_object.remove_tag(category_name, tag.display_name)
with user_restricted:
assert not test_item_object.exists
return _check_catalog_visibility
@pytest.mark.skip('Catalog items are converted to collections. Refactoring is required')
def test_create_catalog_item(catalog_item):
catalog_item.create()
def test_update_catalog_item(catalog_item):
with update(catalog_item):
catalog_item.description = "my edited item description"
def test_add_button_group(catalog_item, appliance):
button_name = catalog_item.add_button_group()
view = appliance.browser.create_view(BaseLoggedInPage)
if appliance.version.is_in_series('5.8'):
message = 'Buttons Group "{}" was added'.format(button_name)
else:
message = 'Button Group "{}" was added'.format(button_name)
view.flash.assert_success_message(message)
def test_add_button(catalog_item, appliance):
button_name = catalog_item.add_button()
view = appliance.browser.create_view(BaseLoggedInPage)
if appliance.version.is_in_series('5.8'):
message = 'Button "{}" was added'.format(button_name)
else:
message = 'Custom Button "{}" was added'.format(button_name)
view.flash.assert_success_message(message)
def test_edit_tags(catalog_item):
catalog_item.add_tag("Cost Center *", "Cost Center 001")
catalog_item.remove_tag("Cost Center *", "Cost Center 001")
@pytest.mark.skip('Catalog items are converted to collections. Refactoring is required')
@pytest.mark.meta(blockers=[BZ(1531512, forced_streams=["5.8", "5.9", "upstream"])])
def test_catalog_item_duplicate_name(catalog_item):
catalog_item.create()
with error.expected("Name has already been taken"):
catalog_item.create()
@pytest.mark.skip('Catalog items are converted to collections. Refactoring is required')
@pytest.mark.meta(blockers=[BZ(1460891, forced_streams=["5.8", "upstream"])])
def test_permissions_catalog_item_add(catalog_item):
"""Test that a catalog can be added only with the right permissions."""
tac.single_task_permission_test([['Everything', 'Services', 'Catalogs Explorer',
'Catalog Items']],
{'Add Catalog Item': catalog_item.create})
def test_tagvis_catalog_items(check_catalog_visibility, catalog_item):
""" Checks catalog item tag visibility for restricted user
Prerequisites:
Catalog, tag, role, group and restricted user should be created
Steps:
1. As admin add tag to catalog item
2. Login as restricted user, catalog item is visible for user
3. As admin remove tag
4. Login as restricted user, catalog item is not visible for user
"""
check_catalog_visibility(catalog_item)
def test_tagvis_catalog_bundle(check_catalog_visibility, catalog_bundle):
""" Checks catalog bundle tag visibility for restricted user
Prerequisites:
Catalog, tag, role, group, catalog item and restricted user should be created
Steps:
1. As admin add tag to catalog bundle
2. Login as restricted user, catalog bundle is visible for user
3. As admin remove tag
4. Login as restricted user, catalog bundle is not visible for user
"""
check_catalog_visibility(catalog_bundle)
| mfalesni/cfme_tests | cfme/tests/services/test_catalog_item.py | Python | gpl-2.0 | 5,757 | 0.001563 |
#!/usr/bin/env python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script uses the Vision API's label detection capabilities to find a label
based on an image's content.
To run the example, install the necessary libraries by running:
pip install -r requirements.txt
Run the script on an image to get a label, e.g.:
./label.py <path-to-image>
"""
# [START import_libraries]
import argparse
import base64
import httplib2
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
# The url template to retrieve the discovery document for trusted testers.
DISCOVERY_URL = 'https://{api}.googleapis.com/$discovery/rest?version={apiVersion}'
# [END import_libraries]
def main(photo_file):
"""Run a label request on a single image"""
# [START authenticate]
credentials = GoogleCredentials.get_application_default()
service = discovery.build('vision', 'v1', credentials=credentials,
discoveryServiceUrl=DISCOVERY_URL)
# [END authenticate]
# [START construct_request]
with open(photo_file, 'rb') as image:
image_content = base64.b64encode(image.read())
service_request = service.images().annotate(body={
'requests': [{
'image': {
'content': image_content.decode('UTF-8')
},
'features': [{
'type': 'LABEL_DETECTION',
'maxResults': 1
}]
}]
})
# [END construct_request]
# [START parse_response]
response = service_request.execute()
label = response['responses'][0]['labelAnnotations'][0]['description']
print('Found label: %s for %s' % (label, photo_file))
return 0
# [END parse_response]
# [START run_application]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('image_file', help='The image you\'d like to label.')
args = parser.parse_args()
main(args.image_file)
# [END run_application]
| csilzen/whatdoyousee | python/label/label.py | Python | apache-2.0 | 2,624 | 0.001143 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'G:\WorkDir\gas-sensing_resistors\ST_spectrum\ST_2400.ui'
#
# Created: Tue Apr 12 22:50:19 2016
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_UI_sens2400(object):
def setupUi(self, UI_sens2400):
UI_sens2400.setObjectName(_fromUtf8("UI_sens2400"))
UI_sens2400.resize(486, 360)
UI_sens2400.setMinimumSize(QtCore.QSize(480, 360))
UI_sens2400.setMaximumSize(QtCore.QSize(486, 360))
font = QtGui.QFont()
font.setPointSize(12)
UI_sens2400.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icon/icons/yb.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
UI_sens2400.setWindowIcon(icon)
self.verticalLayout_8 = QtGui.QVBoxLayout(UI_sens2400)
self.verticalLayout_8.setObjectName(_fromUtf8("verticalLayout_8"))
self.verticalLayout_7 = QtGui.QVBoxLayout()
self.verticalLayout_7.setSpacing(20)
self.verticalLayout_7.setObjectName(_fromUtf8("verticalLayout_7"))
self.horizontalLayout_14 = QtGui.QHBoxLayout()
self.horizontalLayout_14.setSpacing(20)
self.horizontalLayout_14.setObjectName(_fromUtf8("horizontalLayout_14"))
self.horizontalLayout_12 = QtGui.QHBoxLayout()
self.horizontalLayout_12.setSpacing(2)
self.horizontalLayout_12.setObjectName(_fromUtf8("horizontalLayout_12"))
self.label_13 = QtGui.QLabel(UI_sens2400)
self.label_13.setMaximumSize(QtCore.QSize(16777215, 22))
self.label_13.setObjectName(_fromUtf8("label_13"))
self.horizontalLayout_12.addWidget(self.label_13)
self.res_range = QtGui.QComboBox(UI_sens2400)
self.res_range.setMaximumSize(QtCore.QSize(16777215, 22))
self.res_range.setObjectName(_fromUtf8("res_range"))
self.res_range.addItem(_fromUtf8(""))
self.res_range.addItem(_fromUtf8(""))
self.res_range.addItem(_fromUtf8(""))
self.res_range.addItem(_fromUtf8(""))
self.res_range.addItem(_fromUtf8(""))
self.res_range.addItem(_fromUtf8(""))
self.res_range.addItem(_fromUtf8(""))
self.res_range.addItem(_fromUtf8(""))
self.horizontalLayout_12.addWidget(self.res_range)
self.horizontalLayout_14.addLayout(self.horizontalLayout_12)
self.res_detect = QtGui.QPushButton(UI_sens2400)
self.res_detect.setMaximumSize(QtCore.QSize(16777215, 22))
self.res_detect.setObjectName(_fromUtf8("res_detect"))
self.horizontalLayout_14.addWidget(self.res_detect)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_14.addItem(spacerItem)
self.verticalLayout_7.addLayout(self.horizontalLayout_14)
self.horizontalLayout_13 = QtGui.QHBoxLayout()
self.horizontalLayout_13.setSpacing(10)
self.horizontalLayout_13.setObjectName(_fromUtf8("horizontalLayout_13"))
self.horizontalLayout_9 = QtGui.QHBoxLayout()
self.horizontalLayout_9.setSpacing(2)
self.horizontalLayout_9.setObjectName(_fromUtf8("horizontalLayout_9"))
self.label_3 = QtGui.QLabel(UI_sens2400)
self.label_3.setMaximumSize(QtCore.QSize(32, 22))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout_9.addWidget(self.label_3)
self.detV = QtGui.QLineEdit(UI_sens2400)
self.detV.setEnabled(False)
self.detV.setMinimumSize(QtCore.QSize(60, 22))
self.detV.setMaximumSize(QtCore.QSize(65535, 22))
self.detV.setObjectName(_fromUtf8("detV"))
self.horizontalLayout_9.addWidget(self.detV)
self.label_7 = QtGui.QLabel(UI_sens2400)
self.label_7.setMaximumSize(QtCore.QSize(16777215, 22))
self.label_7.setObjectName(_fromUtf8("label_7"))
self.horizontalLayout_9.addWidget(self.label_7)
self.horizontalLayout_13.addLayout(self.horizontalLayout_9)
self.horizontalLayout_10 = QtGui.QHBoxLayout()
self.horizontalLayout_10.setSpacing(2)
self.horizontalLayout_10.setObjectName(_fromUtf8("horizontalLayout_10"))
self.label_6 = QtGui.QLabel(UI_sens2400)
self.label_6.setMaximumSize(QtCore.QSize(32, 22))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.horizontalLayout_10.addWidget(self.label_6)
self.detI = QtGui.QLineEdit(UI_sens2400)
self.detI.setEnabled(False)
self.detI.setMinimumSize(QtCore.QSize(60, 22))
self.detI.setMaximumSize(QtCore.QSize(65535, 22))
self.detI.setText(_fromUtf8(""))
self.detI.setObjectName(_fromUtf8("detI"))
self.horizontalLayout_10.addWidget(self.detI)
self.label_11 = QtGui.QLabel(UI_sens2400)
self.label_11.setMaximumSize(QtCore.QSize(16777215, 22))
self.label_11.setObjectName(_fromUtf8("label_11"))
self.horizontalLayout_10.addWidget(self.label_11)
self.horizontalLayout_13.addLayout(self.horizontalLayout_10)
self.horizontalLayout_11 = QtGui.QHBoxLayout()
self.horizontalLayout_11.setSpacing(2)
self.horizontalLayout_11.setObjectName(_fromUtf8("horizontalLayout_11"))
self.label_2 = QtGui.QLabel(UI_sens2400)
self.label_2.setMaximumSize(QtCore.QSize(32, 22))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout_11.addWidget(self.label_2)
self.detR = QtGui.QLineEdit(UI_sens2400)
self.detR.setEnabled(False)
self.detR.setMinimumSize(QtCore.QSize(60, 22))
self.detR.setMaximumSize(QtCore.QSize(65535, 22))
self.detR.setObjectName(_fromUtf8("detR"))
self.horizontalLayout_11.addWidget(self.detR)
self.label_12 = QtGui.QLabel(UI_sens2400)
self.label_12.setMaximumSize(QtCore.QSize(16777215, 22))
self.label_12.setObjectName(_fromUtf8("label_12"))
self.horizontalLayout_11.addWidget(self.label_12)
self.horizontalLayout_13.addLayout(self.horizontalLayout_11)
self.verticalLayout_7.addLayout(self.horizontalLayout_13)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setSpacing(0)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label = QtGui.QLabel(UI_sens2400)
font = QtGui.QFont()
font.setPointSize(12)
self.label.setFont(font)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout.addWidget(self.label)
self.mode2or4 = QtGui.QComboBox(UI_sens2400)
font = QtGui.QFont()
font.setPointSize(12)
self.mode2or4.setFont(font)
self.mode2or4.setObjectName(_fromUtf8("mode2or4"))
self.mode2or4.addItem(_fromUtf8(""))
self.mode2or4.addItem(_fromUtf8(""))
self.horizontalLayout.addWidget(self.mode2or4)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.horizontalLayout_2.addLayout(self.horizontalLayout)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.label_8 = QtGui.QLabel(UI_sens2400)
font = QtGui.QFont()
font.setPointSize(12)
self.label_8.setFont(font)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.horizontalLayout_3.addWidget(self.label_8)
self.output_mode = QtGui.QComboBox(UI_sens2400)
font = QtGui.QFont()
font.setPointSize(12)
self.output_mode.setFont(font)
self.output_mode.setObjectName(_fromUtf8("output_mode"))
self.output_mode.addItem(_fromUtf8(""))
self.output_mode.addItem(_fromUtf8(""))
self.horizontalLayout_3.addWidget(self.output_mode)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem2)
self.horizontalLayout_2.addLayout(self.horizontalLayout_3)
self.verticalLayout_7.addLayout(self.horizontalLayout_2)
self.horizontalLayout_7 = QtGui.QHBoxLayout()
self.horizontalLayout_7.setSpacing(10)
self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
self.verticalLayout_6 = QtGui.QVBoxLayout()
self.verticalLayout_6.setSpacing(20)
self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
self.IV_mode = QtGui.QRadioButton(UI_sens2400)
font = QtGui.QFont()
font.setPointSize(12)
self.IV_mode.setFont(font)
self.IV_mode.setChecked(True)
self.IV_mode.setObjectName(_fromUtf8("IV_mode"))
self.verticalLayout_6.addWidget(self.IV_mode)
self.groupBox_2 = QtGui.QGroupBox(UI_sens2400)
self.groupBox_2.setMaximumSize(QtCore.QSize(16777215, 120))
font = QtGui.QFont()
font.setPointSize(12)
self.groupBox_2.setFont(font)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox_2)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.label_4 = QtGui.QLabel(self.groupBox_2)
self.label_4.setMinimumSize(QtCore.QSize(64, 29))
self.label_4.setMaximumSize(QtCore.QSize(64, 29))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.horizontalLayout_4.addWidget(self.label_4)
self.VI_voltage = QtGui.QLineEdit(self.groupBox_2)
self.VI_voltage.setMinimumSize(QtCore.QSize(67, 22))
self.VI_voltage.setMaximumSize(QtCore.QSize(67, 22))
self.VI_voltage.setObjectName(_fromUtf8("VI_voltage"))
self.horizontalLayout_4.addWidget(self.VI_voltage)
self.voltage_unit = QtGui.QComboBox(self.groupBox_2)
self.voltage_unit.setObjectName(_fromUtf8("voltage_unit"))
self.voltage_unit.addItem(_fromUtf8(""))
self.voltage_unit.addItem(_fromUtf8(""))
self.horizontalLayout_4.addWidget(self.voltage_unit)
spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem3)
self.verticalLayout.addLayout(self.horizontalLayout_4)
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.label_9 = QtGui.QLabel(self.groupBox_2)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.horizontalLayout_5.addWidget(self.label_9)
self.VI_current_range = QtGui.QComboBox(self.groupBox_2)
self.VI_current_range.setObjectName(_fromUtf8("VI_current_range"))
self.VI_current_range.addItem(_fromUtf8(""))
self.VI_current_range.addItem(_fromUtf8(""))
self.VI_current_range.addItem(_fromUtf8(""))
self.VI_current_range.addItem(_fromUtf8(""))
self.VI_current_range.addItem(_fromUtf8(""))
self.VI_current_range.addItem(_fromUtf8(""))
self.VI_current_range.addItem(_fromUtf8(""))
self.VI_current_range.addItem(_fromUtf8(""))
self.VI_current_range.addItem(_fromUtf8(""))
self.VI_current_range.addItem(_fromUtf8(""))
self.horizontalLayout_5.addWidget(self.VI_current_range)
spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem4)
self.verticalLayout.addLayout(self.horizontalLayout_5)
self.verticalLayout_2.addLayout(self.verticalLayout)
self.verticalLayout_6.addWidget(self.groupBox_2)
self.horizontalLayout_7.addLayout(self.verticalLayout_6)
self.verticalLayout_5 = QtGui.QVBoxLayout()
self.verticalLayout_5.setSpacing(20)
self.verticalLayout_5.setContentsMargins(-1, -1, -1, 0)
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.VI_mode = QtGui.QRadioButton(UI_sens2400)
font = QtGui.QFont()
font.setPointSize(12)
self.VI_mode.setFont(font)
self.VI_mode.setObjectName(_fromUtf8("VI_mode"))
self.verticalLayout_5.addWidget(self.VI_mode)
self.groupBox_3 = QtGui.QGroupBox(UI_sens2400)
self.groupBox_3.setMaximumSize(QtCore.QSize(16777215, 120))
font = QtGui.QFont()
font.setPointSize(12)
self.groupBox_3.setFont(font)
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.verticalLayout_4 = QtGui.QVBoxLayout(self.groupBox_3)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.horizontalLayout_6 = QtGui.QHBoxLayout()
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.label_5 = QtGui.QLabel(self.groupBox_3)
self.label_5.setMinimumSize(QtCore.QSize(64, 29))
self.label_5.setMaximumSize(QtCore.QSize(64, 29))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.horizontalLayout_6.addWidget(self.label_5)
self.IV_current = QtGui.QLineEdit(self.groupBox_3)
self.IV_current.setMinimumSize(QtCore.QSize(67, 22))
self.IV_current.setMaximumSize(QtCore.QSize(67, 22))
self.IV_current.setObjectName(_fromUtf8("IV_current"))
self.horizontalLayout_6.addWidget(self.IV_current)
self.current_unit = QtGui.QComboBox(self.groupBox_3)
self.current_unit.setObjectName(_fromUtf8("current_unit"))
self.current_unit.addItem(_fromUtf8(""))
self.current_unit.addItem(_fromUtf8(""))
self.current_unit.addItem(_fromUtf8(""))
self.horizontalLayout_6.addWidget(self.current_unit)
spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem5)
self.verticalLayout_3.addLayout(self.horizontalLayout_6)
self.horizontalLayout_15 = QtGui.QHBoxLayout()
self.horizontalLayout_15.setObjectName(_fromUtf8("horizontalLayout_15"))
self.label_10 = QtGui.QLabel(self.groupBox_3)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.horizontalLayout_15.addWidget(self.label_10)
self.IV_voltage_range = QtGui.QComboBox(self.groupBox_3)
self.IV_voltage_range.setObjectName(_fromUtf8("IV_voltage_range"))
self.IV_voltage_range.addItem(_fromUtf8(""))
self.IV_voltage_range.addItem(_fromUtf8(""))
self.IV_voltage_range.addItem(_fromUtf8(""))
self.IV_voltage_range.addItem(_fromUtf8(""))
self.IV_voltage_range.addItem(_fromUtf8(""))
self.IV_voltage_range.addItem(_fromUtf8(""))
self.IV_voltage_range.addItem(_fromUtf8(""))
self.IV_voltage_range.addItem(_fromUtf8(""))
self.IV_voltage_range.addItem(_fromUtf8(""))
self.IV_voltage_range.addItem(_fromUtf8(""))
self.horizontalLayout_15.addWidget(self.IV_voltage_range)
spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_15.addItem(spacerItem6)
self.verticalLayout_3.addLayout(self.horizontalLayout_15)
self.verticalLayout_4.addLayout(self.verticalLayout_3)
self.verticalLayout_5.addWidget(self.groupBox_3)
self.horizontalLayout_7.addLayout(self.verticalLayout_5)
self.verticalLayout_7.addLayout(self.horizontalLayout_7)
self.horizontalLayout_8 = QtGui.QHBoxLayout()
self.horizontalLayout_8.setObjectName(_fromUtf8("horizontalLayout_8"))
spacerItem7 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_8.addItem(spacerItem7)
self.ST2400_save = QtGui.QPushButton(UI_sens2400)
self.ST2400_save.setObjectName(_fromUtf8("ST2400_save"))
self.horizontalLayout_8.addWidget(self.ST2400_save)
spacerItem8 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_8.addItem(spacerItem8)
self.ST2400_exit = QtGui.QPushButton(UI_sens2400)
self.ST2400_exit.setObjectName(_fromUtf8("ST2400_exit"))
self.horizontalLayout_8.addWidget(self.ST2400_exit)
spacerItem9 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_8.addItem(spacerItem9)
self.verticalLayout_7.addLayout(self.horizontalLayout_8)
self.verticalLayout_8.addLayout(self.verticalLayout_7)
self.retranslateUi(UI_sens2400)
QtCore.QObject.connect(self.ST2400_exit, QtCore.SIGNAL(_fromUtf8("clicked()")), UI_sens2400.close)
QtCore.QMetaObject.connectSlotsByName(UI_sens2400)
def retranslateUi(self, UI_sens2400):
UI_sens2400.setWindowTitle(_translate("UI_sens2400", "灵敏度-温度谱仪器设置", None))
self.label_13.setText(_translate("UI_sens2400", "电阻范围", None))
self.res_range.setItemText(0, _translate("UI_sens2400", "100", None))
self.res_range.setItemText(1, _translate("UI_sens2400", "1K", None))
self.res_range.setItemText(2, _translate("UI_sens2400", "10K", None))
self.res_range.setItemText(3, _translate("UI_sens2400", "100K", None))
self.res_range.setItemText(4, _translate("UI_sens2400", "1M", None))
self.res_range.setItemText(5, _translate("UI_sens2400", "10M", None))
self.res_range.setItemText(6, _translate("UI_sens2400", "100M", None))
self.res_range.setItemText(7, _translate("UI_sens2400", "200M", None))
self.res_detect.setText(_translate("UI_sens2400", "电阻探测", None))
self.label_3.setText(_translate("UI_sens2400", "电压", None))
self.label_7.setText(_translate("UI_sens2400", "V", None))
self.label_6.setText(_translate("UI_sens2400", "电流", None))
self.label_11.setText(_translate("UI_sens2400", "A", None))
self.label_2.setText(_translate("UI_sens2400", "电阻", None))
self.label_12.setText(_translate("UI_sens2400", "Ω", None))
self.label.setText(_translate("UI_sens2400", "测量模式", None))
self.mode2or4.setItemText(0, _translate("UI_sens2400", "两线制", None))
self.mode2or4.setItemText(1, _translate("UI_sens2400", "四线制", None))
self.label_8.setText(_translate("UI_sens2400", "输出模式", None))
self.output_mode.setItemText(0, _translate("UI_sens2400", "脉冲输出", None))
self.output_mode.setItemText(1, _translate("UI_sens2400", "连续输出", None))
self.IV_mode.setText(_translate("UI_sens2400", "通电压,测电流", None))
self.groupBox_2.setTitle(_translate("UI_sens2400", "I-V模式设置", None))
self.label_4.setText(_translate("UI_sens2400", "激励电压", None))
self.voltage_unit.setItemText(0, _translate("UI_sens2400", "mV", None))
self.voltage_unit.setItemText(1, _translate("UI_sens2400", "V", None))
self.label_9.setText(_translate("UI_sens2400", "电流量程", None))
self.VI_current_range.setItemText(0, _translate("UI_sens2400", "AUTO", None))
self.VI_current_range.setItemText(1, _translate("UI_sens2400", "10pA", None))
self.VI_current_range.setItemText(2, _translate("UI_sens2400", "100pA", None))
self.VI_current_range.setItemText(3, _translate("UI_sens2400", "1uA", None))
self.VI_current_range.setItemText(4, _translate("UI_sens2400", "10uA", None))
self.VI_current_range.setItemText(5, _translate("UI_sens2400", "100uA", None))
self.VI_current_range.setItemText(6, _translate("UI_sens2400", "1mA", None))
self.VI_current_range.setItemText(7, _translate("UI_sens2400", "10mA", None))
self.VI_current_range.setItemText(8, _translate("UI_sens2400", "100mA", None))
self.VI_current_range.setItemText(9, _translate("UI_sens2400", "1A", None))
self.VI_mode.setText(_translate("UI_sens2400", "通电流,测电压", None))
self.groupBox_3.setTitle(_translate("UI_sens2400", "V-I模式设置", None))
self.label_5.setText(_translate("UI_sens2400", "激励电流", None))
self.current_unit.setItemText(0, _translate("UI_sens2400", "uA", None))
self.current_unit.setItemText(1, _translate("UI_sens2400", "mA", None))
self.current_unit.setItemText(2, _translate("UI_sens2400", "A", None))
self.label_10.setText(_translate("UI_sens2400", "电压量程", None))
self.IV_voltage_range.setItemText(0, _translate("UI_sens2400", "AUTO", None))
self.IV_voltage_range.setItemText(1, _translate("UI_sens2400", "1uV", None))
self.IV_voltage_range.setItemText(2, _translate("UI_sens2400", "10uV", None))
self.IV_voltage_range.setItemText(3, _translate("UI_sens2400", "100uV", None))
self.IV_voltage_range.setItemText(4, _translate("UI_sens2400", "1mV", None))
self.IV_voltage_range.setItemText(5, _translate("UI_sens2400", "10mV", None))
self.IV_voltage_range.setItemText(6, _translate("UI_sens2400", "100mV", None))
self.IV_voltage_range.setItemText(7, _translate("UI_sens2400", "1V", None))
self.IV_voltage_range.setItemText(8, _translate("UI_sens2400", "10V", None))
self.IV_voltage_range.setItemText(9, _translate("UI_sens2400", "210V", None))
self.ST2400_save.setText(_translate("UI_sens2400", "保存", None))
self.ST2400_exit.setText(_translate("UI_sens2400", "退出", None))
import mypic_rc
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
UI_sens2400 = QtGui.QDialog()
ui = Ui_UI_sens2400()
ui.setupUi(UI_sens2400)
UI_sens2400.show()
sys.exit(app.exec_())
| cygnushan/measurement | ST_spectrum/Ui_ST_2400.py | Python | mit | 22,827 | 0.002075 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import codecs
import csv
import fnmatch
import inspect
import locale
import os
import openerp.sql_db as sql_db
import re
import logging
import tarfile
import tempfile
import threading
from babel.messages import extract
from collections import defaultdict
from datetime import datetime
from lxml import etree
from os.path import join
from xml.sax.saxutils import escape
import config
import misc
from misc import SKIPPED_ELEMENT_TYPES
import osutil
import openerp
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
# used to notify web client that these translations should be loaded in the UI
WEB_TRANSLATION_COMMENT = "openerp-web"
SKIPPED_ELEMENTS = ('script', 'style')
_LOCALE2WIN32 = {
'af_ZA': 'Afrikaans_South Africa',
'sq_AL': 'Albanian_Albania',
'ar_SA': 'Arabic_Saudi Arabia',
'eu_ES': 'Basque_Spain',
'be_BY': 'Belarusian_Belarus',
'bs_BA': 'Bosnian_Bosnia and Herzegovina',
'bg_BG': 'Bulgarian_Bulgaria',
'ca_ES': 'Catalan_Spain',
'hr_HR': 'Croatian_Croatia',
'zh_CN': 'Chinese_China',
'zh_TW': 'Chinese_Taiwan',
'cs_CZ': 'Czech_Czech Republic',
'da_DK': 'Danish_Denmark',
'nl_NL': 'Dutch_Netherlands',
'et_EE': 'Estonian_Estonia',
'fa_IR': 'Farsi_Iran',
'ph_PH': 'Filipino_Philippines',
'fi_FI': 'Finnish_Finland',
'fr_FR': 'French_France',
'fr_BE': 'French_France',
'fr_CH': 'French_France',
'fr_CA': 'French_France',
'ga': 'Scottish Gaelic',
'gl_ES': 'Galician_Spain',
'ka_GE': 'Georgian_Georgia',
'de_DE': 'German_Germany',
'el_GR': 'Greek_Greece',
'gu': 'Gujarati_India',
'he_IL': 'Hebrew_Israel',
'hi_IN': 'Hindi',
'hu': 'Hungarian_Hungary',
'is_IS': 'Icelandic_Iceland',
'id_ID': 'Indonesian_indonesia',
'it_IT': 'Italian_Italy',
'ja_JP': 'Japanese_Japan',
'kn_IN': 'Kannada',
'km_KH': 'Khmer',
'ko_KR': 'Korean_Korea',
'lo_LA': 'Lao_Laos',
'lt_LT': 'Lithuanian_Lithuania',
'lat': 'Latvian_Latvia',
'ml_IN': 'Malayalam_India',
'mi_NZ': 'Maori',
'mn': 'Cyrillic_Mongolian',
'no_NO': 'Norwegian_Norway',
'nn_NO': 'Norwegian-Nynorsk_Norway',
'pl': 'Polish_Poland',
'pt_PT': 'Portuguese_Portugal',
'pt_BR': 'Portuguese_Brazil',
'ro_RO': 'Romanian_Romania',
'ru_RU': 'Russian_Russia',
'sr_CS': 'Serbian (Cyrillic)_Serbia and Montenegro',
'sk_SK': 'Slovak_Slovakia',
'sl_SI': 'Slovenian_Slovenia',
    # should find more specific locales for Spanish countries,
    # but better than nothing
'es_AR': 'Spanish_Spain',
'es_BO': 'Spanish_Spain',
'es_CL': 'Spanish_Spain',
'es_CO': 'Spanish_Spain',
'es_CR': 'Spanish_Spain',
'es_DO': 'Spanish_Spain',
'es_EC': 'Spanish_Spain',
'es_ES': 'Spanish_Spain',
'es_GT': 'Spanish_Spain',
'es_HN': 'Spanish_Spain',
'es_MX': 'Spanish_Spain',
'es_NI': 'Spanish_Spain',
'es_PA': 'Spanish_Spain',
'es_PE': 'Spanish_Spain',
'es_PR': 'Spanish_Spain',
'es_PY': 'Spanish_Spain',
'es_SV': 'Spanish_Spain',
'es_UY': 'Spanish_Spain',
'es_VE': 'Spanish_Spain',
'sv_SE': 'Swedish_Sweden',
'ta_IN': 'English_Australia',
'th_TH': 'Thai_Thailand',
'tr_TR': 'Turkish_Turkey',
'uk_UA': 'Ukrainian_Ukraine',
'vi_VN': 'Vietnamese_Viet Nam',
'tlh_TLH': 'Klingon',
}
# These are not all English small words, just those that could potentially be isolated within views
ENGLISH_SMALL_WORDS = set("as at by do go if in me no of ok on or to up us we".split())
class UNIX_LINE_TERMINATOR(csv.excel):
lineterminator = '\n'
csv.register_dialect("UNIX", UNIX_LINE_TERMINATOR)
#
# Helper functions for translating fields
#
def encode(s):
if isinstance(s, unicode):
return s.encode('utf8')
return s
# which elements are translated inline
TRANSLATED_ELEMENTS = {
'abbr', 'b', 'bdi', 'bdo', 'br', 'cite', 'code', 'data', 'del', 'dfn', 'em',
'font', 'i', 'ins', 'kbd', 'keygen', 'mark', 'math', 'meter', 'output',
'progress', 'q', 'ruby', 's', 'samp', 'small', 'span', 'strong', 'sub',
'sup', 'time', 'u', 'var', 'wbr', 'text',
}
# which attributes must be translated
TRANSLATED_ATTRS = {
'string', 'help', 'sum', 'avg', 'confirm', 'placeholder', 'alt', 'title',
}
avoid_pattern = re.compile(r"[\s\n]*<!DOCTYPE", re.IGNORECASE)
class XMLTranslator(object):
""" A sequence of serialized XML/HTML items, with some of them to translate
(todo) and others already translated (done). The purpose of this object
is to simplify the handling of phrasing elements (like <b>) that must be
translated together with their surrounding text.
For instance, the content of the "div" element below will be translated
as a whole (without surrounding spaces):
<div>
Lorem ipsum dolor sit amet, consectetur adipiscing elit,
<b>sed</b> do eiusmod tempor incididunt ut labore et dolore
magna aliqua. <span class="more">Ut enim ad minim veniam,
<em>quis nostrud exercitation</em> ullamco laboris nisi ut
aliquip ex ea commodo consequat.</span>
</div>
"""
def __init__(self, callback, method, parser=None):
self.callback = callback # callback function to translate terms
self.method = method # serialization method ('xml' or 'html')
self.parser = parser # parser for validating translations
self._done = [] # translated strings
self._todo = [] # todo strings that come after _done
self.needs_trans = False # whether todo needs translation
def todo(self, text, needs_trans=True):
self._todo.append(text)
if needs_trans and text.strip():
self.needs_trans = True
def all_todo(self):
return not self._done
def get_todo(self):
return "".join(self._todo)
def flush(self):
if self._todo:
todo = "".join(self._todo)
done = self.process_text(todo) if self.needs_trans else todo
self._done.append(done)
del self._todo[:]
self.needs_trans = False
def done(self, text):
self.flush()
self._done.append(text)
def get_done(self):
""" Complete the translations and return the result. """
self.flush()
return "".join(self._done)
def process_text(self, text):
""" Translate text.strip(), but keep the surrounding spaces from text. """
term = text.strip()
trans = term and self.callback(term)
if trans:
try:
# parse the translation to validate it
etree.fromstring("<div>%s</div>" % encode(trans), parser=self.parser)
except etree.ParseError:
# fallback: escape the translation
trans = escape(trans)
text = text.replace(term, trans)
return text
def process_attr(self, attr):
""" Translate the given node attribute value. """
term = attr.strip()
trans = term and self.callback(term)
return attr.replace(term, trans) if trans else attr
def process(self, node):
""" Process the given xml `node`: collect `todo` and `done` items. """
if (
isinstance(node, SKIPPED_ELEMENT_TYPES) or
node.tag in SKIPPED_ELEMENTS or
node.get("t-translation", "").strip() == "off" or
node.tag == "attribute" and node.get("name") not in TRANSLATED_ATTRS
):
# do not translate the contents of the node
tail, node.tail = node.tail, None
self.done(etree.tostring(node, method=self.method))
self.todo(escape(tail or ""))
return
# process children nodes locally in child_trans
child_trans = XMLTranslator(self.callback, self.method, parser=self.parser)
if node.text:
if avoid_pattern.match(node.text):
child_trans.done(escape(node.text)) # do not translate <!DOCTYPE...
else:
child_trans.todo(escape(node.text))
for child in node:
child_trans.process(child)
if (child_trans.all_todo() and
node.tag in TRANSLATED_ELEMENTS and
not any(attr.startswith("t-") for attr in node.attrib)):
# serialize the node element as todo
self.todo(self.serialize(node.tag, node.attrib, child_trans.get_todo()),
child_trans.needs_trans)
else:
# complete translations and serialize result as done
for attr in TRANSLATED_ATTRS:
if node.get(attr):
node.set(attr, self.process_attr(node.get(attr)))
self.done(self.serialize(node.tag, node.attrib, child_trans.get_done()))
# add node tail as todo
self.todo(escape(node.tail or ""))
def serialize(self, tag, attrib, content):
""" Return a serialized element with the given `tag`, attributes
`attrib`, and already-serialized `content`.
"""
if content:
elem = etree.tostring(etree.Element(tag, attrib), method='xml')
assert elem.endswith("/>")
return "%s>%s</%s>" % (elem[:-2], content, tag)
else:
return etree.tostring(etree.Element(tag, attrib), method=self.method)
def xml_translate(callback, value):
""" Translate an XML value (string), using `callback` for translating text
appearing in `value`.
"""
if not value:
return value
trans = XMLTranslator(callback, 'xml')
try:
root = etree.fromstring(encode(value))
trans.process(root)
return trans.get_done()
except etree.ParseError:
# fallback for translated terms: use an HTML parser and wrap the term
wrapped = "<div>%s</div>" % encode(value)
root = etree.fromstring(wrapped, etree.HTMLParser(encoding='utf-8'))
trans.process(root[0][0]) # html > body > div
return trans.get_done()[5:-6] # remove tags <div> and </div>
def html_translate(callback, value):
""" Translate an HTML value (string), using `callback` for translating text
appearing in `value`.
"""
if not value:
return value
parser = etree.HTMLParser(encoding='utf-8')
trans = XMLTranslator(callback, 'html', parser)
wrapped = "<div>%s</div>" % encode(value)
root = etree.fromstring(wrapped, parser)
trans.process(root[0][0]) # html > body > div
return trans.get_done()[5:-6] # remove tags <div> and </div>
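# Illustrative note (added for illustration, not part of the original file):
# xml_translate() and html_translate() hand whole phrasing runs to the callback
# rather than individual text nodes. Translating '<div>Hello <b>world</b></div>'
# therefore invokes the callback once with the single term 'Hello <b>world</b>',
# and attributes listed in TRANSLATED_ATTRS (string, help, placeholder, ...) are
# translated through process_attr(); tags and surrounding whitespace are kept.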
#
# Warning: better use self.pool.get('ir.translation')._get_source if you can
#
def translate(cr, name, source_type, lang, source=None):
if source and name:
cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s and src=%s and md5(src)=md5(%s)', (lang, source_type, str(name), source, source))
elif name:
cr.execute('select value from ir_translation where lang=%s and type=%s and name=%s', (lang, source_type, str(name)))
elif source:
cr.execute('select value from ir_translation where lang=%s and type=%s and src=%s and md5(src)=md5(%s)', (lang, source_type, source, source))
res_trans = cr.fetchone()
res = res_trans and res_trans[0] or False
return res
class GettextAlias(object):
def _get_db(self):
# find current DB based on thread/worker db name (see netsvc)
db_name = getattr(threading.currentThread(), 'dbname', None)
if db_name:
return sql_db.db_connect(db_name)
def _get_cr(self, frame, allow_create=True):
# try, in order: cr, cursor, self.env.cr, self.cr,
# request.env.cr
if 'cr' in frame.f_locals:
return frame.f_locals['cr'], False
if 'cursor' in frame.f_locals:
return frame.f_locals['cursor'], False
s = frame.f_locals.get('self')
if hasattr(s, 'env'):
return s.env.cr, False
if hasattr(s, 'cr'):
return s.cr, False
try:
from openerp.http import request
return request.env.cr, False
except RuntimeError:
pass
if allow_create:
# create a new cursor
db = self._get_db()
if db is not None:
return db.cursor(), True
return None, False
def _get_uid(self, frame):
# try, in order: uid, user, self.env.uid
if 'uid' in frame.f_locals:
return frame.f_locals['uid']
if 'user' in frame.f_locals:
return int(frame.f_locals['user']) # user may be a record
s = frame.f_locals.get('self')
return s.env.uid
def _get_lang(self, frame):
# try, in order: context.get('lang'), kwargs['context'].get('lang'),
# self.env.lang, self.localcontext.get('lang'), request.env.lang
lang = None
if frame.f_locals.get('context'):
lang = frame.f_locals['context'].get('lang')
if not lang:
kwargs = frame.f_locals.get('kwargs', {})
if kwargs.get('context'):
lang = kwargs['context'].get('lang')
if not lang:
s = frame.f_locals.get('self')
if hasattr(s, 'env'):
lang = s.env.lang
if not lang:
if hasattr(s, 'localcontext'):
lang = s.localcontext.get('lang')
if not lang:
try:
from openerp.http import request
lang = request.env.lang
except RuntimeError:
pass
if not lang:
# Last resort: attempt to guess the language of the user
# Pitfall: some operations are performed in sudo mode, and we
            #          don't know the original uid, so the language may
# be wrong when the admin language differs.
pool = getattr(s, 'pool', None)
(cr, dummy) = self._get_cr(frame, allow_create=False)
uid = self._get_uid(frame)
if pool and cr and uid:
lang = pool['res.users'].context_get(cr, uid)['lang']
return lang
def __call__(self, source):
res = source
cr = None
is_new_cr = False
try:
frame = inspect.currentframe()
if frame is None:
return source
frame = frame.f_back
if not frame:
return source
lang = self._get_lang(frame)
if lang:
cr, is_new_cr = self._get_cr(frame)
if cr:
# Try to use ir.translation to benefit from global cache if possible
registry = openerp.registry(cr.dbname)
res = registry['ir.translation']._get_source(cr, SUPERUSER_ID, None, ('code','sql_constraint'), lang, source)
else:
_logger.debug('no context cursor detected, skipping translation for "%r"', source)
else:
_logger.debug('no translation language detected, skipping translation for "%r" ', source)
except Exception:
_logger.debug('translation went wrong for "%r", skipped', source)
            # if this happens, double-check the root/base translation filenames
finally:
if cr and is_new_cr:
cr.close()
return res
_ = GettextAlias()
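# Illustrative note (an addition, not from the original file): `_` is meant to be
# used inline in server-side code, e.g.
#     message = _('Access denied.')
# It inspects the caller's frame for a cursor, uid and context/lang, then looks
# the term up in ir.translation; if no language or cursor can be determined, the
# source string is returned unchanged.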
def quote(s):
"""Returns quoted PO term string, with special PO characters escaped"""
assert r"\n" not in s, "Translation terms may not include escaped newlines ('\\n'), please use only literal newlines! (in '%s')" % s
return '"%s"' % s.replace('\\','\\\\') \
.replace('"','\\"') \
.replace('\n', '\\n"\n"')
re_escaped_char = re.compile(r"(\\.)")
re_escaped_replacements = {'n': '\n', }
def _sub_replacement(match_obj):
return re_escaped_replacements.get(match_obj.group(1)[1], match_obj.group(1)[1])
def unquote(str):
"""Returns unquoted PO term string, with special PO characters unescaped"""
return re_escaped_char.sub(_sub_replacement, str[1:-1])
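# Illustrative examples (added here; not from the original file):
#     quote('He said "hi"') == '"He said \\"hi\\""'
#     quote('line1\nline2') == '"line1\\n"\n"line2"'   (PO continuation lines)
#     unquote(quote('a "b"')) == 'a "b"'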
# class to handle po files
class TinyPoFile(object):
def __init__(self, buffer):
self.buffer = buffer
def warn(self, msg, *args):
_logger.warning(msg, *args)
def __iter__(self):
self.buffer.seek(0)
self.lines = self._get_lines()
self.lines_count = len(self.lines)
self.first = True
self.extra_lines= []
return self
def _get_lines(self):
lines = self.buffer.readlines()
# remove the BOM (Byte Order Mark):
if len(lines):
            lines[0] = unicode(lines[0], 'utf8').lstrip(unicode(codecs.BOM_UTF8, "utf8"))
lines.append('') # ensure that the file ends with at least an empty line
return lines
def cur_line(self):
return self.lines_count - len(self.lines)
def next(self):
trans_type = name = res_id = source = trad = None
if self.extra_lines:
trans_type, name, res_id, source, trad, comments = self.extra_lines.pop(0)
if not res_id:
res_id = '0'
else:
comments = []
targets = []
line = None
fuzzy = False
while not line:
if 0 == len(self.lines):
raise StopIteration()
line = self.lines.pop(0).strip()
while line.startswith('#'):
if line.startswith('#~ '):
break
if line.startswith('#.'):
line = line[2:].strip()
if not line.startswith('module:'):
comments.append(line)
elif line.startswith('#:'):
# Process the `reference` comments. Each line can specify
# multiple targets (e.g. model, view, code, selection,
# ...). For each target, we will return an additional
# entry.
for lpart in line[2:].strip().split(' '):
trans_info = lpart.strip().split(':',2)
if trans_info and len(trans_info) == 2:
# looks like the translation trans_type is missing, which is not
# unexpected because it is not a GetText standard. Default: 'code'
trans_info[:0] = ['code']
if trans_info and len(trans_info) == 3:
# this is a ref line holding the destination info (model, field, record)
targets.append(trans_info)
elif line.startswith('#,') and (line[2:].strip() == 'fuzzy'):
fuzzy = True
line = self.lines.pop(0).strip()
if not self.lines:
raise StopIteration()
while not line:
# allow empty lines between comments and msgid
line = self.lines.pop(0).strip()
if line.startswith('#~ '):
while line.startswith('#~ ') or not line.strip():
if 0 == len(self.lines):
raise StopIteration()
line = self.lines.pop(0)
# This has been a deprecated entry, don't return anything
return self.next()
if not line.startswith('msgid'):
raise Exception("malformed file: bad line: %s" % line)
source = unquote(line[6:])
line = self.lines.pop(0).strip()
if not source and self.first:
self.first = False
# if the source is "" and it's the first msgid, it's the special
                # msgstr with the information about the translation and the
                # translator; we skip it
self.extra_lines = []
while line:
line = self.lines.pop(0).strip()
return self.next()
while not line.startswith('msgstr'):
if not line:
raise Exception('malformed file at %d'% self.cur_line())
source += unquote(line)
line = self.lines.pop(0).strip()
trad = unquote(line[7:])
line = self.lines.pop(0).strip()
while line:
trad += unquote(line)
line = self.lines.pop(0).strip()
if targets and not fuzzy:
# Use the first target for the current entry (returned at the
# end of this next() call), and keep the others to generate
# additional entries (returned the next next() calls).
trans_type, name, res_id = targets.pop(0)
for t, n, r in targets:
if t == trans_type == 'code': continue
self.extra_lines.append((t, n, r, source, trad, comments))
if name is None:
if not fuzzy:
                self.warn('Missing "#:" formatted comment at line %d for the following source:\n\t%s',
self.cur_line(), source[:30])
return self.next()
return trans_type, name, res_id, source, trad, '\n'.join(comments)
def write_infos(self, modules):
import openerp.release as release
self.buffer.write("# Translation of %(project)s.\n" \
"# This file contains the translation of the following modules:\n" \
"%(modules)s" \
"#\n" \
"msgid \"\"\n" \
"msgstr \"\"\n" \
'''"Project-Id-Version: %(project)s %(version)s\\n"\n''' \
'''"Report-Msgid-Bugs-To: \\n"\n''' \
'''"POT-Creation-Date: %(now)s\\n"\n''' \
'''"PO-Revision-Date: %(now)s\\n"\n''' \
'''"Last-Translator: <>\\n"\n''' \
'''"Language-Team: \\n"\n''' \
'''"MIME-Version: 1.0\\n"\n''' \
'''"Content-Type: text/plain; charset=UTF-8\\n"\n''' \
'''"Content-Transfer-Encoding: \\n"\n''' \
'''"Plural-Forms: \\n"\n''' \
"\n"
% { 'project': release.description,
'version': release.version,
'modules': reduce(lambda s, m: s + "#\t* %s\n" % m, modules, ""),
'now': datetime.utcnow().strftime('%Y-%m-%d %H:%M')+"+0000",
}
)
def write(self, modules, tnrs, source, trad, comments=None):
        plural = len(modules) > 1 and 's' or ''
        self.buffer.write("#. module%s: %s\n" % (plural, ', '.join(modules)))
if comments:
self.buffer.write(''.join(('#. %s\n' % c for c in comments)))
code = False
for typy, name, res_id in tnrs:
self.buffer.write("#: %s:%s:%s\n" % (typy, name, res_id))
if typy == 'code':
code = True
if code:
# only strings in python code are python formated
self.buffer.write("#, python-format\n")
if not isinstance(trad, unicode):
trad = unicode(trad, 'utf8')
if not isinstance(source, unicode):
source = unicode(source, 'utf8')
msg = "msgid %s\n" \
"msgstr %s\n\n" \
% (quote(source), quote(trad))
self.buffer.write(msg.encode('utf8'))
# Methods to export the translation file
def trans_export(lang, modules, buffer, format, cr):
def _process(format, modules, rows, buffer, lang):
if format == 'csv':
writer = csv.writer(buffer, 'UNIX')
# write header first
writer.writerow(("module","type","name","res_id","src","value","comments"))
for module, type, name, res_id, src, trad, comments in rows:
comments = '\n'.join(comments)
writer.writerow((module, type, name, res_id, src, trad, comments))
elif format == 'po':
writer = TinyPoFile(buffer)
writer.write_infos(modules)
# we now group the translations by source. That means one translation per source.
grouped_rows = {}
for module, type, name, res_id, src, trad, comments in rows:
row = grouped_rows.setdefault(src, {})
row.setdefault('modules', set()).add(module)
if not row.get('translation') and trad != src:
row['translation'] = trad
row.setdefault('tnrs', []).append((type, name, res_id))
row.setdefault('comments', set()).update(comments)
for src, row in sorted(grouped_rows.items()):
if not lang:
# translation template, so no translation value
row['translation'] = ''
elif not row.get('translation'):
row['translation'] = src
writer.write(row['modules'], row['tnrs'], src, row['translation'], row['comments'])
elif format == 'tgz':
rows_by_module = {}
for row in rows:
module = row[0]
rows_by_module.setdefault(module, []).append(row)
tmpdir = tempfile.mkdtemp()
for mod, modrows in rows_by_module.items():
tmpmoddir = join(tmpdir, mod, 'i18n')
os.makedirs(tmpmoddir)
pofilename = (lang if lang else mod) + ".po" + ('t' if not lang else '')
buf = file(join(tmpmoddir, pofilename), 'w')
_process('po', [mod], modrows, buf, lang)
buf.close()
tar = tarfile.open(fileobj=buffer, mode='w|gz')
tar.add(tmpdir, '')
tar.close()
else:
raise Exception(_('Unrecognized extension: must be one of '
'.csv, .po, or .tgz (received .%s).') % format)
translations = trans_generate(lang, modules, cr)
modules = set(t[0] for t in translations)
_process(format, modules, translations, buffer, lang)
del translations
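# Illustrative usage (a sketch, not from the original file): exporting the terms
# of one module for a given language to a PO file:
#     with open('/tmp/base_fr.po', 'w') as buf:
#         trans_export('fr_FR', ['base'], buf, 'po', cr)
# Passing lang=None produces a translation template (empty msgstr values), and
# format='tgz' bundles one PO/POT file per module into a gzipped tarball.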
def trans_parse_rml(de):
res = []
for n in de:
for m in n:
if isinstance(m, SKIPPED_ELEMENT_TYPES) or not m.text:
continue
string_list = [s.replace('\n', ' ').strip() for s in re.split('\[\[.+?\]\]', m.text)]
for s in string_list:
if s:
res.append(s.encode("utf8"))
res.extend(trans_parse_rml(n))
return res
def _push(callback, term, source_line):
""" Sanity check before pushing translation terms """
term = (term or "").strip().encode('utf8')
# Avoid non-char tokens like ':' '...' '.00' etc.
if len(term) > 8 or any(x.isalpha() for x in term):
callback(term, source_line)
# tests whether an object is in a list of modules
def in_modules(object_name, modules):
if 'all' in modules:
return True
module_dict = {
'ir': 'base',
'res': 'base',
'workflow': 'base',
}
module = object_name.split('.')[0]
module = module_dict.get(module, module)
return module in modules
def _extract_translatable_qweb_terms(element, callback):
""" Helper method to walk an etree document representing
a QWeb template, and call ``callback(term)`` for each
translatable term that is found in the document.
:param etree._Element element: root of etree document to extract terms from
:param Callable callback: a callable in the form ``f(term, source_line)``,
that will be called for each extracted term.
"""
# not using elementTree.iterparse because we need to skip sub-trees in case
# the ancestor element had a reason to be skipped
for el in element:
if isinstance(el, SKIPPED_ELEMENT_TYPES): continue
if (el.tag.lower() not in SKIPPED_ELEMENTS
and "t-js" not in el.attrib
and not ("t-jquery" in el.attrib and "t-operation" not in el.attrib)
and el.get("t-translation", '').strip() != "off"):
_push(callback, el.text, el.sourceline)
for att in ('title', 'alt', 'label', 'placeholder'):
if att in el.attrib:
_push(callback, el.attrib[att], el.sourceline)
_extract_translatable_qweb_terms(el, callback)
_push(callback, el.tail, el.sourceline)
def babel_extract_qweb(fileobj, keywords, comment_tags, options):
"""Babel message extractor for qweb template files.
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a list of keywords (i.e. function names) that should
be recognized as translation functions
:param comment_tags: a list of translator tags to search for and
include in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)``
tuples
:rtype: Iterable
"""
result = []
def handle_text(text, lineno):
result.append((lineno, None, text, []))
tree = etree.parse(fileobj)
_extract_translatable_qweb_terms(tree.getroot(), handle_text)
return result
def trans_generate(lang, modules, cr):
dbname = cr.dbname
registry = openerp.registry(dbname)
trans_obj = registry['ir.translation']
model_data_obj = registry['ir.model.data']
uid = 1
query = 'SELECT name, model, res_id, module' \
' FROM ir_model_data'
query_models = """SELECT m.id, m.model, imd.module
FROM ir_model AS m, ir_model_data AS imd
WHERE m.id = imd.res_id AND imd.model = 'ir.model' """
if 'all_installed' in modules:
query += ' WHERE module IN ( SELECT name FROM ir_module_module WHERE state = \'installed\') '
query_models += " AND imd.module in ( SELECT name FROM ir_module_module WHERE state = 'installed') "
query_param = None
if 'all' not in modules:
query += ' WHERE module IN %s'
query_models += ' AND imd.module in %s'
query_param = (tuple(modules),)
else:
query += ' WHERE module != %s'
query_models += ' AND imd.module != %s'
query_param = ('__export__',)
query += ' ORDER BY module, model, name'
query_models += ' ORDER BY module, model'
cr.execute(query, query_param)
_to_translate = set()
def push_translation(module, type, name, id, source, comments=None):
# empty and one-letter terms are ignored, they probably are not meant to be
# translated, and would be very hard to translate anyway.
sanitized_term = (source or '').strip()
try:
# verify the minimal size without eventual xml tags
# wrap to make sure html content like '<a>b</a><c>d</c>' is accepted by lxml
wrapped = "<div>%s</div>" % sanitized_term
node = etree.fromstring(wrapped)
sanitized_term = etree.tostring(node, encoding='UTF-8', method='text')
except etree.ParseError:
pass
# remove non-alphanumeric chars
sanitized_term = re.sub(r'\W+', '', sanitized_term)
if not sanitized_term or len(sanitized_term) <= 1:
return
tnx = (module, source, name, id, type, tuple(comments or ()))
_to_translate.add(tnx)
def push(mod, type, name, res_id, term):
term = (term or '').strip()
if len(term) > 2 or term in ENGLISH_SMALL_WORDS:
push_translation(mod, type, name, res_id, term)
def get_root_view(xml_id):
view = model_data_obj.xmlid_to_object(cr, uid, xml_id)
if view:
while view.mode != 'primary':
view = view.inherit_id
xml_id = view.get_external_id(cr, uid).get(view.id, xml_id)
return xml_id
for (xml_name,model,res_id,module) in cr.fetchall():
module = encode(module)
model = encode(model)
xml_name = "%s.%s" % (module, encode(xml_name))
if model not in registry:
_logger.error("Unable to find object %r", model)
continue
Model = registry[model]
if not Model._translate:
# explicitly disabled
continue
obj = Model.browse(cr, uid, res_id)
if not obj.exists():
_logger.warning("Unable to find object %r with id %d", model, res_id)
continue
if model=='ir.model.fields':
try:
field_name = encode(obj.name)
except AttributeError, exc:
_logger.error("name error in %s: %s", xml_name, str(exc))
continue
field_model = registry.get(obj.model)
if (field_model is None or not field_model._translate or
field_name not in field_model._fields):
continue
field_def = field_model._fields[field_name]
if hasattr(field_def, 'selection') and isinstance(field_def.selection, (list, tuple)):
name = "%s,%s" % (encode(obj.model), field_name)
for dummy, val in field_def.selection:
push_translation(module, 'selection', name, 0, encode(val))
elif model=='ir.actions.report.xml':
name = encode(obj.report_name)
fname = ""
if obj.report_rml:
fname = obj.report_rml
parse_func = trans_parse_rml
report_type = "report"
elif obj.report_xsl:
continue
if fname and obj.report_type in ('pdf', 'xsl'):
try:
report_file = misc.file_open(fname)
try:
d = etree.parse(report_file)
for t in parse_func(d.iter()):
push_translation(module, report_type, name, 0, t)
finally:
report_file.close()
except (IOError, etree.XMLSyntaxError):
_logger.exception("couldn't export translation for report %s %s %s", name, report_type, fname)
for field_name, field_def in obj._fields.iteritems():
if getattr(field_def, 'translate', None):
name = model + "," + field_name
try:
value = obj[field_name] or ''
except Exception:
continue
for term in set(field_def.get_trans_terms(value)):
push_translation(module, 'model', name, xml_name, encode(term))
# End of data for ir.model.data query results
cr.execute(query_models, query_param)
def push_constraint_msg(module, term_type, model, msg):
if not hasattr(msg, '__call__'):
push_translation(encode(module), term_type, encode(model), 0, encode(msg))
def push_local_constraints(module, model, cons_type='sql_constraints'):
"""Climb up the class hierarchy and ignore inherited constraints
from other modules"""
term_type = 'sql_constraint' if cons_type == 'sql_constraints' else 'constraint'
msg_pos = 2 if cons_type == 'sql_constraints' else 1
for cls in model.__class__.__mro__:
if getattr(cls, '_module', None) != module:
continue
constraints = getattr(cls, '_local_' + cons_type, [])
for constraint in constraints:
push_constraint_msg(module, term_type, model._name, constraint[msg_pos])
for (_, model, module) in cr.fetchall():
if model not in registry:
_logger.error("Unable to find object %r", model)
continue
model_obj = registry[model]
if model_obj._constraints:
push_local_constraints(module, model_obj, 'constraints')
if model_obj._sql_constraints:
push_local_constraints(module, model_obj, 'sql_constraints')
installed_modules = map(
lambda m: m['name'],
registry['ir.module.module'].search_read(cr, uid, [('state', '=', 'installed')], fields=['name']))
path_list = [(path, True) for path in openerp.modules.module.ad_paths]
# Also scan these non-addon paths
for bin_path in ['osv', 'report', 'modules', 'service', 'tools']:
path_list.append((os.path.join(config.config['root_path'], bin_path), True))
# non-recursive scan for individual files in root directory but without
# scanning subdirectories that may contain addons
path_list.append((config.config['root_path'], False))
_logger.debug("Scanning modules at paths: %s", path_list)
def get_module_from_path(path):
for (mp, rec) in path_list:
if rec and path.startswith(mp) and os.path.dirname(path) != mp:
path = path[len(mp)+1:]
return path.split(os.path.sep)[0]
return 'base' # files that are not in a module are considered as being in 'base' module
def verified_module_filepaths(fname, path, root):
fabsolutepath = join(root, fname)
frelativepath = fabsolutepath[len(path):]
display_path = "addons%s" % frelativepath
module = get_module_from_path(fabsolutepath)
if ('all' in modules or module in modules) and module in installed_modules:
if os.path.sep != '/':
display_path = display_path.replace(os.path.sep, '/')
return module, fabsolutepath, frelativepath, display_path
return None, None, None, None
def babel_extract_terms(fname, path, root, extract_method="python", trans_type='code',
extra_comments=None, extract_keywords={'_': None}):
module, fabsolutepath, _, display_path = verified_module_filepaths(fname, path, root)
extra_comments = extra_comments or []
if not module: return
src_file = open(fabsolutepath, 'r')
try:
for extracted in extract.extract(extract_method, src_file,
keywords=extract_keywords):
# Babel 0.9.6 yields lineno, message, comments
# Babel 1.3 yields lineno, message, comments, context
lineno, message, comments = extracted[:3]
push_translation(module, trans_type, display_path, lineno,
encode(message), comments + extra_comments)
except Exception:
_logger.exception("Failed to extract terms from %s", fabsolutepath)
finally:
src_file.close()
for (path, recursive) in path_list:
_logger.debug("Scanning files of modules at %s", path)
for root, dummy, files in osutil.walksymlinks(path):
for fname in fnmatch.filter(files, '*.py'):
babel_extract_terms(fname, path, root)
# mako provides a babel extractor: http://docs.makotemplates.org/en/latest/usage.html#babel
for fname in fnmatch.filter(files, '*.mako'):
babel_extract_terms(fname, path, root, 'mako', trans_type='report')
# Javascript source files in the static/src/js directory, rest is ignored (libs)
if fnmatch.fnmatch(root, '*/static/src/js*'):
for fname in fnmatch.filter(files, '*.js'):
babel_extract_terms(fname, path, root, 'javascript',
extra_comments=[WEB_TRANSLATION_COMMENT],
extract_keywords={'_t': None, '_lt': None})
# QWeb template files
if fnmatch.fnmatch(root, '*/static/src/xml*'):
for fname in fnmatch.filter(files, '*.xml'):
babel_extract_terms(fname, path, root, 'openerp.tools.translate:babel_extract_qweb',
extra_comments=[WEB_TRANSLATION_COMMENT])
if not recursive:
                # walking is top-down, so breaking after the first iteration scans only the top level
break
out = []
# translate strings marked as to be translated
for module, source, name, id, type, comments in sorted(_to_translate):
trans = '' if not lang else trans_obj._get_source(cr, uid, name, type, lang, source)
out.append((module, type, name, id, source, encode(trans) or '', comments))
return out
def trans_load(cr, filename, lang, verbose=True, module_name=None, context=None):
try:
fileobj = misc.file_open(filename)
_logger.info("loading %s", filename)
fileformat = os.path.splitext(filename)[-1][1:].lower()
result = trans_load_data(cr, fileobj, fileformat, lang, verbose=verbose, module_name=module_name, context=context)
fileobj.close()
return result
except IOError:
if verbose:
_logger.error("couldn't read translation file %s", filename)
return None
def trans_load_data(cr, fileobj, fileformat, lang, lang_name=None, verbose=True, module_name=None, context=None):
"""Populates the ir_translation table."""
if verbose:
_logger.info('loading translation file for language %s', lang)
if context is None:
context = {}
db_name = cr.dbname
registry = openerp.registry(db_name)
lang_obj = registry.get('res.lang')
trans_obj = registry.get('ir.translation')
iso_lang = misc.get_iso_codes(lang)
try:
ids = lang_obj.search(cr, SUPERUSER_ID, [('code','=', lang)])
if not ids:
            # let's create the language with locale information
lang_obj.load_lang(cr, SUPERUSER_ID, lang=lang, lang_name=lang_name)
# Parse also the POT: it will possibly provide additional targets.
# (Because the POT comments are correct on Launchpad but not the
# PO comments due to a Launchpad limitation. See LP bug 933496.)
pot_reader = []
# now, the serious things: we read the language file
fileobj.seek(0)
if fileformat == 'csv':
reader = csv.reader(fileobj, quotechar='"', delimiter=',')
            # read the first line of the file (it contains the column titles)
for row in reader:
fields = row
break
elif fileformat == 'po':
reader = TinyPoFile(fileobj)
fields = ['type', 'name', 'res_id', 'src', 'value', 'comments']
# Make a reader for the POT file and be somewhat defensive for the
# stable branch.
if fileobj.name.endswith('.po'):
try:
# Normally the path looks like /path/to/xxx/i18n/lang.po
# and we try to find the corresponding
# /path/to/xxx/i18n/xxx.pot file.
# (Sometimes we have 'i18n_extra' instead of just 'i18n')
addons_module_i18n, _ignored = os.path.split(fileobj.name)
addons_module, i18n_dir = os.path.split(addons_module_i18n)
addons, module = os.path.split(addons_module)
pot_handle = misc.file_open(os.path.join(
addons, module, i18n_dir, module + '.pot'))
pot_reader = TinyPoFile(pot_handle)
except:
pass
else:
_logger.info('Bad file format: %s', fileformat)
raise Exception(_('Bad file format: %s') % fileformat)
# Read the POT references, and keep them indexed by source string.
class Target(object):
def __init__(self):
self.value = None
self.targets = set() # set of (type, name, res_id)
self.comments = None
pot_targets = defaultdict(Target)
for type, name, res_id, src, _ignored, comments in pot_reader:
if type is not None:
target = pot_targets[src]
target.targets.add((type, name, res_id))
target.comments = comments
# read the rest of the file
irt_cursor = trans_obj._get_import_cursor(cr, SUPERUSER_ID, context=context)
def process_row(row):
"""Process a single PO (or POT) entry."""
            # dictionary which holds values for this row of the translation file (CSV or PO)
# {'lang': ..., 'type': ..., 'name': ..., 'res_id': ...,
# 'src': ..., 'value': ..., 'module':...}
dic = dict.fromkeys(('type', 'name', 'res_id', 'src', 'value',
'comments', 'imd_model', 'imd_name', 'module'))
dic['lang'] = lang
dic.update(zip(fields, row))
# discard the target from the POT targets.
src = dic['src']
if src in pot_targets:
target = pot_targets[src]
target.value = dic['value']
target.targets.discard((dic['type'], dic['name'], dic['res_id']))
            # Skip terms that do not specify a res_id
res_id = dic['res_id']
if not res_id:
return
if isinstance(res_id, (int, long)) or \
(isinstance(res_id, basestring) and res_id.isdigit()):
dic['res_id'] = int(res_id)
if module_name:
dic['module'] = module_name
else:
# res_id is an xml id
dic['res_id'] = None
dic['imd_model'] = dic['name'].split(',')[0]
if '.' in res_id:
dic['module'], dic['imd_name'] = res_id.split('.', 1)
else:
dic['module'], dic['imd_name'] = module_name, res_id
irt_cursor.push(dic)
# First process the entries from the PO file (doing so also fills/removes
# the entries from the POT file).
for row in reader:
process_row(row)
# Then process the entries implied by the POT file (which is more
# correct w.r.t. the targets) if some of them remain.
pot_rows = []
for src, target in pot_targets.iteritems():
if target.value:
for type, name, res_id in target.targets:
pot_rows.append((type, name, res_id, src, target.value, target.comments))
pot_targets.clear()
for row in pot_rows:
process_row(row)
irt_cursor.finish()
trans_obj.clear_caches()
if verbose:
_logger.info("translation file loaded succesfully")
except IOError:
filename = '[lang: %s][format: %s]' % (iso_lang or 'new', fileformat)
_logger.exception("couldn't read translation file %s", filename)
def get_locales(lang=None):
if lang is None:
lang = locale.getdefaultlocale()[0]
if os.name == 'nt':
lang = _LOCALE2WIN32.get(lang, lang)
def process(enc):
ln = locale._build_localename((lang, enc))
yield ln
nln = locale.normalize(ln)
if nln != ln:
yield nln
for x in process('utf8'): yield x
prefenc = locale.getpreferredencoding()
if prefenc:
for x in process(prefenc): yield x
prefenc = {
'latin1': 'latin9',
'iso-8859-1': 'iso8859-15',
'cp1252': '1252',
}.get(prefenc.lower())
if prefenc:
for x in process(prefenc): yield x
yield lang
def resetlocale():
# locale.resetlocale is bugged with some locales.
for ln in get_locales():
try:
return locale.setlocale(locale.LC_ALL, ln)
except locale.Error:
continue
def load_language(cr, lang):
"""Loads a translation terms for a language.
Used mainly to automate language loading at db initialization.
:param lang: language ISO code with optional _underscore_ and l10n flavor (ex: 'fr', 'fr_BE', but not 'fr-BE')
:type lang: str
"""
registry = openerp.registry(cr.dbname)
language_installer = registry['base.language.install']
oid = language_installer.create(cr, SUPERUSER_ID, {'lang': lang})
language_installer.lang_install(cr, SUPERUSER_ID, [oid], context=None)
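# Illustrative usage sketch (not part of the original module): load_language is
# typically called with an open cursor while initializing a database. The
# database name below is a placeholder.
#
#     registry = openerp.registry('mydb')
#     cr = registry.cursor()
#     try:
#         load_language(cr, 'fr_BE')
#         cr.commit()
#     finally:
#         cr.close()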
| AyoubZahid/odoo | openerp/tools/translate.py | Python | gpl-3.0 | 48,927 | 0.003352 |
"""Tests for the :mod:`campy.datastructures.basicgraph` module."""
| sredmond/acmpy | tests/datastructures/test_basicgraph.py | Python | mit | 67 | 0 |
# Extra Long Factorials
# Developer: Murillo Grubler
# https://www.hackerrank.com/challenges/extra-long-factorials/problem
# Time Complexity = O(n)
def factorial(n):
    # Multiply 1..n iteratively; Python integers have arbitrary precision, so
    # the result never overflows. Returns 1 for both n == 0 and n == 1.
    total = 1
    for i in range(2, n + 1):
        total *= i
    return total
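# For example, factorial(25) == 15511210043330985984000000, far beyond 64-bit
# integer range, which is the point of the "extra long factorials" exercise.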
n = int(input().strip())
print (factorial(n)) | Murillo/Hackerrank-Algorithms | Algorithms/Implementation/extra-long-factorials.py | Python | mit | 437 | 0.006865 |
import functools
def memoize(obj):
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = (args, tuple(kwargs.items()))
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
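# Illustrative usage (not part of the original module): results are cached per
# combination of positional and keyword arguments, and the cache dict is
# exposed on the wrapper via functools.wraps, so it stays inspectable.
if __name__ == "__main__":
    @memoize
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)
    print(fib(30))         # 832040; each fib(k) is computed only once
    print(len(fib.cache))  # 31 cached entries, one per distinct argument 0..30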
| GaretJax/i18n-utils | i18n_utils/utils.py | Python | mit | 291 | 0 |
from orm import model
from .user import User
from orm import fields
class Setting(model.Model):
owner = fields.ForeignKeyField(User)
company_name = fields.CharField(max_length=140, blank=True)
address = fields.CharField(max_length=240, blank=True)
zip_code = fields.CharField(max_length=140, blank=True)
city = fields.CharField(max_length=140, blank=True)
phone = fields.CharField(max_length=140, blank=True)
email = fields.CharField(max_length=140, blank=True)
vat_code = fields.CharField(max_length=140, blank=True)
iban = fields.CharField(max_length=140, blank=True)
bic = fields.CharField(max_length=140, blank=True)
def __repr__(self):
return str(self.company_name) | theikkila/lopputili | app/models/setting.py | Python | mit | 685 | 0.020438 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
try:
import json
except ImportError:
from django.utils import simplejson as json
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.views.decorators.http import require_POST
from django.shortcuts import get_object_or_404
from django.conf import settings
from django.contrib.auth import get_user_model
from geonode.utils import resolve_object
from geonode.base.models import ResourceBase
from geonode.layers.models import Layer
from geonode.people.models import Profile
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
def _perms_info(obj):
info = obj.get_all_level_info()
return info
def _perms_info_json(obj):
info = _perms_info(obj)
info['users'] = dict([(u.username, perms)
for u, perms in info['users'].items()])
info['groups'] = dict([(g.name, perms)
for g, perms in info['groups'].items()])
return json.dumps(info)
def resource_permissions(request, resource_id):
try:
resource = resolve_object(
request, ResourceBase, {
'id': resource_id}, 'base.change_resourcebase_permissions')
except PermissionDenied:
# we are handling this in a non-standard way
return HttpResponse(
'You are not allowed to change permissions for this resource',
status=401,
content_type='text/plain')
if request.method == 'POST':
success = True
message = "Permissions successfully updated!"
try:
permission_spec = json.loads(request.body)
resource.set_permissions(permission_spec)
# Check Users Permissions Consistency
view_any = False
info = _perms_info(resource)
info_users = dict([(u.username, perms) for u, perms in info['users'].items()])
for user, perms in info_users.items():
if user == 'AnonymousUser':
view_any = ('view_resourcebase' in perms)
break
for user, perms in info_users.items():
if 'download_resourcebase' in perms and 'view_resourcebase' not in perms and not view_any:
success = False
message = 'User ' + str(user) + ' has Download permissions but ' \
'cannot access the resource. ' \
'Please update permissions consistently!'
return HttpResponse(
json.dumps({'success': success, 'message': message}),
status=200,
content_type='text/plain'
)
except BaseException:
success = False
message = "Error updating permissions :("
return HttpResponse(
json.dumps({'success': success, 'message': message}),
status=500,
content_type='text/plain'
)
elif request.method == 'GET':
permission_spec = _perms_info_json(resource)
return HttpResponse(
json.dumps({'success': True, 'permissions': permission_spec}),
status=200,
content_type='text/plain'
)
else:
return HttpResponse(
'No methods other than get and post are allowed',
status=401,
content_type='text/plain')
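# Illustrative sketch (not part of the original module): the POST body consumed
# above is a JSON permission spec keyed by username and group name. The URL,
# resource id and credentials below are placeholders.
#
#     import requests
#     spec = {'users': {'AnonymousUser': ['view_resourcebase']},
#             'groups': {'registered-members': ['view_resourcebase',
#                                               'download_resourcebase']}}
#     requests.post('https://example.org/security/permissions/42',
#                   data=json.dumps(spec), auth=('admin', 'admin'))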
@require_POST
def invalidate_permissions_cache(request):
from .utils import sync_resources_with_guardian
uuid = request.POST['uuid']
resource = get_object_or_404(ResourceBase, uuid=uuid)
can_change_permissions = request.user.has_perm(
'change_resourcebase_permissions',
resource)
if can_change_permissions:
# Push Security Rules
sync_resources_with_guardian(resource)
return HttpResponse(
json.dumps({'success': 'ok', 'message': 'Security Rules Cache Refreshed!'}),
status=200,
content_type='text/plain'
)
else:
return HttpResponse(
json.dumps({'success': 'false', 'message': 'You cannot modify this resource!'}),
status=200,
content_type='text/plain'
)
@require_POST
def attributes_sats_refresh(request):
from geonode.geoserver.helpers import set_attributes_from_geoserver
uuid = request.POST['uuid']
resource = get_object_or_404(ResourceBase, uuid=uuid)
can_change_data = request.user.has_perm(
'change_resourcebase',
resource)
layer = Layer.objects.get(id=resource.id)
if layer and can_change_data:
# recalculate the layer statistics
set_attributes_from_geoserver(layer, overwrite=True)
return HttpResponse(
json.dumps({'success': 'ok', 'message': 'Attributes/Stats Refreshed Successfully!'}),
status=200,
content_type='text/plain'
)
else:
return HttpResponse(
json.dumps({'success': 'false', 'message': 'You cannot modify this resource!'}),
status=200,
content_type='text/plain'
)
@require_POST
def invalidate_tiledlayer_cache(request):
from .utils import set_geowebcache_invalidate_cache
uuid = request.POST['uuid']
resource = get_object_or_404(ResourceBase, uuid=uuid)
can_change_data = request.user.has_perm(
'change_resourcebase',
resource)
layer = Layer.objects.get(id=resource.id)
if layer and can_change_data:
set_geowebcache_invalidate_cache(layer.alternate)
return HttpResponse(
json.dumps({'success': 'ok', 'message': 'GeoWebCache Tiled Layer Emptied!'}),
status=200,
content_type='text/plain'
)
else:
return HttpResponse(
json.dumps({'success': 'false', 'message': 'You cannot modify this resource!'}),
status=200,
content_type='text/plain'
)
@require_POST
def set_bulk_permissions(request):
permission_spec = json.loads(request.POST.get('permissions', None))
resource_ids = request.POST.getlist('resources', [])
if permission_spec is not None:
not_permitted = []
for resource_id in resource_ids:
try:
resource = resolve_object(
request, ResourceBase, {
'id': resource_id
},
'base.change_resourcebase_permissions')
resource.set_permissions(permission_spec)
except PermissionDenied:
not_permitted.append(ResourceBase.objects.get(id=resource_id).title)
return HttpResponse(
json.dumps({'success': 'ok', 'not_changed': not_permitted}),
status=200,
content_type='text/plain'
)
else:
return HttpResponse(
json.dumps({'error': 'Wrong permissions specification'}),
status=400,
content_type='text/plain')
@require_POST
def request_permissions(request):
""" Request permission to download a resource.
"""
uuid = request.POST['uuid']
resource = get_object_or_404(ResourceBase, uuid=uuid)
try:
notification.send(
[resource.owner],
'request_download_resourcebase',
{'from_user': request.user, 'resource': resource}
)
return HttpResponse(
json.dumps({'success': 'ok', }),
status=200,
content_type='text/plain')
except BaseException:
return HttpResponse(
json.dumps({'error': 'error delivering notification'}),
status=400,
content_type='text/plain')
def send_email_consumer(layer_uuid, user_id):
resource = get_object_or_404(ResourceBase, uuid=layer_uuid)
user = Profile.objects.get(id=user_id)
notification.send(
[resource.owner],
'request_download_resourcebase',
{'from_user': user, 'resource': resource}
)
def send_email_owner_on_view(owner, viewer, layer_id, geonode_email="email@geo.node"):
# get owner and viewer emails
owner_email = get_user_model().objects.get(username=owner).email
layer = Layer.objects.get(id=layer_id)
# check if those values are empty
if owner_email and geonode_email:
from django.core.mail import EmailMessage
# TODO: Copy edit message.
subject_email = "Your Layer has been seen."
msg = ("Your layer called {0} with uuid={1}"
" was seen by {2}").format(layer.name, layer.uuid, viewer)
try:
email = EmailMessage(
subject=subject_email,
body=msg,
from_email=geonode_email,
to=[owner_email, ],
reply_to=[geonode_email, ])
email.content_subtype = "html"
email.send()
except BaseException:
pass
| mcldev/geonode | geonode/security/views.py | Python | gpl-3.0 | 9,784 | 0.001124 |
#!/usr/bin/env python
# Copyright (c) 2013-2014 ZUYD Research
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author Robert Jacobs/info@rjpjacobs.nl
"""
This will make sure that the commands, as instructed by the user using the command gui, are sent to the robot
"""
import roslib; roslib.load_manifest('zuros_command_to_robot_sender')
import rospy
import tf
import math
import actionlib
import thread
#move_base_msgs
from move_base_msgs.msg import *
from geometry_msgs.msg import PoseStamped
from move_base_msgs.msg import *
## Command to robot sender class
class CommandToRobotSender(object):
## Constructor
def __init__(self):
self.action_client = actionlib.SimpleActionClient("/move_base", MoveBaseAction)
#self.pub_move_base_simple = rospy.Publisher("/move_base_simple/goal", PoseStamped)
## Sorts a dictionary alphabetically
    def sort_dict(self, dictionary):
        keys = sorted(dictionary.iterkeys())
        return [[key, dictionary[key]] for key in keys]
    ## The move method. Currently only the base is implemented; add support for further hardware components here
def move(self, component_name, parameter, blocking):
# Is this a base command?
if component_name == "base":
# Stop command?
if parameter == "stop":
return self.base_stop(component_name)
# Not a stop command, so it should be a move base command
else:
return self.move_base(component_name, parameter, blocking)
# Add your own component here
# if component_name == "my_component":
# No valid component (not implemented? Typo?)
else:
            rospy.logerr(rospy.get_name() + ": the requested component is not yet implemented")
## Base stop function - gets called if the component name is "base" and the parameter is "stop" in the move function above
def base_stop(self, component_name):
#base_client = actionlib.SimpleActionClient("move_base", MoveBaseAction)
rospy.loginfo("Stop <<%s>>", component_name)
self.action_client.cancel_all_goals()
    ## Move base function
def move_base(self, component_name, position, blocking):
#ah = action_handle("move_base", component_name, position, blocking, self.parse)
# Look up position in parameter server
nav_prefix = "~nav_positions"
# Not on parameter server?
if not rospy.has_param(nav_prefix):
rospy.logerr("parameter %s does not exist on ROS Parameter Server, aborting...",param_prefix)
return False
# Get parameters
navigation_positions_params = rospy.get_param(nav_prefix)
nav_param = self.sort_dict(navigation_positions_params)
nav_pos = None
# Check if this position is known
for nav in nav_param:
if(nav[0] == position):
nav_pos = nav[1]
# Position is known
        if nav_pos is not None:
            rospy.loginfo("Move <<%s>> to <<[x,y,yaw] %s, %s, %s>>", component_name, nav_pos[0], nav_pos[1], nav_pos[2])
        # Position is not known
        else:
            rospy.logerr("No valid position found, cancelling move command. Are you sure your position is added to the parameter server?")
            return
# Convert to pose message
pose = PoseStamped()
pose.header.stamp = rospy.Time.now()
pose.header.frame_id = "/map"
pose.pose.position.x = nav_pos[0]
pose.pose.position.y = nav_pos[1]
pose.pose.position.z = 0
quat = tf.transformations.quaternion_from_euler(0, 0, nav_pos[2])
pose.pose.orientation.x = quat[0]
pose.pose.orientation.y = quat[1]
pose.pose.orientation.z = quat[2]
pose.pose.orientation.w = quat[3]
rospy.logdebug("waiting for move_base action server to start")
# Error: server did not respond within given time
if not self.action_client.wait_for_server(rospy.Duration(5)):
rospy.logerr("move_base action server not ready within timeout, aborting...")
return
else:
rospy.logdebug("move_base action server ready")
# sending goal
client_goal = MoveBaseGoal()
client_goal.target_pose = pose
thread.start_new_thread( self.handle, (client_goal,))
#self.pub_move_base_simple.publish(pose)
## Handle function which sends the command to the action server
def handle(self, goal):
self.action_client.send_goal(goal)
self.action_client.wait_for_result()
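# Illustrative usage sketch (not part of the original module): the sender is
# meant to run inside an initialized ROS node, and the position name passed to
# move() must exist under the ~nav_positions parameter.
#
#     rospy.init_node('zuros_command_to_robot_sender')
#     sender = CommandToRobotSender()
#     sender.move('base', 'kitchen', blocking=False)
#     rospy.spin()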
| robertjacobs/zuros | zuros_deliberator/zuros_command_to_robot_sender/src/zuros_command_to_robot_sender.py | Python | mit | 6,086 | 0.006408 |
from setuptools import setup
import os
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
version = "1.0b3"
setup(
version=version,
description="A plugin for ploy providing support for OpenVZ containers.",
long_description=README + "\n\n",
name="ploy_openvz",
author='Florian Schulze',
author_email='florian.schulze@gmx.net',
license="BSD 3-Clause License",
url='http://github.com/ployground/ploy_openvz',
classifiers=[
'Environment :: Console',
'Intended Audience :: System Administrators',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Systems Administration'],
include_package_data=True,
zip_safe=False,
packages=['ploy_openvz'],
install_requires=[
'setuptools',
'ploy >= 1.0.0, < 2dev',
'lazy'],
entry_points="""
[ploy.plugins]
vz = ploy_openvz:plugin
""")
| ployground/ploy_openvz | setup.py | Python | bsd-3-clause | 1,066 | 0 |
# -*- coding: utf-8 -*-
class Company(object):
def __init__(self, name=None, code=None, phone=None, digit=None):
# Company's name
self.name = name
# Codename
self.code = code
# The digit of the invoice number
if digit is None:
digit = []
self.digit = digit
# Phone number of the service center
self.phone = phone
def __repr__(self):
return '[%s] %s (%s)' % (
self.code,
self.name,
self.phone
)
class Track(object):
def __init__(self, time=None, location=None, status=None,
phone1=None, phone2=None):
# Time
self.time = time
# Location
self.location = location
# Status
self.status = status
# Phone number 1
self.phone1 = phone1
# Phone number 2
self.phone2 = phone2
def __repr__(self):
return '[%s] %s - %s / %s / %s' % (
self.time,
self.status,
self.location,
self.phone1,
self.phone2
)
class Tracker(object):
def __init__(self):
self._tracks = []
@property
def tracks(self):
return self._tracks
def add_track(self, new_track):
if not isinstance(new_track, Track):
raise TypeError('The new_track must be Track!')
self._tracks.append(new_track)
def track_by_status(self, status):
"""
Find the tracking information matching the status
:param str status: The status to find the tracking information
:return: The tracking information matching the status
"""
tracks = list(filter(lambda x: x.status == status, self._tracks))
if len(tracks) > 0:
return tracks[-1]
raise LookupError("Can't find the track by status %s" % status)
def __iter__(self):
return iter(self._tracks)
class Parcel(object):
def __init__(self, sender=None, receiver=None, invoice_number=None,
address=None, note=None):
# The sender's name
self.sender = sender
# The receiver's name
self.receiver = receiver
# Invoice number
self.invoice_number = invoice_number
# The receiver's address
self.address = address
# Note for the parcel
self.note = note
def __repr__(self):
return '[%s] From: %s, To: %s, %s' % (
self.invoice_number,
self.sender,
self.receiver,
self.note
)
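# Illustrative usage (not part of the original module): collect tracking events
# and look up the most recent one matching a given status; all values are made up.
if __name__ == "__main__":
    tracker = Tracker()
    tracker.add_track(Track(time='2017-01-01 10:00', location='Seoul',
                            status='In transit'))
    tracker.add_track(Track(time='2017-01-02 09:30', location='Busan',
                            status='Delivered'))
    print(tracker.track_by_status('Delivered'))
    print(Parcel(sender='Alice', receiver='Bob', invoice_number='123456789012'))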
| iBluemind/armatis | armatis/models.py | Python | bsd-2-clause | 2,596 | 0 |
# Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._ssk.
String subsequence kernel (SSK) similarity
"""
from typing import Any, Optional
from ._token_distance import _TokenDistance
from ..tokenizer import QSkipgrams, _Tokenizer
__all__ = ['SSK']
class SSK(_TokenDistance):
r"""String subsequence kernel (SSK) similarity.
This is based on :cite:`Lodhi:2002`.
.. versionadded:: 0.4.1
"""
def __init__(
self,
tokenizer: Optional[_Tokenizer] = None,
ssk_lambda: float = 0.9,
**kwargs: Any
) -> None:
"""Initialize SSK instance.
Parameters
----------
tokenizer : _Tokenizer
A tokenizer instance from the :py:mod:`abydos.tokenizer` package
ssk_lambda : float or Iterable
A value in the range (0.0, 1.0) used for discouting gaps between
characters according to the method described in :cite:`Lodhi:2002`.
To supply multiple values of lambda, provide an Iterable of numeric
values, such as (0.5, 0.05) or np.arange(0.05, 0.5, 0.05)
**kwargs
Arbitrary keyword arguments
Other Parameters
----------------
qval : int
The length of each q-skipgram. Using this parameter and
tokenizer=None will cause the instance to use the QGramskipgrams
tokenizer with this q value.
.. versionadded:: 0.4.1
"""
super(SSK, self).__init__(
tokenizer=tokenizer, ssk_lambda=ssk_lambda, **kwargs
)
qval = 2 if 'qval' not in self.params else self.params['qval']
self.params['tokenizer'] = (
tokenizer
if tokenizer is not None
else QSkipgrams(
qval=qval, start_stop='', scaler='SSK', ssk_lambda=ssk_lambda
)
)
def sim_score(self, src: str, tar: str) -> float:
"""Return the SSK similarity of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
Returns
-------
float
String subsequence kernel similarity
Examples
--------
>>> cmp = SSK()
>>> cmp.dist_abs('cat', 'hat')
0.6441281138790036
>>> cmp.dist_abs('Niall', 'Neil')
0.5290992177869402
>>> cmp.dist_abs('aluminum', 'Catalan')
0.862398428061774
>>> cmp.dist_abs('ATCG', 'TAGC')
0.38591004719395017
.. versionadded:: 0.4.1
"""
self._tokenize(src, tar)
src_wts = self._src_tokens
tar_wts = self._tar_tokens
score = sum(
src_wts[token] * tar_wts[token] for token in src_wts & tar_wts
)
return score
def sim(self, src: str, tar: str) -> float:
"""Return the normalized SSK similarity of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
Returns
-------
float
Normalized string subsequence kernel similarity
Examples
--------
>>> cmp = SSK()
>>> cmp.sim('cat', 'hat')
0.3558718861209964
>>> cmp.sim('Niall', 'Neil')
0.4709007822130597
>>> cmp.sim('aluminum', 'Catalan')
0.13760157193822603
>>> cmp.sim('ATCG', 'TAGC')
0.6140899528060498
.. versionadded:: 0.4.1
"""
if src == tar:
return 1.0
self._tokenize(src, tar)
src_wts = self._src_tokens
tar_wts = self._tar_tokens
score = sum(
src_wts[token] * tar_wts[token] for token in src_wts & tar_wts
)
norm = (
sum(src_wts[token] * src_wts[token] for token in src_wts)
* sum(tar_wts[token] * tar_wts[token] for token in tar_wts)
) ** 0.5
if not score:
return 0.0
return score / norm
if __name__ == '__main__':
import doctest
doctest.testmod()
| chrislit/abydos | abydos/distance/_ssk.py | Python | gpl-3.0 | 4,880 | 0 |
from flask import request
from sqlalchemy.dialects.postgresql import array_agg
from zeus.config import db
from zeus.constants import Result
from zeus.db.func import array_agg_row
from zeus.models import Job, TestCase, Revision
from zeus.utils.builds import fetch_build_for_revision
from .base_revision import BaseRevisionResource
from ..schemas import AggregateTestCaseSummarySchema
class RevisionTestsResource(BaseRevisionResource):
def get(self, revision: Revision):
"""
Return a list of test cases for a given revision.
"""
build = fetch_build_for_revision(revision)
if not build:
return self.respond(status=404)
build_ids = [original.id for original in build.original]
job_query = db.session.query(Job.id).filter(Job.build_id.in_(build_ids))
result = request.args.get("allowed_failures")
if result == "false":
job_query = job_query.filter(Job.allow_failure == False) # NOQA
job_ids = job_query.subquery()
query = (
db.session.query(
TestCase.hash,
TestCase.name,
array_agg_row(
TestCase.id, TestCase.job_id, TestCase.duration, TestCase.result
).label("runs"),
)
.filter(TestCase.job_id.in_(job_ids))
.group_by(TestCase.hash, TestCase.name)
)
result = request.args.get("result")
if result:
try:
query = query.filter(TestCase.result == getattr(Result, result))
except AttributeError:
raise NotImplementedError
query = query.order_by(
(
array_agg(TestCase.result).label("results").contains([Result.failed])
).desc(),
TestCase.name.asc(),
)
schema = AggregateTestCaseSummarySchema(many=True, exclude=("build",))
return self.paginate_with_schema(schema, query)
| getsentry/zeus | zeus/api/resources/revision_tests.py | Python | apache-2.0 | 1,984 | 0.002016 |
# DExTer : Debugging Experience Tester
# ~~~~~~ ~ ~~ ~ ~~
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Set of data classes for representing the complete debug program state at a
fixed point in execution.
"""
import os
from collections import OrderedDict
from typing import List
class SourceLocation:
def __init__(self, path: str = None, lineno: int = None, column: int = None):
if path:
path = os.path.normcase(path)
self.path = path
self.lineno = lineno
self.column = column
def __str__(self):
return '{}({}:{})'.format(self.path, self.lineno, self.column)
def match(self, other) -> bool:
"""Returns true iff all the properties that appear in `self` have the
same value in `other`, but not necessarily vice versa.
"""
if not other or not isinstance(other, SourceLocation):
return False
if self.path and (self.path != other.path):
return False
if self.lineno and (self.lineno != other.lineno):
return False
if self.column and (self.column != other.column):
return False
return True
class StackFrame:
def __init__(self,
function: str = None,
is_inlined: bool = None,
location: SourceLocation = None,
watches: OrderedDict = None):
if watches is None:
watches = {}
self.function = function
self.is_inlined = is_inlined
self.location = location
self.watches = watches
def __str__(self):
return '{}{}: {} | {}'.format(
self.function,
' (inlined)' if self.is_inlined else '',
self.location,
{k: str(self.watches[k]) for k in self.watches})
def match(self, other) -> bool:
"""Returns true iff all the properties that appear in `self` have the
same value in `other`, but not necessarily vice versa.
"""
if not other or not isinstance(other, StackFrame):
return False
if self.location and not self.location.match(other.location):
return False
if self.watches:
for name in iter(self.watches):
try:
if isinstance(self.watches[name], dict):
for attr in iter(self.watches[name]):
if (getattr(other.watches[name], attr, None) !=
self.watches[name][attr]):
return False
else:
if other.watches[name].value != self.watches[name]:
return False
except KeyError:
return False
return True
class ProgramState:
def __init__(self, frames: List[StackFrame] = None):
self.frames = frames
def __str__(self):
return '\n'.join(map(
lambda enum: 'Frame {}: {}'.format(enum[0], enum[1]),
enumerate(self.frames)))
def match(self, other) -> bool:
"""Returns true iff all the properties that appear in `self` have the
same value in `other`, but not necessarily vice versa.
"""
if not other or not isinstance(other, ProgramState):
return False
if self.frames:
for idx, frame in enumerate(self.frames):
try:
if not frame.match(other.frames[idx]):
return False
except (IndexError, KeyError):
return False
return True
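# Illustrative example (not part of the original module): match() is a one-way
# comparison, so only the fields set on the expected object are checked against
# the observed one.
if __name__ == "__main__":
    expected = SourceLocation(path="test.cpp", lineno=12)
    observed = SourceLocation(path="test.cpp", lineno=12, column=7)
    assert expected.match(observed)      # column unset on `expected`, so ignored
    assert not observed.match(expected)  # reverse fails: `expected` has no column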
| endlessm/chromium-browser | third_party/llvm/debuginfo-tests/dexter/dex/dextIR/ProgramState.py | Python | bsd-3-clause | 3,820 | 0.000785 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-17 03:03
from __future__ import unicode_literals
from django.db import migrations
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('ojp', '0004_problem_num_of_correct_tries'),
]
operations = [
migrations.AlterField(
model_name='problem',
name='description',
field=tinymce.models.HTMLField(blank=True, null=True),
),
]
| harshkothari410/ocportal | ojp/migrations/0005_auto_20161117_0303.py | Python | mit | 493 | 0 |
from kombu.tests.utils import unittest
from kombu.transport.virtual import exchange
from kombu.tests.mocks import Channel
class ExchangeCase(unittest.TestCase):
type = None
def setUp(self):
if self.type:
self.e = self.type(Channel())
class test_Direct(ExchangeCase):
type = exchange.DirectExchange
table = [("rFoo", None, "qFoo"),
("rFoo", None, "qFox"),
("rBar", None, "qBar"),
("rBaz", None, "qBaz")]
def test_lookup(self):
self.assertListEqual(self.e.lookup(
self.table, "eFoo", "rFoo", None),
["qFoo", "qFox"])
self.assertListEqual(self.e.lookup(
self.table, "eMoz", "rMoz", "DEFAULT"),
["DEFAULT"])
self.assertListEqual(self.e.lookup(
self.table, "eBar", "rBar", None),
["qBar"])
class test_Fanout(ExchangeCase):
type = exchange.FanoutExchange
table = [(None, None, "qFoo"),
(None, None, "qFox"),
(None, None, "qBar")]
def test_lookup(self):
self.assertListEqual(self.e.lookup(
self.table, "eFoo", "rFoo", None),
["qFoo", "qFox", "qBar"])
class test_Topic(ExchangeCase):
type = exchange.TopicExchange
table = [("stock.#", None, "rFoo"),
("stock.us.*", None, "rBar")]
def setUp(self):
super(test_Topic, self).setUp()
self.table = [(rkey, self.e.key_to_pattern(rkey), queue)
for rkey, _, queue in self.table]
def test_prepare_bind(self):
x = self.e.prepare_bind("qFoo", "eFoo", "stock.#", {})
self.assertTupleEqual(x, ("stock.#", r'^stock\..*?$', "qFoo"))
def test_lookup(self):
self.assertListEqual(self.e.lookup(
self.table, "eFoo", "stock.us.nasdaq", None),
["rFoo", "rBar"])
self.assertTrue(self.e._compiled)
self.assertListEqual(self.e.lookup(
self.table, "eFoo", "stock.europe.OSE", None),
["rFoo"])
self.assertListEqual(self.e.lookup(
self.table, "eFoo", "stockxeuropexOSE", None),
[None])
self.assertListEqual(self.e.lookup(
self.table, "eFoo", "candy.schleckpulver.snap_crackle", None),
[None])
class test_ExchangeType(ExchangeCase):
type = exchange.ExchangeType
def test_lookup(self):
self.assertRaises(NotImplementedError, self.e.lookup,
[], "eFoo", "rFoo", None)
def test_prepare_bind(self):
self.assertTupleEqual(self.e.prepare_bind("qFoo", "eFoo", "rFoo", {}),
("rFoo", None, "qFoo"))
def test_equivalent(self):
e1 = dict(type="direct",
durable=True,
auto_delete=True,
arguments={})
self.assertTrue(
self.e.equivalent(e1, "eFoo", "direct", True, True, {}))
self.assertFalse(
self.e.equivalent(e1, "eFoo", "topic", True, True, {}))
self.assertFalse(
self.e.equivalent(e1, "eFoo", "direct", False, True, {}))
self.assertFalse(
self.e.equivalent(e1, "eFoo", "direct", True, False, {}))
self.assertFalse(
self.e.equivalent(e1, "eFoo", "direct", True, True, {
"expires": 3000}))
e2 = dict(e1, arguments={"expires": 3000})
self.assertTrue(
self.e.equivalent(e2, "eFoo", "direct", True, True, {
"expires": 3000}))
self.assertFalse(
self.e.equivalent(e2, "eFoo", "direct", True, True, {
"expires": 6000}))
| pantheon-systems/kombu | kombu/tests/test_virtual_exchange.py | Python | bsd-3-clause | 3,747 | 0.000801 |
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('textbox15.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox('E9', 'This is some text',
{'align': {'horizontal': 'center'}})
workbook.close()
self.assertExcelEqual()
| jmcnamara/XlsxWriter | xlsxwriter/test/comparison/test_textbox15.py | Python | bsd-2-clause | 900 | 0 |
from os.path import dirname
from os import listdir
path = dirname(__file__)
i = path.find(".zip")
if i == -1: # OS X app or unpacked python files
__all__ = [p[:-3] for p in listdir(path) if p.endswith(".py") and p != "__init__.py"]
del p
else: # Windows binary zipped .pyc files
import zipfile
__all__ = [f[8:-4] for f in zipfile.ZipFile(path[:i+4]).namelist() if f.find('plugins/') == 0 and
f.endswith(".pyc") and not f.endswith("__init__.pyc")]
del f
del zipfile
del i
del path
del dirname
del listdir
| chrmoritz/zoxel | src/plugins/__init__.py | Python | gpl-3.0 | 547 | 0.003656 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_epg_monitoring_policy
short_description: Manage monitoring policies on Cisco ACI fabrics (mon:EPGPol)
description:
- Manage monitoring policies on Cisco ACI fabrics.
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
- More information from the internal APIC class I(mon:EPGPol) at
U(https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Dag Wieers (@dagwieers)
version_added: '2.4'
options:
monitoring_policy:
description:
- The name of the monitoring policy.
required: yes
aliases: [ name ]
description:
description:
- Description for the monitoring policy.
aliases: [ descr ]
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ tenant_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
# FIXME: Add more, better examples
EXAMPLES = r'''
- aci_epg_monitoring_policy:
host: '{{ hostname }}'
username: '{{ username }}'
password: '{{ password }}'
monitoring_policy: '{{ monitoring_policy }}'
description: '{{ description }}'
tenant: '{{ tenant }}'
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
monitoring_policy=dict(type='str', required=False, aliases=['name']), # Not required for querying all objects
tenant=dict(type='str', required=False, aliases=['tenant_name']), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6
protocol=dict(type='str', removed_in_version='2.6'), # Deprecated in v2.6
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['monitoring_policy', 'tenant']],
['state', 'present', ['monitoring_policy', 'tenant']],
],
)
monitoring_policy = module.params['monitoring_policy']
description = module.params['description']
state = module.params['state']
tenant = module.params['tenant']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
module_object=tenant,
),
subclass_1=dict(
aci_class='monEPGPol',
aci_rn='monepg-{0}'.format(monitoring_policy),
filter_target='eq(monEPGPol.name, "{0}")'.format(monitoring_policy),
module_object=monitoring_policy,
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='monEPGPol',
class_config=dict(
name=monitoring_policy,
descr=description,
),
)
aci.get_diff(aci_class='monEPGPol')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| wrouesnel/ansible | lib/ansible/modules/network/aci/aci_epg_monitoring_policy.py | Python | gpl-3.0 | 6,703 | 0.00179 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, too-many-lines, import-outside-toplevel
"""Tensorflow lite frontend."""
import math
import itertools
import numpy as np
import tvm
from tvm.ir import IRModule
from tvm import relay
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from .. import qnn as _qnn
from ... import nd as _nd
from .common import ExprTable
from .common import infer_shape as _infer_shape
from .tflite_flexbuffer import FlexBufferDecoder
__all__ = ["from_tflite"]
class TensorWrapper(object):
"""Tensor wrapper for TFLite Tensor"""
def __init__(self, tensor_idx, tensor, buffer, qnn_params=None):
self.tensor_idx = tensor_idx
self.tensor = tensor
self.buffer = buffer
self.qnn_params = qnn_params
class OperatorConverter(object):
"""Operator Converted for converting TFLite ops to Relay ops"""
def __init__(self, model, subgraph, exp_tab):
try:
from tflite.BuiltinOperator import BuiltinOperator
from tflite.BuiltinOptions import BuiltinOptions
from tflite.ActivationFunctionType import ActivationFunctionType
except ImportError:
raise ImportError("The tflite package must be installed")
self.model = model
self.subgraph = subgraph
self.exp_tab = exp_tab
self.builtin_op_code = build_str_map(BuiltinOperator())
self.activation_fn_type = build_str_map(ActivationFunctionType())
self.builtin_options = build_str_map(BuiltinOptions())
# Add more operators
self.convert_map = {
"ABS": self.convert_abs,
"ADD": self.convert_add,
"ADD_N": self.convert_add_n,
"ARG_MAX": self.convert_arg_max,
"ARG_MIN": self.convert_arg_min,
"AVERAGE_POOL_2D": self.convert_average_pool2d,
"BATCH_TO_SPACE_ND": self.convert_batch_to_space_nd,
"CAST": self.convert_cast,
"CEIL": self.convert_ceil,
"CONCATENATION": self.convert_concatenation,
"CONV_2D": self.convert_conv2d,
"COS": self.convert_cos,
"DEPTH_TO_SPACE": self.convert_depth_to_space,
"DEPTHWISE_CONV_2D": self.convert_depthwise_conv2d,
"DEQUANTIZE": self.convert_dequantize,
"DETECTION_POSTPROCESS": self.convert_detection_postprocess,
"DIV": self.convert_div,
"ELU": self.convert_elu,
"EQUAL": self.convert_equal,
"EXP": self.convert_exp,
"EXPAND_DIMS": self.convert_expand_dims,
"FILL": self.convert_fill,
"FLOOR_DIV": self.convert_floor_div,
"FLOOR_MOD": self.convert_floor_mod,
"FLOOR": self.convert_floor,
"FULLY_CONNECTED": self.convert_fully_connected,
"GATHER": self.convert_gather,
"GATHER_ND": self.convert_gather_nd,
"GREATER_EQUAL": self.convert_greater_equal,
"GREATER": self.convert_greater,
"HARD_SWISH": self.convert_hard_swish,
"L2_NORMALIZATION": self.convert_l2_normalization,
"L2_POOL_2D": self.convert_l2_pool2d,
"LEAKY_RELU": self.convert_leaky_relu,
"LESS_EQUAL": self.convert_less_equal,
"LESS": self.convert_less,
"LOCAL_RESPONSE_NORMALIZATION": self.convert_lrn,
"LOG": self.convert_log,
"LOG_SOFTMAX": self.convert_log_softmax,
"LOGICAL_AND": self.convert_logical_and,
"LOGICAL_NOT": self.convert_logical_not,
"LOGICAL_OR": self.convert_logical_or,
"LOGISTIC": self.convert_logistic,
"MATRIX_DIAG": self.convert_matrix_diag,
"MATRIX_SET_DIAG": self.convert_matrix_set_diag,
"MAX_POOL_2D": self.convert_max_pool2d,
"MAXIMUM": self.convert_maximum,
"MEAN": self.convert_reduce_mean,
"MINIMUM": self.convert_minimum,
"MIRROR_PAD": self.convert_mirror_pad,
"MUL": self.convert_mul,
"NEG": self.convert_neg,
"NOT_EQUAL": self.convert_not_equal,
"ONE_HOT": self.convert_one_hot,
"PACK": self.convert_pack,
"PAD": self.convert_pad,
"PADV2": self.convert_pad,
"POW": self.convert_pow,
"PRELU": self.convert_prelu,
"RANGE": self.convert_range,
"QUANTIZE": self.convert_quantize,
"REDUCE_ANY": self.convert_reduce_any,
"REDUCE_MAX": self.convert_reduce_max,
"REDUCE_MIN": self.convert_reduce_min,
"REDUCE_PROD": self.convert_reduce_prod,
"RELU": self.convert_relu,
"RELU6": self.convert_relu6,
"RELU_N1_TO_1": self.convert_relu_n1_to_1,
"RESHAPE": self.convert_reshape,
"RESIZE_BILINEAR": self.convert_resize_bilinear,
"RESIZE_NEAREST_NEIGHBOR": self.convert_resize_nearest_neighbor,
"ROUND": self.convert_round,
"RSQRT": self.convert_rsqrt,
"REVERSE_SEQUENCE": self.convert_reverse_sequence,
"REVERSE_V2": self.convert_reverse_v2,
"SELECT": self.convert_select,
"SHAPE": self.convert_shape,
"SIN": self.convert_sin,
"SLICE": self.convert_slice,
"SOFTMAX": self.convert_softmax,
"SPACE_TO_BATCH_ND": self.convert_space_to_batch_nd,
"SPACE_TO_DEPTH": self.convert_space_to_depth,
"SPARSE_TO_DENSE": self.convert_sparse_to_dense,
"SPLIT": self.convert_split,
"SPLIT_V": self.convert_split_v,
"SQRT": self.convert_sqrt,
"SQUARE": self.convert_square,
"SQUARED_DIFFERENCE": self.convert_squared_difference,
"SQUEEZE": self.convert_squeeze,
"STRIDED_SLICE": self.convert_strided_slice,
"SUB": self.convert_sub,
"SUM": self.convert_reduce_sum,
"TAN": self.convert_tan,
"TANH": self.convert_tanh,
"TILE": self.convert_tile,
"TOPK_V2": self.convert_topk_v2,
"TRANSPOSE_CONV": self.convert_transpose_conv,
"TRANSPOSE": self.convert_transpose,
"UNPACK": self.convert_unpack,
"WHERE": self.convert_select,
"ZEROS_LIKE": self.convert_zeros_like,
}
def check_unsupported_ops(self):
"""Check unsupported TFLite ops in our converter."""
unsupported_ops_set = set()
for op_idx in range(self.subgraph.OperatorsLength()):
op = self.subgraph.Operators(op_idx)
op_code_str = self.get_op_code_str(op)
if op_code_str not in self.convert_map:
unsupported_ops_set.add(op_code_str)
if unsupported_ops_set:
msg = "The following operators are not supported in frontend " "TFLite: {}"
ops = str(list(unsupported_ops_set)).strip("[,]")
raise tvm.error.OpNotImplemented(msg.format(ops))
def convert_op_to_relay(self):
"""Convert TFLite ops to relay ops"""
for op_idx in range(self.subgraph.OperatorsLength()):
op = self.subgraph.Operators(op_idx)
op_code_str = self.get_op_code_str(op)
output_tensors = self.get_output_tensors(op)
try:
from tflite.Operator import Operator
except ImportError:
raise ImportError("The tflite package must be installed")
assert isinstance(op, Operator)
ret = self.convert_map[op_code_str](op)
if len(output_tensors) == 1:
tensor_idx = output_tensors[0].tensor_idx
self.exp_tab.set_expr(get_tensor_name(self.subgraph, tensor_idx), ret)
else:
for idx, output_tensor in enumerate(output_tensors):
self.exp_tab.set_expr(
get_tensor_name(self.subgraph, output_tensor.tensor_idx), ret[idx]
)
def get_op_code_str(self, op):
"""Get TFLite ops string representation"""
try:
from tflite.BuiltinOperator import BuiltinOperator
except ImportError:
raise ImportError("The tflite package must be installed")
op_code_list_idx = op.OpcodeIndex()
op_code_id = self.model.OperatorCodes(op_code_list_idx).BuiltinCode()
try:
op_code_str = self.builtin_op_code[op_code_id]
except KeyError:
raise NotImplementedError(
"TFLite operator with code "
+ str(op_code_id)
+ " is not supported by this version of the fbs schema."
)
if op_code_id == BuiltinOperator.CUSTOM:
# Custom operator
custom_op_code_str = self.model.OperatorCodes(op_code_list_idx).CustomCode()
if custom_op_code_str == b"TFLite_Detection_PostProcess":
return "DETECTION_POSTPROCESS"
raise NotImplementedError("Custom operators are currently not supported")
return op_code_str
def get_input_tensors(self, op):
operator_inputs = op.InputsAsNumpy()
return self.get_tensors(operator_inputs)
def get_output_tensors(self, op):
operator_outputs = op.OutputsAsNumpy()
return self.get_tensors(operator_outputs)
def get_tensors(self, tensors_idx_list):
"""Get tensor wrapper list from given TFLite tensor index list"""
return_list = list()
for tensor_idx in tensors_idx_list:
if tensor_idx < 0:
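                # A negative index means the input is absent (e.g. an optional operand),
                # so keep a placeholder wrapper to preserve positional alignment.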
return_list.append(TensorWrapper(tensor_idx, 0, 0))
continue
tensor = self.subgraph.Tensors(tensor_idx)
buffer_idx = tensor.Buffer()
buffer = self.model.Buffers(buffer_idx)
# Check if the tensors are quantized. Parse if yes.
qnn_params = None
tflite_qnn_params = tensor.Quantization()
if tflite_qnn_params is not None:
# TFLite supports both per-tensor and per-axis (aka channel) quantization. For
# per-tensor quantization, scale and zero points are scalar values. For per-axis
# quantization, scale and zero points for the weights are tensors (activations are
# per-tensor quantized). However, the TFLite quantization spec puts restrictions on
# zero points for per-axis quantization. Specifically, the zero point is a tensor
# but all values are 0. More information can be found here -
# https://www.tensorflow.org/lite/performance/quantization_spec
tflite_scale = tflite_qnn_params.ScaleAsNumpy()
tflite_zero_point = tflite_qnn_params.ZeroPointAsNumpy()
is_qnn_params_valid = True
# Handle Per-axis and per-tensor cases
if isinstance(tflite_scale, np.ndarray):
assert isinstance(tflite_zero_point, np.ndarray)
# Tensor - Per-axis quantization
if tflite_scale.size != 1 and tflite_zero_point.size != 1:
scale = tflite_scale
# Ensure that all zero points are zeros
zero_point = tflite_zero_point
if not np.all(zero_point == 0):
raise tvm.error.OpAttributeInvalid(
"TFLite per-axis quantization restricts all zero points to be"
+ " 0, but a non-zero value is observed"
)
zero_point = int(zero_point[0])
# Scalar - Per-tensor quantization
elif tflite_scale.size == 1 and tflite_zero_point.size == 1:
scale = float(tflite_scale[0])
zero_point = int(tflite_zero_point[0])
else:
raise NotImplementedError(
"Quantized type {} (scale) and {} (zero point) not supported".format(
type(tflite_scale), type(tflite_zero_point)
)
)
elif tflite_scale == 0 and tflite_zero_point == 0:
# Handle corner case for ops like quantized reshape whose second operand (shape)
# has zero scale and zero zero point. This is not used.
is_qnn_params_valid = False
else:
raise NotImplementedError(
"Quantized type {} not supported".format(type(tflite_scale))
)
# Check that the scale and zero points are valid.
if is_qnn_params_valid:
qnn_params = dict()
qnn_params["scale"] = relay.const(scale, "float32")
qnn_params["zero_point"] = relay.const(zero_point, "int32")
return_list.append(TensorWrapper(tensor_idx, tensor, buffer, qnn_params))
return return_list
def get_tensor_type_as_numpy(self, tensor_wrapper):
"""Returns np.dtype out of TensorType"""
assert isinstance(tensor_wrapper, TensorWrapper)
try:
from tflite.TensorType import TensorType
return {
TensorType.UINT8: np.uint8,
TensorType.INT8: np.int8,
TensorType.FLOAT32: np.float32,
TensorType.INT32: np.int32,
TensorType.INT64: np.int64,
TensorType.BOOL: np.bool_,
}[tensor_wrapper.tensor.Type()]
except ImportError:
raise ImportError("The tflite package must be installed")
except KeyError:
raise NotImplementedError(
"Tensor type '{}' currently not supported".format(tensor_wrapper.tensor.Type())
)
def get_tensor_value(self, tensor_wrapper):
"""Get tensor buffer value from given tensor wrapper"""
assert isinstance(tensor_wrapper, TensorWrapper)
dtype = self.get_tensor_type_as_numpy(tensor_wrapper)
data = tensor_wrapper.buffer.DataAsNumpy()
if tensor_wrapper.tensor.ShapeLength() != 0:
shape = tensor_wrapper.tensor.ShapeAsNumpy()
else:
shape = []
return np.frombuffer(data, dtype=dtype).reshape(shape)
def get_tensor_type_str(self, tensor_type):
"""Get tensor type string representation when given TFLite tensor type"""
try:
from tflite.TensorType import TensorType
except ImportError:
raise ImportError("The tflite package must be installed")
if tensor_type == TensorType.INT8:
return "int8"
if tensor_type == TensorType.UINT8:
return "uint8"
if tensor_type == TensorType.FLOAT32:
return "float32"
if tensor_type == TensorType.INT32:
return "int32"
if tensor_type == TensorType.INT64:
return "int64"
if tensor_type == TensorType.BOOL:
return "bool"
raise NotImplementedError(
"Tensor type {} is currently not supported".format(str(tensor_type))
)
def has_same_qnn_params(self, lhs_tensor, rhs_tensor):
lhs_scale = lhs_tensor.qnn_params["scale"]
rhs_scale = rhs_tensor.qnn_params["scale"]
lhs_zero_point = lhs_tensor.qnn_params["zero_point"]
rhs_zero_point = rhs_tensor.qnn_params["zero_point"]
# 0.1 + 0.2 != 0.3
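        # Scales and zero points are floats read from the flatbuffer, so compare with a
        # small tolerance rather than exact equality.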
return np.allclose(
lhs_scale.data.asnumpy(), rhs_scale.data.asnumpy(), rtol=1e-5, atol=1e-5
) and np.allclose(
lhs_zero_point.data.asnumpy(), rhs_zero_point.data.asnumpy(), rtol=1e-5, atol=1e-5
)
def is_quantized(self, op):
"""Check if an input tensor is quantized."""
input_tensors = self.get_input_tensors(op)
first_tensor = input_tensors[0]
return first_tensor.qnn_params is not None
def quantize(self, expr, tensor_to_quantize):
""" Helper function to quantize a tensor with Relay """
tensor_type = tensor_to_quantize.tensor.Type()
tensor_type_str = self.get_tensor_type_str(tensor_type)
quantized = _qnn.op.quantize(
data=expr,
output_scale=tensor_to_quantize.qnn_params["scale"],
output_zero_point=tensor_to_quantize.qnn_params["zero_point"],
out_dtype=tensor_type_str,
)
return quantized
def dequantize(self, expr, tensor):
""" Helper function to dequantize a tensor with Relay """
dequantized = _qnn.op.dequantize(
data=expr,
input_scale=tensor.qnn_params["scale"],
input_zero_point=tensor.qnn_params["zero_point"],
)
return dequantized
def convert_qnn_fused_activation_function(
self, expr, fused_activation_fn, scale, zero_point, dtype
):
"""Convert TFLite fused activation function. The expr is an input quantized tensor with
scale and zero point"""
try:
from tflite.ActivationFunctionType import ActivationFunctionType
except ImportError:
raise ImportError("The tflite package must be installed")
        # Quantize a float value to a quantized integer value
quantize = lambda x: float(int(round(x / scale)) + zero_point)
# Get min/max of the output dtype. This will be used to ensure that clip a_min/a_max are not
# beyond the dtype range.
qmin = float(tvm.tir.op.min_value(dtype).value)
qmax = float(tvm.tir.op.max_value(dtype).value)
# The input expr is a quantized tensor with its scale and zero point. We calculate the
# suitable clip off points based on these scale and zero point.
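        # For example (hypothetical values): with a uint8 output, scale=0.1 and zero_point=0,
        # RELU6 becomes clip(expr, a_min=max(0, 0), a_max=min(255, 60)) = clip(expr, 0, 60).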
if fused_activation_fn == ActivationFunctionType.NONE:
return expr
if fused_activation_fn == ActivationFunctionType.RELU6:
return _op.clip(expr, a_min=max(qmin, quantize(0)), a_max=min(qmax, quantize(6.0)))
if fused_activation_fn == ActivationFunctionType.RELU_N1_TO_1:
return _op.clip(expr, a_min=max(qmin, quantize(-1.0)), a_max=min(qmax, quantize(1.0)))
if fused_activation_fn == ActivationFunctionType.RELU:
return _op.clip(expr, a_min=max(qmin, quantize(0.0)), a_max=qmax)
fused_activation_fn_str = self.activation_fn_type[fused_activation_fn]
raise tvm.error.OpNotImplemented(
"Quantized activation {} is not supported yet.".format(fused_activation_fn_str)
)
def convert_conv2d(self, op):
"""Convert TFLite conv2d"""
return self.convert_conv(op, "conv2d")
def convert_depthwise_conv2d(self, op):
"""Convert TFLite depthwise conv2d"""
return self.convert_conv(op, "depthwise")
def convert_average_pool2d(self, op):
"""Convert TFLite average pool2d"""
return self.convert_pool2d(op, "average")
def convert_max_pool2d(self, op):
"""Convert TFLite max pool2d"""
return self.convert_pool2d(op, "max")
def convert_l2_pool2d(self, op):
"""Convert TFLite l2 pool2d"""
return self.convert_pool2d(op, "l2")
def convert_reshape(self, op):
"""Convert TFLite reshape"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.ReshapeOptions import ReshapeOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
        assert len(input_tensors) in (1, 2), "input tensors length should be 1 or 2"
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "There should be only 1 output tensor"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
if len(input_tensors) == 2:
shape_tensor = input_tensors[1]
if self.has_expr(shape_tensor.tensor_idx):
target_shape = self.get_expr(shape_tensor.tensor_idx)
else:
target_shape = self.get_tensor_value(shape_tensor)
# convert to flattened list
from itertools import chain
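                # chain(*target_shape) flattens a nested shape value; a flat 1-D value
                # raises TypeError, in which case its elements are used directly.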
try:
target_shape = list(chain(*target_shape))
except TypeError:
target_shape = list(chain(target_shape))
else:
assert op.BuiltinOptionsType() == BuiltinOptions.ReshapeOptions
op_options = op.BuiltinOptions()
reshape_options = ReshapeOptions()
reshape_options.Init(op_options.Bytes, op_options.Pos)
target_shape = tuple(reshape_options.NewShapeAsNumpy())
in_expr = self.get_expr(input_tensor_idx)
# If the tensors are quantized, ensure that input/output qnn params are same.
if input_tensor.qnn_params:
output_tensor = output_tensors[0]
assert self.has_same_qnn_params(
input_tensor, output_tensor
), "TFLite reshape requires input and output scale and zero points to be equal"
out = _op.reshape(in_expr, newshape=target_shape)
return out
def _convert_resize(self, method, op):
"""Generic method to Convert TFLite RESIZE operators"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.ResizeBilinearOptions import ResizeBilinearOptions
# ResizeNearestNeighborOptions was added in tflite v1.13
tflite_ver = 1120
if "ResizeNearestNeighborOptions" in dir(BuiltinOptions):
from tflite.ResizeNearestNeighborOptions import ResizeNearestNeighborOptions
tflite_ver = 1130
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
# images, 4-D Tensor with shape NHWC.
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
# size - 1-D int32 Tensor of 2 elements: new_height, new_width
target_size = tuple(self.get_tensor_value(input_tensors[1]))
# Options - align_corners (bool)
resize_options = None
align_corners = False
if method == "bilinear":
assert op.BuiltinOptionsType() == BuiltinOptions.ResizeBilinearOptions
resize_options = ResizeBilinearOptions()
elif tflite_ver >= 1130:
assert op.BuiltinOptionsType() == BuiltinOptions.ResizeNearestNeighborOptions
resize_options = ResizeNearestNeighborOptions()
if resize_options is not None:
op_options = op.BuiltinOptions()
resize_options.Init(op_options.Bytes, op_options.Pos)
align_corners = resize_options.AlignCorners()
# Use layout NHWC
coord_trans = "align_corners" if align_corners else "asymmetric"
out = _op.image.resize(
in_expr, target_size, "NHWC", method, coordinate_transformation_mode=coord_trans
)
return out
def convert_resize_bilinear(self, op):
"""Convert TFLite RESIZE_BILINEAR"""
return self._convert_resize("bilinear", op)
def convert_resize_nearest_neighbor(self, op):
"""Convert TFLite RESIZE_NEAREST_NEIGHBOR"""
return self._convert_resize("nearest_neighbor", op)
def convert_l2_normalization(self, op):
"""Convert TFLite L2_NORMALIZATION """
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.L2NormOptions import L2NormOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
assert op.BuiltinOptionsType() == BuiltinOptions.L2NormOptions
op_options = op.BuiltinOptions()
l2_norm_options = L2NormOptions()
l2_norm_options.Init(op_options.Bytes, op_options.Pos)
fused_activation_fn = l2_norm_options.FusedActivationFunction()
# TFLite supports normalization only over the last dim
input_tensor_rank = len(input_tensor.tensor.ShapeAsNumpy())
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFLite quantized L2_NORMALIZATION operator is not supported yet."
)
# TFL uses only the default epsilon value
out = _op.nn.l2_normalize(in_expr, eps=1e-12, axis=[input_tensor_rank - 1])
# if we have fused activation fn
if output_tensor.qnn_params:
raise tvm.error.OpNotImplemented(
"TFLite quantized L2_NORMALIZATION operator is not supported yet."
)
out = self.convert_fused_activation_function(out, fused_activation_fn)
return out
def convert_lrn(self, op):
"""Convert TFLite LOCAL_RESPONSE_NORMALIZATION """
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.LocalResponseNormalizationOptions import LocalResponseNormalizationOptions
except ImportError:
raise ImportError("The tflite package must be installed")
if self.is_quantized(op):
raise tvm.error.OpNotImplemented("TFlite quantized LRN operator is not supported yet.")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
assert op.BuiltinOptionsType() == BuiltinOptions.LocalResponseNormalizationOptions
op_options = op.BuiltinOptions()
lrn_options = LocalResponseNormalizationOptions()
lrn_options.Init(op_options.Bytes, op_options.Pos)
radius = lrn_options.Radius()
bias = lrn_options.Bias()
alpha = lrn_options.Alpha()
beta = lrn_options.Beta()
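        # TFLite's radius is one-sided, so the window covers 2 * radius + 1 elements.
        # Relay's nn.lrn divides the squared sum by size, while TFLite applies alpha to the
        # raw sum, so alpha is scaled by size below to preserve TFLite semantics.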
size = (radius * 2) + 1
alpha = alpha * size
axis = 3 # NHWC format
out = _op.nn.lrn(in_expr, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)
return out
def convert_logistic(self, op):
"""Convert TFLite LOGISTIC"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
if input_tensor.qnn_params:
in_expr = self.dequantize(in_expr, input_tensor)
out = _op.sigmoid(in_expr)
if output_tensor.qnn_params:
out = self.quantize(out, output_tensor)
return out
def convert_softmax(self, op):
"""Convert TFLite softmax"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
params = {"axis": 1} # 1 is channel
in_expr = self.get_expr(input_tensor_idx)
# TODO - Naive softmax int8 implementation leads to bad accuracy. Currently, we can
# dequantize to FP32 and perform softmax on FP32. We can investigate an integer only softmax
        # implementation in the future.
if input_tensor.qnn_params:
in_expr = self.dequantize(in_expr, input_tensor)
out = _op.nn.softmax(in_expr, **params)
        # Go back to integer datatype if the original operator was quantized.
if output_tensor.qnn_params:
out = self.quantize(out, output_tensor)
return out
def convert_tanh(self, op):
"""Convert TFLite TANH"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
out = _op.tanh(in_expr)
return out
def convert_range(self, op):
"""Convert TFLite Range"""
try:
from tflite.TensorType import TensorType
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 3, "input tensors length should be 3"
start, limit, delta = input_tensors[0], input_tensors[1], input_tensors[2]
expressions = [self.get_tensor_expr(t) for t in [start, limit, delta]]
# out type inference
if delta.tensor.Type() == TensorType.FLOAT32:
out_type = self.get_tensor_type_str(delta.tensor.Type())
else:
out_type = self.get_tensor_type_str(start.tensor.Type())
out = _op.arange(expressions[0], expressions[1], expressions[2], out_type)
return out
def convert_shape(self, op):
"""Convert TFLite Shape"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
out = _op.shape_of(self.get_tensor_expr(input_tensors[0]))
return out
def convert_relu(self, op):
"""Convert TFLite ReLU"""
try:
from tflite.ActivationFunctionType import ActivationFunctionType
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
if input_tensor.qnn_params:
            # Quantize a float value to a quantized integer value
scale_val = get_scalar_from_constant(input_tensor.qnn_params["scale"])
zero_point_val = get_scalar_from_constant(input_tensor.qnn_params["zero_point"])
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
out = self.convert_qnn_fused_activation_function(
expr=in_expr,
fused_activation_fn=ActivationFunctionType.RELU,
scale=scale_val,
zero_point=zero_point_val,
dtype=output_tensor_type_str,
)
else:
out = _op.nn.relu(in_expr)
if output_tensor.qnn_params:
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
out = _qnn.op.requantize(
out,
input_scale=input_tensor.qnn_params["scale"],
input_zero_point=input_tensor.qnn_params["zero_point"],
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
out_dtype=output_tensor_type_str,
)
return out
def convert_hard_swish(self, op):
"""Convert TFLite Hard swish"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
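        # hard_swish(x) = x * relu6(x + 3) / 6, assembled below from clip and elementwise ops.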
def _relu6(data):
return _op.tensor.clip(data, 0.0, 6.0)
def _hard_swish(data):
return data * _relu6(data + relay.const(3.0)) / relay.const(6.0)
# Dequantize if the input is quantized.
if input_tensor.qnn_params:
in_expr = self.dequantize(in_expr, input_tensor)
# Perform hardswish
out = _hard_swish(in_expr)
        # Go back to integer datatype if the original operator was quantized.
if output_tensor.qnn_params:
out = self.quantize(out, output_tensor)
return out
def convert_relu6(self, op):
"""Convert TFLite ReLU6"""
try:
from tflite.ActivationFunctionType import ActivationFunctionType
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
if input_tensor.qnn_params:
            # Quantize a float value to a quantized integer value
scale_val = get_scalar_from_constant(input_tensor.qnn_params["scale"])
zero_point_val = get_scalar_from_constant(input_tensor.qnn_params["zero_point"])
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
out = self.convert_qnn_fused_activation_function(
expr=in_expr,
fused_activation_fn=ActivationFunctionType.RELU6,
scale=scale_val,
zero_point=zero_point_val,
dtype=output_tensor_type_str,
)
else:
out = _op.clip(in_expr, a_min=0, a_max=6)
if output_tensor.qnn_params:
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
out = _qnn.op.requantize(
out,
input_scale=input_tensor.qnn_params["scale"],
input_zero_point=input_tensor.qnn_params["zero_point"],
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
out_dtype=output_tensor_type_str,
)
return out
def convert_leaky_relu(self, op):
"""Convert TFLite LEAKY_RELU"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.LeakyReluOptions import LeakyReluOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
assert op.BuiltinOptionsType() == BuiltinOptions.LeakyReluOptions
op_options = op.BuiltinOptions()
leaky_relu_options = LeakyReluOptions()
leaky_relu_options.Init(op_options.Bytes, op_options.Pos)
alpha_tensor = leaky_relu_options.Alpha()
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
if input_tensor.qnn_params:
in_expr = self.dequantize(in_expr, input_tensor)
out = _op.nn.leaky_relu(in_expr, alpha_tensor)
if output_tensor.qnn_params:
out = self.quantize(out, output_tensor)
return out
def convert_relu_n1_to_1(self, op):
"""Convert TFLite RELU_N1_TO_1"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
if input_tensor.qnn_params:
            # Quantize a float value to a quantized integer value
scale_val = get_scalar_from_constant(input_tensor.qnn_params["scale"])
zero_point_val = get_scalar_from_constant(input_tensor.qnn_params["zero_point"])
quantize = lambda x: float(int(round(x / scale_val)) + zero_point_val)
# Get min/max of the input dtype. This will be used to ensure that
# clip a_min/a_max are not beyond the dtype range.
input_tensor_type_str = self.get_tensor_type_str(input_tensor.tensor.Type())
qmin = float(tvm.tir.op.min_value(input_tensor_type_str).value)
qmax = float(tvm.tir.op.max_value(input_tensor_type_str).value)
out = _op.clip(in_expr, a_min=max(qmin, quantize(-1.0)), a_max=min(qmax, quantize(1.0)))
else:
out = _op.clip(in_expr, a_min=-1, a_max=1)
if output_tensor.qnn_params:
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
out = _qnn.op.requantize(
out,
input_scale=input_tensor.qnn_params["scale"],
input_zero_point=input_tensor.qnn_params["zero_point"],
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
out_dtype=output_tensor_type_str,
)
return out
def convert_log_softmax(self, op):
"""Convert TFLite LOG_SOFTMAX"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
if input_tensor.qnn_params:
in_expr = self.dequantize(in_expr, input_tensor)
out = _op.nn.log_softmax(in_expr)
if output_tensor.qnn_params:
out = self.quantize(out, output_tensor)
return out
def convert_concatenation(self, op):
"""Convert TFLite concatenation"""
try:
from tflite.ConcatenationOptions import ConcatenationOptions
from tflite.BuiltinOptions import BuiltinOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
        assert len(input_tensors) >= 1, "input tensors length should be greater than or equal to 1"
in_exprs = [self.get_expr(input_tensor.tensor_idx) for input_tensor in input_tensors]
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
assert op.BuiltinOptionsType() == BuiltinOptions.ConcatenationOptions
op_options = op.BuiltinOptions()
concatenation_options = ConcatenationOptions()
concatenation_options.Init(op_options.Bytes, op_options.Pos)
concatenation_axis = concatenation_options.Axis()
fused_activation_fn = concatenation_options.FusedActivationFunction()
if not input_tensors[0].qnn_params:
out = _op.concatenate(in_exprs, axis=concatenation_axis)
else:
input_scales = [input_tensor.qnn_params["scale"] for input_tensor in input_tensors]
input_zero_points = [
input_tensor.qnn_params["zero_point"] for input_tensor in input_tensors
]
out = _qnn.op.concatenate(
in_exprs,
input_scales=input_scales,
input_zero_points=input_zero_points,
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
axis=concatenation_axis,
)
# Handle fused activations
if output_tensor.qnn_params:
scale_val = get_scalar_from_constant(output_tensor.qnn_params["scale"])
zero_point_val = get_scalar_from_constant(output_tensor.qnn_params["zero_point"])
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
out = self.convert_qnn_fused_activation_function(
expr=out,
fused_activation_fn=fused_activation_fn,
scale=scale_val,
zero_point=zero_point_val,
dtype=output_tensor_type_str,
)
else:
out = self.convert_fused_activation_function(out, fused_activation_fn)
return out
def _convert_unary_elemwise(self, relay_op, op):
"""Generic method to convert TFLite unary elemwise functions"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
out = relay_op(in_expr)
return out
def convert_abs(self, op):
"""Convert TFLite ABS"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented("TFlite quantized ABS operator is not supported yet.")
return self._convert_unary_elemwise(_op.abs, op)
def convert_ceil(self, op):
"""Convert TFLite CEIL"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented("TFlite quantized CEIL operator is not supported yet.")
return self._convert_unary_elemwise(_op.ceil, op)
def convert_floor(self, op):
"""Convert TFLite FLOOR"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized FLOOR operator is not supported yet."
)
return self._convert_unary_elemwise(_op.floor, op)
def convert_round(self, op):
"""Convert TFLite ROUND"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized ROUND operator is not supported yet."
)
return self._convert_unary_elemwise(_op.round, op)
def convert_exp(self, op):
"""Convert TFLite EXP"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented("TFlite quantized EXP operator is not supported yet.")
return self._convert_unary_elemwise(_op.exp, op)
def convert_log(self, op):
"""Convert TFLite LOG"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented("TFlite quantized LOG operator is not supported yet.")
return self._convert_unary_elemwise(_op.log, op)
def convert_sin(self, op):
"""Convert TFLite SIN"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented("TFlite quantized SIN operator is not supported yet.")
return self._convert_unary_elemwise(_op.sin, op)
def convert_tan(self, op):
"""Convert TFLite TAN"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented("TFlite quantized TAN operator is not supported yet.")
return self._convert_unary_elemwise(_op.tan, op)
def convert_cos(self, op):
"""Convert TFLite COS"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented("TFlite quantized COS operator is not supported yet.")
return self._convert_unary_elemwise(_op.cos, op)
def convert_sqrt(self, op):
"""Convert TFLite SQRT"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented("TFlite quantized SQRT operator is not supported yet.")
return self._convert_unary_elemwise(_op.sqrt, op)
def convert_rsqrt(self, op):
"""Convert TFLite RSQRT"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized RSQRT operator is not supported yet."
)
return self._convert_unary_elemwise(_op.rsqrt, op)
def convert_neg(self, op):
"""Convert TFLite NEG"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented("TFlite quantized NEG operator is not supported yet.")
return self._convert_unary_elemwise(_op.negative, op)
def convert_elu(self, op):
"""Convert TFLite ELU"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented("TFlite quantized ELU operator is not supported yet.")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
exp_type = self.get_tensor_type_str(input_tensor.tensor.Type())
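        # ELU(x) = x for x > 0 and exp(x) - 1 for x <= 0; the expression below encodes this
        # as -1 * relu(1 - exp(x)) + relu(x) using only ops available in Relay.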
out = relay.const(-1.0, exp_type) * _op.nn.relu(
relay.const(1.0, exp_type) - _op.exp(in_expr)
) + _op.nn.relu(in_expr)
return out
def convert_square(self, op):
"""Convert TFLite SQUARE"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized SQUARE operator is not supported yet."
)
exp_type = self.get_tensor_type_str(output_tensor.tensor.Type())
out = _op.power(in_expr, relay.const(2, exp_type))
return out
def _convert_elemwise(self, relay_op, op, ignore_qnn_params=False):
"""Generic method to Convert TFLite elemwise"""
try:
from tflite.AddOptions import AddOptions
from tflite.SubOptions import SubOptions
from tflite.MulOptions import MulOptions
from tflite.DivOptions import DivOptions
from tflite.BuiltinOptions import BuiltinOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
lhs_tensor = input_tensors[0]
rhs_tensor = input_tensors[1]
lhs_expr = self.get_tensor_expr(lhs_tensor)
rhs_expr = self.get_tensor_expr(rhs_tensor)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
        # For some operations TFLite requires the input and output tensors to share the same
        # scale and zero point; when ignore_qnn_params=True we rely on that guarantee and emit
        # the non-quantized Relay op instead of the QNN one.
if ignore_qnn_params:
assert (
lhs_tensor.qnn_params
and self.has_same_qnn_params(lhs_tensor, output_tensor)
and self.has_same_qnn_params(rhs_tensor, output_tensor)
), "All tensors should be quantized with the same (scale,zero-point) tuple parameters"
# If quantized, extracts qnn params and call QNN add operator.
if not ignore_qnn_params and lhs_tensor.qnn_params:
assert rhs_tensor.qnn_params, "Both tensors should be quantized."
assert output_tensor.qnn_params, "Output tensor should be quantized."
out = relay_op(
lhs=lhs_expr,
rhs=rhs_expr,
lhs_scale=lhs_tensor.qnn_params["scale"],
lhs_zero_point=lhs_tensor.qnn_params["zero_point"],
rhs_scale=rhs_tensor.qnn_params["scale"],
rhs_zero_point=rhs_tensor.qnn_params["zero_point"],
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
)
else:
out = relay_op(lhs_expr, rhs_expr)
# Options (fused_activation_function)
options = None
if op.BuiltinOptionsType() == BuiltinOptions.AddOptions:
options = AddOptions()
elif op.BuiltinOptionsType() == BuiltinOptions.SubOptions:
options = SubOptions()
elif op.BuiltinOptionsType() == BuiltinOptions.MulOptions:
options = MulOptions()
elif op.BuiltinOptionsType() == BuiltinOptions.DivOptions:
options = DivOptions()
if options is not None:
op_options = op.BuiltinOptions()
options.Init(op_options.Bytes, op_options.Pos)
fused_activation_fn = options.FusedActivationFunction()
# Handle fused activations
if not ignore_qnn_params and output_tensor.qnn_params:
scale_val = get_scalar_from_constant(output_tensor.qnn_params["scale"])
zero_point_val = get_scalar_from_constant(output_tensor.qnn_params["zero_point"])
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
out = self.convert_qnn_fused_activation_function(
expr=out,
fused_activation_fn=fused_activation_fn,
scale=scale_val,
zero_point=zero_point_val,
dtype=output_tensor_type_str,
)
else:
out = self.convert_fused_activation_function(out, fused_activation_fn)
return out
def convert_add(self, op):
"""Convert TFLite ADD"""
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
return self._convert_elemwise(_qnn.op.add, op)
return self._convert_elemwise(_op.add, op)
def convert_add_n(self, op):
"""Convert TFLite ADD_N"""
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
input_tensors = self.get_input_tensors(op)
assert not input_tensors[0].qnn_params, "TFLite does not support quantized ADD_N."
lhs_expr = self.get_tensor_expr(input_tensors[0])
for rhs_tensor in input_tensors[1:]:
assert not rhs_tensor.qnn_params, "TFLite does not support quantized ADD_N"
rhs_expr = self.get_tensor_expr(rhs_tensor)
lhs_expr = _op.add(lhs_expr, rhs_expr)
return lhs_expr
def convert_sub(self, op):
"""Convert TFLite SUB"""
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
return self._convert_elemwise(_qnn.op.subtract, op)
return self._convert_elemwise(_op.subtract, op)
def convert_mul(self, op):
"""Convert TFLite MUL"""
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
return self._convert_elemwise(_qnn.op.mul, op)
return self._convert_elemwise(_op.multiply, op)
def convert_div(self, op):
"""Convert TFLite DIV"""
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
raise tvm.error.OpNotImplemented("TFlite quantized DIV operator is not supported yet.")
return self._convert_elemwise(_op.divide, op)
def convert_pow(self, op):
"""Convert TFLite POW"""
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
raise tvm.error.OpNotImplemented("TFlite quantized POW operator is not supported yet.")
return self._convert_elemwise(_op.power, op)
def convert_maximum(self, op):
"""Convert TFLite MAXIMUM"""
return self._convert_elemwise(_op.maximum, op, self.is_quantized(op))
def convert_minimum(self, op):
"""Convert TFLite MINIMUM"""
return self._convert_elemwise(_op.minimum, op, self.is_quantized(op))
def convert_greater(self, op):
"""Convert TFLite GREATER"""
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized GREATER operator is not supported yet."
)
return self._convert_elemwise(_op.greater, op)
def convert_squared_difference(self, op):
"""Convert TFLite SQUARED DIFFERENCE"""
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized squared difference operator is not supported yet."
)
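        # squared_difference(a, b) = (a - b) ** 2, built here from subtract followed by power.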
difference = self._convert_elemwise(_op.subtract, op)
        # _convert_elemwise has already checked that there is exactly one output tensor
exp_type = self.get_tensor_type_str(self.get_output_tensors(op)[0].tensor.Type())
out = _op.power(difference, relay.const(2, exp_type))
return out
def convert_greater_equal(self, op):
"""Convert TFLite GREATER_EQUAL"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized GREATER_EQUAL operator is not supported yet."
)
return self._convert_elemwise(_op.greater_equal, op)
def convert_less(self, op):
"""Convert TFLite LESS"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented("TFlite quantized LESS operator is not supported yet.")
return self._convert_elemwise(_op.less, op)
def convert_less_equal(self, op):
"""Convert TFLite LESS_EQUAL"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized LESS_EQUAL operator is not supported yet."
)
return self._convert_elemwise(_op.less_equal, op)
def convert_equal(self, op):
"""Convert TFLite EQUAL"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized EQUAL operator is not supported yet."
)
return self._convert_elemwise(_op.equal, op)
def convert_not_equal(self, op):
"""Convert TFLite NOT_EQUAL"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized NOT_EQUAL operator is not supported yet."
)
return self._convert_elemwise(_op.not_equal, op)
def _convert_logical_binary(self, relay_op, op):
"""Generic method to convert logical binary ops"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
lhs_tensor = input_tensors[0]
lhs_expr = self.get_expr(lhs_tensor.tensor_idx)
rhs_tensor = input_tensors[1]
rhs_expr = self.get_expr(rhs_tensor.tensor_idx)
out = relay_op(lhs_expr, rhs_expr)
return out
def convert_logical_and(self, op):
"""Convert tflite LOGICAL_AND"""
return self._convert_logical_binary(_op.logical_and, op)
def convert_logical_or(self, op):
"""Convert tflite LOGICAL_OR"""
return self._convert_logical_binary(_op.logical_or, op)
def convert_logical_not(self, op):
"""Convert tflite LOGICAL_NOT"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
data = self.get_expr(input_tensors[0].tensor_idx)
out = _op.logical_not(data)
return out
def convert_gather(self, op):
"""Method to Convert TFLite GATHER operator"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.GatherOptions import GatherOptions
from tflite.TensorType import TensorType
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
data = self.get_tensor_expr(input_tensors[0])
indices = input_tensors[1]
indices_type = indices.tensor.Type()
assert indices_type in (TensorType.INT32, TensorType.INT64)
assert op.BuiltinOptionsType() == BuiltinOptions.GatherOptions
op_options = op.BuiltinOptions()
gather_options = GatherOptions()
gather_options.Init(op_options.Bytes, op_options.Pos)
axis = gather_options.Axis()
        # Check that the indices are within bounds.
data_shape = list(input_tensors[0].tensor.ShapeAsNumpy())
data_dim = len(data_shape)
axis = data_dim + axis if axis < 0 else axis
assert axis >= 0, "Axis out of bounds"
assert axis < data_dim, "Axis out of bounds"
if self.has_expr(indices.tensor_idx):
indices_expr = self.get_expr(indices.tensor_idx)
else:
indices_val = self.get_tensor_value(indices)
indices_expr = self.exp_tab.new_const(
indices_val, dtype=self.get_tensor_type_str(indices_type)
)
indices_shape = list(indices_val.shape)
indices_len = len(indices_shape)
out_shape = data_shape[:axis] + indices_shape[:] + data_shape[axis + 1 :]
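            # Walk every position of the gathered output and verify that the constant indices
            # stay within the bounds of the data shape; out-of-bound indices are rejected.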
loopover = [range(s) for s in out_shape]
for idx in list(itertools.product(*loopover)):
real_indices = (
list(idx[:axis])
+ [indices_val[idx[axis : axis + indices_len]]]
+ list(idx[axis + indices_len :])
)
if np.any(np.subtract(data_shape, real_indices) < 0):
raise ValueError("TFLite out of bound indices are not supported.")
# Use mode 'fast' since indices are already checked within bounds.
out = _op.take(data, indices_expr, axis=axis, mode="fast")
return out
def convert_gather_nd(self, op):
"""Method to Convert TFLite GATHER_ND operator"""
try:
from tflite.TensorType import TensorType
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
for t in input_tensors:
assert not t.qnn_params, "Quantized input is not expected."
data = self.get_tensor_expr(input_tensors[0])
indices = self.get_tensor_expr(input_tensors[1])
indices_type = input_tensors[1].tensor.Type()
assert indices_type in (TensorType.INT32, TensorType.INT64)
indices_dims = len(_infer_shape(indices))
indices_t = _op.transpose(indices, axes=[-1] + list(range(indices_dims - 1)))
out = _op.gather_nd(data, indices_t)
return out
def convert_strided_slice(self, op):
"""Method to Convert TFLite STRIDED_SLICE operator.
NOTE: Eventhough tensorflow supports begin_mask, end_mask, ellipsis_mask, new_axis_mask
and shrink_axis_mask, tflite doesn't support these and expect these values to be zero.
But in future, they may open up the mask implementation, so kept the implementation
same as tensorflow.
This op extracts a slice of size (end - begin) / stride from the given input tensor.
Starting at the location specified by begin the slice continues by adding stride to the
index until all dimensions are not less than end. Note that a stride can be negative,
which causes a reverse slice.
For slice input[val0, val1, ..., valn], begin/end/strides will be vectors of length n.
In each mask field(begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask)
the ith bit will correspond to the ith val.
If the ith bit of begin_mask is set, begin[i] is ignored and the fullest possible range
in that dimension is used instead.
If the ith bit of ellipsis_mask is set, as many unspecified dimensions as needed will be
inserted between other dimensions. Only one non-zero bit is allowed in ellipsis_mask.
If the ith bit of new_axis_mask is set, then begin, end, and stride are ignored and a
new length 1 dimension is added at this point in the output tensor.
If the ith bit of shrink_axis_mask is set, it implies that the ith specification shrinks
the dimensionality by 1, taking on the value at index begin[i]. end[i] and strides[i]
are ignored in this case.
begin and end are zero-indexed. strides entries must be non-zero.
TVM Relay implementation of doesn't support mask, so the mask values are processed in
this function and begin/end/strides are updated accordingly. If any mask is present, and
since tvm doesn't support mask computation directly, the output need a final reshape.
"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.StridedSliceOptions import StridedSliceOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 4, "input tensors length should be 4"
data_expr = self.get_expr(input_tensors[0].tensor_idx)
begin = list(self.get_tensor_value(input_tensors[1]))
end = list(self.get_tensor_value(input_tensors[2]))
stride = list(self.get_tensor_value(input_tensors[3]))
assert op.BuiltinOptionsType() == BuiltinOptions.StridedSliceOptions
op_options = op.BuiltinOptions()
options = StridedSliceOptions()
options.Init(op_options.Bytes, op_options.Pos)
begin_mask = options.BeginMask()
end_mask = options.EndMask()
ellipsis_mask = options.EllipsisMask()
new_axis_mask = options.NewAxisMask()
shrink_axis_mask = options.ShrinkAxisMask()
data_shape = list(input_tensors[0].tensor.ShapeAsNumpy())
data_dim = len(data_shape)
stride_dim = len(stride)
def _transform_mask(stride_dim, ellipsis_mask):
"""Handle mask inputs to create new begin, end, stride and output shape"""
m_begin = [0] * data_dim
m_end = [0] * data_dim
m_stride = [0] * data_dim
fshape_indices = []
            # Count the new axes that appear after the ellipsis; they are needed when expanding ellipsis_mask.
ellipsis_seen = False
new_axes_after_ellipsis = 0
for i in range(stride_dim):
mask = 1 << i
if ellipsis_seen and (mask & new_axis_mask) != 0:
new_axes_after_ellipsis += 1
if (mask & ellipsis_mask) != 0:
ellipsis_seen = True
if not ellipsis_seen:
# Used later for extending the stride attributes in the below loop.
ellipsis_mask |= 1 << stride_dim
stride_dim += 1
final_index = 0
for index in range(stride_dim):
mask = 1 << index
if mask & ellipsis_mask:
# Identify the end index for applying ellipsis_mask
to_index = min(
((data_dim - (stride_dim - index)) + 1 + new_axes_after_ellipsis), data_dim
)
for i in range(final_index, to_index):
m_begin[final_index] = 0
m_end[final_index] = data_shape[final_index]
m_stride[final_index] = 1
fshape_indices.append(final_index)
final_index += 1
elif mask & new_axis_mask:
fshape_indices.append(-1)
elif not mask & new_axis_mask:
if final_index == len(m_begin):
break
if mask & begin_mask:
m_begin[final_index] = data_shape[final_index] if stride[index] < 0 else 0
elif begin[index]:
m_begin[final_index] = begin[index]
if mask & end_mask:
m_end[final_index] = 0 if stride[index] < 0 else data_shape[final_index]
elif end[index]:
m_end[final_index] = end[index]
m_stride[final_index] = stride[index]
if mask & shrink_axis_mask:
                        # Tensorflow makes the axis marked by shrink_axis_mask a dimension of size 1
m_begin[final_index] = (
data_shape[final_index] + begin[index]
if begin[index] < 0
else begin[index]
)
m_end[final_index] = begin[index] + 1
m_stride[final_index] = 1
fshape_indices.append(-2)
else:
fshape_indices.append(final_index)
final_index += 1
return m_begin, m_end, m_stride, fshape_indices
fshape_indices = None
if begin_mask or end_mask or ellipsis_mask or new_axis_mask or shrink_axis_mask:
begin, end, stride, fshape_indices = _transform_mask(stride_dim, ellipsis_mask)
out = _op.strided_slice(data_expr, begin=begin, end=end, strides=stride)
out_shape = _infer_shape(out)
if not fshape_indices:
fshape_indices = range(len(out_shape))
# Create final output shape.
final_output = []
for gather_index in fshape_indices:
if gather_index == -1:
final_output.append(1)
elif gather_index == -2:
pass
else:
final_output.append(out_shape[gather_index])
if not final_output:
return out
return _op.reshape(out, newshape=tuple(final_output))
def convert_zeros_like(self, op):
"""Convert TFLite ZEROS LIKE"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
out = _op.zeros_like(in_expr)
return out
def convert_fill(self, op):
"""Convert TFLite FILL"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
if self.has_expr(input_tensors[0].tensor_idx):
raise tvm.error.OpNotImplemented(
"For dims parameter of Fill operator," " only constant values are supported."
)
in_dims = list(self.get_tensor_value(input_tensors[0]))
in_value_expr = self.get_expr(input_tensors[1].tensor_idx)
out = _op.full(in_value_expr, in_dims)
return out
def _convert_reduce(self, relay_op, op):
"""Generic method to Convert TFLite REDUCE operators"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.ReducerOptions import ReducerOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
# input_tensor
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
# axis
axis = tuple(self.get_tensor_value(input_tensors[1]))
# Options - keep_dims (bool)
assert op.BuiltinOptionsType() == BuiltinOptions.ReducerOptions
reduce_options = ReducerOptions()
op_options = op.BuiltinOptions()
reduce_options.Init(op_options.Bytes, op_options.Pos)
keep_dims = reduce_options.KeepDims()
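        # For quantized inputs, cast to int32 so the reduction accumulates without overflowing
        # the 8-bit storage type; the result is requantized to the output params below.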
if input_tensor.qnn_params:
in_expr = _op.cast(in_expr, "int32")
out = relay_op(in_expr, axis, keep_dims)
# Finally if the reduce is quantized. Add a requantize at the end.
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
if output_tensor.qnn_params:
out = _qnn.op.requantize(
out,
input_scale=input_tensor.qnn_params["scale"],
input_zero_point=input_tensor.qnn_params["zero_point"],
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
out_dtype=output_tensor_type_str,
)
return out
def convert_reduce_min(self, op):
return self._convert_reduce(_op.reduce.min, op)
def convert_reduce_max(self, op):
return self._convert_reduce(_op.reduce.max, op)
def convert_reduce_mean(self, op):
return self._convert_reduce(_op.reduce.mean, op)
def convert_reduce_prod(self, op):
return self._convert_reduce(_op.reduce.prod, op)
def convert_reduce_sum(self, op):
return self._convert_reduce(_op.reduce.sum, op)
def convert_reduce_any(self, op):
return self._convert_reduce(_op.reduce.any, op)
def _convert_arg_min_max(self, relay_op, op):
"""Generic method converting TFLite arg_min_max"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.ArgMinOptions import ArgMinOptions
from tflite.ArgMaxOptions import ArgMaxOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "two input tensor arguments expected"
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "one output tensor expected"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
axis_tensor = input_tensors[1]
        # In Tensorflow, the `axis` argument is a Tensor, not an attribute. We only
        # support the case where it is a scalar constant.
axis_value = self.get_tensor_value(axis_tensor)
assert axis_value.size == 1
axis_value = axis_value.item()
if op.BuiltinOptionsType() == BuiltinOptions.ArgMinOptions:
arg_min_max_options = ArgMinOptions()
elif op.BuiltinOptionsType() == BuiltinOptions.ArgMaxOptions:
arg_min_max_options = ArgMaxOptions()
op_options = op.BuiltinOptions()
arg_min_max_options.Init(op_options.Bytes, op_options.Pos)
        # keepdims is set to False because tflite versions after 1.13 remove all dims of size 1
        # from the output; tflite 1.13 itself would have required keepdims=True.
out = relay_op(in_expr, axis=axis_value, keepdims=False, exclude=False)
return out
def convert_arg_min(self, op):
"""Convert TFLite ARG_MIN"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized ARG_MIN operator is not supported yet."
)
return self._convert_arg_min_max(_op.argmin, op)
def convert_arg_max(self, op):
"""Convert TFLite ARG_MAX"""
return self._convert_arg_min_max(_op.argmax, op)
def convert_fully_connected(self, op):
"""Convert TFLite fully connected"""
try:
from tflite.FullyConnectedOptions import FullyConnectedOptions
from tflite.BuiltinOptions import BuiltinOptions
from tflite.TensorType import TensorType
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) in (2, 3), "input tensors length should be two or three"
input_tensor = input_tensors[0]
weight_tensor = input_tensors[1]
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
output_tensor_type = output_tensor.tensor.Type()
output_tensor_type_str = self.get_tensor_type_str(output_tensor_type)
weight_tensor_shape = weight_tensor.tensor.ShapeAsNumpy()
        # Weight should have only 2 dimensions (TFLite convention)
assert len(weight_tensor_shape) == 2, "Weight should be only 2-dim"
# Input shape: [i_batch_size, ..., n_inputs]
# Filter shape: [n_inputs, n_units]
#
# As we will transform Fully_Connected Input to Dense Op inputs as below
# Dense expected Input shape: [batch_size, n_units]
# Dense expected Weight shape: [out_dim, n_units]
# Dense output shape: [batch_size, out_dim]
target_shape = tuple((-1, weight_tensor_shape[1]))
in_expr = self.get_tensor_expr(input_tensor)
in_expr = _op.reshape(in_expr, target_shape)
# TODO: Change the output shape calculation based on keep_dim option
assert op.BuiltinOptionsType() == BuiltinOptions.FullyConnectedOptions
op_options = op.BuiltinOptions()
fully_connected_options = FullyConnectedOptions()
fully_connected_options.Init(op_options.Bytes, op_options.Pos)
fused_activation_fn = fully_connected_options.FusedActivationFunction()
# weight tensor type should be INT8/UINT8 (quantization) or FLOAT32
weight_tensor_type = weight_tensor.tensor.Type()
assert weight_tensor_type in (TensorType.INT8, TensorType.UINT8, TensorType.FLOAT32)
weight_tensor_type_str = self.get_tensor_type_str(weight_tensor_type)
if self.has_expr(weight_tensor.tensor_idx):
weight_expr = self.get_expr(weight_tensor.tensor_idx)
else:
weight_value = self.get_tensor_value(weight_tensor)
weight_expr = self.exp_tab.new_const(weight_value, dtype=weight_tensor_type_str)
weight_shape = _infer_shape(weight_expr)
if input_tensor.qnn_params:
out = _qnn.op.dense(
in_expr,
weight_expr,
input_zero_point=input_tensor.qnn_params["zero_point"],
kernel_zero_point=weight_tensor.qnn_params["zero_point"],
input_scale=input_tensor.qnn_params["scale"],
kernel_scale=weight_tensor.qnn_params["scale"],
units=weight_shape[0],
out_dtype="int32",
)
else:
out = _op.nn.dense(in_expr, weight_expr)
# if we have bias
if len(input_tensors) == 3:
bias_tensor = input_tensors[2]
bias_tensor_type = bias_tensor.tensor.Type()
# bias tensor type should be INT32 (quantization) or FLOAT32
assert bias_tensor_type in (TensorType.INT32, TensorType.FLOAT32)
bias_tensor_type_str = self.get_tensor_type_str(bias_tensor_type)
bias_expr = self.exp_tab.new_const(
self.get_tensor_value(bias_tensor), dtype=bias_tensor_type_str
)
out = _op.nn.bias_add(out, bias_expr)
# Finally if the dense is quantized. Add a requantize at the end.
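        # qnn.dense accumulates in int32 with an effective scale of input_scale * weight_scale
        # and a zero point of 0, so requantize from that intermediate representation to the
        # output tensor's scale and zero point.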
if output_tensor.qnn_params:
data_scale = input_tensor.qnn_params["scale"]
weight_scale = weight_tensor.qnn_params["scale"]
data_scale_val = get_scalar_from_constant(data_scale)
weight_scale_val = get_scalar_from_constant(weight_scale)
new_input_scale_val = data_scale_val * weight_scale_val
new_input_scale = relay.const(new_input_scale_val, "float32")
new_input_zero_point = relay.const(0, "int32")
# Requantize
out = _qnn.op.requantize(
out,
input_scale=new_input_scale,
input_zero_point=new_input_zero_point,
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
out_dtype=output_tensor_type_str,
)
# Call activation function
output_scale_val = get_scalar_from_constant(output_tensor.qnn_params["scale"])
output_zero_point_val = get_scalar_from_constant(output_tensor.qnn_params["zero_point"])
out = self.convert_qnn_fused_activation_function(
expr=out,
fused_activation_fn=fused_activation_fn,
scale=output_scale_val,
zero_point=output_zero_point_val,
dtype=output_tensor_type_str,
)
else:
out = self.convert_fused_activation_function(out, fused_activation_fn)
return out
def convert_squeeze(self, op):
"""Convert TFLite squeeze"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.SqueezeOptions import SqueezeOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
output_tensors = self.get_output_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
assert len(output_tensors) == 1, "output tensors length should be 1"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
assert op.BuiltinOptionsType() == BuiltinOptions.SqueezeOptions
op_options = op.BuiltinOptions()
squeeze_options = SqueezeOptions()
squeeze_options.Init(op_options.Bytes, op_options.Pos)
squeeze_axis = squeeze_options.SqueezeDimsAsNumpy()
in_expr = self.get_expr(input_tensor_idx)
out = _op.squeeze(in_expr, axis=tuple(squeeze_axis))
return out
def convert_fused_activation_function(self, in_expr, fused_activation_fn):
"""Convert TFLite fused activation function"""
try:
from tflite.ActivationFunctionType import ActivationFunctionType
except ImportError:
raise ImportError("The tflite package must be installed")
if fused_activation_fn == ActivationFunctionType.NONE:
return in_expr
if fused_activation_fn == ActivationFunctionType.RELU6:
return _op.clip(in_expr, a_min=0, a_max=6)
if fused_activation_fn == ActivationFunctionType.RELU:
return _op.nn.relu(in_expr)
if fused_activation_fn == ActivationFunctionType.RELU_N1_TO_1:
return _op.clip(in_expr, a_min=-1, a_max=1)
if fused_activation_fn == ActivationFunctionType.TANH:
return _op.tanh(in_expr)
fused_activation_fn_str = self.activation_fn_type[fused_activation_fn]
raise tvm.error.OpNotImplemented(
"Fused activation {} is not supported yet.".format(fused_activation_fn_str)
)
def convert_conv(self, op, conv_type):
"""convolution implementation."""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.TensorType import TensorType
from tflite.Conv2DOptions import Conv2DOptions
from tflite.DepthwiseConv2DOptions import DepthwiseConv2DOptions
from tflite.Padding import Padding
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) >= 2, "input tensors length should be >= 2"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
weight_tensor = input_tensors[1]
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
output_tensor_type = output_tensor.tensor.Type()
output_tensor_type_str = self.get_tensor_type_str(output_tensor_type)
is_depthwise_conv = False
if conv_type == "conv2d":
assert op.BuiltinOptionsType() == BuiltinOptions.Conv2DOptions
op_options = op.BuiltinOptions()
conv_options = Conv2DOptions()
conv_options.Init(op_options.Bytes, op_options.Pos)
elif conv_type == "depthwise":
is_depthwise_conv = True
assert op.BuiltinOptionsType() == BuiltinOptions.DepthwiseConv2DOptions
op_options = op.BuiltinOptions()
conv_options = DepthwiseConv2DOptions()
conv_options.Init(op_options.Bytes, op_options.Pos)
depth_multiplier = conv_options.DepthMultiplier()
else:
raise tvm.error.OpNotImplemented(
"Operator {} is not supported for frontend TFLite.".format(conv_type)
)
stride_h = conv_options.StrideH()
stride_w = conv_options.StrideW()
dilation_h = conv_options.DilationHFactor()
dilation_w = conv_options.DilationWFactor()
padding = conv_options.Padding()
fused_activation_fn = conv_options.FusedActivationFunction()
_, input_h, input_w, input_c = input_tensor.tensor.ShapeAsNumpy()
if is_depthwise_conv:
# TFLite depthwise convolution kernel layout is:
# 1 KH KW C(input_c * depth_multiplier)
_, kernel_h, kernel_w, in_channels = weight_tensor.tensor.ShapeAsNumpy()
assert in_channels == input_c * depth_multiplier
else:
output_channels, kernel_h, kernel_w, _ = weight_tensor.tensor.ShapeAsNumpy()
dilated_kernel_h = dilation_h * (kernel_h - 1) + 1
dilated_kernel_w = dilation_w * (kernel_w - 1) + 1
params = {
"kernel_size": [kernel_h, kernel_w],
"strides": [stride_h, stride_w],
"dilation": [dilation_h, dilation_w],
"padding": [0, 0],
"data_layout": "NHWC",
}
if is_depthwise_conv:
params["channels"] = int(in_channels)
params["groups"] = int(input_c)
# If number of input channels is 1, treat as normal
# convolution.
params["kernel_layout"] = "HWIO" if input_c == 1 else "HWOI"
else:
params["channels"] = int(output_channels)
params["kernel_layout"] = "HWIO"
# weight tensor type should be INT8/UINT8 (quantization) or FLOAT32
weight_tensor_type = weight_tensor.tensor.Type()
assert weight_tensor_type in (TensorType.INT8, TensorType.UINT8, TensorType.FLOAT32)
weight_tensor_type_str = self.get_tensor_type_str(weight_tensor_type)
in_expr = self.get_expr(input_tensor_idx)
weight_value = self.get_tensor_value(weight_tensor)
# TFLite kernel layout:
# convolution:
# OC KH KW IC, we require KH KW IC OC (HWIO)
# depthwise convolution:
# 1 KH KW C(input_c * depth_multiplier), we require
# KH KW IC M (depth_multiplier) (HWOI)
if is_depthwise_conv:
weight_value = weight_value.reshape(kernel_h, kernel_w, input_c, depth_multiplier)
else:
weight_value = weight_value.transpose((1, 2, 3, 0))
weight_expr = self.exp_tab.new_const(weight_value, dtype=weight_tensor_type_str)
if padding == Padding.VALID:
pass
elif padding == Padding.SAME:
pad_top, pad_bottom = get_pad_value(input_h, dilated_kernel_h, stride_h)
pad_left, pad_right = get_pad_value(input_w, dilated_kernel_w, stride_w)
do_pad = not (pad_top == 0 and pad_bottom == 0 and pad_left == 0 and pad_right == 0)
if do_pad:
params["padding"] = [pad_top, pad_left, pad_bottom, pad_right]
else:
raise tvm.error.OpAttributeUnImplemented(
"Padding format {} is not supported for operator Conv.".format(padding)
)
if input_tensor.qnn_params:
qnn_conv2d_params = dict(params)
qnn_conv2d_params["input_zero_point"] = input_tensor.qnn_params["zero_point"]
qnn_conv2d_params["kernel_zero_point"] = weight_tensor.qnn_params["zero_point"]
qnn_conv2d_params["out_dtype"] = "int32"
qnn_conv2d_params["input_scale"] = input_tensor.qnn_params["scale"]
qnn_conv2d_params["kernel_scale"] = weight_tensor.qnn_params["scale"]
out = _qnn.op.conv2d(in_expr, weight_expr, **qnn_conv2d_params)
else:
out = _op.nn.conv2d(in_expr, weight_expr, **params)
# if we have bias
if len(input_tensors) == 3:
bias_tensor = input_tensors[2]
bias_tensor_type = bias_tensor.tensor.Type()
# bias tensor type should be INT32 (quantization) or FLOAT32
assert bias_tensor_type in (TensorType.INT32, TensorType.FLOAT32)
bias_tensor_type_str = self.get_tensor_type_str(bias_tensor_type)
bias_expr = self.exp_tab.new_const(
self.get_tensor_value(bias_tensor), dtype=bias_tensor_type_str
)
channel_axis = 3
out = _op.nn.bias_add(out, bias_expr, axis=channel_axis)
# Handle fused activation.
if output_tensor.qnn_params:
# Calculate the intermediate scale and zero point of the int32 output.
data_scale = input_tensor.qnn_params["scale"]
data_scale_val = get_scalar_from_constant(data_scale)
weight_scale = weight_tensor.qnn_params["scale"]
# If weight scale is scalar, it is per-tensor quantization
if isinstance(weight_scale, float):
weight_scale_val = get_scalar_from_constant(weight_scale)
else:
weight_scale_val = get_tensor_from_constant(weight_scale)
new_input_scale_val = data_scale_val * weight_scale_val
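            # With per-channel (per-axis) weight scales, new_input_scale_val is a
            # vector and the requantize below applies it along axis=3, the NHWC
            # channel axis.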
new_input_scale = relay.const(new_input_scale_val, "float32")
new_input_zero_point = relay.const(0, "int32")
# Finally requantize
out = _qnn.op.requantize(
out,
input_scale=new_input_scale,
input_zero_point=new_input_zero_point,
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
out_dtype=output_tensor_type_str,
axis=3,
)
# Call activation function
output_scale_val = get_scalar_from_constant(output_tensor.qnn_params["scale"])
output_zero_point_val = get_scalar_from_constant(output_tensor.qnn_params["zero_point"])
out = self.convert_qnn_fused_activation_function(
expr=out,
fused_activation_fn=fused_activation_fn,
scale=output_scale_val,
zero_point=output_zero_point_val,
dtype=output_tensor_type_str,
)
else:
out = self.convert_fused_activation_function(out, fused_activation_fn)
return out
def convert_split(self, op):
"""split implementation."""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.SplitOptions import SplitOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be == 2"
axis_tensor = input_tensors[0]
split_axis = self.get_tensor_value(axis_tensor)
input_tensor = input_tensors[1]
input_tensor_idx = input_tensor.tensor_idx
assert op.BuiltinOptionsType() == BuiltinOptions.SplitOptions
op_options = op.BuiltinOptions()
split_options = SplitOptions()
split_options.Init(op_options.Bytes, op_options.Pos)
num_splits = split_options.NumSplits()
in_expr = self.get_expr(input_tensor_idx)
out = _op.split(in_expr, num_splits, axis=int(split_axis))
# Relay does not like a TupleWrapper of 1 element, further this
# only shows up with tf1.13 if we use a split with num_splits==1.
# In tf 1.14 this doesn't appear as it is automatically a reshape
# operation.
if isinstance(out, _expr.TupleWrapper):
if out.size == 1:
out = out[0]
return out
def convert_split_v(self, op):
"""SPLIT_V implementation."""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 3, "input tensors length should be 3"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
in_expr = self.get_expr(input_tensor_idx)
if self.has_expr(input_tensors[1].tensor_idx):
raise tvm.error.OpNotImplemented(
"For size_splits parameter of SPLIT_V operator, "
"only constant values are supported."
)
size_splits = list(self.get_tensor_value(input_tensors[1]))
size_splits = tuple(np.cumsum(size_splits)[:-1])
axis_tensor = input_tensors[2]
split_axis = self.get_tensor_value(axis_tensor)
out = _op.split(in_expr, size_splits, axis=int(split_axis))
# Relay does not like a TupleWrapper of 1 element, further this
# only shows up with tf1.13 if we use a split with num_splits==1.
# In tf 1.14 this doesn't appear as it is automatically a reshape
# operation.
if isinstance(out, _expr.TupleWrapper) and out.size == 1:
out = out[0]
return out
def convert_slice(self, op):
"""Convert TFLite SLICE"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 3, "input tensors length should be == 3"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
begin = list(self.get_tensor_value(input_tensors[1]))
size = list(self.get_tensor_value(input_tensors[2]))
# strided_slice(Relay) needs the slice's end indices, not the size
end = size
input_tensor_shape = input_tensor.tensor.ShapeAsNumpy()
input_tensor_rank = len(input_tensor_shape)
for i in range(input_tensor_rank):
if size[i] == -1:
end[i] = input_tensor_shape[i]
else:
end[i] += begin[i]
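        # e.g. (illustrative values) begin=[1, 0], size=[2, -1] on a [4, 3] input
        # yields end=[3, 3], i.e. rows 1..2 and all columns.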
out = _op.strided_slice(in_expr, begin, end)
return out
def convert_select(self, op):
"""Convert TFLite SELECT"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 3, "input tensors length should be == 3"
cond = self.get_tensor_expr(input_tensors[0])
x = self.get_tensor_expr(input_tensors[1])
y = self.get_tensor_expr(input_tensors[2])
out = _op.where(cond, x, y)
return out
def convert_transpose(self, op):
"""transpose implementation."""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
in_expr = self.get_expr(input_tensor_idx)
# axis
in_axis = tuple(self.get_tensor_value(input_tensors[1]))
if not in_axis:
out = _op.transpose(in_expr)
else:
out = _op.transpose(in_expr, in_axis)
return out
def convert_reverse_sequence(self, op):
"""Convert TFLite REVERSE_SEQUENCE"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.ReverseSequenceOptions import ReverseSequenceOptions
except ImportError:
raise ImportError("The tflite package must be installed")
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFLite does not support quantized REVERSE_SEQUENCE operator yet."
)
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
in_expr = self.get_tensor_expr(input_tensors[0])
length_expr = self.get_tensor_expr(input_tensors[1])
assert op.BuiltinOptionsType() == BuiltinOptions.ReverseSequenceOptions
op_options = op.BuiltinOptions()
options = ReverseSequenceOptions()
options.Init(op_options.Bytes, op_options.Pos)
batch_axis = options.BatchDim()
seq_axis = options.SeqDim()
return _op.reverse_sequence(in_expr, length_expr, seq_axis, batch_axis)
def convert_cast(self, op):
"""Convert TFLite CAST"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.CastOptions import CastOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
assert op.BuiltinOptionsType() == BuiltinOptions.CastOptions
op_options = op.BuiltinOptions()
cast_options = CastOptions()
cast_options.Init(op_options.Bytes, op_options.Pos)
cast_dtype = cast_options.OutDataType()
out = _op.cast(in_expr, self.get_tensor_type_str(cast_dtype))
return out
def convert_tile(self, op):
"""tile implementation."""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
in_expr = self.get_expr(input_tensor_idx)
# reps (tuple of int) – The number of times repeating the tensor data.
reps = tuple(self.get_tensor_value(input_tensors[1]))
out = _op.tile(in_expr, reps)
return out
def convert_topk_v2(self, op):
""" Convert TFLite TOPK_v2 """
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
in_expr = self.get_expr(input_tensor_idx)
k = self.get_tensor_value(input_tensors[1])
out = _op.topk(in_expr, int(k))
return out
def convert_pool2d(self, op, pool_type):
"""pool2d implementation."""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.Pool2DOptions import Pool2DOptions
from tflite.Padding import Padding
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors should be 1"
output_tensor = output_tensors[0]
output_tensor_type = output_tensor.tensor.Type()
output_tensor_type_str = self.get_tensor_type_str(output_tensor_type)
assert op.BuiltinOptionsType() == BuiltinOptions.Pool2DOptions
op_options = op.BuiltinOptions()
pool2d_options = Pool2DOptions()
pool2d_options.Init(op_options.Bytes, op_options.Pos)
stride_h = pool2d_options.StrideH()
stride_w = pool2d_options.StrideW()
padding = pool2d_options.Padding()
filter_h = pool2d_options.FilterHeight()
filter_w = pool2d_options.FilterWidth()
fused_activation_fn = pool2d_options.FusedActivationFunction()
params = {
"pool_size": (filter_h, filter_w),
"strides": (stride_h, stride_w),
"padding": [0, 0],
"layout": "NHWC",
}
in_expr = self.get_expr(input_tensor_idx)
_, input_h, input_w, _ = input_tensor.tensor.ShapeAsNumpy()
if padding == Padding.VALID:
pass
elif padding == Padding.SAME:
pad_top, pad_bottom = get_pad_value(input_h, filter_h, stride_h)
pad_left, pad_right = get_pad_value(input_w, filter_w, stride_w)
params["padding"] = [pad_top, pad_left, pad_bottom, pad_right]
else:
raise tvm.error.OpAttributeUnImplemented(
"Padding format {} for operator Pool2D is not supported.".format(padding)
)
if pool_type == "average":
if input_tensor.qnn_params:
assert self.has_same_qnn_params(input_tensor, output_tensor), (
"TFLite avg_pool2dreshape requires input and output scale"
"and zero points to be equal"
)
out = _op.cast(in_expr, dtype="int32")
out = _op.nn.avg_pool2d(out, **params)
out = _op.cast(out, dtype=output_tensor_type_str)
else:
out = _op.nn.avg_pool2d(in_expr, **params)
elif pool_type == "max":
if input_tensor.qnn_params:
assert self.has_same_qnn_params(
input_tensor, output_tensor
), "qnn.op.max_pool2d requires input and output qnn params to be same"
out = _op.nn.max_pool2d(in_expr, **params)
elif pool_type == "l2":
# L2_POOL_2D is equivalent to square_root(avg_pool(square(in_data)))
# TFLite does not have support for quantised L2_POOL_2D op.
assert (
not input_tensor.qnn_params
), "As TFLite does not have support for quantized L2_POOL_2D, \
Quantized input is not expected."
exp_type = self.get_tensor_type_str(output_tensor.tensor.Type())
square_exp = _op.power(in_expr, relay.const(2, exp_type))
avg_pool_exp = _op.nn.avg_pool2d(square_exp, **params)
out = _op.sqrt(avg_pool_exp)
else:
raise tvm.error.OpNotImplemented(
"Operator {} is not supported for frontend TFLite.".format(pool_type + " pool")
)
# Handle fused activations
if output_tensor.qnn_params:
scale_val = get_scalar_from_constant(output_tensor.qnn_params["scale"])
zero_point_val = get_scalar_from_constant(output_tensor.qnn_params["zero_point"])
out = self.convert_qnn_fused_activation_function(
expr=out,
fused_activation_fn=fused_activation_fn,
scale=scale_val,
zero_point=zero_point_val,
dtype=output_tensor_type_str,
)
else:
out = self.convert_fused_activation_function(out, fused_activation_fn)
return out
def convert_pad(self, op):
"""Convert TFLite PAD/PADV2 \
TFLite treats PAD and PADV2 operators identically"""
input_tensors = self.get_input_tensors(op)
# TFLite PAD/PADV2 only supports CONSTANT mode
assert (
len(input_tensors) == 2 or len(input_tensors) == 3
), "input tensor's length should be 2 for PAD and 3 for PADV2"
if len(input_tensors) == 3:
assert (
input_tensors[0].tensor.Type() == input_tensors[2].tensor.Type()
), "constant_values tensor must be of same type as input tensor"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
# paddings
pad_list = self.get_tensor_value(input_tensors[1])
# convert list of lists to tuple of tuples
paddings = tuple(tuple(l) for l in pad_list)
# Set the pad value, by default 0, unless constant_values parameter is provided
pad_value = 0
if input_tensor.qnn_params:
# Check that input and output tensor have same qnn params.
output_tensors = self.get_output_tensors(op)
output_tensor = output_tensors[0]
assert self.has_same_qnn_params(
input_tensor, output_tensor
), "TFLite PADV2 requires input and output scale and zero points to be equal"
# The pad value for quantized pad is the input zero point by default.
pad_value = float(input_tensor.qnn_params["zero_point"].data.asnumpy())
if len(input_tensors) == 3:
pad_value = self.get_tensor_value(input_tensors[2])
if isinstance(pad_value, np.ndarray):
pad_value = pad_value.tolist()
if isinstance(pad_value, list):
assert len(pad_value) == 1, "Only one constant value is expected."
pad_value = pad_value[0]
if input_tensor.qnn_params:
# Check that input tensor and constant_values have same qnn params.
assert self.has_same_qnn_params(
input_tensor, input_tensors[2]
), "TFLite PADV2 requires input and constant_values tensors' \
scale and zero points to be equal"
out = _op.nn.pad(in_expr, pad_width=paddings, pad_value=pad_value)
return out
def convert_floor_div(self, op):
"""Convert TFLite FLOOR_DIV"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized FLOOR DIV operator is not supported yet."
)
return self._convert_elemwise(_op.floor_divide, op)
def convert_floor_mod(self, op):
"""Convert TFLite FLOOR_MOD"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized FLOOR MOD operator is not supported yet."
)
return self._convert_elemwise(_op.floor_mod, op)
def convert_mirror_pad(self, op):
"""Convert TFLite MIRROR_PAD"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.MirrorPadOptions import MirrorPadOptions
except ImportError:
raise ImportError("The tflite package must be installed")
        # The quantized form of MIRROR_PAD is not yet implemented in TFLite.
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized MIRROR_PAD operator is not supported yet."
)
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
# tensor
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
# paddings
pad_list = self.get_tensor_value(input_tensors[1])
# convert list of lists to tuple of tuples
paddings = tuple(tuple(l) for l in pad_list)
assert op.BuiltinOptionsType() == BuiltinOptions.MirrorPadOptions
op_options = op.BuiltinOptions()
mirror_pad_options = MirrorPadOptions()
mirror_pad_options.Init(op_options.Bytes, op_options.Pos)
mode_byte = mirror_pad_options.Mode()
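        # MirrorPadMode: a mode byte of 0 is treated as REFLECT, anything else as SYMMETRIC.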
mode = "REFLECT" if mode_byte == 0 else "SYMMETRIC"
out = _op.nn.mirror_pad(in_expr, paddings, mode)
return out
def convert_pack(self, op):
"""Convert TFLite pack"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.PackOptions import PackOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
        assert len(input_tensors) >= 1, "input tensors length should be >= 1"
in_exprs = [self.get_expr(input_tensor.tensor_idx) for input_tensor in input_tensors]
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
assert op.BuiltinOptionsType() == BuiltinOptions.PackOptions
op_options = op.BuiltinOptions()
pack_options = PackOptions()
pack_options.Init(op_options.Bytes, op_options.Pos)
pack_axis = pack_options.Axis()
in_exprs_reshaped = [_op.expand_dims(i, axis=pack_axis, num_newaxis=1) for i in in_exprs]
out = _op.concatenate(in_exprs_reshaped, pack_axis)
return out
def convert_unpack(self, op):
"""Convert TFLite unpack"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.UnpackOptions import UnpackOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
assert op.BuiltinOptionsType() == BuiltinOptions.UnpackOptions
op_options = op.BuiltinOptions()
unpack_options = UnpackOptions()
unpack_options.Init(op_options.Bytes, op_options.Pos)
num_unpacks = unpack_options.Num()
unpack_axis = unpack_options.Axis()
# Relay doesn't support 'unpack' operator so we use 'split' & 'squeeze' instead.
# We have to do 'squeeze' along the split axis but Relay expects
# squeeze_axis to be either None or List.
squeeze_axis = None if unpack_axis == 0 else [unpack_axis]
# Relay doesn't like TupleWrapper of 1 element so we isolate the case of unpacking
# a tensor by an axis with len(axis) == 1. For reference see convert_split().
# Such unpacking will result in the same tensor so we omit 'split' and only squeeze
# along the axis of dim == 1.
if num_unpacks == 1:
squeezed = _op.squeeze(in_expr, axis=squeeze_axis)
if isinstance(squeezed, _expr.TupleWrapper):
squeezed = squeezed[0]
else:
splitted = _op.split(in_expr, indices_or_sections=num_unpacks, axis=unpack_axis)
squeezed = _expr.TupleWrapper(
_expr.Tuple(
[_op.squeeze(split_item, axis=squeeze_axis) for split_item in splitted]
),
len(splitted),
)
return squeezed
def convert_batch_to_space_nd(self, op):
"""batch_to_space_nd implementation."""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 3, "input tensors length should be 3"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
in_expr = self.get_expr(input_tensor_idx)
input_shape = list(input_tensor.tensor.ShapeAsNumpy())
batch = input_shape[0]
block_shape = list(self.get_tensor_value(input_tensors[1]))
M = len(block_shape)
crops = list(self.get_tensor_value(input_tensors[2]))
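        # Worked shape example (illustrative values): input [4, 2, 2, 1] with
        # block_shape [2, 2] and zero crops reshapes to [2, 2, 1, 2, 2, 1],
        # transposes with axes [2, 3, 0, 4, 1, 5] to [1, 2, 2, 2, 2, 1], and
        # finally reshapes to [1, 4, 4, 1].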
# From https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/batch-to-space-n-d:
# Reshape input to reshaped of shape
shape1 = block_shape + [batch // np.prod(block_shape)] + input_shape[1:]
reshaped = _op.reshape(in_expr, newshape=shape1)
# Permute dimensions of reshaped to produce permuted of shape
axes = (
[M]
+ [axis for i in range(M) for axis in [M + i + 1, i]]
+ list(range(2 * M + 1, len(shape1)))
)
permuted = _op.transpose(reshaped, axes=axes)
# Reshape permuted to produce reshaped_permuted of shape
shape2 = [0] + [-3] * M + [-2]
reshaped_permuted = _op.reshape(permuted, newshape=shape2)
# Crop the start and end of dimensions [1, ..., M] of reshaped_permuted according to crops
# to produce the output of shape:
reshaped_permuted_shape = _infer_shape(reshaped_permuted)
cropped = reshaped_permuted
for axis in range(1, M + 1):
crop = crops[axis - 1]
if (crop != [0, 0]).any():
indices = _op.arange(
_expr.const(crop[0]),
_expr.const(reshaped_permuted_shape[axis] - crop[1]),
dtype="int32",
)
cropped = _op.take(cropped, indices=indices, axis=axis)
return cropped
def convert_space_to_batch_nd(self, op):
"""space_to_batch_nd implementation."""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 3, "input tensors length should be 3"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
in_expr = self.get_expr(input_tensor_idx)
input_shape = list(input_tensor.tensor.ShapeAsNumpy())
batch = input_shape[0]
N = len(input_shape)
block_shape = list(self.get_tensor_value(input_tensors[1]))
M = len(block_shape)
paddings = list(self.get_tensor_value(input_tensors[2]))
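        # Worked shape example (illustrative values): input [1, 4, 4, 1] with
        # block_shape [2, 2] and zero paddings reshapes to [1, 2, 2, 2, 2, 1],
        # transposes with axes [2, 4, 0, 1, 3, 5] to [2, 2, 1, 2, 2, 1], and
        # finally reshapes to [4, 2, 2, 1].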
# From https://www.tensorflow.org/api_docs/python/tf/space_to_batch_nd:
# Zero-pad the start and end of dimensions [1, ..., M] of the input according to paddings
# to produce padded of shape padded_shape.
remaining_shape_length = N - M - 1
padded_list = [(0, 0)] + paddings + [(0, 0)] * remaining_shape_length
padded_shape = []
for element in padded_list:
if isinstance(element, np.ndarray):
element = element.tolist()
padded_shape.append(element)
padded_shape = tuple(padded_shape)
padded = _op.nn.pad(in_expr, pad_width=tuple(padded_shape))
# Reshape padded to reshaped_padded of shape:
shape1 = [batch] + [item for i in range(M) for item in [-4, -1, block_shape[i]]] + [-2]
reshaped_padded = _op.reshape(padded, newshape=shape1)
# Permute dimensions of reshaped_padded to produce permuted_reshaped_padded of shape:
axes = (
[2 * i + 2 for i in range(M)]
+ [0]
+ [2 * i + 1 for i in range(M)]
+ list(range(1 + 2 * M, 1 + 2 * M + remaining_shape_length))
)
permuted_reshaped_padded = _op.transpose(reshaped_padded, axes=axes)
permuted_reshaped_padded_shape = _infer_shape(permuted_reshaped_padded)
# Reshape permuted_reshaped_padded to flatten block_shape into the batch dimension,
# producing an output tensor of shape:
shape2 = [batch * np.prod(block_shape)] + list(permuted_reshaped_padded_shape)[M + 1 :]
reshaped_permuted_reshaped_padded = _op.reshape(permuted_reshaped_padded, newshape=shape2)
return reshaped_permuted_reshaped_padded
def convert_depth_to_space(self, op):
"""Convert TFLite DEPTH_TO_SPACE"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.DepthToSpaceOptions import DepthToSpaceOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
assert op.BuiltinOptionsType() == BuiltinOptions.DepthToSpaceOptions
op_options = op.BuiltinOptions()
depth_to_space_options = DepthToSpaceOptions()
depth_to_space_options.Init(op_options.Bytes, op_options.Pos)
block_size = depth_to_space_options.BlockSize()
out = _op.nn.depth_to_space(in_expr, block_size, layout="NHWC")
return out
def convert_space_to_depth(self, op):
"""Convert TFLite SPACE_TO_DEPTH"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.SpaceToDepthOptions import SpaceToDepthOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
assert op.BuiltinOptionsType() == BuiltinOptions.SpaceToDepthOptions
op_options = op.BuiltinOptions()
space_to_depth_options = SpaceToDepthOptions()
space_to_depth_options.Init(op_options.Bytes, op_options.Pos)
block_size = space_to_depth_options.BlockSize()
out = _op.nn.space_to_depth(in_expr, block_size, layout="NHWC")
return out
def convert_sparse_to_dense(self, op):
"""Convert TFLite SPARSE_TO_DENSE"""
try:
from tflite.TensorType import TensorType
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 4, "input tensors length should be 4"
indices, values = input_tensors[0], input_tensors[2]
default_value = input_tensors[3]
output_shape = input_tensors[1]
for t in input_tensors:
assert not t.qnn_params, "Quantized input is not expected."
for t in [indices, output_shape]:
t_type = t.tensor.Type()
assert t_type in (TensorType.INT32, TensorType.INT64)
out = _op.sparse_to_dense(
self.get_tensor_expr(indices),
list(self.get_tensor_value(output_shape)),
self.get_tensor_expr(values),
self.get_tensor_expr(default_value),
)
return out
def convert_prelu(self, op):
"""Convert TFLite PReLU"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
input_tensor = input_tensors[0]
alpha_tensor = input_tensors[1]
alpha_tensor_type = alpha_tensor.tensor.Type()
alpha_tensor_type_str = self.get_tensor_type_str(alpha_tensor_type)
alpha_expr = self.exp_tab.new_const(
self.get_tensor_value(alpha_tensor).flatten(), dtype=alpha_tensor_type_str
)
in_expr = self.get_expr(input_tensor.tensor_idx)
out = _op.nn.prelu(in_expr, alpha_expr, axis=3)
return out
def convert_transpose_conv(self, op):
"""Convert TFLite TRANSPOSE_CONV"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.TensorType import TensorType
from tflite.TransposeConvOptions import TransposeConvOptions
from tflite.Padding import Padding
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 3, "input tensors length should be 3"
# Input (data) Tensor. NHWC layout
input_tensor = input_tensors[2]
_, input_h, input_w, input_c = input_tensor.tensor.ShapeAsNumpy()
# Weights tensor. TFLite uses OHWI layout
weights_tensor = input_tensors[1]
out_channels, kernel_h, kernel_w, in_channels = weights_tensor.tensor.ShapeAsNumpy()
assert (
input_c == in_channels
), "Input channel in the filter should match to channel in the input"
# output_shape Tensor. NHWC layout
output_shape_tensor = input_tensors[0]
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
output_tensor_type = output_tensor.tensor.Type()
output_tensor_type_str = self.get_tensor_type_str(output_tensor_type)
assert op.BuiltinOptionsType() == BuiltinOptions.TransposeConvOptions
op_options = op.BuiltinOptions()
deconv_options = TransposeConvOptions()
deconv_options.Init(op_options.Bytes, op_options.Pos)
padding = deconv_options.Padding()
stride_h = deconv_options.StrideH()
stride_w = deconv_options.StrideW()
assert padding in (
Padding.VALID,
Padding.SAME,
), "Padding format {} is not supported for operator TRANSPOSE_CONV".format(padding)
# Data
in_expr = self.get_expr(input_tensor.tensor_idx)
# Weights
weights_tensor_type = weights_tensor.tensor.Type()
# weights tensor type should be UINT8 (quantization) or FLOAT32
assert weights_tensor_type in (TensorType.UINT8, TensorType.FLOAT32)
weight_tensor_type_str = self.get_tensor_type_str(weights_tensor_type)
weight_value_ohwi = self.get_tensor_value(weights_tensor)
# Relay kernel_layout should be OIHW
# Relay weights layout should be different from kernel_layout - it should be IOHW
weight_value_iohw = np.transpose(weight_value_ohwi, (3, 0, 1, 2))
weight_expr_iohw = self.exp_tab.new_const(weight_value_iohw, dtype=weight_tensor_type_str)
# Output shape value
output_shape_value = self.get_tensor_value(output_shape_tensor)
# Relay expects filter output channel to match to output tensor channel.
assert (
out_channels == output_shape_value[3]
), "Output channel in the filter should match to channel in the output_shape"
if padding == Padding.SAME:
pad_top, pad_bottom = get_pad_value(input_h, kernel_h, stride_h)
pad_left, pad_right = get_pad_value(input_w, kernel_w, stride_w)
padding = (pad_top, pad_left, pad_bottom, pad_right)
else:
padding = (0, 0, 0, 0)
out = _op.nn.conv2d_transpose(
in_expr,
weight_expr_iohw,
strides=(stride_h, stride_w),
padding=padding,
channels=int(out_channels),
kernel_size=(int(kernel_h), int(kernel_w)),
data_layout="NHWC",
kernel_layout="OIHW",
out_dtype=output_tensor_type_str,
)
return out
def convert_quantize(self, op):
"""Convert TFLite Quantize"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
input_tensor_type_str = self.get_tensor_type_str(input_tensor.tensor.Type())
in_expr = self.get_tensor_expr(input_tensor)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
# The output must be quantized
assert output_tensor.qnn_params
# TFLite Quantize op can also act as Requantize op
if input_tensor_type_str == "float32":
out = self.quantize(in_expr, output_tensor)
else:
out = _qnn.op.requantize(
in_expr,
input_scale=input_tensor.qnn_params["scale"],
input_zero_point=input_tensor.qnn_params["zero_point"],
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
out_dtype=output_tensor_type_str,
)
return out
def convert_dequantize(self, op):
"""Convert TFLite Dequantize"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
# The input must be quantized
assert input_tensor.qnn_params
# Dequantize the input.
out = self.dequantize(in_expr, input_tensor)
return out
def convert_detection_postprocess(self, op):
"""Convert TFLite_Detection_PostProcess"""
flexbuffer = op.CustomOptionsAsNumpy().tobytes()
custom_options = FlexBufferDecoder(flexbuffer).decode()
if "use_regular_nms" in custom_options:
if custom_options["use_regular_nms"]:
raise tvm.error.OpAttributeUnImplemented(
"use_regular_nms=True is not yet supported for operator {}.".format(
"TFLite_Detection_PostProcess"
)
)
inputs = self.get_input_tensors(op)
assert len(inputs) == 3, "inputs length should be 3"
cls_pred = self.get_expr(inputs[1].tensor_idx)
loc_prob = self.get_expr(inputs[0].tensor_idx)
batch_size = inputs[1].tensor.Shape(0)
anchor_values = self.get_tensor_value(inputs[2])
anchor_boxes = len(anchor_values)
anchor_type = self.get_tensor_type_str(inputs[2].tensor.Type())
anchor_expr = self.exp_tab.new_const(anchor_values, dtype=anchor_type)
if inputs[0].qnn_params:
loc_prob = _qnn.op.dequantize(
data=loc_prob,
input_scale=inputs[0].qnn_params["scale"],
input_zero_point=inputs[0].qnn_params["zero_point"],
)
if inputs[1].qnn_params:
cls_pred = _qnn.op.dequantize(
data=cls_pred,
input_scale=inputs[1].qnn_params["scale"],
input_zero_point=inputs[1].qnn_params["zero_point"],
)
if inputs[2].qnn_params:
anchor_expr = _qnn.op.dequantize(
data=anchor_expr,
input_scale=inputs[2].qnn_params["scale"],
input_zero_point=inputs[2].qnn_params["zero_point"],
)
# reshape the cls_pred and loc_prob tensors so
# they can be consumed by multibox_transform_loc
cls_pred = _op.transpose(cls_pred, [0, 2, 1])
# loc_prob coords are in yxhw format
# need to convert to xywh
loc_coords = _op.split(loc_prob, 4, axis=2)
loc_prob = _op.concatenate(
[loc_coords[1], loc_coords[0], loc_coords[3], loc_coords[2]], axis=2
)
loc_prob = _op.reshape(loc_prob, [batch_size, anchor_boxes * 4])
# anchor coords are in yxhw format
# need to convert to ltrb
anchor_coords = _op.split(anchor_expr, 4, axis=1)
anchor_y = anchor_coords[0]
anchor_x = anchor_coords[1]
anchor_h = anchor_coords[2]
anchor_w = anchor_coords[3]
plus_half = _expr.const(0.5, dtype="float32")
minus_half = _expr.const(-0.5, dtype="float32")
anchor_l = _op.add(anchor_x, _op.multiply(anchor_w, minus_half))
anchor_r = _op.add(anchor_x, _op.multiply(anchor_w, plus_half))
anchor_t = _op.add(anchor_y, _op.multiply(anchor_h, minus_half))
anchor_b = _op.add(anchor_y, _op.multiply(anchor_h, plus_half))
anchor_expr = _op.concatenate([anchor_l, anchor_t, anchor_r, anchor_b], axis=1)
anchor_expr = _op.expand_dims(anchor_expr, 0)
# attributes for multibox_transform_loc
multibox_transform_loc_attrs = {}
multibox_transform_loc_attrs["clip"] = False
multibox_transform_loc_attrs["threshold"] = custom_options["nms_score_threshold"]
multibox_transform_loc_attrs["variances"] = (
1 / custom_options["x_scale"],
1 / custom_options["y_scale"],
1 / custom_options["w_scale"],
1 / custom_options["h_scale"],
)
# attributes for non_max_suppression
non_max_suppression_attrs = {}
non_max_suppression_attrs["return_indices"] = False
non_max_suppression_attrs["iou_threshold"] = custom_options["nms_iou_threshold"]
non_max_suppression_attrs["force_suppress"] = False
non_max_suppression_attrs["top_k"] = anchor_boxes
non_max_suppression_attrs["max_output_size"] = custom_options["max_detections"]
non_max_suppression_attrs["invalid_to_bottom"] = False
ret = _op.vision.multibox_transform_loc(
cls_pred, loc_prob, anchor_expr, **multibox_transform_loc_attrs
)
ret = _op.vision.non_max_suppression(ret[0], ret[1], ret[1], **non_max_suppression_attrs)
ret = _op.vision.get_valid_counts(ret, 0)
valid_count = ret[0]
# keep only the top 'max_detections' rows
ret = _op.strided_slice(
ret[1], [0, 0, 0], [batch_size, custom_options["max_detections"], anchor_boxes]
)
# the output needs some reshaping to match tflite
ret = _op.split(ret, 6, axis=2)
cls_ids = _op.reshape(ret[0], [batch_size, -1])
scores = _op.reshape(ret[1], [batch_size, -1])
boxes = _op.concatenate([ret[3], ret[2], ret[5], ret[4]], axis=2)
ret = _expr.TupleWrapper(_expr.Tuple([boxes, cls_ids, scores, valid_count]), size=4)
return ret
def convert_expand_dims(self, op):
"""Convert TFLite EXPAND_DIMS"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
if input_tensors[0].qnn_params:
# Check that input and output tensor have same qnn params.
output_tensors = self.get_output_tensors(op)
assert self.has_same_qnn_params(
input_tensors[0], output_tensors[0]
), "TFLite EXPAND_DIMS requires input and output tensors' \
scale and zero points to be equal"
input_expr = self.get_tensor_expr(input_tensors[0])
axis = self.get_tensor_value(input_tensors[1])
if isinstance(axis, np.ndarray):
assert len(axis) == 1, "only one value is expected."
axis = int(axis)
ndims = len(input_tensors[0].tensor.ShapeAsNumpy())
assert -1 - ndims <= axis <= ndims, "axis out of range"
out = _op.expand_dims(input_expr, axis, 1)
return out
def convert_one_hot(self, op):
"""Convert TFLite ONE_HOT"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.OneHotOptions import OneHotOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 4, "Input tensor's length should be 4"
# Ensuring input isn't quantized
assert all(not i.qnn_params for i in input_tensors), "Quantized input is not expected."
# TFlite ONE_HOT requires both on_value
# and off_value, making dtype redundant.
indices = input_tensors[0]
depth = input_tensors[1]
on_value = input_tensors[2]
off_value = input_tensors[3]
assert (
on_value.tensor.Type() == off_value.tensor.Type()
), "on_value and off_value should be the same type"
# Getting relay expr
indices_expr = self.get_expr(indices.tensor_idx)
on_value_expr = self.get_expr(on_value.tensor_idx)
off_value_expr = self.get_expr(off_value.tensor_idx)
# Getting depth value
depth = self.get_tensor_value(depth)
if isinstance(depth, np.ndarray):
depth = int(depth)
# Getting Axis from Option (Attributes)
assert op.BuiltinOptionsType() == BuiltinOptions.OneHotOptions
op_options = op.BuiltinOptions()
one_hot_options = OneHotOptions()
one_hot_options.Init(op_options.Bytes, op_options.Pos)
axis = one_hot_options.Axis()
# Setting dtype
dtype = self.get_tensor_type_str(on_value.tensor.Type())
out = _op.one_hot(indices_expr, on_value_expr, off_value_expr, depth, axis, dtype)
return out
def convert_reverse_v2(self, op):
"""Convert TFLite REVERSE_V2"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensor's length should be 2"
input_expr = self.get_expr(input_tensors[0].tensor_idx)
# Getting axis value
axis = self.get_tensor_value(input_tensors[1])
if isinstance(axis, np.ndarray):
assert len(axis) == 1, "TFLite does not support multi-axis yet"
axis = int(axis)
out = _op.reverse(input_expr, axis)
return out
def convert_matrix_set_diag(self, op):
"""Convert TFLite MATRIX_SET_DIAG"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensor's length should be 2"
assert (
input_tensors[0].tensor.Type() == input_tensors[1].tensor.Type()
), "input and diagonal should be the same type of tensors"
if input_tensors[0].qnn_params:
# Check that input and output tensor have same qnn params.
output_tensors = self.get_output_tensors(op)
assert self.has_same_qnn_params(
input_tensors[0], output_tensors[0]
), "TFLite MATRIX_SET_DIAG requires input and output tensors' \
scale and zero points to be equal"
# Check that input and diagonal tensor have same qnn params.
assert self.has_same_qnn_params(
input_tensors[0], input_tensors[1]
), "TFLite MATRIX_SET_DIAG requires input and diagonal tensors' \
scale and zero points to be equal"
input_expr = self.get_tensor_expr(input_tensors[0])
diagonal_expr = self.get_tensor_expr(input_tensors[1])
out = _op.matrix_set_diag(input_expr, diagonal_expr)
return out
def convert_matrix_diag(self, op):
"""Convert TFLite MATRIX_DIAG"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensor's length should be 1"
diagonal = input_tensors[0]
if diagonal.qnn_params:
# Check that diagonal and output tensor have same qnn params.
output_tensors = self.get_output_tensors(op)
assert self.has_same_qnn_params(
diagonal, output_tensors[0]
), "TFLite MATRIX_DIAG requires diagonal and output tensors' \
scale and zero points to be equal"
shape = diagonal.tensor.ShapeAsNumpy()
shape = np.append(shape, shape[-1])
dtype = self.get_tensor_type_str(diagonal.tensor.Type())
input_expr = _op.zeros(tuple(shape), dtype)
diagonal_expr = self.get_tensor_expr(diagonal)
out = _op.matrix_set_diag(input_expr, diagonal_expr)
return out
def get_expr(self, input_tensor_idx):
return self.exp_tab.get_expr(get_tensor_name(self.subgraph, input_tensor_idx))
def has_expr(self, input_tensor_idx):
return self.exp_tab.has_expr(get_tensor_name(self.subgraph, input_tensor_idx))
def get_tensor_expr(self, tensor):
""" Return the Relay expr for tensor. """
if self.has_expr(tensor.tensor_idx):
expr = self.get_expr(tensor.tensor_idx)
else:
type_str = self.get_tensor_type_str(tensor.tensor.Type())
expr = self.exp_tab.new_const(self.get_tensor_value(tensor), dtype=type_str)
return expr
def get_scalar_from_constant(expr):
""" Returns scalar value from Relay constant scalar. """
assert (
isinstance(expr, _expr.Constant) and not expr.data.shape
), "Expr is not a constant scalar."
value = expr.data.asnumpy()
assert value.dtype == np.dtype(np.int32) or value.dtype == np.dtype(
np.float32
), "value must be float32/int32"
return np.asscalar(value)
def get_tensor_from_constant(expr):
""" Returns tensor of values from Relay constant node. """
assert isinstance(expr, _expr.Constant)
value = expr.data.asnumpy()
assert value.dtype == np.dtype(np.int32) or value.dtype == np.dtype(
np.float32
), "value must be float32/int32"
return value
def build_str_map(obj):
"""Build string map of TFLite enum int value
Parameters
----------
obj:
TFLite class which contains enum int value, such as BuiltInOptions
Returns
-------
String representation map of TFLite class enum int value
"""
ret = {}
for field_name in dir(obj):
if not field_name.startswith("_"):
field_value = getattr(obj, field_name)
if isinstance(field_value, int):
ret[field_value] = field_name
return ret
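# For example, build_str_map(ActivationFunctionType()) produces a map roughly of
# the form {0: "NONE", 1: "RELU", 2: "RELU_N1_TO_1", 3: "RELU6", ...}, which is
# used to build readable error messages for unsupported enum values.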
# SAME padding: https://www.tensorflow.org/api_guides/python/nn
def get_pad_value(data, kernel, stride):
"""Get the pad tuple of value for SAME padding
Parameters
----------
data:
1D input data
kernel:
1D input kernel
stride:
1D input stride
Returns
-------
pad tuple of value
"""
out = int(math.ceil(float(data) / float(stride)))
pad = max(0, (out - 1) * stride + kernel - data)
pad_before = pad // 2
pad_after = pad - pad_before
return pad_before, pad_after
def get_tensor_name(subgraph, tensor_idx):
"""Get the tensor name.
Parameters
----------
subgraph:
tflite.Subgraph.Subgraph
tensor:
tensor index in subgraph
Returns
-------
tensor name in UTF-8 encoding
"""
return subgraph.Tensors(tensor_idx).Name().decode("utf-8")
def from_tflite(model, shape_dict, dtype_dict):
"""Convert from tflite model into compatible relay Function.
Parameters
----------
model:
tflite.Model or tflite.Model.Model (depending on tflite version)
shape_dict : dict of str to int list/tuple
Input shapes of the model.
dtype_dict : dict of str to str
Input types of the model.
Returns
-------
mod : tvm.IRModule
The relay module for compilation.
params : dict of str to tvm.nd.NDArray
The parameter dict to be used by relay
"""
try:
import tflite.SubGraph
import tflite.BuiltinOperator
except ImportError:
raise ImportError("The tflite package must be installed")
# TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1
try:
import tflite
assert isinstance(model, tflite.Model)
except TypeError:
import tflite.Model
assert isinstance(model, tflite.Model.Model)
# keep the same as tflite
assert model.SubgraphsLength() == 1, "only support one subgraph (main subgraph)"
subgraph = model.Subgraphs(0)
# model inputs / outputs
model_inputs = subgraph.InputsAsNumpy()
model_outputs = subgraph.OutputsAsNumpy()
exp_tab = ExprTable()
for model_input in model_inputs:
model_input_name = get_tensor_name(subgraph, model_input)
shape = shape_dict[model_input_name] if model_input_name in shape_dict else None
dtype = dtype_dict[model_input_name] if model_input_name in dtype_dict else "float32"
exp_tab.set_expr(model_input_name, _expr.var(model_input_name, shape=shape, dtype=dtype))
# op code in model
op_converter = OperatorConverter(model, subgraph, exp_tab)
op_converter.check_unsupported_ops()
op_converter.convert_op_to_relay()
# params and outputs
params = {k: _nd.array(np.array(v)) for k, v in exp_tab.params.items()}
outputs = [exp_tab.get_expr(get_tensor_name(subgraph, i)) for i in model_outputs]
outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
func = _function.Function(analysis.free_vars(outputs), outputs)
mod = IRModule.from_expr(func)
return mod, params
| tqchen/tvm | python/tvm/relay/frontend/tflite.py | Python | apache-2.0 | 137,911 | 0.001581 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class VirtualNetworkGatewaysOperations(object):
"""VirtualNetworkGatewaysOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-03-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-03-01"
self.config = config
def create_or_update(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a virtual network gateway in the specified resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to create or update virtual
network gateway operation.
:type parameters: :class:`VirtualNetworkGateway
<azure.mgmt.network.v2017_03_01.models.VirtualNetworkGateway>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`VirtualNetworkGateway
<azure.mgmt.network.v2017_03_01.models.VirtualNetworkGateway>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VirtualNetworkGateway')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified virtual network gateway by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`VirtualNetworkGateway
<azure.mgmt.network.v2017_03_01.models.VirtualNetworkGateway>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Deletes the specified virtual network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.delete(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [204, 202, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
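    # The nested helpers above implement the usual Azure long-running-operation
    # pattern: long_running_send issues the initial DELETE, get_long_running_status
    # polls the status link, and get_long_running_output maps the final response.
    # A hedged caller-side sketch (names are illustrative):
    #   poller = network_client.virtual_network_gateways.delete('my-rg', 'my-gw')
    #   poller.wait()  # or poller.result(); both block until the operation ends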
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all virtual network gateways by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`VirtualNetworkGatewayPaged
<azure.mgmt.network.v2017_03_01.models.VirtualNetworkGatewayPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualNetworkGatewayPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualNetworkGatewayPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
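    # VirtualNetworkGatewayPaged is a lazy pager: internal_paging is re-invoked
    # with each service-provided next link until exhausted, so a caller can just
    # iterate (client name below is an assumption):
    #   for gw in network_client.virtual_network_gateways.list('my-rg'):
    #       print(gw.name)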
def reset(
self, resource_group_name, virtual_network_gateway_name, gateway_vip=None, custom_headers=None, raw=False, **operation_config):
"""Resets the primary of the virtual network gateway in the specified
resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param gateway_vip: Virtual network gateway vip address supplied to
the begin reset of the active-active feature enabled gateway.
:type gateway_vip: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`VirtualNetworkGateway
<azure.mgmt.network.v2017_03_01.models.VirtualNetworkGateway>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if gateway_vip is not None:
query_parameters['gatewayVip'] = self._serialize.query("gateway_vip", gateway_vip, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [202, 200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def generatevpnclientpackage(
self, resource_group_name, virtual_network_gateway_name, processor_architecture, custom_headers=None, raw=False, **operation_config):
"""Generates VPN client package for P2S client of the virtual network
gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param processor_architecture: VPN client Processor Architecture.
Possible values are: 'AMD64' and 'X86'. Possible values include:
'Amd64', 'X86'
:type processor_architecture: str or :class:`ProcessorArchitecture
<azure.mgmt.network.v2017_03_01.models.ProcessorArchitecture>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: str
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.VpnClientParameters(processor_architecture=processor_architecture)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VpnClientParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 202:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
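    # Unlike the surrounding POST operations, this one is not wrapped in a
    # poller: a 202 response is deserialized as a plain string (typically the
    # URL from which the generated client package can be downloaded).
    # Illustrative call, with 'ops' standing in for this operations class:
    #   package_url = ops.generatevpnclientpackage('my-rg', 'my-gw', 'Amd64')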
def get_bgp_peer_status(
self, resource_group_name, virtual_network_gateway_name, peer=None, custom_headers=None, raw=False, **operation_config):
"""The GetBgpPeerStatus operation retrieves the status of all BGP peers.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param peer: The IP address of the peer to retrieve the status of.
:type peer: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`BgpPeerStatusListResult
<azure.mgmt.network.v2017_03_01.models.BgpPeerStatusListResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getBgpPeerStatus'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if peer is not None:
query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BgpPeerStatusListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get_learned_routes(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""This operation retrieves a list of routes the virtual network gateway
has learned, including routes learned from BGP peers.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`GatewayRouteListResult
<azure.mgmt.network.v2017_03_01.models.GatewayRouteListResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def get_advertised_routes(
self, resource_group_name, virtual_network_gateway_name, peer, custom_headers=None, raw=False, **operation_config):
"""This operation retrieves a list of routes the virtual network gateway
is advertising to the specified peer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param peer: The IP address of the peer
:type peer: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`GatewayRouteListResult
<azure.mgmt.network.v2017_03_01.models.GatewayRouteListResult>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(request, header_parameters, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
| SUSE/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_03_01/operations/virtual_network_gateways_operations.py | Python | mit | 36,131 | 0.002353 |
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Mixins to add common attributes and relationships. Note, all model classes
must also inherit from ``db.Model``. For example::
class Market(BusinessObject, db.Model):
__tablename__ = 'markets'
"""
# pylint: disable=no-self-argument
# All declared_attr properties that are class level as per sqlalchemy
# documentation, are reported as false positives by pylint.
from logging import getLogger
from uuid import uuid1
import datetime
from sqlalchemy import event
from sqlalchemy import orm
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import validates
from sqlalchemy.orm.session import Session
from ggrc import builder
from ggrc import db
from ggrc.models import reflection
from ggrc.models.deferred import deferred
from ggrc.models.inflector import ModelInflectorDescriptor
from ggrc.models.reflection import AttributeInfo
from ggrc.models.mixins.customattributable import CustomAttributable
from ggrc.models.mixins.notifiable import Notifiable
from ggrc.utils import create_stub
from ggrc.fulltext import attributes
# pylint: disable=invalid-name
logger = getLogger(__name__)
class Identifiable(object):
"""A model with an ``id`` property that is the primary key."""
id = db.Column(db.Integer, primary_key=True) # noqa
# REST properties
_publish_attrs = ['id', 'type']
_update_attrs = []
_inflector = ModelInflectorDescriptor()
@builder.simple_property
def type(self):
return self.__class__.__name__
@classmethod
def eager_query(cls):
mapper_class = cls._sa_class_manager.mapper.base_mapper.class_
return db.session.query(cls).options(
db.Load(mapper_class).undefer_group(
mapper_class.__name__ + '_complete'),
)
@classmethod
def eager_inclusions(cls, query, include_links):
"""Load related items listed in include_links eagerly."""
options = []
for include_link in include_links:
inclusion_class = getattr(cls, include_link).property.mapper.class_
options.append(
orm.subqueryload(include_link)
.undefer_group(inclusion_class.__name__ + '_complete'))
return query.options(*options)
@declared_attr
def __table_args__(cls):
extra_table_args = AttributeInfo.gather_attrs(cls, '_extra_table_args')
table_args = []
table_dict = {}
for table_arg in extra_table_args:
if callable(table_arg):
table_arg = table_arg()
if isinstance(table_arg, (list, tuple, set)):
if isinstance(table_arg[-1], (dict,)):
table_dict.update(table_arg[-1])
table_args.extend(table_arg[:-1])
else:
table_args.extend(table_arg)
elif isinstance(table_arg, (dict,)):
table_dict.update(table_arg)
else:
table_args.append(table_arg)
if len(table_dict) > 0:
table_args.append(table_dict)
return tuple(table_args,)
class ChangeTracked(object):
"""A model with fields to tracked the last user to modify the model, the
creation time of the model, and the last time the model was updated.
"""
@declared_attr
def modified_by_id(cls):
"""Id of user who did the last modification of the object."""
return deferred(db.Column(db.Integer), cls.__name__)
@declared_attr
def created_at(cls):
"""Date of creation. Set to current time on object creation."""
column = db.Column(
db.DateTime,
nullable=False,
default=db.text('current_timestamp'),
)
return deferred(column, cls.__name__)
@declared_attr
def updated_at(cls):
"""Date of last update. Set to current time on object creation/update."""
column = db.Column(
db.DateTime,
nullable=False,
default=db.text('current_timestamp'),
onupdate=db.text('current_timestamp'),
)
return deferred(column, cls.__name__)
@declared_attr
def modified_by(cls):
"""Relationship to user referenced by modified_by_id."""
return db.relationship(
'Person',
primaryjoin='{0}.modified_by_id == Person.id'.format(cls.__name__),
foreign_keys='{0}.modified_by_id'.format(cls.__name__),
uselist=False,
)
@staticmethod
def _extra_table_args(model):
"""Apply extra table args (like indexes) to model definition."""
return (
db.Index('ix_{}_updated_at'.format(model.__tablename__), 'updated_at'),
)
# TODO Add a transaction id, this will be handy for generating etags
# and for tracking the changes made to several resources together.
# transaction_id = db.Column(db.Integer)
# REST properties
_publish_attrs = [
'modified_by',
'created_at',
'updated_at',
]
_fulltext_attrs = [
attributes.DatetimeFullTextAttr('created_at', 'created_at'),
attributes.DatetimeFullTextAttr('updated_at', 'updated_at'),
attributes.FullTextAttr("modified_by", "modified_by", ["name", "email"]),
]
_update_attrs = []
_aliases = {
"updated_at": {
"display_name": "Last Updated",
"filter_only": True,
},
"created_at": {
"display_name": "Created Date",
"filter_only": True,
},
}
@classmethod
def indexed_query(cls):
return super(ChangeTracked, cls).indexed_query().options(
orm.Load(cls).load_only("created_at", "updated_at"),
orm.Load(cls).joinedload(
"modified_by"
).load_only(
"name", "email", "id"
),
)
class Titled(object):
"""Mixin that defines `title` field.
Strips title on update and defines optional UNIQUE constraint on it.
"""
@validates('title')
def validate_title(self, key, value):
"""Validates and cleans Title that has leading/trailing spaces"""
# pylint: disable=unused-argument,no-self-use
return value if value is None else value.strip()
@declared_attr
def title(cls):
return deferred(db.Column(db.String, nullable=False), cls.__name__)
@classmethod
def indexed_query(cls):
return super(Titled, cls).indexed_query().options(
orm.Load(cls).load_only("title"),
)
@staticmethod
def _extra_table_args(model):
"""If model._title_uniqueness is set, apply UNIQUE constraint to title."""
if getattr(model, '_title_uniqueness', True):
return (
db.UniqueConstraint(
'title', name='uq_t_{}'.format(model.__tablename__)),
)
return ()
# REST properties
_publish_attrs = ['title']
_fulltext_attrs = ['title']
_sanitize_html = ['title']
_aliases = {"title": "Title"}
class Described(object):
"""Mixin that defines `description` field."""
@declared_attr
def description(cls):
return deferred(db.Column(db.Text), cls.__name__)
# REST properties
_publish_attrs = ['description']
_fulltext_attrs = ['description']
_sanitize_html = ['description']
_aliases = {"description": "Description"}
@classmethod
def indexed_query(cls):
return super(Described, cls).indexed_query().options(
orm.Load(cls).load_only("description"),
)
class Noted(object):
"""Mixin that defines `notes` field."""
@declared_attr
def notes(cls):
return deferred(db.Column(db.Text), cls.__name__)
# REST properties
_publish_attrs = ['notes']
_fulltext_attrs = ['notes']
_sanitize_html = ['notes']
_aliases = {"notes": "Notes"}
@classmethod
def indexed_query(cls):
return super(Noted, cls).indexed_query().options(
orm.Load(cls).load_only("notes"),
)
class Hyperlinked(object):
"""Mixin that defines `url` and `reference_url` fields."""
@declared_attr
def url(cls):
return deferred(db.Column(db.String), cls.__name__)
@declared_attr
def reference_url(cls):
return deferred(db.Column(db.String), cls.__name__)
# REST properties
_publish_attrs = ['url', 'reference_url']
_aliases = {
"url": "Url",
"reference_url": "Reference URL",
}
_fulltext_attrs = [
'url',
'reference_url',
]
@classmethod
def indexed_query(cls):
return super(Hyperlinked, cls).indexed_query().options(
orm.Load(cls).load_only("url", "reference_url"),
)
class Hierarchical(object):
"""Mixin that defines `parent` and `child` fields to organize hierarchy."""
@declared_attr
def parent_id(cls):
return deferred(db.Column(
db.Integer, db.ForeignKey('{0}.id'.format(cls.__tablename__))),
cls.__name__)
@declared_attr
def children(cls):
return db.relationship(
cls.__name__,
backref=db.backref(
'parent', remote_side='{0}.id'.format(cls.__name__)),
)
# REST properties
_publish_attrs = [
'children',
'parent',
]
_fulltext_attrs = [
'children',
'parent',
]
@classmethod
def indexed_query(cls):
return super(Hierarchical, cls).indexed_query().options(
orm.Load(cls).subqueryload("children"),
orm.Load(cls).joinedload("parent"),
)
@classmethod
def eager_query(cls):
query = super(Hierarchical, cls).eager_query()
return query.options(
orm.subqueryload('children'),
# orm.joinedload('parent'),
)
class Timeboxed(object):
"""Mixin that defines `start_date` and `end_date` fields."""
@declared_attr
def start_date(cls):
return deferred(db.Column(db.Date), cls.__name__)
@declared_attr
def end_date(cls):
return deferred(db.Column(db.Date), cls.__name__)
# REST properties
_publish_attrs = ['start_date', 'end_date']
_aliases = {
"start_date": "Effective Date",
"end_date": "Stop Date",
}
_fulltext_attrs = [
attributes.DateFullTextAttr('start_date', 'start_date'),
attributes.DateFullTextAttr('end_date', 'end_date'),
]
@classmethod
def indexed_query(cls):
return super(Timeboxed, cls).indexed_query().options(
orm.Load(cls).load_only("start_date", "end_date"),
)
class Stateful(object):
"""Mixin that defines `status` field and status validation logic.
TODO: unify with Statusable.
"""
@declared_attr
def status(cls):
return deferred(db.Column(
db.String, default=cls.default_status, nullable=False), cls.__name__)
_publish_attrs = ['status']
_fulltext_attrs = ['status']
_aliases = {
"status": {
"display_name": "State",
"mandatory": False
}
}
@classmethod
def default_status(cls):
return cls.valid_statuses()[0]
@classmethod
def valid_statuses(cls):
return cls.VALID_STATES
@validates('status')
def validate_status(self, key, value):
"""Use default status if value is None, check that it is in valid set."""
# Sqlalchemy only uses one validator per status (not necessarily the
# first) and ignores others. This enables cooperation between validators
# since there are other mixins that want to touch 'status'.
if hasattr(super(Stateful, self), "validate_status"):
value = super(Stateful, self).validate_status(key, value)
if value is None:
value = self.default_status()
if value not in self.valid_statuses():
message = u"Invalid state '{}'".format(value)
raise ValueError(message)
return value
@classmethod
def indexed_query(cls):
return super(Stateful, cls).indexed_query().options(
orm.Load(cls).load_only("status"),
)
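# A hedged illustration of the Stateful contract (the Market model is
# hypothetical; only VALID_STATES and the validator behaviour are from above):
#   class Market(Stateful, db.Model):
#       VALID_STATES = ('Draft', 'Active')
#   Market(status=None).status  -> 'Draft'  (default_status() substitutes None)
#   Market(status='Bogus')      -> ValueError("Invalid state 'Bogus'")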
class FinishedDate(object):
"""Adds 'Finished Date' which is set when status is set to a finished state.
Requires Stateful to be mixed in as well.
"""
NOT_DONE_STATES = None
DONE_STATES = {}
# pylint: disable=method-hidden
# because validator only sets date per model instance
@declared_attr
def finished_date(cls):
return deferred(
db.Column(db.DateTime, nullable=True),
cls.__name__
)
_publish_attrs = [
reflection.PublishOnly('finished_date')
]
_aliases = {
"finished_date": "Finished Date"
}
_fulltext_attrs = [
attributes.DatetimeFullTextAttr('finished_date', 'finished_date'),
]
@validates('status')
def validate_status(self, key, value):
"""Update finished_date given the right status change."""
# Sqlalchemy only uses one validator per status (not necessarily the
# first) and ignores others. This enables cooperation between validators
# since 'status' is not defined here.
if hasattr(super(FinishedDate, self), "validate_status"):
value = super(FinishedDate, self).validate_status(key, value)
# pylint: disable=unsupported-membership-test
# short circuit
if (value in self.DONE_STATES and
(self.NOT_DONE_STATES is None or
self.status in self.NOT_DONE_STATES)):
self.finished_date = datetime.datetime.now()
elif ((self.NOT_DONE_STATES is None or
value in self.NOT_DONE_STATES) and
self.status in self.DONE_STATES):
self.finished_date = None
return value
@classmethod
def indexed_query(cls):
return super(FinishedDate, cls).indexed_query().options(
orm.Load(cls).load_only("finished_date"),
)
class VerifiedDate(object):
"""Adds 'Verified Date' which is set when status is set to 'Verified'.
When object is verified the status is overridden to 'Final' and the
information about verification exposed as the 'verified' boolean.
Requires Stateful to be mixed in as well.
"""
VERIFIED_STATES = {u"Verified"}
DONE_STATES = {}
# pylint: disable=method-hidden
# because validator only sets date per model instance
@declared_attr
def verified_date(cls):
return deferred(
db.Column(db.DateTime, nullable=True),
cls.__name__
)
@hybrid_property
def verified(self):
return self.verified_date != None # noqa
_publish_attrs = [
reflection.PublishOnly('verified'),
reflection.PublishOnly('verified_date'),
]
_aliases = {
"verified_date": "Verified Date"
}
_fulltext_attrs = [
attributes.DatetimeFullTextAttr("verified_date", "verified_date"),
"verified",
]
@classmethod
def indexed_query(cls):
return super(VerifiedDate, cls).indexed_query().options(
orm.Load(cls).load_only("verified_date"),
)
@validates('status')
def validate_status(self, key, value):
"""Update verified_date on status change, make verified status final."""
# Sqlalchemy only uses one validator per status (not necessarily the
# first) and ignores others. This enables cooperation between validators
# since 'status' is not defined here.
if hasattr(super(VerifiedDate, self), "validate_status"):
value = super(VerifiedDate, self).validate_status(key, value)
if (value in self.VERIFIED_STATES and
self.status not in self.VERIFIED_STATES):
self.verified_date = datetime.datetime.now()
value = self.FINAL_STATE
elif (value not in self.VERIFIED_STATES and
value not in self.DONE_STATES and
(self.status in self.VERIFIED_STATES or
self.status in self.DONE_STATES)):
self.verified_date = None
return value
class ContextRBAC(object):
"""Defines `context` relationship for Context-based access control."""
@declared_attr
def context_id(cls):
return db.Column(db.Integer, db.ForeignKey('contexts.id'))
@declared_attr
def context(cls):
return db.relationship('Context', uselist=False)
@staticmethod
def _extra_table_args(model):
return (
db.Index('fk_{}_contexts'.format(model.__tablename__), 'context_id'),
)
_publish_attrs = ['context']
@classmethod
def indexed_query(cls):
return super(ContextRBAC, cls).indexed_query().options(
orm.Load(cls).load_only("context_id"),
)
# @classmethod
# def eager_query(cls):
# from sqlalchemy import orm
# query = super(ContextRBAC, cls).eager_query()
# return query.options(
# orm.subqueryload('context'))
def is_attr_of_type(object_, attr_name, mapped_class):
"""Check if relationship property points to mapped_class"""
cls = object_.__class__
if isinstance(attr_name, basestring):
if hasattr(cls, attr_name):
cls_attr = getattr(cls, attr_name)
if (hasattr(cls_attr, "property") and
isinstance(cls_attr.property,
orm.properties.RelationshipProperty) and
cls_attr.property.mapper.class_ == mapped_class):
return True
return False
class Base(ChangeTracked, ContextRBAC, Identifiable):
"""Several of the models use the same mixins. This class covers that common
case.
"""
_people_log_mappings = [
"principal_assessor_id",
"secondary_assessor_id",
"contact_id",
"secondary_contact_id",
"modified_by_id",
"attribute_object_id", # used for person mapping CA
]
@staticmethod
def _person_stub(id_):
return {
'type': u"Person",
'id': id_,
'context_id': None,
'href': u"/api/people/{}".format(id_),
}
def log_json_base(self):
"""Get a dict with attributes of self that is easy to serialize to json.
This method lists only first-class attributes.
"""
res = {}
for column in self.__table__.columns:
try:
res[column.name] = getattr(self, column.name)
except AttributeError:
pass
res["display_name"] = self.display_name
return res
def log_json(self):
"""Get a dict with attributes and related objects of self.
This method converts additionally person-mapping attributes and owners
to person stubs.
"""
from ggrc import models
res = self.log_json_base()
for attr in self._people_log_mappings:
if hasattr(self, attr):
value = getattr(self, attr)
# hardcoded [:-3] is used to strip "_id" suffix
res[attr[:-3]] = self._person_stub(value) if value else None
if hasattr(self, "owners"):
res["owners"] = [
self._person_stub(owner.id) for owner in self.owners if owner
]
for attr_name in AttributeInfo.gather_publish_attrs(self.__class__):
if is_attr_of_type(self, attr_name, models.Option):
attr = getattr(self, attr_name)
if attr:
stub = create_stub(attr)
stub["title"] = attr.title
else:
stub = None
res[attr_name] = stub
return res
@builder.simple_property
def display_name(self):
try:
return self._display_name()
except: # pylint: disable=bare-except
logger.warning("display_name error in %s", type(self), exc_info=True)
return ""
def _display_name(self):
return getattr(self, "title", None) or getattr(self, "name", "")
def copy_into(self, target_object, columns, **kwargs):
"""Copy current object values into a target object.
Copy all values listed in columns from current class to target class and
use kwargs as defaults with precedence. Note that this is a shallow copy
and any mutable values will be shared between current and target objects.
Args:
target_object: object to which we want to copy current values. This
function will mutate the target_object parameter if it is set.
columns: list with all attribute names that we want to set in the
target_object.
kwargs: additional default values.
Returns:
target_object with all values listed in columns set.
"""
target = target_object or type(self)()
columns = set(columns).union(kwargs.keys())
for name in columns:
if name in kwargs:
value = kwargs[name]
else:
value = getattr(self, name)
setattr(target, name, value)
return target
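  # Hedged usage sketch for copy_into (model and column names are illustrative):
  #   draft = market.copy_into(None, ["title", "description"], status="Draft")
  # copies title/description from `market` into a fresh instance of the same
  # class, takes status from the kwarg, and shares any mutable values (shallow).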
CACHED_ATTRIBUTE_MAP = None
@classmethod
def attributes_map(cls):
if cls.CACHED_ATTRIBUTE_MAP:
return cls.CACHED_ATTRIBUTE_MAP
aliases = AttributeInfo.gather_aliases(cls)
cls.CACHED_ATTRIBUTE_MAP = {}
for key, value in aliases.items():
if isinstance(value, dict):
name = value["display_name"]
filter_by = None
if value.get("filter_by"):
filter_by = getattr(cls, value["filter_by"], None)
else:
name = value
filter_by = None
if not name:
continue
tmp = getattr(cls, "PROPERTY_TEMPLATE", "{}")
name = tmp.format(name)
key = tmp.format(key)
cls.CACHED_ATTRIBUTE_MAP[name.lower()] = (key.lower(), filter_by)
return cls.CACHED_ATTRIBUTE_MAP
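  # Shape of the cached map, sketched from the aliases defined in this module:
  # each lowercased display name (optionally wrapped in PROPERTY_TEMPLATE) maps
  # to (attribute key, filter_by callable or None), e.g.
  #   {"code": ("slug", None), "state": ("status", None), "title": ("title", None)}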
class Slugged(Base):
"""Several classes make use of the common mixins and additional are
"slugged" and have additional fields related to their publishing in the
system.
"""
@declared_attr
def slug(cls):
return deferred(db.Column(db.String, nullable=False), cls.__name__)
@staticmethod
def _extra_table_args(model):
if getattr(model, '_slug_uniqueness', True):
return (
db.UniqueConstraint('slug',
name='uq_{}'.format(model.__tablename__)),
)
return ()
# REST properties
_publish_attrs = ['slug']
_fulltext_attrs = ['slug']
_sanitize_html = ['slug']
_aliases = {
"slug": {
"display_name": "Code",
"description": ("Must be unique. Can be left empty for "
"auto generation. If updating or deleting, "
"code is required"),
}
}
@classmethod
def indexed_query(cls):
return super(Slugged, cls).indexed_query().options(
orm.Load(cls).load_only("slug"),
)
@classmethod
def generate_slug_for(cls, obj):
_id = getattr(obj, 'id', uuid1())
obj.slug = "{0}-{1}".format(cls.generate_slug_prefix_for(obj), _id)
# We need to make sure the generated slug is not already present in the
# database. If it is, we increment the id until we find a slug that is
# unique.
# A better approach would be to query the database for slug uniqueness
    # only if there was a conflict, but because we can't easily catch a
# session rollback at this point we are sticking with a
# suboptimal solution for now.
INCREMENT = 1000
while cls.query.filter(cls.slug == obj.slug).count():
_id += INCREMENT
obj.slug = "{0}-{1}".format(cls.generate_slug_prefix_for(obj), _id)
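  # Illustrative outcome: a Market instance with id 42 ends up with the slug
  # "MARKET-42"; on a collision the id is bumped by INCREMENT until unique,
  # e.g. "MARKET-1042".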
@classmethod
def generate_slug_prefix_for(cls, obj):
return obj.__class__.__name__.upper()
@classmethod
def ensure_slug_before_flush(cls, session, flush_context, instances):
"""Set the slug to a default string so we don't run afoul of the NOT NULL
constraint.
"""
# pylint: disable=unused-argument
for o in session.new:
if isinstance(o, Slugged) and (o.slug is None or o.slug == ''):
o.slug = str(uuid1())
o._replace_slug = True
@classmethod
def ensure_slug_after_flush_postexec(cls, session, flush_context):
"""Replace the placeholder slug with a real slug that will be set on the
next flush/commit.
"""
# pylint: disable=unused-argument
for o in session.identity_map.values():
if isinstance(o, Slugged) and hasattr(o, '_replace_slug'):
o.generate_slug_for(o)
delattr(o, '_replace_slug')
event.listen(Session, 'before_flush', Slugged.ensure_slug_before_flush)
event.listen(
Session, 'after_flush_postexec', Slugged.ensure_slug_after_flush_postexec)
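# Together these listeners give every new Slugged object a throwaway UUID slug
# at flush time (to satisfy the NOT NULL constraint) and swap in the readable
# "<CLASS>-<id>" form once the database id exists, persisted on the next flush.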
class WithContact(object):
"""Mixin that defines `contact` and `secondary_contact` fields."""
@declared_attr
def contact_id(cls):
return deferred(
db.Column(db.Integer, db.ForeignKey('people.id')), cls.__name__)
@declared_attr
def secondary_contact_id(cls):
return deferred(
db.Column(db.Integer, db.ForeignKey('people.id')), cls.__name__)
@declared_attr
def contact(cls):
return db.relationship(
'Person',
uselist=False,
foreign_keys='{}.contact_id'.format(cls.__name__))
@declared_attr
def secondary_contact(cls):
return db.relationship(
'Person',
uselist=False,
foreign_keys='{}.secondary_contact_id'.format(cls.__name__))
@staticmethod
def _extra_table_args(model):
return (
db.Index('fk_{}_contact'.format(model.__tablename__), 'contact_id'),
db.Index('fk_{}_secondary_contact'.format(
model.__tablename__), 'secondary_contact_id'),
)
_publish_attrs = ['contact', 'secondary_contact']
_fulltext_attrs = [
attributes.FullTextAttr(
"contact",
"contact",
["name", "email"]
),
attributes.FullTextAttr(
'secondary_contact',
'secondary_contact',
["name", "email"]),
]
@classmethod
def indexed_query(cls):
return super(WithContact, cls).indexed_query().options(
orm.Load(cls).joinedload(
"contact"
).load_only(
"name",
"email",
"id"
),
orm.Load(cls).joinedload(
"secondary_contact"
).load_only(
"name",
"email",
"id"
),
)
_aliases = {
"contact": "Primary Contact",
"secondary_contact": "Secondary Contact",
}
class BusinessObject(Stateful, Noted, Described, Hyperlinked,
Titled, Slugged):
"""Mixin that groups most commonly-used mixins into one."""
VALID_STATES = (
'Draft',
'Active',
'Deprecated'
)
_aliases = {
"status": {
"display_name": "State",
"mandatory": False,
"description": "Options are:\n{}".format('\n'.join(VALID_STATES))
}
}
# This class is just a marker interface/mixin to indicate that a model type
# supports custom attributes.
class TestPlanned(object):
"""Mixin that defines `test_plan` field."""
@declared_attr
def test_plan(cls):
return deferred(db.Column(db.Text), cls.__name__)
# REST properties
_publish_attrs = ['test_plan']
_fulltext_attrs = ['test_plan']
_sanitize_html = ['test_plan']
_aliases = {"test_plan": "Test Plan"}
@classmethod
def indexed_query(cls):
return super(TestPlanned, cls).indexed_query().options(
orm.Load(cls).load_only("test_plan"),
)
__all__ = [
"Base",
"BusinessObject",
"ChangeTracked",
"ContextRBAC",
"CustomAttributable",
"Described",
"FinishedDate",
"Hierarchical",
"Hyperlinked",
"Identifiable",
"Noted",
"Notifiable",
"Slugged",
"Stateful",
"TestPlanned",
"Timeboxed",
"Titled",
"VerifiedDate",
"WithContact",
]
| AleksNeStu/ggrc-core | src/ggrc/models/mixins/__init__.py | Python | apache-2.0 | 26,526 | 0.010631 |
"""Parse the biothings schema"""
from .config import BIOTHINGS_SCHEMA_URL, PREFIX_TO_REMOVE
from .utils.dataload import load_json_or_yaml
from .utils.common import remove_prefix
class SchemaParser():
def __init__(self):
self.schema_json = remove_prefix(load_json_or_yaml(BIOTHINGS_SCHEMA_URL),
PREFIX_TO_REMOVE)
self.properties = {}
self.ids = []
self.clses = []
self.process_schema()
def process_schema(self):
for rec in self.schema_json['@graph']:
if "rdfs:subPropertyOf" in rec and rec["rdfs:subPropertyOf"]["@id"] == "http://schema.org/identifier":
self.ids.append(rec["@id"])
elif rec["@type"] == "rdf:Property":
self.properties[rec["@id"]] = {"inverse_property": None}
if "schema:inverseOf" in rec:
self.properties[rec["@id"]]["inverse_property"] = rec["schema:inverseOf"]["@id"]
elif rec["@type"] == "rdfs:Class":
self.clses.append(rec["@id"])
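# Hedged usage sketch; assumes BIOTHINGS_SCHEMA_URL points at a reachable
# biothings schema document (network access is required to load it).
if __name__ == "__main__":
    parser = SchemaParser()
    print(len(parser.ids), "identifier properties")        # subproperties of schema:identifier
    print(len(parser.properties), "rdf:Property records")  # {id: {"inverse_property": ...}}
    print(len(parser.clses), "rdfs:Class records")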
| biothings/biothings_explorer | biothings_explorer/_deprecated_schema_parser.py | Python | apache-2.0 | 1,071 | 0.003735 |
'''
Precondition: the users tests must pass successfully.
'''
from datetime import datetime, timedelta
import time
import pytest
import requests
from kii import AccountType, exceptions as exc, results as rs
from kii.data import BucketType, clauses as cl
from tests.conf import (
get_env,
get_api_with_test_user,
cleanup,
)
GROUP_NAME = 'test_group'
BUCKET_ID = 'test_bucket'
class TestApplicationScopeData:
def setup_method(self, method):
""" setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class.
"""
cleanup()
self.api = get_api_with_test_user()
self.scope = self.api.data.application
def teardown_method(self, method):
""" teardown any state that was previously setup with a setup_method
call.
"""
try:
self.scope.delete_a_bucket(BUCKET_ID)
except exc.KiiBucketNotFoundError:
pass
cleanup()
def test_retrieve_bucket(self):
obj = self.scope(BUCKET_ID).create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
assert obj
bucket = self.scope.retrieve_a_bucket(BUCKET_ID)
assert isinstance(bucket, rs.BucketResult)
assert bucket.bucket_type is BucketType.READ_WRITE
assert bucket.size > 0
def test_delete_bucket(self):
obj = self.scope(BUCKET_ID).create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
assert obj
self.scope.delete_a_bucket(BUCKET_ID)
with pytest.raises(exc.KiiBucketNotFoundError):
self.scope.delete_a_bucket(BUCKET_ID)
def test_create_an_object(self):
obj = self.scope(BUCKET_ID).create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
assert isinstance(obj, rs.CreateResult)
assert obj.object_id
assert obj.created_at
assert isinstance(obj.created_at, datetime)
assert obj.data_type
assert obj.data_type == 'application/json'
def test_retrieve_an_object(self):
obj = self.scope(BUCKET_ID).create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
result = self.scope(BUCKET_ID).retrieve_an_object(obj.object_id)
assert isinstance(result, rs.ObjectResult)
assert result._id
assert isinstance(result._id, str)
assert result._created
assert result._modified
def test_fully_update_an_object(self):
bucket = self.scope(BUCKET_ID)
obj = bucket.create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 1
created = bucket.retrieve_an_object(obj.object_id)
assert created['int key'] == 1
assert created['str key'] == 'this is string'
assert created['dict key'] == {
'nest': 'nest value',
}
assert created['list key'] == [1, 2, 3]
updated = bucket.fully_update_an_object(obj.object_id, {
'str key': 'updated string',
'dict key': {
'nest': {
'nest2': 'nest and nest',
},
},
'list key': [4, 5, 6],
})
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 1
updated = bucket.retrieve_an_object(obj.object_id)
assert 'int key' not in updated
assert updated['str key'] == 'updated string'
assert updated['dict key'] == {
'nest': {
'nest2': 'nest and nest',
}
}
assert updated['list key'] == [4, 5, 6]
assert created._created == updated._created
assert created._modified != updated._modified
assert created._version != updated._version
def test_create_a_new_object_with_an_id(self):
bucket = self.scope(BUCKET_ID)
obj = bucket.create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
created = bucket.retrieve_an_object(obj.object_id)
assert created['int key'] == 1
assert created['str key'] == 'this is string'
assert created['dict key'] == {
'nest': 'nest value',
}
assert created['list key'] == [1, 2, 3]
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 1
created2 = bucket.create_a_new_object_with_an_id('new-object-id', {
'str key': 'created2 string',
'dict key': {
'nest': {
'nest2': 'nest and nest',
},
},
'list key': [4, 5, 6],
})
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 2
created2 = bucket.retrieve_an_object('new-object-id')
assert 'int key' not in created2
assert created2['str key'] == 'created2 string'
assert created2['dict key'] == {
'nest': {
'nest2': 'nest and nest',
}
}
assert created2['list key'] == [4, 5, 6]
assert created._created != created2._created
assert created._modified != created2._modified
assert created._version == 1
assert created2._version == 1
def test_partially_update_an_object(self):
bucket = self.scope(BUCKET_ID)
obj = bucket.create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 1
created = bucket.retrieve_an_object(obj.object_id)
assert created['int key'] == 1
assert created['str key'] == 'this is string'
assert created['dict key'] == {
'nest': 'nest value',
}
assert created['list key'] == [1, 2, 3]
updated = bucket.partially_update_an_object(obj.object_id, {
'str key': 'updated string',
'dict key': {
'nest': {
'nest2': 'nest and nest',
},
},
})
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 1
updated = bucket.retrieve_an_object(obj.object_id)
assert 'int key' in updated
assert updated['int key'] == 1
assert updated['str key'] == 'updated string'
assert updated['dict key'] == {
'nest': {
'nest2': 'nest and nest',
}
}
assert 'list key' in updated
assert updated['list key'] == [1, 2, 3]
assert created._created == updated._created
assert created._modified != updated._modified
assert created._version == 1
assert updated._version == 2
def test_delete_an_object(self):
bucket = self.scope(BUCKET_ID)
obj = bucket.create_an_object({
'int key': 1,
'str key': 'this is string',
'dict key': {
'nest': 'nest value',
},
'list key': [1, 2, 3],
})
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 1
created = bucket.retrieve_an_object(obj.object_id)
assert created['int key'] == 1
assert created['str key'] == 'this is string'
assert created['dict key'] == {
'nest': 'nest value',
}
assert created['list key'] == [1, 2, 3]
bucket.delete_an_object(obj.object_id)
info = self.scope.retrieve_a_bucket(BUCKET_ID)
assert info.size == 0
with pytest.raises(exc.KiiObjectNotFoundError):
obj = bucket.retrieve_an_object(obj.object_id)
def test_query_for_objects(self):
bucket = self.scope(BUCKET_ID)
OBJ_COUNT = 10
for i in range(OBJ_COUNT):
even = i % 2 == 0
bucket.create_an_object({
'index': i,
'desc': 'An object number is {0}.'.format(i + 1),
'name': 'test user',
'even': even,
})
# all
results = bucket.query_for_objects()
assert len(results) == OBJ_COUNT
# equal
results = bucket.query_for_objects(cl.Clause.eq('index', 3))
assert len(results) == 1
assert results[0]['index'] == 3
assert results[0]['desc'] == 'An object number is 4.'
# not
results = bucket.query_for_objects(cl.Clause.not_(cl.Clause.eq('index', 2)))
assert len(results) == OBJ_COUNT - 1
for r in results:
assert r['index'] != 2
# prefix
results = bucket.query_for_objects(cl.Clause.prefix('name', 'tes'))
assert len(results) == OBJ_COUNT
# range
results = bucket.query_for_objects(cl.RangeClause('index').le(2))
assert len(results) == 3
results = bucket.query_for_objects(cl.RangeClause('index').lt(2))
assert len(results) == 2
results = bucket.query_for_objects(cl.RangeClause('index').ge(2))
assert len(results) == OBJ_COUNT - 2
results = bucket.query_for_objects(cl.RangeClause('index').gt(2))
assert len(results) == OBJ_COUNT - 3
# in
results = bucket.query_for_objects(cl.Clause.in_('index', [1, 3, 4]))
assert len(results) == 3
for r in results:
assert r['index'] in [1, 3, 4]
# has
results = bucket.query_for_objects(cl.HasFieldClause('index', 'INTEGER'))
assert len(results) == OBJ_COUNT
results = bucket.query_for_objects(cl.HasFieldClause('index', 'STRING'))
assert len(results) == 0
results = bucket.query_for_objects(
cl.HasFieldClause('index', cl.HasFieldClause.Types.integer))
assert len(results) == OBJ_COUNT
results = bucket.query_for_objects(
cl.HasFieldClause('index', cl.HasFieldClause.Types.string))
assert len(results) == 0
# and
results = bucket.query_for_objects(
cl.AndClause(
cl.Clause.eq('even', True),
cl.RangeClause('index').le(6)
)
)
assert len(results) == 6 // 2 + 1
# or
results = bucket.query_for_objects(
cl.OrClause(
cl.Clause.eq('even', True),
cl.RangeClause('index').le(6)
)
)
assert len(results) == 6 + (OBJ_COUNT - 6) // 2
# order_by, descending
results = bucket.query_for_objects(order_by='index', descending=True)
for i, r in enumerate(results):
assert r['index'] == OBJ_COUNT - i - 1
results = bucket.query_for_objects(order_by='index', descending=False)
for i, r in enumerate(results):
assert r['index'] == i
# limit
results = bucket.query_for_objects(limit=2)
assert len(results) == 2
results = bucket.query_for_objects(limit=4)
assert len(results) == 4
results = bucket.query_for_objects(limit=OBJ_COUNT + 20)
assert len(results) == OBJ_COUNT
def test_query_for_objects_pagination_key(self):
bucket = self.scope(BUCKET_ID)
OBJ_COUNT = 20
for i in range(OBJ_COUNT):
even = i % 2 == 0
bucket.create_an_object({
'index': i,
'desc': 'An object number is {0}.'.format(i + 1),
'name': 'test user',
'even': even,
})
# pagination_key
results = bucket.query_for_objects(limit=3)
assert len(results) == 3
results = bucket.query_for_objects(limit=3,
pagination_key=results.next_pagination_key)
assert len(results) == 3
results = bucket.query_for_objects(pagination_key=results.next_pagination_key)
assert len(results) == OBJ_COUNT - 6
assert results.next_pagination_key is None
def test_query_for_objects_huge(self):
bucket = self.scope(BUCKET_ID)
OBJ_COUNT = 410
for i in range(OBJ_COUNT):
even = i % 2 == 0
bucket.create_an_object({
'index': i,
'desc': 'An object number is {0}.'.format(i + 1),
'name': 'test user',
'even': even,
})
# pagination_key
results = bucket.query_for_objects()
assert len(results) == OBJ_COUNT
| ta2xeo/python3-kii | tests/test_data/application/test_application_scope_data.py | Python | mit | 13,511 | 0.00037 |
import dnest4.classic as dn4
from pylab import *
plt.rcParams["font.family"] = "serif"
plt.rcParams["font.size"] = 16
plt.rc("text", usetex=True)
data = loadtxt('galaxies.txt')
posterior_sample = atleast_2d(dn4.my_loadtxt('posterior_sample.txt'))
x = linspace(0., 50.0, 10001)
def mixture(x, params):
N = int(params[7])
centers = params[8:108][0:N]
widths = exp(params[108:208][0:N]) + 1.0
weights = exp(params[208:308][0:N])
weights /= weights.sum()
y = zeros(x.shape)
for i in range(0, N):
# Don't plot flukey narrow things (which ought to eventually average
# out, but won't in a finite sample)
# if widths[i] >= 0.02:
y += weights[i]/widths[i]/sqrt(2.*pi)*exp(-0.5*(x - centers[i])**2/widths[i]**2)
return y
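# mixture() evaluates a Gaussian mixture density decoded from the packed
# parameter vector above (slot 7 = N, then centers, log-widths, log-weights):
#   p(x) = sum_i w_i / (sigma_i * sqrt(2*pi)) * exp(-0.5 * ((x - c_i) / sigma_i)**2)
# with sigma_i = exp(packed width) + 1 and the weights exponentiated, then
# normalised to sum to one.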
clf()
hist(data, 100, alpha=0.2, color="k", density=True)
y_tot = zeros(len(x))
for i in range(0, posterior_sample.shape[0]):
y = mixture(x, posterior_sample[i, :])
y_tot += y
plot(x, y_tot/posterior_sample.shape[0], 'g', linewidth=2)
xlabel("Velocity (1000 km/s)")
ylabel("Density")
savefig("galaxies.pdf", bbox_inches="tight")
show()
width = 0.3
bins = arange(0, 101) - 0.5*width
hist(posterior_sample[:,7], bins, width=width, density=True, color="k", alpha=0.2)
xlim([0, 100.5])
ylim([0, 0.05])
xlabel("Number of gaussians, $N$")
ylabel("Posterior Probability")
savefig("galaxies_N.pdf", bbox_inches="tight")
show()
| eggplantbren/DNest4 | code/Examples/RJObject_1DMixture/display.py | Python | mit | 1,414 | 0.004243 |
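# A small sketch of the flat parameter layout that mixture() above assumes:
# index 7 holds the number of active components N, indices 8:108 the centers,
# 108:208 the (log) widths and 208:308 the (log) weights, with only the first
# N entries of each 100-slot block in use. The values below are invented, and
# the snippet reuses mixture() as defined in the script above.
import numpy as np

params = np.zeros(308)
params[7] = 2                          # two active gaussians
params[8:10] = [10.0, 20.0]            # centers, in 1000 km/s
params[108:110] = np.log([1.0, 2.0])   # exp(.) + 1.0 gives widths 2.0 and 3.0
params[208:210] = np.log([1.0, 3.0])   # normalised weights become 0.25 and 0.75

x = np.linspace(0.0, 50.0, 1001)
density = mixture(x, params)           # mixture() from the script above
print(density.sum() * (x[1] - x[0]))   # roughly 1.0: the curve is a proper density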
#! /usr/bin/env python2
import sys, os
import flickrapi
import xml.etree.ElementTree
if len(sys.argv) < 2:
sys.stderr.write("usage: %s <filename> ...\n" % sys.argv[0])
sys.exit(1)
def auth():
api_key = "87af34fe62dafd3c5d6d4959ca92c193"
api_secret = "18ecfc909af569af"
flickr = flickrapi.FlickrAPI(api_key, api_secret)
(token, frob) = flickr.get_token_part_one(perms='write')
if not token: raw_input("Press ENTER after you authorized this program")
flickr.get_token_part_two((token, frob))
return flickr
def tags(filename):
dirname = os.path.dirname(filename)
res = ""
while len(dirname) > 0:
res = "%s %s" % (res, os.path.basename(dirname))
dirname = os.path.dirname(dirname)
return res
def upload(flickr, filename, tags):
response = flickr.upload(filename=filename, tags=tags, is_public=0)
if response.attrib["stat"] == "ok":
photoid = response.find("photoid").text
print("%s: stat:OK id:%s"% (filename, photoid))
else:
print("%s: stat:FAIL\n%s" % (filename, xml.etree.ElementTree.tostring(response)))
flickr = auth()
for filename in sys.argv[1:len(sys.argv)]:
upload(flickr, filename, tags(filename))
| juantascon/flickr_mass_upload | flickr_mass_upload.py | Python | gpl-3.0 | 1,220 | 0.009836 |
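# A standalone restatement of the tag derivation in tags() above, so its
# behaviour can be checked without Flickr credentials (the path is made up):
# every directory component becomes one tag, innermost directory first, and
# the result keeps a leading space, just like the original helper.
import os

def path_to_tags(filename):
    dirname, res = os.path.dirname(filename), ""
    while dirname:
        res = "%s %s" % (res, os.path.basename(dirname))
        dirname = os.path.dirname(dirname)
    return res

assert path_to_tags("holidays/2013/rome/img_001.jpg") == " rome 2013 holidays"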
# -*- coding: utf-8 -*-
# © 2016 Comunitea
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Order scheduled shipment",
"summary": "Sale order with scheduled shipment",
"version": "8.0.1.0.0",
"category": "Connector",
"author": "Nadia Ferreyra",
"license": "AGPL-3",
"installable": True,
"depends": [
"base",
"sale",
"connector",
"sale_stock",
"picking_invoice_pending"
],
"data": [
'data/job_channel_data.xml',
'views/sale_view.xml',
],
}
| jgmanzanas/CMNT_004_15 | project-addons/scheduled_shipment/__openerp__.py | Python | agpl-3.0 | 566 | 0 |
#!/usr/bin/env python
import sys
import os
import unittest
import zlib
import rospy
from flexbe_onboard.flexbe_onboard import FlexbeOnboard
from flexbe_core.proxy import ProxySubscriberCached
from flexbe_msgs.msg import BehaviorSelection, BEStatus, BehaviorLog, BehaviorModification
class TestOnboard(unittest.TestCase):
def __init__(self, name):
super(TestOnboard, self).__init__(name)
self.sub = ProxySubscriberCached({
'flexbe/status': BEStatus,
'flexbe/log': BehaviorLog
})
self.rate = rospy.Rate(100)
# make sure that behaviors can be imported
data_folder = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, data_folder)
# run onboard and add custom test behaviors to onboard lib
self.onboard = FlexbeOnboard()
self.lib = self.onboard._behavior_lib
self.lib._add_behavior_manifests(data_folder)
def assertStatus(self, expected, timeout):
""" Assert that the expected onboard status is received before the timeout. """
for i in range(int(timeout*100)):
self.rate.sleep()
if self.sub.has_msg('flexbe/status'):
break
else:
raise AssertionError('Did not receive a status as required.')
msg = self.sub.get_last_msg('flexbe/status')
self.sub.remove_last_msg('flexbe/status')
self.assertEqual(msg.code, expected)
return msg
def test_onboard_behaviors(self):
behavior_pub = rospy.Publisher('flexbe/start_behavior', BehaviorSelection, queue_size=1)
rospy.sleep(0.5) # wait for publisher
# wait for the initial status message
self.assertStatus(BEStatus.READY, 1)
# send simple behavior request without checksum
be_id, _ = self.lib.find_behavior("Test Behavior Log")
request = BehaviorSelection()
request.behavior_id = be_id
request.autonomy_level = 255
behavior_pub.publish(request)
self.assertStatus(BEStatus.ERROR, 2)
# send valid simple behavior request
with open(self.lib.get_sourcecode_filepath(be_id)) as f:
request.behavior_checksum = zlib.adler32(f.read().encode()) & 0x7fffffff
self.sub.enable_buffer('flexbe/log')
behavior_pub.publish(request)
self.assertStatus(BEStatus.STARTED, 1)
self.assertStatus(BEStatus.FINISHED, 3)
behavior_logs = []
while self.sub.has_buffered('flexbe/log'):
behavior_logs.append(self.sub.get_from_buffer('flexbe/log').text)
self.assertIn('Test data', behavior_logs)
# send valid complex behavior request
be_id, _ = self.lib.find_behavior("Test Behavior Complex")
request = BehaviorSelection()
request.behavior_id = be_id
request.autonomy_level = 255
request.arg_keys = ['param']
request.arg_values = ['value_2']
request.input_keys = ['data']
request.input_values = ['2']
with open(self.lib.get_sourcecode_filepath(be_id)) as f:
content = f.read()
modifications = [('flexbe_INVALID', 'flexbe_core'), ('raise ValueError("TODO: Remove!")', '')]
for replace, by in modifications:
index = content.index(replace)
request.modifications.append(BehaviorModification(index, index + len(replace), by))
for replace, by in modifications:
content = content.replace(replace, by)
request.behavior_checksum = zlib.adler32(content.encode()) & 0x7fffffff
behavior_pub.publish(request)
self.assertStatus(BEStatus.STARTED, 1)
result = self.assertStatus(BEStatus.FINISHED, 3)
self.assertEqual(result.args[0], 'finished')
behavior_logs = []
while self.sub.has_buffered('flexbe/log'):
behavior_logs.append(self.sub.get_from_buffer('flexbe/log').text)
self.assertIn('value_2', behavior_logs)
# send the same behavior with different parameters
request.arg_keys = ['param', 'invalid']
request.arg_values = ['value_1', 'should be ignored']
request.input_keys = []
request.input_values = []
behavior_pub.publish(request)
self.assertStatus(BEStatus.STARTED, 1)
result = self.assertStatus(BEStatus.FINISHED, 3)
self.assertEqual(result.args[0], 'failed')
behavior_logs = []
while self.sub.has_buffered('flexbe/log'):
behavior_logs.append(self.sub.get_from_buffer('flexbe/log').text)
self.assertIn('value_1', behavior_logs)
if __name__ == '__main__':
rospy.init_node('test_flexbe_onboard')
import rostest
rostest.rosrun('flexbe_onboard', 'test_flexbe_onboard', TestOnboard)
| team-vigir/flexbe_behavior_engine | flexbe_onboard/test/test_onboard.py | Python | bsd-3-clause | 4,765 | 0.001259 |
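# Sketch of the checksum convention the test above relies on: the onboard
# appears to accept a BehaviorSelection only when behavior_checksum matches the
# Adler-32 of the source it will execute, masked to 31 bits. behavior_checksum()
# is our own helper name; only the zlib expression is taken from the test.
import zlib

def behavior_checksum(source_code):
    """31-bit Adler-32 checksum of the behavior source text."""
    return zlib.adler32(source_code.encode()) & 0x7fffffff

# After applying BehaviorModification replacements, the checksum must be
# recomputed over the modified text, i.e. over what the onboard actually runs.
original = 'state = "flexbe_INVALID"'
modified = original.replace('flexbe_INVALID', 'flexbe_core')
print(behavior_checksum(modified))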
from django import forms
from django.utils.translation import ugettext_lazy as _
from registration.models import UserModel
from models import User as AccountsUser
class UserRegistrationForm(forms.Form):
"""
Form for registering a new user account.
Validates that the requested username is not already in use, and
requires the password to be entered twice to catch typos.
Subclasses should feel free to add any additional validation they
need, but should avoid defining a ``save()`` method -- the actual
saving of collected user data is delegated to the active
registration backend.
"""
required_css_class = 'required'
username = forms.RegexField(regex=r'^[\w.@+-]+$',
max_length=30,
label=_("Create a username"),
error_messages={'invalid': _(
"Your username may contain only letters, numbers and @/./+/-/_ characters.")})
email = forms.EmailField(label=_("Your email address"))
first_name = forms.CharField(label=_("First Name"))
last_name = forms.CharField(label=_("Last Name"))
password1 = forms.CharField(widget=forms.PasswordInput,
label=_("Create a password"))
password2 = forms.CharField(widget=forms.PasswordInput,
label=_("Your password again"))
def clean_username(self):
"""
Validate that the username is alphanumeric and is not already
in use.
"""
existing = UserModel().objects.filter(username__iexact=self.cleaned_data['username'])
if existing.exists():
raise forms.ValidationError(_("A user with that username already exists."))
else:
return self.cleaned_data['username']
def clean_email(self):
email = self.cleaned_data.get('email')
if email and UserModel().objects.filter(email__iexact=email).exists():
raise forms.ValidationError(_('A user with that email address already exists.'))
else:
return email
def clean(self):
"""
Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field.
"""
if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
if self.cleaned_data['password1'] != self.cleaned_data['password2']:
raise forms.ValidationError(_("The two password fields didn't match."))
return self.cleaned_data
class UserProfileUpdateForm(forms.ModelForm):
class Meta:
model = UserModel()
fields = ['email', 'first_name', 'last_name']
required_css_class = 'required'
email = forms.EmailField(label=_("Your email address"))
first_name = forms.CharField(label=_("First Name"))
last_name = forms.CharField(label=_("Last Name"))
def clean_email(self):
email = self.cleaned_data.get('email')
# You cannot change your email to another user's email
if email and UserModel().objects.filter(email__iexact=email).exclude(pk=self.instance.pk).exists():
raise forms.ValidationError(_('A user with that email address already exists.'))
else:
return email
class ConsentForm(forms.ModelForm):
class Meta:
model = AccountsUser
fields = ['gives_consent', 'over18']
required_css_class = 'required'
gives_consent = forms.BooleanField(label=_("I agree to participate in the research"),
required=False)
over18 = forms.BooleanField(label=_("I am 18 years of age or older"),
required=False)
def clean(self):
over18 = self.cleaned_data.get('over18')
gives_consent = self.cleaned_data.get('gives_consent')
if gives_consent and not over18:
raise forms.ValidationError(_('You must be at least 18 years old to participate in the research.'))
return self.cleaned_data
| hds-lab/dsechatweb | dsechat/apps/accounts/forms.py | Python | mit | 4,148 | 0.002652 |
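# A hedged sketch of how the form-level clean() above surfaces a password
# mismatch as a non-field error. It assumes a configured Django project with a
# database (clean_username()/clean_email() query the user model) and an English
# locale; all data values are invented.
def _demo_password_mismatch():
    form = UserRegistrationForm(data={
        'username': 'jane_doe',
        'email': 'jane@example.com',
        'first_name': 'Jane',
        'last_name': 'Doe',
        'password1': 'first-attempt',
        'password2': 'second-attempt',
    })
    assert not form.is_valid()
    # the mismatch is raised in clean(), so it ends up in non_field_errors()
    assert "The two password fields didn't match." in form.non_field_errors()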
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Computes and caches various information about files.
"""
import itertools
import re
import os
import atexit
import ly.document
import lydocinfo
import ly.lex
import filecache
import util
import variables
_document_cache = filecache.FileCache()
_suffix_chars_re = re.compile(r'[^-\w]', re.UNICODE)
### XXX otherwise I get a segfault on shutdown when very large music trees
### are made (and every node references the document).
### (The segfault is preceded by a "corrupted double-linked list" message.)
atexit.register(_document_cache.clear)
class _CachedDocument(object):
"""Contains a document and related items."""
filename = None
document = None
variables = None
docinfo = None
music = None
def _cached(filename):
"""Return a _CachedDocument instance for the filename, else creates one."""
filename = os.path.realpath(filename)
try:
c = _document_cache[filename]
except KeyError:
with open(filename, 'rb') as f:
text = util.decode(f.read())
c = _document_cache[filename] = _CachedDocument()
c.variables = v = variables.variables(text)
c.document = ly.document.Document(text, v.get("mode"))
c.filename = c.document.filename = filename
return c
def document(filename):
"""Return a (cached) ly.document.Document for the filename."""
return _cached(filename).document
def docinfo(filename):
"""Return a (cached) LyDocInfo instance for the specified file."""
c = _cached(filename)
if c.docinfo is None:
c.docinfo = lydocinfo.DocInfo(c.document, c.variables)
return c.docinfo
def music(filename):
"""Return a (cached) music.Document instance for the specified file."""
c = _cached(filename)
if c.music is None:
import music
c.music = music.Document(c.document)
return c.music
def textmode(text, guess=True):
"""Returns the type of the given text ('lilypond, 'html', etc.).
Checks the mode variable and guesses otherwise if guess is True.
"""
mode = variables.variables(text).get("mode")
if mode in ly.lex.modes:
return mode
if guess:
return ly.lex.guessMode(text)
def includefiles(dinfo, include_path=()):
"""Returns a set of filenames that are included by the DocInfo's document.
The specified include path is used to find files. The own filename
is NOT added to the set. Included files are checked recursively,
relative to our file, relative to the including file, and if that
still yields no file, relative to the directories in the include_path.
If the document has no local filename, only the include_path is
searched for files.
"""
filename = dinfo.document.filename
basedir = os.path.dirname(filename) if filename else None
files = set()
def tryarg(directory, arg):
path = os.path.realpath(os.path.join(directory, arg))
if path not in files and os.path.isfile(path):
files.add(path)
args = docinfo(path).include_args()
find(args, os.path.dirname(path))
return True
def find(incl_args, directory):
for arg in incl_args:
# new, recursive, relative include
if not (directory and tryarg(directory, arg)):
# old include (relative to master file)
if not (basedir and tryarg(basedir, arg)):
# if path is given, also search there:
for p in include_path:
if tryarg(p, arg):
break
find(dinfo.include_args(), basedir)
return files
def basenames(dinfo, includefiles=(), filename=None, replace_suffix=True):
"""Returns the list of basenames a document is expected to create.
The list is created from the included files and from the (define output-suffix ...),
\bookOutputName and \bookOutputSuffix commands.
You should add '.ext' and/or '-[0-9]+.ext' to find created files.
If filename is given, it is regarded as the filename LilyPond is run on.
Otherwise, the filename of the info's document is read.
If replace_suffix is True (the default), special characters and spaces
in the suffix are replaced with underscores (in the same way as LilyPond
does it), using the replace_suffix_chars() function.
"""
basenames = []
basepath = os.path.splitext(filename or dinfo.document.filename)[0]
dirname, basename = os.path.split(basepath)
if basepath:
basenames.append(basepath)
def args():
yield dinfo.output_args()
for filename in includefiles:
yield docinfo(filename).output_args()
for type, arg in itertools.chain.from_iterable(args()):
if type == "suffix":
if replace_suffix:
# LilyPond (lily-library.scm:223) does this, too
arg = replace_suffix_chars(arg)
arg = basename + '-' + arg
path = os.path.normpath(os.path.join(dirname, arg))
if path not in basenames:
basenames.append(path)
return basenames
def replace_suffix_chars(s):
"""Replace spaces and most non-alphanumeric characters with underscores.
This mimics the behaviour of LilyPond, which applies the same replacement
to the output-suffix. (See scm/lily-library.scm:223.)
"""
return _suffix_chars_re.sub('_', s)
| anthonyfok/frescobaldi | frescobaldi_app/fileinfo.py | Python | gpl-2.0 | 6,388 | 0.004383 |
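# Two small illustrations of the module above. The first is directly runnable
# inside this module; the second is only a usage sketch, because it needs real
# .ly files on disk (the paths are hypothetical).

# replace_suffix_chars() turns anything outside [-\w] into an underscore:
print(replace_suffix_chars("my suffix (v2)"))   # -> my_suffix__v2_

# Typical flow for locating LilyPond output files, under those assumptions:
#     info = docinfo('/home/user/scores/song.ly')
#     deps = includefiles(info, include_path=['/usr/share/lilypond/ly'])
#     bases = basenames(info, deps)
#     # append '.pdf', '.midi', '-1.pdf' etc. to each base path to find files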