text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34)
---|---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Vertice:
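    """A graph vertex identified by id, carrying geographic coordinates."""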
def __init__(self, id, latitude, longitude):
self.id = id
self.latitude = latitude
self.longitude = longitude
|
Raul3212/TopicosAvancadosBD
|
dev/model/Vertice.py
|
Python
|
gpl-3.0
| 198 | 0.005051 |
import logging
from mobilebdd.drivers.android import AndroidWebDriver, SelendroidWebDriver
from mobilebdd.drivers.desktop import DesktopChromeWebDriver, DesktopInternetExplorerWebDriver, DesktopFirefoxWebDriver
from mobilebdd.drivers.ios import iOSWebDriver
from mobilebdd.drivers.mobileweb import ChromeWebDriver, WebViewAppDriver
from mobilebdd.hacks.webdriver import HackedWebDriver
log = logging.getLogger(u'mobilebdd')
# mapping of device types to custom, specific webdrivers
HackedDrivers = {
u'android': AndroidWebDriver,
u'selendroid': SelendroidWebDriver,
u'ios': iOSWebDriver,
u'ipad': iOSWebDriver,
u'iphone': iOSWebDriver,
# mobile browsers
u'chrome': ChromeWebDriver,
u'webviewapp': WebViewAppDriver,
# desktop browsers
u'desktop-chrome': DesktopChromeWebDriver,
u'desktop-firefox': DesktopFirefoxWebDriver,
u'desktop-ie': DesktopInternetExplorerWebDriver
}
# all the android versions that appium doesn't support natively. these have to
# use selendroid
SelendroidVersions = [
u'2.3',
u'3.0',
u'3.1',
u'3.2',
u'4.0',
u'4.1',
    # at the time of writing, appium 1.x lost support for some things that make
    # test writing easier, like find by link text or partial link text. i like
    # those, so i'm making everything use selendroid. seems to work fine so far.
    # plus appium tries to do fancy chromedriver stuff for native webviews. prob
    # a bug but i don't want to deal with it right now.
u'4.2',
u'4.3',
u'4.4',
]
def webdriver_me(os_ver, os_type, app_path=u'', device_type=u''):
"""
returns a ref to the class that matches for the given os and device type
:param os_ver: version of the os
    :param os_type: the device type, e.g. android/selendroid/ipad/ios
:param app_path: the path to the application to be installed, or a browser name
:param device_type: the type of device ('phone' or 'tablet')
"""
# ensure these aren't none so we can work with them as strings
if not os_ver:
os_ver = u''
if not os_type:
os_type = u''
if not app_path:
app_path = u''
if not device_type:
device_type = u''
# start off vague with the os type, and hone in on a specific driver if one exists
driver_type = os_type.lower()
if os_ver in SelendroidVersions and driver_type == u'android' and not app_path.lower() == u'chrome':
driver_type = u'selendroid'
elif driver_type == u'kindle':
driver_type = u'android'
elif os_type.lower() == u'linux' or os_type.lower() == u'osx' or os_type.lower() == u'windows':
if app_path.lower() == u'chrome':
driver_type = u'desktop-chrome'
elif app_path.lower() == u'firefox':
driver_type = u'desktop-firefox'
elif app_path.lower() == u'ie' or app_path.lower() == u'internet explorer':
driver_type = u'desktop-ie'
elif app_path.lower() == u'chrome':
driver_type = u'chrome'
elif u'webviewapp' in app_path.lower():
driver_type = u'webviewapp'
if driver_type in HackedDrivers:
        log.debug(u'using driver class "{}" for driver_type "{}" with os_type "{}" and app_path "{}"'.format(HackedDrivers[driver_type], driver_type, os_type, app_path))
return HackedDrivers[driver_type]
else:
log.warning(u'could not find a specific webdriver for {}. using default'.format(driver_type))
return HackedWebDriver
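# A minimal usage sketch (hypothetical arguments; webdriver_me only resolves
# the class, it does not instantiate a driver):
#
#   driver_cls = webdriver_me(u'4.4', u'android', app_path=u'chrome')
#   # -> ChromeWebDriver, since a 'chrome' app_path bypasses the selendroid
#   #    fallback even for selendroid-era android versions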
|
PhoenixWright/MobileBDDCore
|
mobilebdd/drivers/drivers.py
|
Python
|
apache-2.0
| 3,463 | 0.002599 |
import functools
from framework.auth import Auth
from website.archiver import (
StatResult, AggregateStatResult,
ARCHIVER_NETWORK_ERROR,
ARCHIVER_SIZE_EXCEEDED,
ARCHIVER_FILE_NOT_FOUND,
ARCHIVER_FORCED_FAILURE,
)
from website import (
mails,
settings
)
from osf.utils.sanitize import unescape_entities
def send_archiver_size_exceeded_mails(src, user, stat_result, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_SIZE_EXCEEDED_DESK,
user=user,
src=src,
stat_result=stat_result,
can_change_preferences=False,
url=url,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_SIZE_EXCEEDED_USER,
user=user,
src=src,
can_change_preferences=False,
mimetype='html',
)
def send_archiver_copy_error_mails(src, user, results, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_COPY_ERROR_DESK,
user=user,
src=src,
results=results,
url=url,
can_change_preferences=False,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_COPY_ERROR_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
mimetype='html',
)
def send_archiver_file_not_found_mails(src, user, results, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_FILE_NOT_FOUND_DESK,
can_change_preferences=False,
user=user,
src=src,
results=results,
url=url,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_FILE_NOT_FOUND_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
mimetype='html',
)
def send_archiver_uncaught_error_mails(src, user, results, url):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.ARCHIVE_UNCAUGHT_ERROR_DESK,
user=user,
src=src,
results=results,
can_change_preferences=False,
url=url,
)
mails.send_mail(
to_addr=user.username,
mail=mails.ARCHIVE_UNCAUGHT_ERROR_USER,
user=user,
src=src,
results=results,
can_change_preferences=False,
mimetype='html',
)
def handle_archive_fail(reason, src, dst, user, result):
url = settings.INTERNAL_DOMAIN + src._id
if reason == ARCHIVER_NETWORK_ERROR:
send_archiver_copy_error_mails(src, user, result, url)
elif reason == ARCHIVER_SIZE_EXCEEDED:
send_archiver_size_exceeded_mails(src, user, result, url)
elif reason == ARCHIVER_FILE_NOT_FOUND:
send_archiver_file_not_found_mails(src, user, result, url)
elif reason == ARCHIVER_FORCED_FAILURE: # Forced failure using scripts.force_fail_registration
pass
else: # reason == ARCHIVER_UNCAUGHT_ERROR
send_archiver_uncaught_error_mails(src, user, result, url)
dst.root.sanction.forcibly_reject()
dst.root.sanction.save()
dst.root.delete_registration_tree(save=True)
def archive_provider_for(node, user):
"""A generic function to get the archive provider for some node, user pair.
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
return node.get_addon(settings.ARCHIVE_PROVIDER)
def has_archive_provider(node, user):
"""A generic function for checking whether or not some node, user pair has
an attached provider for archiving
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
return node.has_addon(settings.ARCHIVE_PROVIDER)
def link_archive_provider(node, user):
"""A generic function for linking some node, user pair with the configured
archive provider
:param node: target node
:param user: target user (currently unused, but left in for future-proofing
the code for use with archive providers other than OSF Storage)
"""
addon = node.get_or_add_addon(settings.ARCHIVE_PROVIDER, auth=Auth(user), log=False)
if hasattr(addon, 'on_add'):
addon.on_add()
node.save()
def aggregate_file_tree_metadata(addon_short_name, fileobj_metadata, user):
"""Recursively traverse the addon's file tree and collect metadata in AggregateStatResult
    :param addon_short_name: short name of the addon being examined
:param fileobj_metadata: file or folder metadata of current point of reference
in file tree
    :param user: archive initiator
:return: top-most recursive call returns AggregateStatResult containing addon file tree metadata
"""
disk_usage = fileobj_metadata.get('size')
if fileobj_metadata['kind'] == 'file':
result = StatResult(
target_name=fileobj_metadata['name'],
target_id=fileobj_metadata['path'].lstrip('/'),
disk_usage=disk_usage or 0,
)
return result
else:
return AggregateStatResult(
target_id=fileobj_metadata['path'].lstrip('/'),
target_name=fileobj_metadata['name'],
targets=[aggregate_file_tree_metadata(addon_short_name, child, user) for child in fileobj_metadata.get('children', [])],
)
def before_archive(node, user):
from osf.models import ArchiveJob
link_archive_provider(node, user)
job = ArchiveJob.objects.create(
src_node=node.registered_from,
dst_node=node,
initiator=user
)
job.set_targets()
def _do_get_file_map(file_tree):
"""Reduces a tree of folders and files into a list of (<sha256>, <file_metadata>) pairs
"""
file_map = []
stack = [file_tree]
while len(stack):
tree_node = stack.pop(0)
if tree_node['kind'] == 'file':
file_map.append((tree_node['extra']['hashes']['sha256'], tree_node))
else:
stack = stack + tree_node['children']
return file_map
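# Memoize per node id: get_file_map recurses into child nodes, and without the
# cache each call would rebuild the osfstorage file tree from scratch.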
def _memoize_get_file_map(func):
cache = {}
@functools.wraps(func)
def wrapper(node):
if node._id not in cache:
osf_storage = node.get_addon('osfstorage')
file_tree = osf_storage._get_file_tree(user=node.creator)
cache[node._id] = _do_get_file_map(file_tree)
return func(node, cache[node._id])
return wrapper
@_memoize_get_file_map
def get_file_map(node, file_map):
"""
    note:: file_map is injected implicitly by the decorator; this method is called like:
get_file_map(node)
"""
for (key, value) in file_map:
yield (key, value, node._id)
for child in node.nodes_primary:
for key, value, node_id in get_file_map(child):
yield (key, value, node_id)
def find_registration_file(value, node):
from osf.models import AbstractNode
orig_sha256 = value['sha256']
orig_name = unescape_entities(
value['selectedFileName'],
safe={
            '<': '&lt;',
            '>': '&gt;'
}
)
orig_node = value['nodeId']
file_map = get_file_map(node)
for sha256, value, node_id in file_map:
registered_from_id = AbstractNode.load(node_id).registered_from._id
if sha256 == orig_sha256 and registered_from_id == orig_node and orig_name == value['name']:
return value, node_id
return None, None
def find_registration_files(values, node):
ret = []
for i in range(len(values.get('extra', []))):
ret.append(find_registration_file(values['extra'][i], node) + (i,))
return ret
def get_title_for_question(schema, path):
path = path.split('.')
root = path.pop(0)
item = None
for page in schema['pages']:
questions = {
q['qid']: q
for q in page['questions']
}
if root in questions:
item = questions[root]
title = item.get('title')
while len(path):
item = item.get(path.pop(0), {})
title = item.get('title', title)
return title
def find_selected_files(schema, metadata):
targets = []
paths = [('', p) for p in schema.schema['pages']]
while len(paths):
prefix, path = paths.pop(0)
if path.get('questions'):
paths = paths + [('', q) for q in path['questions']]
elif path.get('type'):
qid = path.get('qid', path.get('id'))
if path['type'] == 'object':
paths = paths + [('{}.{}.value'.format(prefix, qid), p) for p in path['properties']]
elif path['type'] == 'osf-upload':
targets.append('{}.{}'.format(prefix, qid).lstrip('.'))
selected = {}
for t in targets:
parts = t.split('.')
value = metadata.get(parts.pop(0))
while value and len(parts):
value = value.get(parts.pop(0))
if value:
selected[t] = value
return selected
VIEW_FILE_URL_TEMPLATE = '/project/{node_id}/files/osfstorage/{path}/'
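# deep_get walks a dotted path (e.g. 'q1.value.uploader' - hypothetical) through
# nested dicts, creating empty dicts along the way so callers can assign into
# the returned item.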
def deep_get(obj, path):
parts = path.split('.')
item = obj
key = None
while len(parts):
key = parts.pop(0)
item[key] = item.get(key, {})
item = item[key]
return item
def migrate_file_metadata(dst, schema):
metadata = dst.registered_meta[schema._id]
missing_files = []
selected_files = find_selected_files(schema, metadata)
for path, selected in selected_files.items():
for registration_file, node_id, index in find_registration_files(selected, dst):
if not registration_file:
missing_files.append({
'file_name': selected['extra'][index]['selectedFileName'],
'question_title': get_title_for_question(schema.schema, path)
})
continue
target = deep_get(metadata, path)
target['extra'][index]['viewUrl'] = VIEW_FILE_URL_TEMPLATE.format(node_id=node_id, path=registration_file['path'].lstrip('/'))
if missing_files:
from website.archiver.tasks import ArchivedFileNotFound
raise ArchivedFileNotFound(
registration=dst,
missing_files=missing_files
)
dst.registered_meta[schema._id] = metadata
dst.save()
|
erinspace/osf.io
|
website/archiver/utils.py
|
Python
|
apache-2.0
| 10,456 | 0.002965 |
from django.conf.urls import *
from django.contrib.auth import views as auth_views
from django.views.generic.simple import redirect_to  # needed by the catch-all pattern below
from businesstest.views import Messages
urlpatterns = patterns("businesstest.views",
# (r"messages/$", "message_list"),
(r"messages/$", Messages.as_view(), {}, "messages"),
(r"(\d+)/$", "test"),
(r"test_done/$", "test_done"),
# (r"^melting-temp-rc/$", "melting_temp_rc"),
# (r"^search/(oligo|half|construct|parse|project)/(toggledir|up|down)/(.+)/$", "search", {}, "parse9_search"),
(r"^$", redirect_to, {"url": "/bt/1/"}),
)
urlpatterns += patterns('',
(r"^account/", include("registration.urls")),
)
|
pythonbyexample/PBE
|
dbe/businesstest/urls.py
|
Python
|
bsd-3-clause
| 619 | 0.006462 |
"""
Tests for install.py for SUSE-based Linux distributions
"""
import os
import shutil
from unittest import mock
import pytest
from install import Cmd, CmdError, RemoteFileNotFoundError
pytestmark = pytest.mark.skipif(
not pytest.helpers.helper_is_suse(),
reason="Tests for openSUSE/SUSE"
)
def test_rpm_download_raise_not_found_error(sys_rpm):
with mock.patch.object(Cmd, 'sh_e') as mock_sh_e:
ce = CmdError('test.')
ce.stderr = 'Package \'dummy\' not found.\n'
mock_sh_e.side_effect = ce
with pytest.raises(RemoteFileNotFoundError) as exc:
sys_rpm.download('dummy')
assert mock_sh_e.called
assert str(exc.value) == 'Package dummy not found on remote'
def test_rpm_extract_is_ok(sys_rpm, rpm_files, monkeypatch):
# mocking arch object for multi arch test cases.
sys_rpm.arch = 'x86_64'
with pytest.helpers.work_dir():
for rpm_file in rpm_files:
shutil.copy(rpm_file, '.')
sys_rpm.extract('rpm-build-libs')
files = os.listdir('./usr/lib64')
files.sort()
assert files == [
'librpmbuild.so.7',
'librpmbuild.so.7.0.1',
'librpmsign.so.7',
'librpmsign.so.7.0.1',
]
@pytest.mark.network
def test_app_verify_system_status_is_ok_on_sys_rpm_and_missing_pkgs(app):
app.linux.rpm.is_system_rpm = mock.MagicMock(return_value=True)
app.linux.verify_system_status()
|
junaruga/rpm-py-installer
|
tests/test_install_suse.py
|
Python
|
mit
| 1,463 | 0 |
"""'d987888a7460' - Tags table."""
import sqlalchemy as sa
from alembic import op
#from lib.util_datetime import tzware_datetime
#from lib.util_sqlalchemy import AwareDateTime
"""
Tags table
Revision ID: d987888a7460
Revises: 216ce379d3f0
Create Date: 2016-11-01 14:13:28.216736
"""
# Revision identifiers, used by Alembic.
revision = 'd987888a7460'
down_revision = '216ce379d3f0'
branch_labels = None
depends_on = None
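# This migration moves tag titles out of the 'tags' table into a new 'tag'
# table and repurposes 'tags' as a reference to it via the tag_id column.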
def upgrade():
"""Command to migrate database forward."""
### commands auto generated by Alembic - please adjust! ###
op.create_table(
'tag',
sa.Column(
'id', sa.Integer(), nullable=False),
sa.Column(
'title', sa.String(length=128), nullable=False),
sa.PrimaryKeyConstraint('id'))
op.add_column('tags', sa.Column('tag_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'tags', 'tag', ['tag_id'], ['id'])
op.drop_column('tags', 'title')
op.drop_column('tags', 'id')
### end Alembic commands ###
def downgrade():
"""Command to migrate database backwards."""
### commands auto generated by Alembic - please adjust! ###
op.add_column('tags', sa.Column('id', sa.INTEGER(), nullable=False))
op.add_column(
'tags',
sa.Column(
'title',
sa.VARCHAR(length=128),
autoincrement=False,
nullable=False))
op.drop_constraint(None, 'tags', type_='foreignkey')
op.drop_column('tags', 'tag_id')
op.drop_table('tag')
### end Alembic commands ###
|
hoytnix/hoyt.io
|
flask-server/hoyt/migrations/versions/d987888a7460_tags_table.py
|
Python
|
gpl-3.0
| 1,553 | 0.003863 |
# -*- coding: utf-8 -*-
# Copyright (c) 2006 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the Editor Calltips configuration page.
"""
from __future__ import unicode_literals
from PyQt5.Qsci import QsciScintilla
from QScintilla.QsciScintillaCompat import QSCINTILLA_VERSION
from .ConfigurationPageBase import ConfigurationPageBase
from .Ui_EditorCalltipsPage import Ui_EditorCalltipsPage
import Preferences
class EditorCalltipsPage(ConfigurationPageBase, Ui_EditorCalltipsPage):
"""
Class implementing the Editor Calltips configuration page.
"""
def __init__(self):
"""
Constructor
"""
super(EditorCalltipsPage, self).__init__()
self.setupUi(self)
self.setObjectName("EditorCalltipsPage")
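        # Calltip positioning is only available from QScintilla 2.7 (0x020700).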
if QSCINTILLA_VERSION() >= 0x020700:
self.positionComboBox.addItem(
self.tr("Below Text"),
QsciScintilla.CallTipsBelowText)
self.positionComboBox.addItem(
self.tr("Above Text"),
QsciScintilla.CallTipsAboveText)
else:
self.calltipsPositionBox.hide()
# set initial values
self.ctEnabledCheckBox.setChecked(
Preferences.getEditor("CallTipsEnabled"))
self.ctVisibleSlider.setValue(
Preferences.getEditor("CallTipsVisible"))
self.initColour("CallTipsBackground", self.calltipsBackgroundButton,
Preferences.getEditorColour)
self.ctScintillaCheckBox.setChecked(
Preferences.getEditor("CallTipsScintillaOnFail"))
if QSCINTILLA_VERSION() >= 0x020700:
self.positionComboBox.setCurrentIndex(
self.positionComboBox.findData(
Preferences.getEditor("CallTipsPosition")))
def save(self):
"""
Public slot to save the EditorCalltips configuration.
"""
Preferences.setEditor(
"CallTipsEnabled",
self.ctEnabledCheckBox.isChecked())
Preferences.setEditor(
"CallTipsVisible",
self.ctVisibleSlider.value())
self.saveColours(Preferences.setEditorColour)
Preferences.setEditor(
"CallTipsScintillaOnFail",
self.ctScintillaCheckBox.isChecked())
if QSCINTILLA_VERSION() >= 0x020700:
Preferences.setEditor(
"CallTipsPosition",
self.positionComboBox.itemData(
self.positionComboBox.currentIndex()))
def create(dlg):
"""
Module function to create the configuration page.
@param dlg reference to the configuration dialog
@return reference to the instantiated page (ConfigurationPageBase)
"""
page = EditorCalltipsPage()
return page
|
davy39/eric
|
Preferences/ConfigurationPages/EditorCalltipsPage.py
|
Python
|
gpl-3.0
| 2,886 | 0.003465 |
import webapp2
import json
from collections import defaultdict
from generic_handler import GenericHandler
from google.appengine.api import users
from google.appengine.ext import ndb
from handler_utils import is_int
from handler_utils import GetPairIdFromRequest
from handler_utils import GetTourneyWithIdAndMaybeReturnStatus
from handler_utils import SetErrorStatus
from models import HandScore
from models import PlayerPair
from models import Tournament
from movements import Movement
class MovementHandler(GenericHandler):
''' Class to handle requests to /api/tournaments/:id/movement/:pair_no '''
def get(self, id, pair_no):
''' Fetch movement for tournament with id and team pair_no.
Args:
id: String. Tournament id.
pair_no: Integer. Pair number for the team whose movement we're getting.
See api for request and response documentation.
'''
tourney = GetTourneyWithIdAndMaybeReturnStatus(self.response, id)
if not tourney:
return
if not self._CheckValidPairMaybeSetStatus(tourney, pair_no):
return
player_pair = PlayerPair.GetByPairNo(tourney, int(pair_no))
if not player_pair:
SetErrorStatus(self.response, 404, "Invalid Request",
"Player pair {} not in tournament".format(pair_no))
return
if not self._CheckUserAllowedToSeeMovementMaybeSetStatus(
tourney, player_pair):
return
no_hands_per_round, no_rounds = Movement.NumBoardsPerRoundFromTotal(
tourney.no_pairs, tourney.no_boards)
try:
movement = Movement.CreateMovement(
tourney.no_pairs, no_hands_per_round, no_rounds,
tourney.legacy_version_id).GetMovement(int(pair_no))
except ValueError:
SetErrorStatus(self.response, 500, "Corrupted Data",
"No valid movement for this tourney's config")
return
movement_list = self._GetMovementHandsAsync(tourney, movement, int(pair_no))
combined_dict = {
'name' : tourney.name,
'players' : player_pair.player_list(),
'allow_score_overwrites' : tourney.IsUnlocked(),
'movement': movement_list
}
self.response.headers['Content-Type'] = 'application/json'
self.response.set_status(200)
self.response.out.write(json.dumps(combined_dict, indent=2))
def _GetMovementHandsAsync(self, tourney, movement, pair_no):
        ''' Converts movement information into a JSON-serializable list, adding
        scored hands if any exist.
Args:
movement: Movement. Movement for this pair.
tourney: Tournament. Tournament in which this is happening.
pair_no: Pair from whose point of view this movement is seen.
Returns:
List as expected by api. Includes any scores that have already been added.
'''
# Dict from round number to list of futures
hand_futures_dict = defaultdict(list)
players_futures_dict = {}
movement_list = []
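    # First pass (below): kick off all datastore reads as async futures; the
    # second loop blocks on the futures while assembling the response.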
for round in movement:
hands = round.hands
if not hands:
continue
opp = round.opponent
players_futures_dict[round.round] = PlayerPair.GetByPairNoAsync(tourney, opp)
for h in hands:
if round.is_north:
hand_futures_dict[round.round].append(
HandScore.GetByHandParamsAsync(tourney, h, pair_no, round.opponent))
else:
hand_futures_dict[round.round].append(
HandScore.GetByHandParamsAsync(tourney, h, round.opponent, pair_no))
for round in movement:
hands = round.hands
round_str = round.to_dict()
opp = round.opponent
if opp:
opp_pp = players_futures_dict[round.round].get_result()
if opp_pp:
round_str["opponent_names"] = [x.get("name") for x in
opp_pp.player_list()]
if hands:
del round_str['hands']
for i in xrange(len(hands)):
hand_score = hand_futures_dict[round.round][i].get_result()
if hand_score and not hand_score.deleted:
round_str.setdefault('hands', []).append({
'hand_no' : hands[i],
'score': {
'calls' : hand_score.calls_dict(),
'ns_score' : hand_score.get_ns_score(),
'ew_score' : hand_score.get_ew_score(),
'notes' : hand_score.notes,
}})
else:
round_str.setdefault('hands', []).append({ 'hand_no' : hands[i] })
movement_list.append(round_str)
return movement_list
def _CheckValidPairMaybeSetStatus(self, tourney, pair_no):
''' Test if the provided pair number is valid for tourney.
Args:
tourney: Tournament. Tournament the pair number is being validated for.
pair_no: Integer. Pair number for the team we are validating.
'''
error = "Invalid Input"
if (not is_int(pair_no)) or int(pair_no) < 1 or int(pair_no) > tourney.no_pairs:
SetErrorStatus(self.response, 404, error,
"Pair number {} is invalid".format(pair_no))
return False
return True
def _CheckUserAllowedToSeeMovementMaybeSetStatus(self, tourney, player_pair):
error = "Forbidden User"
user = users.get_current_user()
if user and tourney.owner_id == user.user_id():
return True
pair_id = GetPairIdFromRequest(self.request)
if not pair_id:
SetErrorStatus(self.response, 403, error,
"User does not own tournament and is not authenticated " +
"with a pair code to see this movement")
return False
if pair_id != player_pair.id:
SetErrorStatus(self.response, 403, error,
"User does not own tournament and is authenticated with " +
"the wrong code for pair {}".format(player_pair.pair_no))
return False
return True
|
aragos/tichu-tournament
|
api/src/movement_handler.py
|
Python
|
mit
| 5,763 | 0.011279 |
# -*- coding: utf-8 -*-
dCellSize = 20
WindowWidth = 400
WindowHeight = 400
class SCell(object):
def __init__(self, xmin, xmax, ymin, ymax):
self._iTicksSpentHere = 0
self._left = xmin
self._right = xmax
self._top = ymin
        self._bottom = ymax
def Update(self):
self._iTicksSpentHere += 1
def Reset(self):
self._iTicksSpentHere = 0
class CMapper(object):
def __init__(self, MaxRangeX, MaxRangeY):
self._dCellSize = dCellSize
self._NumCellsX = (MaxRangeX/self._dCellSize) + 1
self._NumCellsY = (MaxRangeY/self._dCellSize) + 1
self._2DvecCells = []
for x in xrange(self._NumCellsX):
temp = []
for y in xrange(self._NumCellsY):
temp.append(SCell(x*self._dCellSize, (x+1)*self._dCellSize, y*self._dCellSize, (y+1)*self._dCellSize))
self._2DvecCells.append(temp)
self._iTotalCells = self._NumCellsX * self._NumCellsY
def Update(self, xPos, yPos):
if ((xPos < 0) or (xPos > WindowWidth) or (yPos < 0) or (yPos > WindowHeight)):
return
cellX = int(xPos/self._dCellSize)
cellY = int(yPos/self._dCellSize)
self._2DvecCells[cellX][cellY].Update()
def TicksLingered(self, xPos, yPos):
if ((xPos < 0) or (xPos > WindowWidth) or (yPos < 0) or (yPos > WindowHeight)):
return 999
cellX = int(xPos/self._dCellSize)
cellY = int(yPos/self._dCellSize)
return self._2DvecCells[cellX][cellY]._iTicksSpentHere
def BeenVisited(self, xPos, yPos):
print "Not implemented!"
def Render(self):
print "To be implemented"
def Reset(self):
for i in xrange(self._NumCellsX):
for j in xrange(self._NumCellsY):
self._2DvecCells[i][j].Reset()
def NumCellsVisited(self):
total = 0
for i in xrange(self._NumCellsX):
for j in xrange(self._NumCellsY):
if self._2DvecCells[i][j]._iTicksSpentHere > 0:
total += 1
return total
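# Minimal usage sketch (hypothetical coordinates):
#   mapper = CMapper(WindowWidth, WindowHeight)
#   mapper.Update(120, 35)           # record one tick in the cell under (120, 35)
#   mapper.TicksLingered(120, 35)    # -> 1
#   mapper.NumCellsVisited()         # -> 1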
|
crazyskady/ai-game-python
|
Chapter08/CMapper.py
|
Python
|
mit
| 1,821 | 0.031851 |
import locale
import os
import uuid
import pytest
from funcy import cached_property
from dvc.utils import env2bool
from .base import Base
from .path_info import CloudURLInfo
TEST_AWS_REPO_BUCKET = os.environ.get("DVC_TEST_AWS_REPO_BUCKET", "dvc-temp")
TEST_AWS_ENDPOINT_URL = "http://127.0.0.1:{port}/"
class S3(Base, CloudURLInfo):
IS_OBJECT_STORAGE = True
TEST_AWS_ENDPOINT_URL = None
@cached_property
def config(self):
return {"url": self.url, "endpointurl": self.TEST_AWS_ENDPOINT_URL}
@staticmethod
def should_test():
do_test = env2bool("DVC_TEST_AWS", undefined=None)
if do_test is not None:
return do_test
if os.getenv("AWS_ACCESS_KEY_ID") and os.getenv(
"AWS_SECRET_ACCESS_KEY"
):
return True
return False
@staticmethod
def _get_storagepath():
return (
TEST_AWS_REPO_BUCKET
+ "/"
+ "dvc_test_caches"
+ "/"
+ str(uuid.uuid4())
)
@staticmethod
def get_url():
return "s3://" + S3._get_storagepath()
@cached_property
def _s3(self):
import boto3
return boto3.client("s3", endpoint_url=self.config["endpointurl"])
def is_file(self):
from botocore.exceptions import ClientError
if self.path.endswith("/"):
return False
try:
self._s3.head_object(Bucket=self.bucket, Key=self.path)
except ClientError as exc:
if exc.response["Error"]["Code"] != "404":
raise
return False
return True
def is_dir(self):
path = (self / "").path
resp = self._s3.list_objects(Bucket=self.bucket, Prefix=path)
return bool(resp.get("Contents"))
def exists(self):
return self.is_file() or self.is_dir()
def mkdir(self, mode=0o777, parents=False, exist_ok=False):
assert mode == 0o777
assert parents
def write_bytes(self, contents):
self._s3.put_object(Bucket=self.bucket, Key=self.path, Body=contents)
def read_bytes(self):
data = self._s3.get_object(Bucket=self.bucket, Key=self.path)
return data["Body"].read()
def read_text(self, encoding=None, errors=None):
if not encoding:
encoding = locale.getpreferredencoding(False)
assert errors is None
return self.read_bytes().decode(encoding)
@property
def fs_path(self):
return self.bucket + "/" + self.path.lstrip("/")
@pytest.fixture
def s3_fake_creds_file(monkeypatch):
# https://github.com/spulec/moto#other-caveats
import pathlib
aws_dir = pathlib.Path("~").expanduser() / ".aws"
aws_dir.mkdir(exist_ok=True)
aws_creds = aws_dir / "credentials"
initially_exists = aws_creds.exists()
if not initially_exists:
aws_creds.touch()
try:
with monkeypatch.context() as m:
m.setenv("AWS_ACCESS_KEY_ID", "testing")
m.setenv("AWS_SECRET_ACCESS_KEY", "testing")
m.setenv("AWS_SECURITY_TOKEN", "testing")
m.setenv("AWS_SESSION_TOKEN", "testing")
yield
finally:
if aws_creds.exists() and not initially_exists:
aws_creds.unlink()
@pytest.fixture(scope="session")
def s3_server(test_config, docker_compose, docker_services):
import requests
test_config.requires("s3")
port = docker_services.port_for("motoserver", 5000)
endpoint_url = TEST_AWS_ENDPOINT_URL.format(port=port)
def _check():
try:
r = requests.get(endpoint_url)
return r.ok
except requests.RequestException:
return False
docker_services.wait_until_responsive(
timeout=60.0, pause=0.1, check=_check
)
S3.TEST_AWS_ENDPOINT_URL = endpoint_url
return endpoint_url
@pytest.fixture
def s3(test_config, s3_server, s3_fake_creds_file):
test_config.requires("s3")
workspace = S3(S3.get_url())
workspace._s3.create_bucket(Bucket=TEST_AWS_REPO_BUCKET)
yield workspace
@pytest.fixture
def real_s3():
if not S3.should_test():
pytest.skip("no real s3")
yield S3(S3.get_url())
|
dmpetrov/dataversioncontrol
|
tests/remotes/s3.py
|
Python
|
apache-2.0
| 4,225 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author:
# Mateusz Kruszyński <mateusz.kruszynski@gmail.com>
#
import time
from obci.utils import tags_helper
from multiplexer.multiplexer_constants import peers, types
from obci.logic import logic_helper
from obci.logic.logic_decision_peer import LogicDecision
from obci.logic.engines.speller_engine import SpellerEngine
from obci.utils import context as ctx
from obci.configs import settings, variables_pb2
from obci.utils.openbci_logging import log_crash
class LogicSpeller(LogicDecision, SpellerEngine):
"""A class for creating a manifest file with metadata."""
@log_crash
def __init__(self, addresses):
LogicDecision.__init__(self, addresses=addresses)
context = ctx.get_new_context()
context['logger'] = self.logger
SpellerEngine.__init__(self, self.config.param_values(), context)
self.ready()
self._update_letters()
def _run_post_actions(self, p_decision):
self._update_letters()
if __name__ == "__main__":
LogicSpeller(settings.MULTIPLEXER_ADDRESSES).loop()
|
BrainTech/openbci
|
obci/logic/logic_speller_peer.py
|
Python
|
gpl-3.0
| 1,099 | 0.002732 |
"""Functions to make simple plots with M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Cathy Nangini <cnangini@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
import copy
import warnings
from glob import glob
import os.path as op
from itertools import cycle
import numpy as np
from scipy import linalg
from ..surface import read_surface
from ..io.proj import make_projector
from ..utils import logger, verbose, get_subjects_dir
from ..io.pick import pick_types
from .utils import tight_layout, COLORS, _prepare_trellis, plt_show
@verbose
def plot_cov(cov, info, exclude=[], colorbar=True, proj=False, show_svd=True,
show=True, verbose=None):
"""Plot Covariance data
Parameters
----------
cov : instance of Covariance
The covariance matrix.
    info : dict
Measurement info.
exclude : list of string | str
List of channels to exclude. If empty do not exclude any channel.
If 'bads', exclude info['bads'].
colorbar : bool
Show colorbar or not.
proj : bool
Apply projections or not.
show_svd : bool
Plot also singular values of the noise covariance for each sensor
        type. We show square roots, i.e. standard deviations.
show : bool
Show figure if True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig_cov : instance of matplotlib.pyplot.Figure
The covariance plot.
fig_svd : instance of matplotlib.pyplot.Figure | None
The SVD spectra plot of the covariance.
"""
if exclude == 'bads':
exclude = info['bads']
ch_names = [n for n in cov.ch_names if n not in exclude]
ch_idx = [cov.ch_names.index(n) for n in ch_names]
info_ch_names = info['ch_names']
sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=exclude)
sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
exclude=exclude)
sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
exclude=exclude)
idx_eeg = [ch_names.index(info_ch_names[c])
for c in sel_eeg if info_ch_names[c] in ch_names]
idx_mag = [ch_names.index(info_ch_names[c])
for c in sel_mag if info_ch_names[c] in ch_names]
idx_grad = [ch_names.index(info_ch_names[c])
for c in sel_grad if info_ch_names[c] in ch_names]
idx_names = [(idx_eeg, 'EEG covariance', 'uV', 1e6),
(idx_grad, 'Gradiometers', 'fT/cm', 1e13),
(idx_mag, 'Magnetometers', 'fT', 1e15)]
idx_names = [(idx, name, unit, scaling)
for idx, name, unit, scaling in idx_names if len(idx) > 0]
C = cov.data[ch_idx][:, ch_idx]
if proj:
projs = copy.deepcopy(info['projs'])
# Activate the projection items
for p in projs:
p['active'] = True
P, ncomp, _ = make_projector(projs, ch_names)
if ncomp > 0:
logger.info(' Created an SSP operator (subspace dimension'
' = %d)' % ncomp)
C = np.dot(P, np.dot(C, P.T))
else:
logger.info(' The projection vectors do not apply to these '
'channels.')
import matplotlib.pyplot as plt
fig_cov = plt.figure(figsize=(2.5 * len(idx_names), 2.7))
for k, (idx, name, _, _) in enumerate(idx_names):
plt.subplot(1, len(idx_names), k + 1)
plt.imshow(C[idx][:, idx], interpolation="nearest", cmap='RdBu_r')
plt.title(name)
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)
tight_layout(fig=fig_cov)
fig_svd = None
if show_svd:
fig_svd = plt.figure()
for k, (idx, name, unit, scaling) in enumerate(idx_names):
s = linalg.svd(C[idx][:, idx], compute_uv=False)
plt.subplot(1, len(idx_names), k + 1)
plt.ylabel('Noise std (%s)' % unit)
plt.xlabel('Eigenvalue index')
plt.semilogy(np.sqrt(s) * scaling)
plt.title(name)
tight_layout(fig=fig_svd)
plt_show(show)
return fig_cov, fig_svd
def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None,
source_index=None, colorbar=False, show=True):
"""Plot source power in time-freqency grid.
Parameters
----------
stcs : list of SourceEstimate
Source power for consecutive time windows, one SourceEstimate object
should be provided for each frequency bin.
freq_bins : list of tuples of float
Start and end points of frequency bins of interest.
tmin : float
Minimum time instant to show.
tmax : float
Maximum time instant to show.
source_index : int | None
Index of source for which the spectrogram will be plotted. If None,
the source with the largest activation will be selected.
colorbar : bool
If true, a colorbar will be added to the plot.
show : bool
Show figure if True.
"""
import matplotlib.pyplot as plt
# Input checks
if len(stcs) == 0:
raise ValueError('cannot plot spectrogram if len(stcs) == 0')
stc = stcs[0]
if tmin is not None and tmin < stc.times[0]:
raise ValueError('tmin cannot be smaller than the first time point '
'provided in stcs')
if tmax is not None and tmax > stc.times[-1] + stc.tstep:
raise ValueError('tmax cannot be larger than the sum of the last time '
'point and the time step, which are provided in stcs')
# Preparing time-frequency cell boundaries for plotting
if tmin is None:
tmin = stc.times[0]
if tmax is None:
tmax = stc.times[-1] + stc.tstep
time_bounds = np.arange(tmin, tmax + stc.tstep, stc.tstep)
freq_bounds = sorted(set(np.ravel(freq_bins)))
freq_ticks = copy.deepcopy(freq_bounds)
# Reject time points that will not be plotted and gather results
source_power = []
for stc in stcs:
stc = stc.copy() # copy since crop modifies inplace
stc.crop(tmin, tmax - stc.tstep)
source_power.append(stc.data)
source_power = np.array(source_power)
# Finding the source with maximum source power
if source_index is None:
source_index = np.unravel_index(source_power.argmax(),
source_power.shape)[1]
# If there is a gap in the frequency bins record its locations so that it
# can be covered with a gray horizontal bar
gap_bounds = []
for i in range(len(freq_bins) - 1):
lower_bound = freq_bins[i][1]
upper_bound = freq_bins[i + 1][0]
if lower_bound != upper_bound:
freq_bounds.remove(lower_bound)
gap_bounds.append((lower_bound, upper_bound))
# Preparing time-frequency grid for plotting
time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds)
# Plotting the results
fig = plt.figure(figsize=(9, 6))
plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :],
cmap='Reds')
ax = plt.gca()
plt.title('Time-frequency source power')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]
n_skip = 1 + len(time_bounds) // 10
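    # Thin the x tick labels so that roughly ten remain readable.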
for i in range(len(time_bounds)):
if i % n_skip != 0:
time_tick_labels[i] = ''
ax.set_xticks(time_bounds)
ax.set_xticklabels(time_tick_labels)
plt.xlim(time_bounds[0], time_bounds[-1])
plt.yscale('log')
ax.set_yticks(freq_ticks)
ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])
plt.ylim(freq_bounds[0], freq_bounds[-1])
plt.grid(True, ls='-')
if colorbar:
plt.colorbar()
tight_layout(fig=fig)
# Covering frequency gaps with horizontal bars
for lower_bound, upper_bound in gap_bounds:
plt.barh(lower_bound, time_bounds[-1] - time_bounds[0], upper_bound -
lower_bound, time_bounds[0], color='#666666')
plt_show(show)
return fig
def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
slices=None, show=True):
"""Plot BEM contours on anatomical slices.
Parameters
----------
mri_fname : str
The name of the file containing anatomical data.
surf_fnames : list of str
The filenames for the BEM surfaces in the format
['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf'].
orientation : str
'coronal' or 'axial' or 'sagittal'
slices : list of int
Slice indices.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
"""
import matplotlib.pyplot as plt
import nibabel as nib
if orientation not in ['coronal', 'axial', 'sagittal']:
raise ValueError("Orientation must be 'coronal', 'axial' or "
"'sagittal'. Got %s." % orientation)
# Load the T1 data
nim = nib.load(mri_fname)
data = nim.get_data()
affine = nim.get_affine()
n_sag, n_axi, n_cor = data.shape
orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
orientation_axis = orientation_name2axis[orientation]
if slices is None:
n_slices = data.shape[orientation_axis]
slices = np.linspace(0, n_slices, 12, endpoint=False).astype(np.int)
# create of list of surfaces
surfs = list()
trans = linalg.inv(affine)
# XXX : next line is a hack don't ask why
trans[:3, -1] = [n_sag // 2, n_axi // 2, n_cor // 2]
for surf_fname in surf_fnames:
surf = dict()
surf['rr'], surf['tris'] = read_surface(surf_fname)
# move back surface to MRI coordinate system
surf['rr'] = nib.affines.apply_affine(trans, surf['rr'])
surfs.append(surf)
fig, axs = _prepare_trellis(len(slices), 4)
for ax, sl in zip(axs, slices):
# adjust the orientations for good view
if orientation == 'coronal':
dat = data[:, :, sl].transpose()
elif orientation == 'axial':
dat = data[:, sl, :]
elif orientation == 'sagittal':
dat = data[sl, :, :]
# First plot the anatomical data
ax.imshow(dat, cmap=plt.cm.gray)
ax.axis('off')
# and then plot the contours on top
for surf in surfs:
if orientation == 'coronal':
ax.tricontour(surf['rr'][:, 0], surf['rr'][:, 1],
surf['tris'], surf['rr'][:, 2],
levels=[sl], colors='yellow', linewidths=2.0)
elif orientation == 'axial':
ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 0],
surf['tris'], surf['rr'][:, 1],
levels=[sl], colors='yellow', linewidths=2.0)
elif orientation == 'sagittal':
ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 1],
surf['tris'], surf['rr'][:, 0],
levels=[sl], colors='yellow', linewidths=2.0)
plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
hspace=0.)
plt_show(show)
return fig
def plot_bem(subject=None, subjects_dir=None, orientation='coronal',
slices=None, show=True):
"""Plot BEM contours on anatomical slices.
Parameters
----------
subject : str
Subject name.
subjects_dir : str | None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
orientation : str
'coronal' or 'axial' or 'sagittal'.
slices : list of int
Slice indices.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
# Get the MRI filename
mri_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
if not op.isfile(mri_fname):
raise IOError('MRI file "%s" does not exist' % mri_fname)
# Get the BEM surface filenames
bem_path = op.join(subjects_dir, subject, 'bem')
if not op.isdir(bem_path):
raise IOError('Subject bem directory "%s" does not exist' % bem_path)
surf_fnames = []
for surf_name in ['*inner_skull', '*outer_skull', '*outer_skin']:
surf_fname = glob(op.join(bem_path, surf_name + '.surf'))
if len(surf_fname) > 0:
surf_fname = surf_fname[0]
logger.info("Using surface: %s" % surf_fname)
surf_fnames.append(surf_fname)
if len(surf_fnames) == 0:
raise IOError('No surface files found. Surface files must end with '
'inner_skull.surf, outer_skull.surf or outer_skin.surf')
# Plot the contours
return _plot_mri_contours(mri_fname, surf_fnames, orientation=orientation,
slices=slices, show=show)
def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None,
axes=None, equal_spacing=True, show=True):
"""Plot events to get a visual display of the paradigm
Parameters
----------
events : array, shape (n_events, 3)
The events.
sfreq : float | None
The sample frequency. If None, data will be displayed in samples (not
seconds).
first_samp : int
The index of the first sample. Typically the raw.first_samp
attribute. It is needed for recordings on a Neuromag
system as the events are defined relative to the system
start and not to the beginning of the recording.
color : dict | None
Dictionary of event_id value and its associated color. If None,
colors are automatically drawn from a default list (cycled through if
number of events longer than list of default colors).
event_id : dict | None
Dictionary of event label (e.g. 'aud_l') and its associated
event_id value. Label used to plot a legend. If None, no legend is
drawn.
axes : instance of matplotlib.axes.AxesSubplot
The subplot handle.
equal_spacing : bool
Use equal spacing between events in y-axis.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
"""
if sfreq is None:
sfreq = 1.0
xlabel = 'samples'
else:
xlabel = 'Time (s)'
events = np.asarray(events)
unique_events = np.unique(events[:, 2])
if event_id is not None:
# get labels and unique event ids from event_id dict,
# sorted by value
event_id_rev = dict((v, k) for k, v in event_id.items())
conditions, unique_events_id = zip(*sorted(event_id.items(),
key=lambda x: x[1]))
for this_event in unique_events_id:
if this_event not in unique_events:
raise ValueError('%s from event_id is not present in events.'
% this_event)
for this_event in unique_events:
if this_event not in unique_events_id:
warnings.warn('event %s missing from event_id will be ignored.'
% this_event)
else:
unique_events_id = unique_events
if color is None:
if len(unique_events) > len(COLORS):
warnings.warn('More events than colors available. '
'You should pass a list of unique colors.')
colors = cycle(COLORS)
color = dict()
for this_event, this_color in zip(unique_events_id, colors):
color[this_event] = this_color
else:
for this_event in color:
if this_event not in unique_events_id:
raise ValueError('%s from color is not present in events '
'or event_id.' % this_event)
for this_event in unique_events_id:
if this_event not in color:
warnings.warn('Color is not available for event %d. Default '
'colors will be used.' % this_event)
import matplotlib.pyplot as plt
fig = None
if axes is None:
fig = plt.figure()
ax = axes if axes else plt.gca()
unique_events_id = np.array(unique_events_id)
min_event = np.min(unique_events_id)
max_event = np.max(unique_events_id)
for idx, ev in enumerate(unique_events_id):
ev_mask = events[:, 2] == ev
kwargs = {}
if event_id is not None:
event_label = '{0} ({1})'.format(event_id_rev[ev],
np.sum(ev_mask))
kwargs['label'] = event_label
if ev in color:
kwargs['color'] = color[ev]
if equal_spacing:
ax.plot((events[ev_mask, 0] - first_samp) / sfreq,
(idx + 1) * np.ones(ev_mask.sum()), '.', **kwargs)
else:
ax.plot((events[ev_mask, 0] - first_samp) / sfreq,
events[ev_mask, 2], '.', **kwargs)
if equal_spacing:
ax.set_ylim(0, unique_events_id.size + 1)
ax.set_yticks(1 + np.arange(unique_events_id.size))
ax.set_yticklabels(unique_events_id)
else:
ax.set_ylim([min_event - 1, max_event + 1])
ax.set_xlabel(xlabel)
ax.set_ylabel('Events id')
ax.grid('on')
fig = fig if fig is not None else plt.gcf()
if event_id is not None:
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
fig.canvas.draw()
plt_show(show)
return fig
def _get_presser(fig):
"""Helper to get our press callback"""
callbacks = fig.canvas.callbacks.callbacks['button_press_event']
func = None
for key, val in callbacks.items():
if val.func.__class__.__name__ == 'partial':
func = val.func
break
assert func is not None
return func
def plot_dipole_amplitudes(dipoles, colors=None, show=True):
"""Plot the amplitude traces of a set of dipoles
Parameters
----------
dipoles : list of instance of Dipoles
The dipoles whose amplitudes should be shown.
    colors : list of colors | None
Color to plot with each dipole. If None default colors are used.
show : bool
Show figure if True.
Returns
-------
fig : matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
"""
import matplotlib.pyplot as plt
if colors is None:
colors = cycle(COLORS)
fig, ax = plt.subplots(1, 1)
xlim = [np.inf, -np.inf]
for dip, color in zip(dipoles, colors):
ax.plot(dip.times, dip.amplitude, color=color, linewidth=1.5)
xlim[0] = min(xlim[0], dip.times[0])
xlim[1] = max(xlim[1], dip.times[-1])
ax.set_xlim(xlim)
ax.set_xlabel('Time (sec)')
ax.set_ylabel('Amplitude (nAm)')
if show:
fig.show(warn=False)
return fig
|
cmoutard/mne-python
|
mne/viz/misc.py
|
Python
|
bsd-3-clause
| 19,712 | 0 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
class base_bundle(object):
workflow = []
__all__ = ('base_bundle', )
|
dset0x/invenio-checker
|
invenio_checker/workflows/base_bundle.py
|
Python
|
gpl-2.0
| 847 | 0.001181 |
# -*- coding: utf-8 -*-
import numpy as np
from numpy import linalg as la
from math import exp,pi
import time
from optparse import OptionParser
import json
import os
import gnm
'''
Calculating the auto-correlation time, which is an ill-posed problem.
Generating the theoretical (quadrature) curve for the auto-correlation.
'''
print
print 'Auto-correlation time: sampling'
# command line options to set parameters
parser = OptionParser()
# experiment number
parser.add_option('-c', dest='count', type='int',
default=0, help='count of experiment')
# seeding
parser.add_option('-s', dest='seed', type='int',
default=5, help='random number generator seed')
# for the sampler
parser.add_option('-n', dest='num_samples', type='int',
default=10000, help='number of samples')
parser.add_option('-b', dest='num_burn', type='int',
default=1000, help='number of samples burned')
parser.add_option('-m', dest='max_steps', type='int',
default=4, help='max back off steps')
parser.add_option('-z', dest='step_size', type='float',
default=0.1, help='step size of back off')
(opts, arg) = parser.parse_args()
# seed the random number generator
np.random.seed(opts.seed)
# get the data
try:
print 'Importing Data...\n'
folder = 'acor_data_%d/' % opts.count
path = os.path.join(folder, 'data')
data_file = open(path, 'r')
data = json.load(data_file)
data_file.close()
args = data['args']
m = data['m']
H = data['H']
sigma = data['s']
y = data['y']
except:
print "Data could not be imported."
exit(0)
# make function instance
from acor_func import funky
f = gnm.F(funky,args)
# creating sampler object
sampler = gnm.sampler(m,H,y,sigma,f)
# sample the likelihood
print 'Sampling {:.2e} points...'.format(opts.num_samples)
start_time = time.time()
chain,stats = sampler.sample(m,opts.num_samples,
max_steps=opts.max_steps,
step_size=opts.step_size)
chain = chain[opts.num_burn:]
end_time = time.time()
T = end_time-start_time
print 'Acceptance Percentage : {:.3}'.format(stats['accept_rate'])
print 'Elapsed Time : %d h %d m %d s' % (T/3600,T/60%60,T%60)
print
# write data to file
path = os.path.join(folder, 'chain')
file = open(path, 'w')
json.dump(chain.tolist(), file)
file.close()
path = os.path.join(folder, 'stats')
file = open(path, 'w')
json.dump(stats, file)
file.close()
|
mugurbil/gnm
|
examples/exp_time_series/acor_sample.py
|
Python
|
mit
| 2,362 | 0.023709 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Last-Updated : <2013/08/18 22:47:11 by samui>
import sys,os
def mkdir(folder):
if not os.path.isdir(folder):
os.system("mkdir {0}".format(folder))
if __name__ == "__main__":
data_file = os.path.abspath(sys.argv[1])
root,ext = os.path.splitext(os.path.basename(data_file))
data_folder = os.path.join(os.path.dirname(data_file),"split-{0}".format(root))
sdata_folder = os.path.join(os.path.dirname(data_file),"split-{0}".format(root),"data")
png_folder = os.path.join(data_folder,"png")
gnuplot_file = os.path.join(data_folder,"gnuplot.txt")
mkdir(data_folder)
mkdir(png_folder)
mkdir(sdata_folder)
#Split Phase
Nx = 50
Ny = 50
Nz = 50
data = open(data_file,"r")
data_list = []
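    # The input file holds (Nx+1)*(Ny+1) rows per z-slice, with separator lines
    # between scanlines and between slices; copy one slice per output file.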
for k in range(Nz+1):
out_data = os.path.join(sdata_folder,"data{0}.txt".format(k))
data_list.append(out_data)
out_file = open(out_data,"w");
for j in range(0,Ny+1):
for i in range(0,Nx+1):
out_file.write(data.readline())
out_file.write(data.readline())
out_file.close()
data.readline()
data.close()
# Gnuplot File Output
gnup_file = open(gnuplot_file,"w")
gnup_file.write("set pm3d map\n")
gnup_file.write("set cbrange[0:1.0]\n")
gnup_file.write("set term png\n")
for data in (data_list):
root,ext = os.path.splitext(os.path.basename(data))
gnup_file.write("set output \"{0}\"\n".format(os.path.join(png_folder,"{0}.png".format(root))))
gnup_file.write("splot \"{0}\" title \"\"\n".format(data))
gnup_file.close()
# Gnuplot Image file
os.system("gnuplot {0}".format(gnuplot_file))
|
samui13/Gnuplot3D
|
script/split3D.py
|
Python
|
mit
| 1,765 | 0.015864 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Gengeo(AutotoolsPackage):
"""GenGeo is a library of tools for creating complex particle
geometries for use in ESyS-Particle simulations. GenGeo is a standalone
application with a Python API that creates geometry files suitable for
importing into ESyS-Particle simulations. The functionality of GenGeo far
exceeds the in-simulation geometry creation utilities
provided by ESyS-Particle itself."""
homepage = "https://launchpad.net/esys-particle/gengeo"
url = "https://launchpad.net/esys-particle/trunk/3.0-alpha/+download/gengeo-163.tar.gz"
maintainers = ['dorton21']
version('163', sha256='9c896d430d8f315a45379d2b82e7d374f36259af66a745bfdee4c022a080d34d')
extends('python')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('boost+python')
depends_on('openmpi')
def autoreconf(self, spec, prefix):
autogen = Executable('./autogen.sh')
autogen()
def configure_args(self):
args = [
'--verbose',
'--with-boost=' + self.spec['boost'].prefix,
'CCFLAGS=-fpermissive',
'CXXFLAGS=-fpermissive',
]
return args
|
LLNL/spack
|
var/spack/repos/builtin/packages/gengeo/package.py
|
Python
|
lgpl-2.1
| 1,514 | 0.001982 |
#!/usr/bin/env python
# convertAWT.py - Mass Table Conversion Utility
import os
massFile = 'AWTMass-2003.dat'
newFile = os.path.join('..', 'nmrfreq', 'masstable.py')
def main():
with open(massFile, 'r') as file:
massDict = extractMasses(file)
writeToFile(newFile, massDict, massFile)
def extractMasses(file):
massdict = {}
for line in file:
line = adjustLine(line)
if line is not None:
isotope, Z, mass = getValuesFrom(line)
mass = convertMass(mass)
massdict[isotope] = (Z, mass)
return massdict
def adjustLine(line):
line = line.strip()
if line[0] != '#' and line[-1] != '#':
line = line[9:].strip()
line = line.split()
return line
def getValuesFrom(splitline):
isotope = '{0}{1}'.format(splitline[2], splitline[1])
isotope = isotope.upper()
Z = int(splitline[0])
mass = '{0}{1}'.format(splitline[-3], splitline[-2])
return isotope, Z, mass
def convertMass(mass):
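    # AWT/AME tables list masses in micro-u; convert to atomic mass units.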
mass = float(mass) / 1000000.0
return mass
def writeToFile(filename, massdict, massFile):
with open(filename, 'w') as f:
f.write('# Mass table for use in nmrfreq from {0}\n'.format(massFile))
f.write('table = {\n')
f.write(createIsotopesString(massdict))
f.write('}\n')
def createIsotopesString(massdict):
string = ''
for key in sorted(massdict.iterkeys()):
string = '{2} "{0}": {1},\n'.format(key, massdict[key], string)
return string
if __name__ == '__main__':
main()
|
mmoran0032/NMRpy
|
data/AWTconvertTable.py
|
Python
|
mit
| 1,558 | 0 |
import json
import requests
class SlackNotification(object):
icon_url = "https://github-bogdal.s3.amazonaws.com/freepacktbook/icon.png"
def __init__(self, slack_url, channel):
self.slack_url = slack_url
self.channel = channel
if not self.channel.startswith("#"):
self.channel = "#%s" % (self.channel,)
def notify(self, data):
if not all([self.slack_url, self.channel]):
return
payload = {
"channel": self.channel,
"username": "PacktPub Free Learning",
"icon_url": self.icon_url,
"attachments": [
{
"fallback": "Today's Free eBook: %s" % data["title"],
"pretext": "Today's Free eBook:",
"title": data["title"],
"title_link": data["book_url"],
"color": "#ff7f00",
"text": "%s\n%s" % (data["description"], data.get("url", "")),
"thumb_url": data["image_url"].replace(" ", "%20"),
}
],
}
requests.post(self.slack_url, data={"payload": json.dumps(payload)})
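# Minimal usage sketch (hypothetical webhook URL and book data):
#   notifier = SlackNotification(
#       "https://hooks.slack.com/services/T000/B000/XXXX", "books")
#   notifier.notify({"title": "Some Book", "book_url": "https://example.com",
#                    "description": "...", "image_url": "https://example.com/x.png"})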
|
bogdal/freepacktbook
|
freepacktbook/slack.py
|
Python
|
mit
| 1,181 | 0.000847 |
import unittest
import sys
import os
from Infrastructure.FileUtilities import FileUtilities
from Infrastructure.TipsManager import TipsManager
class TestsTipsManage(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.fileUtilities = FileUtilities()
def test_random_tip_from_file(self):
seed = 0
dirname = os.path.dirname(__file__)
path = self.fileUtilities.go_up_dirs(dirname, 2) + "\\Tips"
tips_manager = TipsManager(seed, path)
result = tips_manager.get_random_tip()
self.assertEqual(result, 'TestTips2.txt: Words\n')
def test_random_tip_from_file_second(self):
seed = 1
dirname = os.path.dirname(__file__)
path = self.fileUtilities.go_up_dirs(dirname, 2) + "\\Tips"
tips_manager = TipsManager(seed, path)
result = tips_manager.get_random_tip()
self.assertEqual(result, 'TestTips.txt: Customer collaboration over contract negotiation\n')
def test_random_tip_from_file_second_alternate_slashes(self):
seed = 1
dirname = os.path.dirname(__file__)
path = self.fileUtilities.go_up_dirs(dirname, 2) + "\\Tips"
path = path.replace("\\", "/")
tips_manager = TipsManager(seed, path)
result = tips_manager.get_random_tip()
self.assertEqual(result, 'TestTips.txt: Customer collaboration over contract negotiation\n')
if __name__ == '__main__':
unittest.main()
|
MobProgramming/MobTimer.Python
|
tests/Infrastructure/TipsManager/test_TipsManager.py
|
Python
|
mit
| 1,455 | 0.002062 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import networkx as nx
# REF [site] >> https://networkx.github.io/documentation/latest/tutorial.html
def basic_operation_tutorial():
# Create a graph.
G = nx.Graph()
# Nodes.
G.add_node(1)
G.add_nodes_from([2, 3])
H = nx.path_graph(10) # Creates a graph.
G.add_nodes_from(H)
G.add_node(H)
#print('G.nodes = {}.'.format(G.nodes))
print('G.nodes = {}.'.format(list(G.nodes)))
# Edges.
G.add_edge(1, 2)
e = (2, 3)
G.add_edge(*e) # Unpack edge tuple.
G.add_edges_from([(1, 2), (1, 3)])
G.add_edges_from(H.edges)
#print('G.edges = {}.'.format(G.edges))
print('G.edges = {}.'.format(list(G.edges)))
# Remove all nodes and edges.
G.clear()
#--------------------
G.add_edges_from([(1, 2), (1, 3)])
G.add_node(1)
G.add_edge(1, 2)
G.add_node('spam') # Adds node 'spam'.
G.add_nodes_from('spam') # Adds 4 nodes: 's', 'p', 'a', 'm'.
G.add_edge(3, 'm')
print('G.number_of_nodes() = {}.'.format(G.number_of_nodes()))
print('G.number_of_edges() = {}.'.format(G.number_of_edges()))
# Set-like views of the nodes, edges, neighbors (adjacencies), and degrees of nodes in a graph.
print('G.adj[1] = {}.'.format(list(G.adj[1]))) # or G.neighbors(1).
print('G.degree[1] = {}.'.format(G.degree[1])) # The number of edges incident to 1.
# Report the edges and degree from a subset of all nodes using an nbunch.
# An nbunch is any of: None (meaning all nodes), a node, or an iterable container of nodes that is not itself a node in the graph.
print("G.edges([2, 'm']) = {}.".format(G.edges([2, 'm'])))
print('G.degree([2, 3]) = {}.'.format(G.degree([2, 3])))
# Remove nodes and edges from the graph in a similar fashion to adding.
G.remove_node(2)
G.remove_nodes_from('spam')
print('G.nodes = {}.'.format(list(G.nodes)))
G.remove_edge(1, 3)
# When creating a graph structure by instantiating one of the graph classes you can specify data in several formats.
G.add_edge(1, 2)
H = nx.DiGraph(G) # Creates a DiGraph using the connections from G.
print('H.edges() = {}.'.format(list(H.edges())))
edgelist = [(0, 1), (1, 2), (2, 3)]
H = nx.Graph(edgelist)
#--------------------
# Access edges and neighbors.
print('G[1] = {}.'.format(G[1])) # Same as G.adj[1].
print('G[1][2] = {}.'.format(G[1][2])) # Edge 1-2.
print('G.edges[1, 2] = {}.'.format(G.edges[1, 2]))
# Get/set the attributes of an edge using subscript notation if the edge already exists.
G.add_edge(1, 3)
G[1][3]['color'] = 'blue'
G.edges[1, 2]['color'] = 'red'
# Fast examination of all (node, adjacency) pairs is achieved using G.adjacency(), or G.adj.items().
# Note that for undirected graphs, adjacency iteration sees each edge twice.
FG = nx.Graph()
FG.add_weighted_edges_from([(1, 2, 0.125), (1, 3, 0.75), (2, 4, 1.2), (3, 4, 0.375)])
for n, nbrs in FG.adj.items():
for nbr, eattr in nbrs.items():
wt = eattr['weight']
if wt < 0.5: print(f'({n}, {nbr}, {wt:.3})')
# Convenient access to all edges is achieved with the edges property.
for (u, v, wt) in FG.edges.data('weight'):
if wt < 0.5: print(f'({u}, {v}, {wt:.3})')
#--------------------
# Attributes.
# Graph attributes.
G = nx.Graph(day='Friday')
print('G.graph = {}.'.format(G.graph))
G.graph['day'] = 'Monday'
# Node attributes: add_node(), add_nodes_from(), or G.nodes.
G.add_node(1, time='5pm')
G.add_nodes_from([3], time='2pm')
print('G.nodes[1] = {}.'.format(G.nodes[1]))
G.nodes[1]['room'] = 714
print('G.nodes.data() = {}.'.format(G.nodes.data()))
print('G.nodes[1] = {}.'.format(G.nodes[1])) # List the attributes of a node.
print('G.nodes[1].keys() = {}.'.format(G.nodes[1].keys()))
#print('G[1] = {}.'.format(G[1])) # G[1] = G.adj[1].
# Edge attributes: add_edge(), add_edges_from(), or subscript notation.
G.add_edge(1, 2, weight=4.7)
G.add_edges_from([(3, 4), (4, 5)], color='red')
G.add_edges_from([(1, 2, {'color': 'blue'}), (2, 3, {'weight': 8})])
G[1][2]['weight'] = 4.7
G.edges[3, 4]['weight'] = 4.2
print('G.edges.data() = {}.'.format(G.edges.data()))
print('G.edges[3, 4] = {}.'.format(G.edges[3, 4])) # List the attributes of an edge.
print('G.edges[3, 4].keys() = {}.'.format(G.edges[3, 4].keys()))
#--------------------
# Directed graphs.
DG = nx.DiGraph()
DG.add_weighted_edges_from([(1, 2, 0.5), (3, 1, 0.75)])
print("DG.out_degree(1, weight='weight') = {}.".format(DG.out_degree(1, weight='weight')))
print("DG.degree(1, weight='weight') = {}.".format(DG.degree(1, weight='weight'))) # The sum of in_degree() and out_degree().
print('DG.successors(1) = {}.'.format(list(DG.successors(1))))
print('DG.neighbors(1) = {}.'.format(list(DG.neighbors(1))))
# Convert G to undirected graph.
#H = DG.to_undirected()
H = nx.Graph(DG)
#--------------------
# Multigraphs: Graphs which allow multiple edges between any pair of nodes.
MG = nx.MultiGraph()
#MDG = nx.MultiDiGraph()
MG.add_weighted_edges_from([(1, 2, 0.5), (1, 2, 0.75), (2, 3, 0.5)])
print("MG.degree(weight='weight') = {}.".format(dict(MG.degree(weight='weight'))))
GG = nx.Graph()
for n, nbrs in MG.adjacency():
for nbr, edict in nbrs.items():
minvalue = min([d['weight'] for d in edict.values()])
GG.add_edge(n, nbr, weight = minvalue)
print('nx.shortest_path(GG, 1, 3) = {}.'.format(nx.shortest_path(GG, 1, 3)))
#--------------------
# Classic graph operations:
"""
subgraph(G, nbunch): induced subgraph view of G on nodes in nbunch
union(G1,G2): graph union
disjoint_union(G1,G2): graph union assuming all nodes are different
cartesian_product(G1,G2): return Cartesian product graph
compose(G1,G2): combine graphs identifying nodes common to both
complement(G): graph complement
create_empty_copy(G): return an empty copy of the same graph class
to_undirected(G): return an undirected representation of G
to_directed(G): return a directed representation of G
"""
#--------------------
# Graph generators.
# Use a call to one of the classic small graphs:
petersen = nx.petersen_graph()
tutte = nx.tutte_graph()
maze = nx.sedgewick_maze_graph()
tet = nx.tetrahedral_graph()
# Use a (constructive) generator for a classic graph:
K_5 = nx.complete_graph(5)
K_3_5 = nx.complete_bipartite_graph(3, 5)
barbell = nx.barbell_graph(10, 10)
lollipop = nx.lollipop_graph(10, 20)
# Use a stochastic graph generator:
er = nx.erdos_renyi_graph(100, 0.15)
ws = nx.watts_strogatz_graph(30, 3, 0.1)
ba = nx.barabasi_albert_graph(100, 5)
red = nx.random_lobster(100, 0.9, 0.9)
#--------------------
# Read a graph stored in a file using common graph formats, such as edge lists, adjacency lists, GML, GraphML, pickle, LEDA and others.
nx.write_gml(red, './test.gml')
mygraph = nx.read_gml('./test.gml')
# REF [site] >> https://networkx.github.io/documentation/latest/tutorial.html
def drawing_tutorial():
import matplotlib.pyplot as plt
G = nx.petersen_graph()
plt.subplot(121)
nx.draw(G, with_labels=True, font_weight='bold')
plt.subplot(122)
nx.draw_shell(G, nlist=[range(5, 10), range(5)], with_labels=True, font_weight='bold')
plt.show()
options = {
'node_color': 'black',
'node_size': 100,
'width': 3,
}
plt.subplot(221)
nx.draw_random(G, **options)
plt.subplot(222)
#nx.draw_planar(G, **options)
nx.draw_circular(G, **options)
plt.subplot(223)
nx.draw_spectral(G, **options)
#nx.draw_spring(G, **options)
#nx.draw_kamada_kawai(G, **options)
plt.subplot(224)
nx.draw_shell(G, nlist=[range(5, 10), range(5)], **options)
plt.show()
G = nx.dodecahedral_graph()
shells = [[2, 3, 4, 5, 6], [8, 1, 0, 19, 18, 17, 16, 15, 14, 7], [9, 10, 11, 12, 13]]
nx.draw_shell(G, nlist=shells, **options)
plt.show()
# Save drawings to a file.
nx.draw(G)
plt.savefig('./path.png')
# If Graphviz and PyGraphviz or pydot are available on your system,
# you can also use nx_agraph.graphviz_layout(G) or nx_pydot.graphviz_layout(G) to get the node positions,
# or write the graph in dot format for further processing.
pos = nx.nx_agraph.graphviz_layout(G) # e.g.) pos = {1: (10, 10), 2: (30, 20)}.
nx.draw(G, pos=pos)
nx.drawing.nx_pydot.write_dot(G, './file.dot')
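# The dot file can then be rendered with the Graphviz command-line tools,
# e.g. (assuming Graphviz is installed):
#   dot -Tpng ./file.dot -o ./file.png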
#--------------------
G = nx.complete_graph(15)
#pos = nx.get_node_attributes(G, 'pos')
#pos = nx.nx_agraph.graphviz_layout(G)
#pos = nx.drawing.nx_pydot.graphviz_layout(G)
#pos = nx.drawing.nx_pydot.pydot_layout(G)
#pos = nx.random_layout(G)
#pos = nx.planar_layout(G)
#pos = nx.circular_layout(G)
pos = nx.spectral_layout(G)
#pos = nx.spiral_layout(G)
#pos = nx.spring_layout(G)
#pos = nx.shell_layout(G)
#pos = nx.kamada_kawai_layout(G)
#pos = nx.rescale_layout(G)
#pos = nx.rescale_layout_dict(G)
#pos = nx.bipartite_layout(G)
#pos = nx.multipartite_layout(G)
plt.figure(figsize=(10, 6))
nx.draw_networkx_nodes(G, pos, node_size=400, alpha=1.0, node_shape='o', node_color='red')
nx.draw_networkx_edges(G, pos, width=5, alpha=0.8, edge_color='blue')
#nx.draw_networkx_labels(G, pos, labels=None, font_size=12, font_color='k', font_family='sans-serif', font_weight='normal')
#nx.draw_networkx_edge_labels(G, pos, edge_labels=None, label_pos=0.5, font_size=12, font_color='k', font_family='sans-serif', font_weight='normal')
plt.tight_layout()
plt.axis('off')
#plt.savefig('./graph_drawing_1.svg')
plt.figure(figsize=(10, 6))
#nx.draw(G, pos, ax=None)
#nx.draw(G, pos, labels=node_labels, **options)
#nx.draw(G, pos, labels=nx.get_node_attributes(G, 'node_labels'), **options)
nx.draw_networkx(G, pos, arrows=None, with_labels=True)
plt.tight_layout()
plt.axis('off')
#plt.savefig('./graph_drawing_2.svg')
plt.show()
def main():
#basic_operation_tutorial()
drawing_tutorial()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
|
sangwook236/SWDT
|
sw_dev/python/ext/test/graph/networkx/networkx_basic.py
|
Python
|
gpl-3.0
| 9,760 | 0.029816 |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin, GroupAdmin
from django.contrib.auth.models import Group as AuthGroup
from .models import User
class Group(AuthGroup):
class Meta:
proxy = True
app_label = "accounts"
        verbose_name_plural = "Группы"  # Russian for "Groups".
        verbose_name = "Группа"  # Russian for "Group".
class MyUserAdmin(UserAdmin):
model = User
fieldsets = UserAdmin.fieldsets
list_display = ('username', 'email', 'is_staff', 'is_active')
# fieldsets = UserAdmin.fieldsets + (
# (None, {'fields': ('some_extra_data',)}),
# )
admin.site.unregister(AuthGroup)
admin.site.register(User, MyUserAdmin)
admin.site.register(Group, GroupAdmin)
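# With the proxy registration above, the built-in auth Group model appears under
# the "accounts" app in the admin with the localized names; a quick sanity check
# in a Django shell (a sketch, not part of the original module):
#   >>> from accounts.admin import Group
#   >>> Group._meta.app_label
#   'accounts'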
|
escsun/radio-shop
|
accounts/admin.py
|
Python
|
gpl-3.0
| 722 | 0.001408 |
#encoding=utf-8
# python3
import asyncio
@asyncio.coroutine
def hello():
print("Hello world!")
    # Asynchronously call asyncio.sleep(1):
r = yield from asyncio.sleep(1)
print("Hello again!")
# Get the event loop:
loop = asyncio.get_event_loop()
# Run the coroutine until it completes:
loop.run_until_complete(hello())
loop.close()
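# A modern equivalent of the above: the @asyncio.coroutine/yield-from style is
# deprecated since Python 3.8, so with Python 3.7+ one would write:
async def hello_modern():
    print("Hello world!")
    await asyncio.sleep(1)
    print("Hello again!")

asyncio.run(hello_modern())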
|
solvery/lang-features
|
python/use_lib/asyncio_1.py
|
Python
|
gpl-2.0
| 314 | 0.010067 |
# This file is generated from pydcs_export.lua
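# Each attribute below maps a weapon or pylon name to its DCS clsid, display
# name and weight in kilograms; e.g. (a usage sketch, not part of the generated
# data):
#   Weapons.AGM_114K["name"]   -> "AGM-114K"
#   Weapons.AGM_114K["weight"] -> 65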
class Weapons:
AB_250_2___144_x_SD_2__250kg_CBU_with_HE_submunitions = {"clsid": "{AB_250_2_SD_2}", "name": "AB 250-2 - 144 x SD-2, 250kg CBU with HE submunitions", "weight": 280}
AB_250_2___17_x_SD_10A__250kg_CBU_with_10kg_Frag_HE_submunitions = {"clsid": "{AB_250_2_SD_10A}", "name": "AB 250-2 - 17 x SD-10A, 250kg CBU with 10kg Frag/HE submunitions", "weight": 220}
AB_500_1___34_x_SD_10A__500kg_CBU_with_10kg_Frag_HE_submunitions = {"clsid": "{AB_500_1_SD_10A}", "name": "AB 500-1 - 34 x SD-10A, 500kg CBU with 10kg Frag/HE submunitions", "weight": 470}
ADEN_GUNPOD = {"clsid": "{ADEN_GUNPOD}", "name": "ADEN GUNPOD", "weight": 87}
ADM_141A = {"clsid": "{BRU42_ADM141}", "name": "ADM_141A", "weight": 308}
ADM_141A_ = {"clsid": "{BRU3242_ADM141}", "name": "ADM_141A", "weight": 365.38}
ADM_141A_TALD = {"clsid": "{ADM_141A}", "name": "ADM-141A TALD", "weight": 180}
ADM_141B_TALD = {"clsid": "{ADM_141B}", "name": "ADM-141B TALD", "weight": 180}
AERO_1D_300_Gallons_Fuel_Tank_ = {"clsid": "{AV8BNA_AERO1D}", "name": "AERO 1D 300 Gallons Fuel Tank ", "weight": 1002.439}
AERO_1D_300_Gallons_Fuel_Tank__Empty_ = {"clsid": "{AV8BNA_AERO1D_EMPTY}", "name": "AERO 1D 300 Gallons Fuel Tank (Empty)", "weight": 93.89362}
AGM114x2_OH_58 = {"clsid": "AGM114x2_OH_58", "name": "AGM-114K * 2", "weight": 250}
AGM_114K = {"clsid": "{ee368869-c35a-486a-afe7-284beb7c5d52}", "name": "AGM-114K", "weight": 65}
AGM_114K___4 = {"clsid": "{88D18A5E-99C8-4B04-B40B-1C02F2018B6E}", "name": "AGM-114K * 4", "weight": 250}
AGM_119B_Penguin_ASM = {"clsid": "{7B8DCEB4-820B-4015-9B48-1028A4195692}", "name": "AGM-119B Penguin ASM", "weight": 300}
AGM_122_Sidearm = {"clsid": "{AGM_122_SIDEARM}", "name": "AGM-122 Sidearm", "weight": 92}
AGM_122_Sidearm_ = {"clsid": "{LAU_7_AGM_122_SIDEARM}", "name": "AGM-122 Sidearm", "weight": 107}
AGM_122_Sidearm___light_ARM = {"clsid": "{AGM_122}", "name": "AGM-122 Sidearm - light ARM", "weight": 88}
AGM_154A___JSOW_CEB__CBU_type_ = {"clsid": "{AGM-154A}", "name": "AGM-154A - JSOW CEB (CBU-type)", "weight": 485}
AGM_154B___JSOW_Anti_Armour = {"clsid": "{AGM-154B}", "name": "AGM-154B - JSOW Anti-Armour", "weight": 485}
AGM_154C___JSOW_Unitary_BROACH = {"clsid": "{9BCC2A2B-5708-4860-B1F1-053A18442067}", "name": "AGM-154C - JSOW Unitary BROACH", "weight": 484}
AGM_45A_Shrike_ARM = {"clsid": "{AGM_45A}", "name": "AGM-45A Shrike ARM", "weight": 177}
AGM_45B_Shrike_ARM__Imp_ = {"clsid": "{3E6B632D-65EB-44D2-9501-1C2D04515404}", "name": "AGM-45B Shrike ARM (Imp)", "weight": 177}
AGM_62_Walleye_II___Guided_Weapon_Mk_5__TV_Guided_ = {"clsid": "{C40A1E3A-DD05-40D9-85A4-217729E37FAE}", "name": "AGM-62 Walleye II - Guided Weapon Mk 5 (TV Guided)", "weight": 1061}
AGM_65D___Maverick_D__IIR_ASM_ = {"clsid": "{444BA8AE-82A7-4345-842E-76154EFCCA47}", "name": "AGM-65D - Maverick D (IIR ASM)", "weight": 218}
AGM_65E___Maverick_E__Laser_ASM___Lg_Whd_ = {"clsid": "{F16A4DE0-116C-4A71-97F0-2CF85B0313EF}", "name": "AGM-65E - Maverick E (Laser ASM - Lg Whd)", "weight": 286}
AGM_65K___Maverick_K__CCD_Imp_ASM_ = {"clsid": "{69DC8AE7-8F77-427B-B8AA-B19D3F478B65}", "name": "AGM-65K - Maverick K (CCD Imp ASM)", "weight": 360}
AGM_84 = {"clsid": "AGM_84", "name": "AGM-84 HARPOON", "weight": None}
AGM_84A_Harpoon_ASM = {"clsid": "{8B7CADF9-4954-46B3-8CFB-93F2F5B90B03}", "name": "AGM-84A Harpoon ASM", "weight": 661.5}
AGM_84D_Harpoon_AShM = {"clsid": "{AGM_84D}", "name": "AGM-84D Harpoon AShM", "weight": 540}
AGM_84E_Harpoon_SLAM__Stand_Off_Land_Attack_Missile_ = {"clsid": "{AF42E6DF-9A60-46D8-A9A0-1708B241AADB}", "name": "AGM-84E Harpoon/SLAM (Stand-Off Land-Attack Missile)", "weight": 628}
AGM_84E_Harpoon_SLAM__Stand_Off_Land_Attack_Missile__ = {"clsid": "{AGM_84E}", "name": "AGM-84E Harpoon/SLAM (Stand-Off Land-Attack Missile)", "weight": 628}
AGM_84H_SLAM_ER__Expanded_Response_ = {"clsid": "{AGM_84H}", "name": "AGM-84H SLAM-ER (Expanded Response)", "weight": 675}
AGM_86C_ALCM = {"clsid": "{769A15DF-6AFB-439F-9B24-5B7A45C59D16}", "name": "AGM-86C ALCM", "weight": 1950}
AGM_88C_HARM___High_Speed_Anti_Radiation_Missile = {"clsid": "{B06DD79A-F21E-4EB9-BD9D-AB3844618C9C}", "name": "AGM-88C HARM - High Speed Anti-Radiation Missile", "weight": 361}
AGM_88C_HARM___High_Speed_Anti_Radiation_Missile_ = {"clsid": "{B06DD79A-F21E-4EB9-BD9D-AB3844618C93}", "name": "AGM-88C HARM - High Speed Anti-Radiation Missile", "weight": 406.4}
AIM_120B_AMRAAM___Active_Rdr_AAM = {"clsid": "{C8E06185-7CD6-4C90-959F-044679E90751}", "name": "AIM-120B AMRAAM - Active Rdr AAM", "weight": 156}
AIM_120C_5_AMRAAM___Active_Rdr_AAM = {"clsid": "{40EF17B7-F508-45de-8566-6FFECC0C1AB8}", "name": "AIM-120C-5 AMRAAM - Active Rdr AAM", "weight": 161.5}
AIM_54A_Mk47 = {"clsid": "{AIM_54A_Mk47}", "name": "AIM-54A-Mk47", "weight": 444}
AIM_54A_Mk47_ = {"clsid": "{SHOULDER AIM_54A_Mk47 L}", "name": "AIM-54A-Mk47", "weight": 489.36}
AIM_54A_Mk47__ = {"clsid": "{SHOULDER AIM_54A_Mk47 R}", "name": "AIM-54A-Mk47", "weight": 489.36}
AIM_54A_Mk60 = {"clsid": "{AIM_54A_Mk60}", "name": "AIM-54A-Mk60", "weight": 471.7}
AIM_54A_Mk60_ = {"clsid": "{SHOULDER AIM_54A_Mk60 L}", "name": "AIM-54A-Mk60", "weight": 517.06}
AIM_54A_Mk60__ = {"clsid": "{SHOULDER AIM_54A_Mk60 R}", "name": "AIM-54A-Mk60", "weight": 517.06}
AIM_54C_Mk47 = {"clsid": "{AIM_54C_Mk47}", "name": "AIM-54C-Mk47", "weight": 465.6}
AIM_54C_Mk47_ = {"clsid": "{SHOULDER AIM_54C_Mk47 L}", "name": "AIM-54C-Mk47", "weight": 510.96}
AIM_54C_Mk47_Phoenix_IN__Semi_Active_Radar = {"clsid": "{7575BA0B-7294-4844-857B-031A144B2595}", "name": "AIM-54C-Mk47 Phoenix IN & Semi-Active Radar", "weight": 463}
AIM_54C_Mk47__ = {"clsid": "{SHOULDER AIM_54C_Mk47 R}", "name": "AIM-54C-Mk47", "weight": 510.96}
AIM_7E_Sparrow_Semi_Active_Radar = {"clsid": "{AIM-7E}", "name": "AIM-7E Sparrow Semi-Active Radar", "weight": 230}
AIM_7F = {"clsid": "{SHOULDER AIM-7F}", "name": "AIM-7F", "weight": 284.4}
AIM_7F_ = {"clsid": "{BELLY AIM-7F}", "name": "AIM-7F", "weight": 230}
AIM_7F_Sparrow_Semi_Active_Radar = {"clsid": "{AIM-7F}", "name": "AIM-7F Sparrow Semi-Active Radar", "weight": 231}
AIM_7M = {"clsid": "{SHOULDER AIM-7M}", "name": "AIM-7M", "weight": 284.4}
AIM_7MH = {"clsid": "{SHOULDER AIM-7MH}", "name": "AIM-7MH", "weight": 284.4}
AIM_7MH_ = {"clsid": "{BELLY AIM-7MH}", "name": "AIM-7MH", "weight": 230}
AIM_7MH_Sparrow_Semi_Active_Radar = {"clsid": "{AIM-7H}", "name": "AIM-7MH Sparrow Semi-Active Radar", "weight": 231}
AIM_7M_ = {"clsid": "{BELLY AIM-7M}", "name": "AIM-7M", "weight": 230}
AIM_7M_Sparrow_Semi_Active_Radar = {"clsid": "{8D399DDA-FF81-4F14-904D-099B34FE7918}", "name": "AIM-7M Sparrow Semi-Active Radar", "weight": 231.1}
AIM_9B_Sidewinder_IR_AAM = {"clsid": "{AIM-9B}", "name": "AIM-9B Sidewinder IR AAM", "weight": 74.39}
AIM_9L_Sidewinder_IR_AAM = {"clsid": "{AIM-9L}", "name": "AIM-9L Sidewinder IR AAM", "weight": 85.73}
AIM_9M_Sidewinder_IR_AAM = {"clsid": "{6CEB49FC-DED8-4DED-B053-E1F033FF72D3}", "name": "AIM-9M Sidewinder IR AAM", "weight": 85.73}
AIM_9P5_Sidewinder_IR_AAM = {"clsid": "{AIM-9P5}", "name": "AIM-9P5 Sidewinder IR AAM", "weight": 85.5}
AIM_9P_Sidewinder_IR_AAM = {"clsid": "{9BFD8C90-F7AE-4e90-833B-BFD0CED0E536}", "name": "AIM-9P Sidewinder IR AAM", "weight": 86.18}
AIM_9X_Sidewinder_IR_AAM = {"clsid": "{5CE2FF2A-645A-4197-B48D-8720AC69394F}", "name": "AIM-9X Sidewinder IR AAM", "weight": 84.46}
AJS_External_tank_1013kg_fuel = {"clsid": "{VIGGEN_X-TANK}", "name": "AJS External-tank 1013kg fuel", "weight": 1208}
AKAN_M_55_Gunpod__150_rnds_MINGR55_HE = {"clsid": "{AKAN}", "name": "AKAN M/55 Gunpod, 150 rnds MINGR55-HE", "weight": 276}
ALARM = {"clsid": "{E6747967-B1F0-4C77-977B-AB2E6EB0C102}", "name": "ALARM", "weight": 268}
ALQ_131___ECM_Pod = {"clsid": "{6D21ECEA-F85B-4E8D-9D51-31DC9B8AA4EF}", "name": "ALQ-131 - ECM Pod", "weight": 305}
ALQ_184 = {"clsid": "ALQ_184", "name": "ALQ-184 - ECM Pod", "weight": 215}
ALQ_184_Long = {"clsid": "ALQ_184_Long", "name": "ALQ-184 Long - ECM Pod", "weight": 286}
AN_AAQ_28_LITENING___Targeting_Pod = {"clsid": "{A111396E-D3E8-4b9c-8AC9-2432489304D5}", "name": "AN/AAQ-28 LITENING - Targeting Pod", "weight": 208}
AN_AAQ_28_LITENING___Targeting_Pod_ = {"clsid": "{AAQ-28_LEFT}", "name": "AN/AAQ-28 LITENING - Targeting Pod", "weight": 208}
AN_ALQ_164_DECM_Pod = {"clsid": "{ALQ_164_RF_Jammer}", "name": "AN/ALQ-164 DECM Pod", "weight": 143.789}
AN_ASQ_173_Laser_Spot_Tracker_Strike_CAMera__LST_SCAM_ = {"clsid": "{1C2B16EB-8EB0-43de-8788-8EBB2D70B8BC}", "name": "AN/ASQ-173 Laser Spot Tracker/Strike CAMera (LST/SCAM)", "weight": 250}
AN_ASQ_213_HTS___HARM_Targeting_System = {"clsid": "{AN_ASQ_213}", "name": "AN/ASQ-213 HTS - HARM Targeting System", "weight": 57.2}
AN_ASQ_228_ATFLIR___Targeting_Pod = {"clsid": "{AN_ASQ_228}", "name": "AN/ASQ-228 ATFLIR - Targeting Pod", "weight": 195}
AN_ASQ_T50_TCTS_Pod___ACMI_Pod = {"clsid": "{AIS_ASQ_T50}", "name": "AN/ASQ-T50 TCTS Pod - ACMI Pod", "weight": 62.6}
AN_M30A1___100lb_GP_Bomb_LD = {"clsid": "{AN_M30A1}", "name": "AN-M30A1 - 100lb GP Bomb LD", "weight": 45.8}
AN_M3___2_Browning_Machine_Guns_12_7mm = {"clsid": "{AN-M3}", "name": "AN-M3 - 2*Browning Machine Guns 12.7mm", "weight": 218}
AN_M57___250lb_GP_Bomb_LD = {"clsid": "{AN_M57}", "name": "AN-M57 - 250lb GP Bomb LD", "weight": 113}
AN_M64___500lb_GP_Bomb_LD = {"clsid": "{AN-M64}", "name": "AN-M64 - 500lb GP Bomb LD", "weight": 227}
AN_M64___500lb_GP_Bomb_LD_ = {"clsid": "{F86ANM64}", "name": "AN-M64 - 500lb GP Bomb LD", "weight": 227}
AN_M65___1000lb_GP_Bomb_LD = {"clsid": "{AN_M65}", "name": "AN-M65 - 1000lb GP Bomb LD", "weight": 475}
AN_M66___2000lb_GP_Bomb_LD = {"clsid": "{AN_M66}", "name": "AN-M66 - 2000lb GP Bomb LD", "weight": 977}
APU_60_1M_with_R_60M__AA_8_Aphid____Infra_Red = {"clsid": "{APU-60-1_R_60M}", "name": "APU-60-1M with R-60M (AA-8 Aphid) - Infra Red", "weight": 76}
APU_60_2M_with_2_x_R_60M__AA_8_Aphid____Infra_Red = {"clsid": "{B0DBC591-0F52-4F7D-AD7B-51E67725FB81}", "name": "APU-60-2M with 2 x R-60M (AA-8 Aphid) - Infra Red", "weight": 148}
APU_60_2M_with_2_x_R_60M__AA_8_Aphid____Infra_Red_ = {"clsid": "{275A2855-4A79-4B2D-B082-91EA2ADF4691}", "name": "APU-60-2M with 2 x R-60M (AA-8 Aphid) - Infra Red", "weight": 148}
APU_68___S_24B = {"clsid": "{APU_68_S-24}", "name": "APU-68 - S-24B", "weight": 273.5}
APU_6___6_9A4172_Vikhr = {"clsid": "{A6FD14D3-6D30-4C85-88A7-8D17BEE120E2}", "name": "APU-6 - 6 9A4172 Vikhr", "weight": 330}
APU_8___8_9A4172_Vikhr = {"clsid": "{F789E86A-EE2E-4E6B-B81E-D5E5F903B6ED}", "name": "APU-8 - 8 9A4172 Vikhr", "weight": 404}
ARAK_M_70B_AP_6x_135mm_UnGd_Rkts__Pshu70_HEAT = {"clsid": "{ARAKM70BAP}", "name": "ARAK M/70B AP 6x 135mm UnGd Rkts, Pshu70 HEAT", "weight": 372.2}
ARAK_M_70B_HE_6x_135mm_UnGd_Rkts__Shu70_HE_FRAG = {"clsid": "{ARAKM70BHE}", "name": "ARAK M/70B HE 6x 135mm UnGd Rkts, Shu70 HE/FRAG", "weight": 372.2}
ASO_2___countermeasures_pod = {"clsid": "{ASO-2}", "name": "ASO-2 - countermeasures pod", "weight": 22}
AUF2_BLG_66_AC_x_2 = {"clsid": "{M2KC_RAFAUT_BLG66}", "name": "AUF2 BLG-66-AC x 2", "weight": 685}
AUF2_GBU_12_x_2 = {"clsid": "{M2KC_RAFAUT_GBU12}", "name": "AUF2 GBU-12 x 2", "weight": 621}
AUF2_MK_82_Air_x_2 = {"clsid": "{M2KC_RAFAUT_MK82A}", "name": "AUF2 MK-82 Air x 2", "weight": 525}
AUF2_MK_82_Snakeyes_x_2 = {"clsid": "{M2KC_RAFAUT_MK82S}", "name": "AUF2 MK-82 Snakeyes x 2", "weight": 525}
AUF2_MK_82_x_2 = {"clsid": "{M2KC_RAFAUT_MK82}", "name": "AUF2 MK-82 x 2", "weight": 525}
AUF2_ROCKEYE_x_2 = {"clsid": "{M2KC_RAFAUT_ROCKEYE}", "name": "AUF2 ROCKEYE x 2", "weight": 525}
AWW_13_DATALINK_POD = {"clsid": "{AWW-13}", "name": "AWW-13 DATALINK POD", "weight": 200}
A_A_Training = {"clsid": "{M2KC_AAF}", "name": "A/A Training", "weight": 0}
A_G_Training = {"clsid": "{M2KC_AGF}", "name": "A/G Training", "weight": 0}
BAP_100_Anti_Runway = {"clsid": "{BAP_100}", "name": "BAP-100 Anti-Runway", "weight": None}
BAP_100_x_12 = {"clsid": "{M2KC_BAP100_12_RACK}", "name": "BAP-100 x 12", "weight": 465}
BAP_100_x_18 = {"clsid": "{M2KC_BAP100_18_RACK}", "name": "BAP-100 x 18", "weight": 660}
BAP_100_x_6 = {"clsid": "{M2KC_BAP100_6_RACK}", "name": "BAP-100 x 6", "weight": 270}
BDU_33___25lb_Practice_Bomb_LD = {"clsid": "{BDU-33}", "name": "BDU-33 - 25lb Practice Bomb LD", "weight": 11}
BDU_45 = {"clsid": "{BDU_45}", "name": "BDU-45", "weight": 232}
BDU_45B = {"clsid": "{BDU_45B}", "name": "BDU-45B", "weight": 232}
BDU_45B_ = {"clsid": "{BRU-32 BDU-45B}", "name": "BDU-45B", "weight": 298.38}
BDU_45_ = {"clsid": "{BRU-32 BDU-45}", "name": "BDU-45", "weight": 298.38}
BDU_45_LG = {"clsid": "{BDU_45LG}", "name": "BDU-45 LG", "weight": 277}
BDU_50HD___500lb_Inert_Practice_Bomb_HD = {"clsid": "{BDU-50HD}", "name": "BDU-50HD - 500lb Inert Practice Bomb HD", "weight": 232}
BDU_50LD___500lb_Inert_Practice_Bomb_LD = {"clsid": "{BDU-50LD}", "name": "BDU-50LD - 500lb Inert Practice Bomb LD", "weight": 232}
BDU_50LGB___500lb_Laser_Guided_Inert_Practice_Bomb_LD = {"clsid": "{BDU-50LGB}", "name": "BDU-50LGB - 500lb Laser Guided Inert Practice Bomb LD", "weight": 280}
BETAB_500M___479_kg__bomb__penetrating = {"clsid": "{BETAB-500M}", "name": "BETAB-500M - 479 kg, bomb, penetrating", "weight": 479}
BETAB_500S___425_kg__bomb__penetrating = {"clsid": "{BETAB-500S}", "name": "BETAB-500S - 425 kg, bomb, penetrating", "weight": 425}
Beer_Bomb = {"clsid": "{BEER_BOMB}", "name": "\"Beer Bomb\"", "weight": 100}
Beer_Bomb__D__on_LH_Spitfire_Wing_Carrier = {"clsid": "Beer_Bomb_(D)_on_LH_Spitfire_Wing_Carrier", "name": "\"Beer Bomb\" (Bitter Ale)", "weight": 104}
Beer_Bomb__D__on_RH_Spitfire_Wing_Carrier = {"clsid": "Beer_Bomb_(D)_on_RH_Spitfire_Wing_Carrier", "name": "\"Beer Bomb\" (Bitter Ale)", "weight": 104}
Beer_Bomb__L__on_LH_Spitfire_Wing_Carrier = {"clsid": "Beer_Bomb_(L)_on_LH_Spitfire_Wing_Carrier", "name": "\"Beer Bomb\" (Pale Ale)", "weight": 104}
Beer_Bomb__L__on_RH_Spitfire_Wing_Carrier = {"clsid": "Beer_Bomb_(L)_on_RH_Spitfire_Wing_Carrier", "name": "\"Beer Bomb\" (Pale Ale)", "weight": 104}
Belouga = {"clsid": "{BLG66_BELOUGA}", "name": "Belouga", "weight": 290}
BetAB_500ShP___500kg_Concrete_Piercing_HD_w_booster_Bomb = {"clsid": "{BD289E34-DF84-4C5E-9220-4B14C346E79D}", "name": "BetAB-500ShP - 500kg Concrete Piercing HD w booster Bomb", "weight": 424}
BetAB_500___500kg_Concrete_Piercing_Bomb_LD = {"clsid": "{35B698AC-9FEF-4EC4-AD29-484A0085F62B}", "name": "BetAB-500 - 500kg Concrete Piercing Bomb LD", "weight": 430}
BF109K_4_FUEL_TANK = {"clsid": "BF109K_4_FUEL_TANK", "name": "300 liter Fuel Tank", "weight": 266}
BGM_109 = {"clsid": "BGM_109", "name": "BGM-109B Tomahawk", "weight": None}
BGM_109B = {"clsid": "BGM-109B", "name": "BGM-109B", "weight": None}
BIN_200 = {"clsid": "BIN_200", "name": "BIN-200 - 200kg Napalm Incendiary Bomb", "weight": 200}
BKF___12_x_AO_2_5RT = {"clsid": "{BKF_AO2_5RT}", "name": "BKF - 12 x AO-2.5RT", "weight": 76}
BKF___12_x_PTAB_2_5KO = {"clsid": "{BKF_PTAB2_5KO}", "name": "BKF - 12 x PTAB-2.5KO", "weight": 63.2}
BK_90_MJ12__12x_MJ2_HEAT___36x_MJ1_HE_FRAG_Bomblets_ = {"clsid": "{BK90}", "name": "BK-90 MJ1+2 (12x MJ2 HEAT / 36x MJ1 HE-FRAG Bomblets)", "weight": 605}
BK_90_MJ1__72_x_MJ1_HE_FRAG_Bomblets_ = {"clsid": "{BK90MJ1}", "name": "BK-90 MJ1 (72 x MJ1 HE-FRAG Bomblets)", "weight": 605}
BK_90_MJ2__24_x_MJ2_HEAT_Bomblets_ = {"clsid": "{BK90MJ2}", "name": "BK-90 MJ2 (24 x MJ2 HEAT Bomblets)", "weight": 605}
BLG_66_AC_Belouga = {"clsid": "{BLG66_BELOUGA_AC}", "name": "BLG-66-AC Belouga", "weight": 305}
BLG_66_Belouga___290kg_CBU__151_Frag_Pen_bomblets = {"clsid": "{BLG66_AC}", "name": "BLG-66 Belouga - 290kg CBU, 151 Frag/Pen bomblets", "weight": 305}
BLU_107___440lb_Anti_Runway_Penetrator_Bomb = {"clsid": "{752B9781-F962-11d5-9190-00A0249B6F00}", "name": "BLU-107 - 440lb Anti-Runway Penetrator Bomb", "weight": 185}
BL_755_CBU___450kg__147_Frag_Pen_bomblets = {"clsid": "{08164777-5E9C-4B08-B48E-5AA7AFB246E2}", "name": "BL-755 CBU - 450kg, 147 Frag/Pen bomblets", "weight": 264}
BOZ_107___Countermeasure_Dispenser = {"clsid": "{8C3F26A1-FA0F-11d5-9190-00A0249B6F00}", "name": "BOZ-107 - Countermeasure Dispenser", "weight": 200}
BRU_33_with_1_x_LAU_10_pod___4_x_127mm_ZUNI__UnGd_Rkts_Mk71__HE_FRAG = {"clsid": "{BRU33_LAU10}", "name": "BRU-33 with 1 x LAU-10 pod - 4 x 127mm ZUNI, UnGd Rkts Mk71, HE/FRAG", "weight": 407.6}
BRU_33_with_1_x_LAU_61_pod___19_x_2_75_Hydra__UnGd_Rkts_M151__HE = {"clsid": "{BRU33_LAU61}", "name": "BRU-33 with 1 x LAU-61 pod - 19 x 2.75\" Hydra, UnGd Rkts M151, HE", "weight": 364.4}
BRU_33_with_1_x_LAU_61_pod___19_x_2_75_Hydra__UnGd_Rkts_M282__HEDP = {"clsid": "{BRU33_LAU61_M282}", "name": "BRU-33 with 1 x LAU-61 pod - 19 x 2.75\" Hydra, UnGd Rkts M282, HEDP", "weight": 400.88}
BRU_33_with_1_x_LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M151__HE = {"clsid": "{BRU33_LAU68}", "name": "BRU-33 with 1 x LAU-68 pod - 7 x 2.75\" Hydra, UnGd Rkts M151, HE", "weight": 204.9}
BRU_33_with_1_x_LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M282__HEDP = {"clsid": "{BRU33_LAU68_M282}", "name": "BRU-33 with 1 x LAU-68 pod - 7 x 2.75\" Hydra, UnGd Rkts M282, HEDP", "weight": 218.34}
BRU_33_with_1_x_LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_Mk5__HEAT = {"clsid": "{BRU33_LAU68_MK5}", "name": "BRU-33 with 1 x LAU-68 pod - 7 x 2.75\" Hydra, UnGd Rkts Mk5, HEAT", "weight": 193.1}
BRU_33_with_2_x_BDU_45B___500lb_Practice_Bomb = {"clsid": "{BRU33_2X_BDU-45B}", "name": "BRU-33 with 2 x BDU-45B - 500lb Practice Bomb", "weight": 555}
BRU_33_with_2_x_BDU_45_LG_500lb_Practice_Laser_Guided_Bomb = {"clsid": "{BRU33_2X_BDU_45LG}", "name": "BRU-33 with 2 x BDU-45 LG 500lb Practice Laser Guided Bomb", "weight": 645}
BRU_33_with_2_x_BDU_45___500lb_Practice_Bomb = {"clsid": "{BRU33_2X_BDU-45}", "name": "BRU-33 with 2 x BDU-45 - 500lb Practice Bomb", "weight": 555}
BRU_33_with_2_x_CBU_99___490lbs__247_x_HEAT_Bomblets = {"clsid": "{BRU33_2X_CBU-99}", "name": "BRU-33 with 2 x CBU-99 - 490lbs, 247 x HEAT Bomblets", "weight": 535}
BRU_33_with_2_x_GBU_12___500lb_Laser_Guided_Bomb = {"clsid": "{BRU33_2X_GBU-12}", "name": "BRU-33 with 2 x GBU-12 - 500lb Laser Guided Bomb", "weight": 645}
BRU_33_with_2_x_GBU_16___1000lb_Laser_Guided_Bomb = {"clsid": "{BRU33_2X_GBU-16}", "name": "BRU-33 with 2 x GBU-16 - 1000lb Laser Guided Bomb", "weight": 1117}
BRU_33_with_2_x_LAU_10_pod___4_x_127mm_ZUNI__UnGd_Rkts_Mk71__HE_FRAG = {"clsid": "{BRU33_2*LAU10}", "name": "BRU-33 with 2 x LAU-10 pod - 4 x 127mm ZUNI, UnGd Rkts Mk71, HE/FRAG", "weight": 724.2}
BRU_33_with_2_x_LAU_61_pod___19_x_2_75_Hydra__UnGd_Rkts_M151__HE = {"clsid": "{BRU33_2*LAU61}", "name": "BRU-33 with 2 x LAU-61 pod - 19 x 2.75\" Hydra, UnGd Rkts M151, HE", "weight": 637.8}
BRU_33_with_2_x_LAU_61_pod___19_x_2_75_Hydra__UnGd_Rkts_M282__HEDP = {"clsid": "{BRU33_2*LAU61_M282}", "name": "BRU-33 with 2 x LAU-61 pod - 19 x 2.75\" Hydra, UnGd Rkts M282, HEDP", "weight": 710.76}
BRU_33_with_2_x_LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M151__HE = {"clsid": "{BRU33_2*LAU68}", "name": "BRU-33 with 2 x LAU-68 pod - 7 x 2.75\" Hydra, UnGd Rkts M151, HE", "weight": 318.8}
BRU_33_with_2_x_LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M282__HEDP = {"clsid": "{BRU33_2*LAU68_M282}", "name": "BRU-33 with 2 x LAU-68 pod - 7 x 2.75\" Hydra, UnGd Rkts M282, HEDP", "weight": 345.68}
BRU_33_with_2_x_LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_Mk5__HEAT = {"clsid": "{BRU33_2*LAU68_MK5}", "name": "BRU-33 with 2 x LAU-68 pod - 7 x 2.75\" Hydra, UnGd Rkts Mk5, HEAT", "weight": 295.2}
BRU_33_with_2_x_Mk_20_Rockeye___490lbs_CBU__247_x_HEAT_Bomblets = {"clsid": "{BRU33_2X_ROCKEYE}", "name": "BRU-33 with 2 x Mk-20 Rockeye - 490lbs CBU, 247 x HEAT Bomblets", "weight": 535}
BRU_33_with_2_x_Mk_82Y___500lb_GP_Chute_Retarded_HD = {"clsid": "{BRU33_2X_MK-82Y}", "name": "BRU-33 with 2 x Mk-82Y - 500lb GP Chute Retarded HD", "weight": 555}
BRU_33_with_2_x_Mk_82_Snakeye___500lb_GP_Bomb_HD = {"clsid": "{BRU33_2X_MK-82_Snakeye}", "name": "BRU-33 with 2 x Mk-82 Snakeye - 500lb GP Bomb HD", "weight": 590}
BRU_33_with_2_x_Mk_82___500lb_GP_Bomb_LD = {"clsid": "{BRU33_2X_MK-82}", "name": "BRU-33 with 2 x Mk-82 - 500lb GP Bomb LD", "weight": 547}
BRU_33_with_2_x_Mk_83___1000lb_GP_Bomb_LD = {"clsid": "{BRU33_2X_MK-83}", "name": "BRU-33 with 2 x Mk-83 - 1000lb GP Bomb LD", "weight": 999}
BRU_41A_with_6_x_BDU_33___25lb_Practice_Bomb_LD = {"clsid": "{BRU41_6X_BDU-33}", "name": "BRU-41A with 6 x BDU-33 - 25lb Practice Bomb LD", "weight": 195.713}
BRU_41A_with_6_x_Mk_82___500lb_GP_Bomb_LD = {"clsid": "{BRU41_6X_MK-82}", "name": "BRU-41A with 6 x Mk-82 - 500lb GP Bomb LD", "weight": 1495.913}
BRU_42_3_BDU_33 = {"clsid": "BRU-42_3*BDU-33", "name": "BRU-42 with 3 x BDU-33 - 25lb Practice Bombs LD", "weight": 90.15}
BRU_42_3_GBU_12 = {"clsid": "BRU-42_3*GBU-12", "name": "BRU-42 with 3 x GBU-12 - 500lb Laser Guided Bombs", "weight": 887.25}
BRU_42_LS = {"clsid": "BRU-42_LS", "name": "BRU-42 - Triple Ejector Rack (TER)", "weight": 56.25}
BRU_42_with_2_x_GBU_10___2000lb_Laser_Guided_Bombs = {"clsid": "{62BE78B1-9258-48AE-B882-279534C0D278}", "name": "BRU-42 with 2 x GBU-10 - 2000lb Laser Guided Bombs", "weight": 1974.25}
BRU_42_with_2_x_GBU_27___2000lb_Laser_Guided_Penetrator_Bombs = {"clsid": "{EB969276-1922-4ED1-A5CB-18590F45D7FE}", "name": "BRU-42 with 2 x GBU-27 - 2000lb Laser Guided Penetrator Bombs", "weight": 2038.25}
BRU_42_with_3_x_GBU_16___1000lb_Laser_Guided_Bombs = {"clsid": "{88D49E04-78DF-4F08-B47E-B81247A9E3C5}", "name": "BRU-42 with 3 x GBU-16 - 1000lb Laser Guided Bombs", "weight": 1595.25}
BRU_42_with_3_x_LAU_131_pods___7_x_2_75_Hydra__Laser_Guided_Rkts_M151__HE_APKWS = {"clsid": "{LAU-131x3 - 7 AGR-20A}", "name": "BRU-42 with 3 x LAU-131 pods - 7 x 2.75\" Hydra, Laser Guided Rkts M151, HE APKWS", "weight": 454.3}
BRU_42_with_3_x_LAU_131_pods___7_x_2_75_Hydra__Laser_Guided_Rkts_M282__MPP_APKWS = {"clsid": "{LAU-131x3 - 7 AGR-20 M282}", "name": "BRU-42 with 3 x LAU-131 pods - 7 x 2.75\" Hydra, Laser Guided Rkts M282, MPP APKWS", "weight": 496.3}
BRU_42_with_3_x_LAU_68_pods___21_x_2_75_Hydra__UnGd_Rkts_M151__HE = {"clsid": "{64329ED9-B14C-4c0b-A923-A3C911DA1527}", "name": "BRU-42 with 3 x LAU-68 pods - 21 x 2.75\" Hydra, UnGd Rkts M151, HE", "weight": 397.95}
BRU_42_with_3_x_LAU_68_pods___21_x_2_75_Hydra__UnGd_Rkts_M156__Wht_Phos = {"clsid": "{C2593383-3CA8-4b18-B73D-0E750BCA1C85}", "name": "BRU-42 with 3 x LAU-68 pods - 21 x 2.75\" Hydra, UnGd Rkts M156, Wht Phos", "weight": 399.63}
BRU_42_with_3_x_LAU_68_pods___21_x_2_75_Hydra__UnGd_Rkts_M257__Para_Illum = {"clsid": "{E6966004-A525-4f47-AF94-BCFEDF8FDBDA}", "name": "BRU-42 with 3 x LAU-68 pods - 21 x 2.75\" Hydra, UnGd Rkts M257, Para Illum", "weight": 412.65}
BRU_42_with_3_x_LAU_68_pods___21_x_2_75_Hydra__UnGd_Rkts_M274__Practice_Smk = {"clsid": "{4C044B08-886B-46c8-9B1F-AB05B3ED9C1D}", "name": "BRU-42 with 3 x LAU-68 pods - 21 x 2.75\" Hydra, UnGd Rkts M274, Practice Smk", "weight": 395.85}
BRU_42_with_3_x_LAU_68_pods___21_x_2_75_Hydra__UnGd_Rkts_Mk1__Practice = {"clsid": "{443364AE-D557-488e-9499-45EDB3BA6730}", "name": "BRU-42 with 3 x LAU-68 pods - 21 x 2.75\" Hydra, UnGd Rkts Mk1, Practice", "weight": 368.76}
BRU_42_with_3_x_LAU_68_pods___21_x_2_75_Hydra__UnGd_Rkts_Mk5__HEAT = {"clsid": "{9BC82B3D-FE70-4910-B2B7-3E54EFE73262}", "name": "BRU-42 with 3 x LAU-68 pods - 21 x 2.75\" Hydra, UnGd Rkts Mk5, HEAT", "weight": 362.46}
BRU_42_with_3_x_LAU_68_pods___21_x_2_75_Hydra__UnGd_Rkts_Mk61__Practice = {"clsid": "{C0FA251E-B645-4ce5-926B-F4BC20822F8B}", "name": "BRU-42 with 3 x LAU-68 pods - 21 x 2.75\" Hydra, UnGd Rkts Mk61, Practice", "weight": 368.76}
BRU_42_with_3_x_LAU_68_pods___21_x_2_75_Hydra__UnGd_Rkts_WTU_1_B__Practice = {"clsid": "{A1853B38-2160-4ffe-B7E9-9BF81E6C3D77}", "name": "BRU-42 with 3 x LAU-68 pods - 21 x 2.75\" Hydra, UnGd Rkts WTU-1/B, Practice", "weight": 395.85}
BRU_42_with_3_x_LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M282__HEDP = {"clsid": "{BRU_42_3xLAU68_M282}", "name": "BRU-42 with 3 x LAU-68 pod - 7 x 2.75\" Hydra, UnGd Rkts M282, HEDP", "weight": 438.27}
BRU_42_with_3_x_Mk_20_Rockeye___490lbs_CBUs__247_x_HEAT_Bomblets = {"clsid": "{B83CB620-5BBE-4BEA-910C-EB605A327EF9}", "name": "BRU-42 with 3 x Mk-20 Rockeye - 490lbs CBUs, 247 x HEAT Bomblets", "weight": 722.25}
BRU_42_with_3_x_Mk_81___250lb_GP_Bombs_LD = {"clsid": "{7B34E0BB-E427-4C2A-A61A-8407CE18B54D}", "name": "BRU-42 with 3 x Mk-81 - 250lb GP Bombs LD", "weight": 396.45}
BRU_42_with_3_x_Mk_82_AIR_Ballute___500lb_GP_Bombs_HD = {"clsid": "{BRU-42_3*Mk-82AIR}", "name": "BRU-42 with 3 x Mk-82 AIR Ballute - 500lb GP Bombs HD", "weight": 782.25}
BRU_42_with_3_x_Mk_82___500lb_GP_Bombs_LD = {"clsid": "{60CC734F-0AFA-4E2E-82B8-93B941AB11CF}", "name": "BRU-42 with 3 x Mk-82 - 500lb GP Bombs LD", "weight": 740.25}
BRU_42_with_3_x_SUU_25_x_8_LUU_2___Target_Marker_Flares = {"clsid": "{BRU-42_LS_3*SUU-25_8*LUU-2}", "name": "BRU-42 with 3 x SUU-25 x 8 LUU-2 - Target Marker Flares", "weight": 736.65}
BRU_55_with_2_x_AGM_154A___JSOW_CEB__CBU_type_ = {"clsid": "{BRU55_2*AGM-154A}", "name": "BRU-55 with 2 x AGM-154A - JSOW CEB (CBU-type)", "weight": 1057.5}
BRU_55_with_2_x_AGM_154C___JSOW_Unitary_BROACH = {"clsid": "{BRU55_2*AGM-154C}", "name": "BRU-55 with 2 x AGM-154C - JSOW Unitary BROACH", "weight": 1055.5}
BRU_55_with_2_x_GBU_38___JDAM__500lb_GPS_Guided_Bomb = {"clsid": "{BRU55_2*GBU-38}", "name": "BRU-55 with 2 x GBU-38 - JDAM, 500lb GPS Guided Bomb", "weight": 573}
BRU_57_with_2_x_AGM_154A___JSOW_CEB__CBU_type_ = {"clsid": "{BRU57_2*AGM-154A}", "name": "BRU-57 with 2 x AGM-154A - JSOW CEB (CBU-type)", "weight": 1082}
BRU_57_with_2_x_AGM_154B___JSOW_Anti_Armour = {"clsid": "{BRU57_2*AGM-154B}", "name": "BRU-57 with 2 x AGM-154B - JSOW Anti-Armour", "weight": 1082}
BRU_57_with_2_x_CBU_103___202_x_CEM__CBU_with_WCMD = {"clsid": "{BRU57_2*CBU-103}", "name": "BRU-57 with 2 x CBU-103 - 202 x CEM, CBU with WCMD", "weight": 951}
BRU_57_with_2_x_CBU_105___10_x_SFW__CBU_with_WCMD = {"clsid": "{BRU57_2*CBU-105}", "name": "BRU-57 with 2 x CBU-105 - 10 x SFW, CBU with WCMD", "weight": 925}
BRU_57_with_2_x_GBU_38___JDAM__500lb_GPS_Guided_Bomb = {"clsid": "{BRU57_2*GBU-38}", "name": "BRU-57 with 2 x GBU-38 - JDAM, 500lb GPS Guided Bomb", "weight": 573}
BR_250 = {"clsid": "BR_250", "name": "BR-250 - 250kg GP Bomb LD", "weight": 250}
BR_500 = {"clsid": "BR_500", "name": "BR-500 - 500kg GP Bomb LD", "weight": 500}
British_GP_250LBS_Bomb_MK4_on_LH_Spitfire_Wing_Carrier = {"clsid": "British_GP_250LBS_Bomb_MK4_on_LH_Spitfire_Wing_Carrier", "name": "250 lb GP Mk.I", "weight": 108.326}
British_GP_250LBS_Bomb_MK4_on_RH_Spitfire_Wing_Carrier = {"clsid": "British_GP_250LBS_Bomb_MK4_on_RH_Spitfire_Wing_Carrier", "name": "250 lb GP Mk.I", "weight": 108.326}
British_GP_500LBS_Bomb_MK4_on_British_UniversalBC_MK3 = {"clsid": "British_GP_500LBS_Bomb_MK4_on_British_UniversalBC_MK3", "name": "500 lb GP Mk.I", "weight": 225.188}
B_13L_pod___5_x_S_13_OF__122mm_UnGd_Rkts__Blast_Frag = {"clsid": "{FC56DF80-9B09-44C5-8976-DCFAFF219062}", "name": "B-13L pod - 5 x S-13-OF, 122mm UnGd Rkts, Blast/Frag", "weight": 510}
B_1B_Mk_84_8 = {"clsid": "B-1B_Mk-84*8", "name": "8 x Mk-84 - 2000lb GP Bombs LD", "weight": 7152}
B_8M1_pod___20_x_S_8KOM__80mm_UnGd_Rkts__HEAT_AP = {"clsid": "{F72F47E5-C83A-4B85-96ED-D3E46671EE9A}", "name": "B-8M1 pod - 20 x S-8KOM, 80mm UnGd Rkts, HEAT/AP", "weight": 363.5}
B_8M1_pod___20_x_S_8TsM__80mm_UnGd_Rkts__Smk = {"clsid": "{3DFB7320-AB0E-11d7-9897-000476191836}", "name": "B-8M1 pod - 20 x S-8TsM, 80mm UnGd Rkts, Smk", "weight": 359.5}
B_8M1___20_S_8OFP2 = {"clsid": "B-8M1 - 20 S-8OFP2", "name": "B-8M1 pod - 20 x S-8OFP2, 80mm UnGd Rkts, HE/Frag/AP", "weight": 471.5}
B_8V20A_CM = {"clsid": "B_8V20A_CM", "name": "B-8V20A pod - 20 x S-8TsM, 80mm UnGd Rkts, Smk, OG", "weight": 345}
B_8V20A_CM_BU = {"clsid": "B_8V20A_CM_BU", "name": "B-8V20A pod - 20 x S-8TsM, 80mm UnGd Rkts, Smk, BU", "weight": 345}
B_8V20A_CM_GN = {"clsid": "B_8V20A_CM_GN", "name": "B-8V20A pod - 20 x S-8TsM, 80mm UnGd Rkts, Smk, GN", "weight": 345}
B_8V20A_CM_RD = {"clsid": "B_8V20A_CM_RD", "name": "B-8V20A pod - 20 x S-8TsM, 80mm UnGd Rkts, Smk, RD", "weight": 345}
B_8V20A_CM_VT = {"clsid": "B_8V20A_CM_VT", "name": "B-8V20A pod - 20 x S-8TsM, 80mm UnGd Rkts, Smk, VT", "weight": 345}
B_8V20A_CM_WH = {"clsid": "B_8V20A_CM_WH", "name": "B-8V20A pod - 20 x S-8TsM, 80mm UnGd Rkts, Smk, WH", "weight": 345}
B_8V20A_CM_YE = {"clsid": "B_8V20A_CM_YE", "name": "B-8V20A pod - 20 x S-8TsM, 80mm UnGd Rkts, Smk, YE", "weight": 345}
B_8V20A_OFP2 = {"clsid": "B_8V20A_OFP2", "name": "B-8V20A pod - 20 x S-8OFP2, 80mm UnGd Rkts, HE/Frag/AP", "weight": 457}
B_8V20A_OM = {"clsid": "B_8V20A_OM", "name": "B-8V20A pod - 20 x S-8OM, 80mm UnGd Rkts, Illum", "weight": 365}
B_8V20A_pod___20_x_S_8KOM__80mm_UnGd_Rkts__HEAT_AP = {"clsid": "{6A4B9E69-64FE-439a-9163-3A87FB6A4D81}", "name": "B-8V20A pod - 20 x S-8KOM, 80mm UnGd Rkts, HEAT/AP", "weight": 349}
CATM_9M = {"clsid": "CATM-9M", "name": "Captive AIM-9M for ACM", "weight": 85.73}
CBLS_200 = {"clsid": "CBLS-200", "name": "4*BDU-33 - AF/B37K Rack with 4*25lb Practice Bomb LD", "weight": 84.4}
CBU87_10 = {"clsid": "CBU87*10", "name": "10 x CBU-87 - 202 x CEM Cluster Bombs", "weight": 4300}
CBU97_10 = {"clsid": "CBU97*10", "name": "10 x CBU-97 - 10 x SFW Cluster Bombs", "weight": 4170}
CBU_103___202_x_CEM__CBU_with_WCMD = {"clsid": "{CBU_103}", "name": "CBU-103 - 202 x CEM, CBU with WCMD", "weight": 430}
CBU_105___10_x_SFW__CBU_with_WCMD = {"clsid": "{CBU_105}", "name": "CBU-105 - 10 x SFW, CBU with WCMD", "weight": 417}
CBU_52B___220_x_HE_Frag_bomblets = {"clsid": "{CBU-52B}", "name": "CBU-52B - 220 x HE/Frag bomblets", "weight": 356}
CBU_87___202_x_CEM_Cluster_Bomb = {"clsid": "{CBU-87}", "name": "CBU-87 - 202 x CEM Cluster Bomb", "weight": 430}
CBU_97___10_x_SFW_Cluster_Bomb = {"clsid": "{5335D97A-35A5-4643-9D9B-026C75961E52}", "name": "CBU-97 - 10 x SFW Cluster Bomb", "weight": 417}
CBU_99___490lbs__247_x_HEAT_Bomblets = {"clsid": "{CBU_99}", "name": "CBU-99 - 490lbs, 247 x HEAT Bomblets", "weight": 222}
CM_802AKG = {"clsid": "{CM_802AKG}", "name": "CM-802AKG", "weight": None}
C_802AK = {"clsid": "{C_802AK}", "name": "C-802AK", "weight": 600}
DEFA_553___30mm_Revolver_Cannon = {"clsid": "{C-101-DEFA553}", "name": "DEFA-553 - 30mm Revolver Cannon", "weight": 218}
DIS_AKD_10 = {"clsid": "DIS_AKD-10", "name": "AKD-10", "weight": 58}
DIS_AKG_DLPOD = {"clsid": "DIS_AKG_DLPOD", "name": "DATA-LINK POD", "weight": 295}
DIS_BOMB_250_2 = {"clsid": "DIS_BOMB_250_2", "name": "250-2 - 250kg GP Bombs HD", "weight": 250}
DIS_BOMB_250_3 = {"clsid": "DIS_BOMB_250_3", "name": "250-3 - 250kg GP Bombs LD", "weight": 250}
DIS_BRM1_90 = {"clsid": "DIS_BRM1_90", "name": "BRM-1_90MM", "weight": 462.5}
DIS_CM_802AKG = {"clsid": "DIS_CM-802AKG", "name": "CM-802AKG", "weight": 765}
DIS_C_701IR = {"clsid": "DIS_C-701IR", "name": "C-701IR", "weight": 170}
DIS_C_701T = {"clsid": "DIS_C-701T", "name": "C-701T", "weight": 170}
DIS_C_802AK = {"clsid": "DIS_C-802AK", "name": "C-802AK", "weight": 765}
DIS_DF4A_KD20 = {"clsid": "DIS_DF4A_KD20", "name": "KD-20", "weight": 1750}
DIS_DF4B_YJ12 = {"clsid": "DIS_DF4B_YJ12", "name": "YJ-12", "weight": 2550}
DIS_GB6 = {"clsid": "DIS_GB6", "name": "GB-6", "weight": 672}
DIS_GB6_HE = {"clsid": "DIS_GB6_HE", "name": "GB-6-HE", "weight": 672}
DIS_GB6_TSP = {"clsid": "DIS_GB6_TSP", "name": "GB-6-SFW", "weight": 672}
DIS_GBU_10 = {"clsid": "DIS_GBU_10", "name": "GBU-10", "weight": 1162}
DIS_GBU_12 = {"clsid": "DIS_GBU_12", "name": "GBU-12", "weight": 275}
DIS_GBU_12_DUAL_GDJ_II19_L = {"clsid": "DIS_GBU_12_DUAL_GDJ_II19_L", "name": "GDJ-II19 - 2 x GBU-12", "weight": 629}
DIS_GBU_12_DUAL_GDJ_II19_R = {"clsid": "DIS_GBU_12_DUAL_GDJ_II19_R", "name": "GDJ-II19 - 2 x GBU-12", "weight": 629}
DIS_GBU_16 = {"clsid": "DIS_GBU_16", "name": "GBU-16", "weight": 564}
DIS_GDJ_KD63 = {"clsid": "DIS_GDJ_KD63", "name": "KD-63", "weight": 2050}
DIS_GDJ_KD63B = {"clsid": "DIS_GDJ_KD63B", "name": "KD-63B", "weight": 2050}
DIS_GDJ_YJ83K = {"clsid": "DIS_GDJ_YJ83K", "name": "YJ-83K", "weight": 765}
DIS_H6_250_2_N12 = {"clsid": "DIS_H6_250_2_N12", "name": "12 x 250-2 - 250kg GP Bombs HD", "weight": 3000}
DIS_H6_250_2_N24 = {"clsid": "DIS_H6_250_2_N24", "name": "24 x 250-2 - 250kg GP Bombs HD", "weight": 6000}
DIS_KD20 = {"clsid": "DIS_KD20", "name": "KD-20", "weight": 1700}
DIS_KD63 = {"clsid": "DIS_KD63", "name": "KD-63", "weight": 2000}
DIS_KD63B = {"clsid": "DIS_KD63B", "name": "KD-63B", "weight": 2000}
DIS_LAU68_MK5_DUAL_GDJ_II19_L = {"clsid": "DIS_LAU68_MK5_DUAL_GDJ_II19_L", "name": "GDJ-II19 - 2 x LAU68 MK5", "weight": 261.06}
DIS_LAU68_MK5_DUAL_GDJ_II19_R = {"clsid": "DIS_LAU68_MK5_DUAL_GDJ_II19_R", "name": "GDJ-II19 - 2 x LAU68 MK5", "weight": 261.06}
DIS_LD_10 = {"clsid": "DIS_LD-10", "name": "LD-10", "weight": 289}
DIS_LD_10_DUAL_L = {"clsid": "DIS_LD-10_DUAL_L", "name": "LD-10 x 2", "weight": 558}
DIS_LD_10_DUAL_R = {"clsid": "DIS_LD-10_DUAL_R", "name": "LD-10 x 2", "weight": 558}
DIS_LS_6_500 = {"clsid": "DIS_LS_6_500", "name": "LS-6-500", "weight": 570}
DIS_MER6_250_3_N6 = {"clsid": "DIS_MER6_250_3_N6", "name": "MER6 - 6 x 250-3 - 250kg GP Bombs LD", "weight": 1550}
DIS_MK_20 = {"clsid": "DIS_MK_20", "name": "Mk-20", "weight": 222}
DIS_MK_20_DUAL_GDJ_II19_L = {"clsid": "DIS_MK_20_DUAL_GDJ_II19_L", "name": "GDJ-II19 - 2 x Mk-20", "weight": 523}
DIS_MK_20_DUAL_GDJ_II19_R = {"clsid": "DIS_MK_20_DUAL_GDJ_II19_R", "name": "GDJ-II19 - 2 x Mk-20", "weight": 523}
DIS_MK_82S_DUAL_GDJ_II19_L = {"clsid": "DIS_MK_82S_DUAL_GDJ_II19_L", "name": "GDJ-II19 - 2 x Mk-82 SnakeEye", "weight": 543}
DIS_MK_82S_DUAL_GDJ_II19_R = {"clsid": "DIS_MK_82S_DUAL_GDJ_II19_R", "name": "GDJ-II19 - 2 x Mk-82 SnakeEye", "weight": 543}
DIS_MK_82_DUAL_GDJ_II19_L = {"clsid": "DIS_MK_82_DUAL_GDJ_II19_L", "name": "GDJ-II19 - 2 x Mk-82", "weight": 561}
DIS_MK_82_DUAL_GDJ_II19_R = {"clsid": "DIS_MK_82_DUAL_GDJ_II19_R", "name": "GDJ-II19 - 2 x Mk-82", "weight": 561}
DIS_PL_12 = {"clsid": "DIS_PL-12", "name": "PL-12", "weight": 199}
DIS_PL_5EII = {"clsid": "DIS_PL-5EII", "name": "PL-5EII", "weight": 153}
DIS_PL_8A = {"clsid": "DIS_PL-8A", "name": "PL-8A", "weight": 115}
DIS_PL_8B = {"clsid": "DIS_PL-8B", "name": "PL-8B", "weight": 115}
DIS_RKT_90_UG = {"clsid": "DIS_RKT_90_UG", "name": "UG_90MM", "weight": 382.5}
DIS_SD_10 = {"clsid": "DIS_SD-10", "name": "SD-10", "weight": 289}
DIS_SD_10_DUAL_L = {"clsid": "DIS_SD-10_DUAL_L", "name": "SD-10 x 2", "weight": 558}
DIS_SD_10_DUAL_R = {"clsid": "DIS_SD-10_DUAL_R", "name": "SD-10 x 2", "weight": 558}
DIS_SMOKE_GENERATOR_B = {"clsid": "DIS_SMOKE_GENERATOR_B", "name": "Smoke Generator - blue", "weight": 0}
DIS_SMOKE_GENERATOR_G = {"clsid": "DIS_SMOKE_GENERATOR_G", "name": "Smoke Generator - green", "weight": 0}
DIS_SMOKE_GENERATOR_O = {"clsid": "DIS_SMOKE_GENERATOR_O", "name": "Smoke Generator - orange", "weight": 0}
DIS_SMOKE_GENERATOR_R = {"clsid": "DIS_SMOKE_GENERATOR_R", "name": "Smoke Generator - red", "weight": 0}
DIS_SMOKE_GENERATOR_W = {"clsid": "DIS_SMOKE_GENERATOR_W", "name": "Smoke Generator - white", "weight": 0}
DIS_SMOKE_GENERATOR_Y = {"clsid": "DIS_SMOKE_GENERATOR_Y", "name": "Smoke Generator - yellow", "weight": 0}
DIS_SPJ_POD = {"clsid": "DIS_SPJ_POD", "name": "KG-600", "weight": 270}
DIS_TANK1100 = {"clsid": "DIS_TANK1100", "name": "1100L Tank", "weight": 1064}
DIS_TANK1100_EMPTY = {"clsid": "DIS_TANK1100_EMPTY", "name": "1100L Tank Empty", "weight": 75}
DIS_TANK800 = {"clsid": "DIS_TANK800", "name": "800L Tank", "weight": 730}
DIS_TANK800_EMPTY = {"clsid": "DIS_TANK800_EMPTY", "name": "800L Tank Empty", "weight": 45}
DIS_TYPE200 = {"clsid": "DIS_TYPE200", "name": "TYPE-200A", "weight": 200}
DIS_TYPE200_DUAL_L = {"clsid": "DIS_TYPE200_DUAL_L", "name": "TYPE-200A Dual", "weight": 400}
DIS_TYPE200_DUAL_R = {"clsid": "DIS_TYPE200_DUAL_R", "name": "TYPE-200A Dual", "weight": 400}
DIS_WMD7 = {"clsid": "DIS_WMD7", "name": "WMD7 POD", "weight": 295}
DIS_YJ12 = {"clsid": "DIS_YJ12", "name": "YJ-12", "weight": 2500}
DIS_YJ83K = {"clsid": "DIS_YJ83K", "name": "YJ-83K", "weight": 715}
DWS39_MJ1 = {"clsid": "{DWS39_MJ1}", "name": "DWS39 MJ1", "weight": 605}
DWS39_MJ1_MJ2 = {"clsid": "{DWS39_MJ1_MJ2}", "name": "DWS39 MJ1-MJ2", "weight": 605}
DWS39_MJ2 = {"clsid": "{DWS39_MJ2}", "name": "DWS39 MJ2", "weight": 605}
Eclair = {"clsid": "{Eclair}", "name": "Eclair", "weight": 20}
ER_4_SC50 = {"clsid": "ER_4_SC50", "name": "4 x SC 50 - 50kg GP Bomb LD", "weight": 220}
ETHER = {"clsid": "{0519A261-0AB6-11d6-9193-00A0249B6F00}", "name": "ETHER", "weight": 200}
FAB_100M = {"clsid": "FAB_100M", "name": "FAB-100M - 100kg GP Bomb LD", "weight": 100}
FAB_100M_ = {"clsid": "FAB_100M", "name": "FAB-100M", "weight": 100}
FAB_100_x_4 = {"clsid": "{FAB-100-4}", "name": "FAB-100 x 4", "weight": 465}
FAB_100___100kg_GP_Bomb_LD = {"clsid": "{FB3CE165-BF07-4979-887C-92B87F13276B}", "name": "FAB-100 - 100kg GP Bomb LD", "weight": 100}
FAB_1500_M_54___1500kg_GP_Bomb_LD = {"clsid": "{40AA4ABE-D6EB-4CD6-AEFE-A1A0477B24AB}", "name": "FAB-1500 M-54 - 1500kg GP Bomb LD", "weight": 1392}
FAB_250_M54_TU___235_kg__bomb__parachute = {"clsid": "{FAB-250-M54-TU}", "name": "FAB-250 M54 TU - 235 kg, bomb, parachute", "weight": 235}
FAB_250_M54___235_kg__bomb__parachute = {"clsid": "{FAB-250-M54}", "name": "FAB-250 M54 - 235 kg, bomb, parachute", "weight": 235}
FAB_250_M62___250kg_GP_Bomb_LD = {"clsid": "{FAB_250_M62}", "name": "FAB-250-M62 - 250kg GP Bomb LD", "weight": 227}
FAB_250___250kg_GP_Bomb_LD = {"clsid": "{3C612111-C7AD-476E-8A8E-2485812F4E5C}", "name": "FAB-250 - 250kg GP Bomb LD", "weight": 250}
FAB_50 = {"clsid": "FAB_50", "name": "FAB-50 - 50kg GP Bomb LD", "weight": 50}
FAB_500_M54_TU___480_kg__bomb__parachute = {"clsid": "{FAB-500-M54-TU}", "name": "FAB-500 M54 TU - 480 kg, bomb, parachute", "weight": 480}
FAB_500_M54___474_kg__bomb__free_fall = {"clsid": "{FAB-500-M54}", "name": "FAB-500 M54 - 474 kg, bomb, free-fall", "weight": 474}
FAB_500_M_62___500kg_GP_Bomb_LD = {"clsid": "{37DCC01E-9E02-432F-B61D-10C166CA2798}", "name": "FAB-500 M-62 - 500kg GP Bomb LD", "weight": 506}
FAB_500_SL___515_kg__bomb__parachute = {"clsid": "{FAB-500-SL}", "name": "FAB-500 SL - 515 kg, bomb, parachute", "weight": 515}
FAB_500_TA___477_kg__bomb__free_fall = {"clsid": "{FAB-500-TA}", "name": "FAB-500 TA - 477 kg, bomb, free-fall", "weight": 477}
FAB_50_ = {"clsid": "FAB_50", "name": "FAB-50", "weight": 50}
FIM_92 = {"clsid": "FIM_92", "name": "STINGER", "weight": None}
FPU_8A_Fuel_Tank_330_gallons = {"clsid": "{FPU_8A_FUEL_TANK}", "name": "FPU-8A Fuel Tank 330 gallons", "weight": 1150}
Fuel_Tank_120_gallons = {"clsid": "{PTB_120_F86F35}", "name": "Fuel Tank 120 gallons", "weight": 413.36}
Fuel_Tank_150_liters = {"clsid": "{PTB_150L_L39}", "name": "Fuel Tank 150 liters", "weight": 124.25}
Fuel_Tank_200_gallons = {"clsid": "{PTB_200_F86F35}", "name": "Fuel Tank 200 gallons", "weight": 675.6}
Fuel_Tank_350_liters = {"clsid": "{PTB_350L_L39}", "name": "Fuel Tank 350 liters", "weight": 283.25}
Fuel_Tank_490_L_Central__21_ = {"clsid": "{PTB_490C_MIG21}", "name": "Fuel Tank 490 L Central (21)", "weight": 434}
Fuel_Tank_490_L__21_ = {"clsid": "{PTB_490_MIG21}", "name": "Fuel Tank 490 L (21)", "weight": 434}
Fuel_Tank_800_L__21_ = {"clsid": "{PTB_800_MIG21}", "name": "Fuel Tank 800 L (21)", "weight": 682}
Fuel_Tank_FT600 = {"clsid": "Fuel_Tank_FT600", "name": "Fuel tank FT600", "weight": 1925}
Fuel_tank_1150L = {"clsid": "{414E383A-59EB-41BC-8566-2B5E0788ED1F}", "name": "Fuel tank 1150L", "weight": 975.25}
Fuel_tank_1150L_MiG_29 = {"clsid": "{C0FF4842-FBAC-11d5-9190-00A0249B6F00}", "name": "Fuel tank 1150L MiG-29", "weight": 975.25}
Fuel_tank_1400L = {"clsid": "{2BEC576B-CDF5-4B7F-961F-B0FA4312B841}", "name": "Fuel tank 1400L", "weight": 1262.5}
Fuel_tank_2000L = {"clsid": "{16602053-4A12-40A2-B214-AB60D481B20E}", "name": "Fuel tank 2000L", "weight": 1700}
Fuel_tank_3000L = {"clsid": "{7D7EC917-05F6-49D4-8045-61FC587DD019}", "name": "Fuel tank 3000L", "weight": 2550}
Fuel_tank_300_gal = {"clsid": "{8A0BE8AE-58D4-4572-9263-3144C0D06364}", "name": "Fuel tank 300 gal", "weight": 1083.5076415}
Fuel_tank_300_gal_ = {"clsid": "{F14-300gal}", "name": "Fuel tank 300 gal", "weight": 958.4}
Fuel_tank_300_gal__empty_ = {"clsid": "{F14-300gal-empty}", "name": "Fuel tank 300 gal (empty)", "weight": 70}
Fuel_tank_330_gal = {"clsid": "{EFEC8200-B922-11d7-9897-000476191836}", "name": "Fuel tank 330 gal", "weight": 1067.750921}
Fuel_tank_330_gal_ = {"clsid": "{EFEC8201-B922-11d7-9897-000476191836}", "name": "Fuel tank 330 gal", "weight": 1067.750921}
Fuel_tank_367_gal = {"clsid": "{82364E69-5564-4043-A866-E13032926C3E}", "name": "Fuel tank 367 gal", "weight": 1181.8623879}
Fuel_tank_370_gal = {"clsid": "{F376DBEE-4CAE-41BA-ADD9-B2910AC95DEC}", "name": "Fuel tank 370 gal", "weight": 1338.1101068}
Fuel_tank_5000L = {"clsid": "{0855A3A1-FA50-4C89-BDBB-5D5360ABA071}", "name": "Fuel tank 5000L", "weight": 4420}
Fuel_tank_610_gal = {"clsid": "{E1F29B21-F291-4589-9FD8-3272EEC69506}", "name": "Fuel tank 610 gal", "weight": 2010.8766885}
Fuel_tank_800L = {"clsid": "{A5BAEAB7-6FAF-4236-AF72-0FD900F493F9}", "name": "Fuel tank 800L", "weight": 680}
Fuel_tank_800L_Wing = {"clsid": "{E8D4652F-FD48-45B7-BA5B-2AE05BB5A9CF}", "name": "Fuel tank 800L Wing", "weight": 760}
Fuel_tank_PTB_450 = {"clsid": "{B99EE8A8-99BC-4a8d-89AC-A26831920DCE}", "name": "Fuel tank PTB-450", "weight": 550}
Fuel_tank_PTB_450_ = {"clsid": "{PTB_450}", "name": "Fuel tank PTB-450", "weight": 550}
Fuel_tank_S_3 = {"clsid": "{A504D93B-4E80-4B4F-A533-0D9B65F2C55F}", "name": "Fuel tank S-3", "weight": 964}
FW109_FUEL_TANK = {"clsid": "FW109_FUEL_TANK", "name": "300 liter Fuel Tank Type E2", "weight": 266}
F_4_Fuel_tank_C = {"clsid": "{8B9E3FD0-F034-4A07-B6CE-C269884CC71B}", "name": "F-4 Fuel tank-C", "weight": 2345}
F_4_Fuel_tank_W = {"clsid": "{7B4B122D-C12C-4DB4-834E-4D8BB4D863A8}", "name": "F-4 Fuel tank-W", "weight": 1420}
F_5_150Gal_Fuel_tank = {"clsid": "{PTB-150GAL}", "name": "F-5 150Gal Fuel tank", "weight": 509}
F_5_275Gal_Fuel_tank = {"clsid": "{0395076D-2F77-4420-9D33-087A4398130B}", "name": "F-5 275Gal Fuel tank", "weight": 909}
GAU_12_Gunpod_w_AP_M79 = {"clsid": "{GAU_12_Equalizer_AP}", "name": "GAU 12 Gunpod w/AP M79", "weight": 283.9}
GAU_12_Gunpod_w_HE_M792 = {"clsid": "{GAU_12_Equalizer_HE}", "name": "GAU 12 Gunpod w/HE M792", "weight": 283.9}
GAU_12_Gunpod_w_SAPHEI_T = {"clsid": "{GAU_12_Equalizer}", "name": "GAU 12 Gunpod w/SAPHEI-T", "weight": 283.9}
GBU_10 = {"clsid": "{BRU-32 GBU-10}", "name": "GBU-10", "weight": 997.38}
GBU_10___2000lb_Laser_Guided_Bomb = {"clsid": "{51F9AAE5-964F-4D21-83FB-502E3BFE5F8A}", "name": "GBU-10 - 2000lb Laser Guided Bomb", "weight": 959}
GBU_12 = {"clsid": "{BRU-32 GBU-12}", "name": "GBU-12", "weight": 332.38}
GBU_12___500lb_Laser_Guided_Bomb = {"clsid": "{DB769D48-67D7-42ED-A2BE-108D566C8B1E}", "name": "GBU-12 - 500lb Laser Guided Bomb", "weight": 277}
GBU_16 = {"clsid": "{BRU-32 GBU-16}", "name": "GBU-16", "weight": 621.38}
GBU_16___1000lb_Laser_Guided_Bomb = {"clsid": "{0D33DDAE-524F-4A4E-B5B8-621754FE3ADE}", "name": "GBU-16 - 1000lb Laser Guided Bomb", "weight": 513}
GBU_24 = {"clsid": "{BRU-32 GBU-24}", "name": "GBU-24", "weight": 1107.38}
GBU_24_Paveway_III___2000lb_Laser_Guided_Bomb = {"clsid": "{34759BBC-AF1E-4AEE-A581-498FF7A6EBCE}", "name": "GBU-24 Paveway III - 2000lb Laser Guided Bomb", "weight": 1087}
GBU_24_Paveway_III___2000lb_Laser_Guided_Bomb_ = {"clsid": "{GBU-24}", "name": "GBU-24 Paveway III - 2000lb Laser Guided Bomb", "weight": 1087}
GBU_27___2000lb_Laser_Guided_Penetrator_Bomb = {"clsid": "{EF0A9419-01D6-473B-99A3-BEBDB923B14D}", "name": "GBU-27 - 2000lb Laser Guided Penetrator Bomb", "weight": 1200}
GBU_28___5000lb_Laser_Guided_Penetrator_Bomb = {"clsid": "{F06B775B-FC70-44B5-8A9F-5B5E2EB839C7}", "name": "GBU-28 - 5000lb Laser Guided Penetrator Bomb", "weight": 2130}
GBU_31V3B_8 = {"clsid": "GBU-31V3B*8", "name": "8 x GBU-31(V)3/B - JDAM, 2000lb GPS Guided Penetrator Bombs", "weight": 7848}
GBU_31_8 = {"clsid": "GBU-31*8", "name": "8 x GBU-31(V)1/B - JDAM, 2000lb GPS Guided Bombs", "weight": 7152}
GBU_31_V_1_B___JDAM__2000lb_GPS_Guided_Bomb = {"clsid": "{GBU-31}", "name": "GBU-31(V)1/B - JDAM, 2000lb GPS Guided Bomb", "weight": 934}
GBU_31_V_2_B___JDAM__2000lb_GPS_Guided_Bomb = {"clsid": "{GBU_31_V_2B}", "name": "GBU-31(V)2/B - JDAM, 2000lb GPS Guided Bomb", "weight": 934}
GBU_31_V_3_B___JDAM__2000lb_GPS_Guided_Penetrator_Bomb = {"clsid": "{GBU-31V3B}", "name": "GBU-31(V)3/B - JDAM, 2000lb GPS Guided Penetrator Bomb", "weight": 981}
GBU_31_V_4_B___JDAM__2000lb_GPS_Guided_Penetrator_Bomb = {"clsid": "{GBU_31_V_4B}", "name": "GBU-31(V)4/B - JDAM, 2000lb GPS Guided Penetrator Bomb", "weight": 970}
GBU_32_V_2_B___JDAM__1000lb_GPS_Guided_Bomb = {"clsid": "{GBU_32_V_2B}", "name": "GBU-32(V)2/B - JDAM, 1000lb GPS Guided Bomb", "weight": 467}
GBU_38_16 = {"clsid": "GBU-38*16", "name": "16 x GBU-38 - JDAM, 500lb GPS Guided Bombs", "weight": 3856}
GBU_38___JDAM__500lb_GPS_Guided_Bomb = {"clsid": "{GBU-38}", "name": "GBU-38 - JDAM, 500lb GPS Guided Bomb", "weight": 241}
GBU_54B___LJDAM__500lb_Laser__GPS_Guided_Bomb_LD = {"clsid": "{GBU_54_V_1B}", "name": "GBU-54B - LJDAM, 500lb Laser & GPS Guided Bomb LD", "weight": 253}
GUV_VOG = {"clsid": "GUV_VOG", "name": "GUV-8700 w AP-30 - 30mm Grenade Launcher", "weight": 274}
GUV_YakB_GSHP = {"clsid": "GUV_YakB_GSHP", "name": "GUV-8700 w 1x12.7 mm & 2x7.62 mm Rotary HMG", "weight": 452}
HOT3 = {"clsid": "{HOT3G}", "name": "HOT3", "weight": 32}
HOT3_ = {"clsid": "{HOT3D}", "name": "HOT3", "weight": 32}
HSAB_with_9_x_Mk_20_Rockeye___490lbs_CBUs__247_x_HEAT_Bomblets = {"clsid": "{4CD2BB0F-5493-44EF-A927-9760350F7BA1}", "name": "HSAB with 9 x Mk-20 Rockeye - 490lbs CBUs, 247 x HEAT Bomblets", "weight": 2050}
HSAB_with_9_x_Mk_83___1000lb_GP_Bombs_LD = {"clsid": "{696CFFC4-0BDE-42A8-BE4B-0BE3D9DD723C}", "name": "HSAB with 9 x Mk-83 - 1000lb GP Bombs LD", "weight": 8100}
HVAR_SMOKE__UnGd_Rkt = {"clsid": "{HVAR_SMOKE_2}", "name": "HVAR SMOKE, UnGd Rkt", "weight": 100}
HVAR_Smoke_Generator = {"clsid": "{HVAR_SMOKE_GENERATOR}", "name": "HVAR Smoke Generator", "weight": 64}
HVAR__UnGd_Rkt = {"clsid": "{HVAR}", "name": "HVAR, UnGd Rkt", "weight": 64}
I16_DROP_FUEL_TANK = {"clsid": "I16_DROP_FUEL_TANK", "name": "I-16 External Fuel Tank", "weight": 73}
I16_FAB_100SV = {"clsid": "I16_FAB_100SV", "name": "FAB-100SV", "weight": 100}
I16_RS_82 = {"clsid": "I16_RS_82", "name": "RS-82", "weight": 9.7}
IAB_500___470_kg__bomb__free_fall = {"clsid": "{IAB-500}", "name": "IAB-500 - 470 kg, bomb, free fall", "weight": 470}
IR_Deflector = {"clsid": "{IR_Deflector}", "name": "IR Deflector", "weight": 5}
KAB_1500Kr___1500kg_TV_Guided_Bomb = {"clsid": "{KAB_1500Kr_LOADOUT}", "name": "KAB-1500Kr - 1500kg TV Guided Bomb", "weight": 1525}
KAB_1500LG_Pr___1500kg_Laser_Guided_Penetrator_Bomb = {"clsid": "{KAB_1500LG_LOADOUT}", "name": "KAB-1500LG-Pr - 1500kg Laser Guided Penetrator Bomb", "weight": 1525}
KAB_1500L___1500kg_Laser_Guided_Bomb = {"clsid": "{39821727-F6E2-45B3-B1F0-490CC8921D1E}", "name": "KAB-1500L - 1500kg Laser Guided Bomb", "weight": 1560}
KAB_500Kr___500kg_TV_Guided_Bomb = {"clsid": "{E2C426E3-8B10-4E09-B733-9CDC26520F48}", "name": "KAB-500Kr - 500kg TV Guided Bomb", "weight": 560}
KAB_500LG___500kg_Laser_Guided_Bomb = {"clsid": "{BA565F89-2373-4A84-9502-A0E017D3A44A}", "name": "KAB-500LG - 500kg Laser Guided Bomb", "weight": 534}
KAB_500S___500kg_GPS_Guided_Bomb = {"clsid": "{KAB_500S_LOADOUT}", "name": "KAB-500S - 500kg GPS Guided Bomb", "weight": 500}
KB_Flare_Chaff_dispenser_pod = {"clsid": "{KB}", "name": "KB Flare/Chaff dispenser pod", "weight": 296}
Kh_22__AS_4_Kitchen____1000kg__AShM__IN__Act_Pas_Rdr = {"clsid": "{12429ECF-03F0-4DF6-BCBD-5D38B6343DE1}", "name": "Kh-22 (AS-4 Kitchen) - 1000kg, AShM, IN & Act/Pas Rdr", "weight": 6800}
Kh_23L_Grom__AS_7_Kerry____286kg__ASM__Laser_Guided = {"clsid": "{9F390892-E6F9-42C9-B84E-1136A881DCB2}", "name": "Kh-23L Grom (AS-7 Kerry) - 286kg, ASM, Laser Guided", "weight": 288}
Kh_25ML__AS_10_Karen____300kg__ASM__Semi_Act_Laser = {"clsid": "{6DADF342-D4BA-4D8A-B081-BA928C4AF86D}", "name": "Kh-25ML (AS-10 Karen) - 300kg, ASM, Semi-Act Laser", "weight": 360}
Kh_25ML__AS_10_Karen____300kg__ASM__Semi_Act_Laser_ = {"clsid": "{79D73885-0801-45a9-917F-C90FE1CE3DFC}", "name": "Kh-25ML (AS-10 Karen) - 300kg, ASM, Semi-Act Laser", "weight": 360}
Kh_25ML__AS_10_Karen____300kg__ASM__Semi_Act_Laser__ = {"clsid": "{X-25ML}", "name": "Kh-25ML (AS-10 Karen) - 300kg, ASM, Semi-Act Laser", "weight": 360}
Kh_25MPU__Updated_AS_12_Kegler____320kg__ARM__IN__Pas_Rdr = {"clsid": "{E86C5AA5-6D49-4F00-AD2E-79A62D6DDE26}", "name": "Kh-25MPU (Updated AS-12 Kegler) - 320kg, ARM, IN & Pas Rdr", "weight": 370}
Kh_25MPU__Updated_AS_12_Kegler____320kg__ARM__IN__Pas_Rdr_ = {"clsid": "{752AF1D2-EBCC-4bd7-A1E7-2357F5601C70}", "name": "Kh-25MPU (Updated AS-12 Kegler) - 320kg, ARM, IN & Pas Rdr", "weight": 370}
Kh_25MPU__Updated_AS_12_Kegler____320kg__ARM__IN__Pas_Rdr__ = {"clsid": "{X-25MPU}", "name": "Kh-25MPU (Updated AS-12 Kegler) - 320kg, ARM, IN & Pas Rdr", "weight": 370}
Kh_25MP__AS_12_Kegler____320kg__ARM__Pas_Rdr = {"clsid": "{Kh-25MP}", "name": "Kh-25MP (AS-12 Kegler) - 320kg, ARM, Pas Rdr", "weight": 355}
Kh_25MR__AS_10_Karen____300kg__ASM__10km__RC_Guided = {"clsid": "{292960BB-6518-41AC-BADA-210D65D5073C}", "name": "Kh-25MR (AS-10 Karen) - 300kg, ASM, 10km, RC Guided", "weight": 360}
Kh_25MR__AS_10_Karen____300kg__ASM__RC_Guided = {"clsid": "{X-25MR}", "name": "Kh-25MR (AS-10 Karen) - 300kg, ASM, RC Guided", "weight": 360}
Kh_28__AS_9_Kyle____720kg__ARM__Pas_Rdr = {"clsid": "{Kh-28}", "name": "Kh-28 (AS-9 Kyle) - 720kg, ARM, Pas Rdr", "weight": 715}
Kh_29L__AS_14_Kedge____657kg__ASM__Semi_Act_Laser = {"clsid": "{3468C652-E830-4E73-AFA9-B5F260AB7C3D}", "name": "Kh-29L (AS-14 Kedge) - 657kg, ASM, Semi-Act Laser", "weight": 747}
Kh_29L__AS_14_Kedge____657kg__ASM__Semi_Act_Laser_ = {"clsid": "{D4A8D9B9-5C45-42e7-BBD2-0E54F8308432}", "name": "Kh-29L (AS-14 Kedge) - 657kg, ASM, Semi-Act Laser", "weight": 747}
Kh_29L__AS_14_Kedge____657kg__ASM__Semi_Act_Laser__ = {"clsid": "{X-29L}", "name": "Kh-29L (AS-14 Kedge) - 657kg, ASM, Semi-Act Laser", "weight": 747}
Kh_29T__AS_14_Kedge____670kg__ASM__TV_Guided = {"clsid": "{B4FC81C9-B861-4E87-BBDC-A1158E648EBF}", "name": "Kh-29T (AS-14 Kedge) - 670kg, ASM, TV Guided", "weight": 760}
Kh_29T__AS_14_Kedge____670kg__ASM__TV_Guided_ = {"clsid": "{601C99F7-9AF3-4ed7-A565-F8B8EC0D7AAC}", "name": "Kh-29T (AS-14 Kedge) - 670kg, ASM, TV Guided", "weight": 760}
Kh_29T__AS_14_Kedge____670kg__ASM__TV_Guided__ = {"clsid": "{X-29T}", "name": "Kh-29T (AS-14 Kedge) - 670kg, ASM, TV Guided", "weight": 760}
Kh_31A__AS_17_Krypton____610kg__AShM__IN__Act_Rdr = {"clsid": "{4D13E282-DF46-4B23-864A-A9423DFDE504}", "name": "Kh-31A (AS-17 Krypton) - 610kg, AShM, IN & Act Rdr", "weight": 690}
Kh_31A__AS_17_Krypton____610kg__AShM__IN__Act_Rdr_ = {"clsid": "{4D13E282-DF46-4B23-864A-A9423DFDE50A}", "name": "Kh-31A (AS-17 Krypton) - 610kg, AShM, IN & Act Rdr", "weight": 690}
Kh_31A__AS_17_Krypton____610kg__AShM__IN__Act_Rdr__ = {"clsid": "{X-31A}", "name": "Kh-31A (AS-17 Krypton) - 610kg, AShM, IN & Act Rdr", "weight": 690}
Kh_31P__AS_17_Krypton____600kg__ARM__IN__Pas_Rdr = {"clsid": "{D8F2C90B-887B-4B9E-9FE2-996BC9E9AF03}", "name": "Kh-31P (AS-17 Krypton) - 600kg, ARM, IN & Pas Rdr", "weight": 690}
Kh_31P__AS_17_Krypton____600kg__ARM__IN__Pas_Rdr_ = {"clsid": "{D8F2C90B-887B-4B9E-9FE2-996BC9E9AF0A}", "name": "Kh-31P (AS-17 Krypton) - 600kg, ARM, IN & Pas Rdr", "weight": 690}
Kh_31P__AS_17_Krypton____600kg__ARM__IN__Pas_Rdr__ = {"clsid": "{X-31P}", "name": "Kh-31P (AS-17 Krypton) - 600kg, ARM, IN & Pas Rdr", "weight": 690}
Kh_35__AS_20_Kayak____520kg__AShM__IN__Act_Rdr = {"clsid": "{2234F529-1D57-4496-8BB0-0150F9BDBBD2}", "name": "Kh-35 (AS-20 Kayak) - 520kg, AShM, IN & Act Rdr", "weight": 520}
Kh_35__AS_20_Kayak____520kg__AShM__IN__Act_Rdr_ = {"clsid": "{2234F529-1D57-4496-8BB0-0150F9BDBBD3}", "name": "Kh-35 (AS-20 Kayak) - 520kg, AShM, IN & Act Rdr", "weight": 570}
Kh_41__SS_N_22_Sunburn____4500kg__AShM__IN__Act_Rdr = {"clsid": "{3F26D9C5-5CC3-4E42-BC79-82FAA54E9F26}", "name": "Kh-41 (SS-N-22-Sunburn) - 4500kg, AShM, IN & Act Rdr", "weight": 4500}
Kh_58U__AS_11_Kilter____640kg__ARM__IN__Pas_Rdr = {"clsid": "{FE382A68-8620-4AC0-BDF5-709BFE3977D7}", "name": "Kh-58U (AS-11 Kilter) - 640kg, ARM, IN & Pas Rdr", "weight": 730}
Kh_58U__AS_11_Kilter____640kg__ARM__IN__Pas_Rdr_ = {"clsid": "{B5CA9846-776E-4230-B4FD-8BCC9BFB1676}", "name": "Kh-58U (AS-11 Kilter) - 640kg, ARM, IN & Pas Rdr", "weight": 730}
Kh_59M__AS_18_Kazoo____930kg__ASM__IN = {"clsid": "{40AB87E8-BEFB-4D85-90D9-B2753ACF9514}", "name": "Kh-59M (AS-18 Kazoo) - 930kg, ASM, IN", "weight": 1115}
Kh_65__AS_15B_Kent____1250kg__ASM__IN__MCC = {"clsid": "{BADAF2DE-68B5-472A-8AAC-35BAEFF6B4A1}", "name": "Kh-65 (AS-15B Kent) - 1250kg, ASM, IN & MCC", "weight": 1250}
Kh_66_Grom__21____AGM__radar_guided_APU_68 = {"clsid": "{Kh-66_Grom}", "name": "Kh-66 Grom (21) - AGM, radar guided APU-68", "weight": 300}
KMGU_2___96_x_AO_2_5RT_Dispenser__CBU__HE_Frag = {"clsid": "{96A7F676-F956-404A-AD04-F33FB2C74884}", "name": "KMGU-2 - 96 x AO-2.5RT Dispenser (CBU) HE/Frag", "weight": 778}
KMGU_2___96_x_PTAB_2_5KO_Dispenser__CBU__HEAT_AP = {"clsid": "{96A7F676-F956-404A-AD04-F33FB2C74881}", "name": "KMGU-2 - 96 x PTAB-2.5KO Dispenser (CBU) HEAT/AP", "weight": 675.6}
KORD_12_7 = {"clsid": "KORD_12_7", "name": "Kord 12.7mm HMG", "weight": 95}
Kopyo_radar_pod = {"clsid": "{F4920E62-A99A-11d8-9897-000476191836}", "name": "Kopyo radar pod", "weight": 115}
Kormoran___ASM = {"clsid": "{7210496B-7B81-4B52-80D6-8529ECF847CD}", "name": "Kormoran - ASM", "weight": 660}
K_13A = {"clsid": "{K-13A}", "name": "K-13A", "weight": 90}
L005_Sorbtsiya_ECM_pod__left_ = {"clsid": "{44EE8698-89F9-48EE-AF36-5FD31896A82F}", "name": "L005 Sorbtsiya ECM pod (left)", "weight": 150}
L005_Sorbtsiya_ECM_pod__right_ = {"clsid": "{44EE8698-89F9-48EE-AF36-5FD31896A82A}", "name": "L005 Sorbtsiya ECM pod (right)", "weight": 150}
L175V_Khibiny_ECM_pod = {"clsid": "{ECM_POD_L_175V}", "name": "L175V Khibiny ECM pod", "weight": 150}
LANTIRN_Targeting_Pod = {"clsid": "{F14-LANTIRN-TP}", "name": "LANTIRN Targeting Pod", "weight": 342}
LAU3_HE151 = {"clsid": "LAU3_HE151", "name": "LAU-3 pod - 19 x 2.75\" Hydra, UnGd Rkts M151, HE", "weight": 234}
LAU3_HE5 = {"clsid": "LAU3_HE5", "name": "LAU-3 pod - 19 x 2.75\" Hydra, UnGd Rkts Mk5, HEAT", "weight": 234}
LAU3_WP156 = {"clsid": "LAU3_WP156", "name": "LAU-3 pod - 19 x 2.75\" Hydra, UnGd Rkts M156, Wht Phos", "weight": 234}
LAU3_WP1B = {"clsid": "LAU3_WP1B", "name": "LAU-3 pod - 19 x 2.75\" Hydra, UnGd Rkts WTU-1/B, Practice", "weight": 234}
LAU3_WP61 = {"clsid": "LAU3_WP61", "name": "LAU-3 pod - 19 x 2.75\" Hydra, UnGd Rkts Mk61, Practice", "weight": 234}
LAU_105 = {"clsid": "LAU-105", "name": "LAU-105", "weight": 18}
LAU_105_1_AIM_9L_L = {"clsid": "LAU-105_1*AIM-9L_L", "name": "LAU-105 with 1 x AIM-9L Sidewinder IR AAM", "weight": 115.73}
LAU_105_1_AIM_9L_R = {"clsid": "LAU-105_1*AIM-9L_R", "name": "LAU-105 with 1 x AIM-9L Sidewinder IR AAM", "weight": 115.73}
LAU_105_1_AIM_9M_L = {"clsid": "LAU-105_1*AIM-9M_L", "name": "LAU-105 with 1 x AIM-9M Sidewinder IR AAM", "weight": 115.73}
LAU_105_1_AIM_9M_R = {"clsid": "LAU-105_1*AIM-9M_R", "name": "LAU-105 with 1 x AIM-9M Sidewinder IR AAM", "weight": 115.73}
LAU_105_1_CATM_9M_L = {"clsid": "LAU-105_1*CATM-9M_L", "name": "LAU-105 with 1 x Captive AIM-9M for ACM", "weight": 115.73}
LAU_105_1_CATM_9M_R = {"clsid": "LAU-105_1*CATM-9M_R", "name": "LAU-105 with 1 x Captive AIM-9M for ACM", "weight": 115.73}
LAU_105_2_AIM_9L = {"clsid": "LAU-105_2*AIM-9L", "name": "LAU-105 with 2 x AIM-9L Sidewinder IR AAM", "weight": 201.46}
LAU_105_2_AIM_9P5 = {"clsid": "LAU-105_2*AIM-9P5", "name": "LAU-105 with 2 x AIM-9P5 Sidewinder IR AAM", "weight": 201}
LAU_105_2_CATM_9M = {"clsid": "LAU-105_2*CATM-9M", "name": "LAU-105 with 2 x Captive AIM-9M for ACM", "weight": 201.46}
LAU_105_AIS_ASQ_T50_L = {"clsid": "LAU-105_AIS_ASQ_T50_L", "name": "LAU-105 with 1 x AN/ASQ-T50 TCTS Pod - ACMI Pod", "weight": 92.6}
LAU_105_AIS_ASQ_T50_R = {"clsid": "LAU-105_AIS_ASQ_T50_R", "name": "LAU-105 with 1 x AN/ASQ-T50 TCTS Pod - ACMI Pod", "weight": 92.6}
LAU_105_with_2_x_AIM_9M_Sidewinder_IR_AAM = {"clsid": "{DB434044-F5D0-4F1F-9BA9-B73027E18DD3}", "name": "LAU-105 with 2 x AIM-9M Sidewinder IR AAM", "weight": 201.46}
LAU_105_with_2_x_AIM_9P_Sidewinder_IR_AAM = {"clsid": "{3C0745ED-8B0B-42eb-B907-5BD5C1717447}", "name": "LAU-105 with 2 x AIM-9P Sidewinder IR AAM", "weight": 202.36}
LAU_10R_pod___4_x_127mm_ZUNI__UnGd_Rkts_Mk71__HE_FRAG = {"clsid": "{LAU_10R}", "name": "LAU-10R pod - 4 x 127mm ZUNI, UnGd Rkts Mk71, HE/FRAG", "weight": 316.6}
LAU_10_pod___4_x_127mm_ZUNI__UnGd_Rkts_Mk71__HE_FRAG = {"clsid": "{F3EFE0AB-E91A-42D8-9CA2-B63C91ED570A}", "name": "LAU-10 pod - 4 x 127mm ZUNI, UnGd Rkts Mk71, HE/FRAG", "weight": 316.6}
LAU_10___4_ZUNI_MK_71 = {"clsid": "{BRU42_LAU10}", "name": "LAU-10 - 4 ZUNI MK 71", "weight": 568}
LAU_10___4_ZUNI_MK_71_ = {"clsid": "{BRU3242_LAU10}", "name": "LAU-10 - 4 ZUNI MK 71", "weight": 625.38}
LAU_115C_with_AIM_7E_Sparrow_Semi_Active_Radar = {"clsid": "{LAU-115 - AIM-7E}", "name": "LAU-115C with AIM-7E Sparrow Semi-Active Radar", "weight": 284.4}
LAU_115C_with_AIM_7F_Sparrow_Semi_Active_Radar = {"clsid": "{LAU-115 - AIM-7F}", "name": "LAU-115C with AIM-7F Sparrow Semi-Active Radar", "weight": 285.4}
LAU_115C_with_AIM_7MH_Sparrow_Semi_Active_Radar = {"clsid": "{LAU-115 - AIM-7H}", "name": "LAU-115C with AIM-7MH Sparrow Semi-Active Radar", "weight": 285.4}
LAU_115_2_LAU_127_AIM_120B = {"clsid": "LAU-115_2*LAU-127_AIM-120B", "name": "LAU-115 with 2 x LAU-127 AIM-120B AMRAAM - Active Rdr AAM", "weight": 457}
LAU_115_2_LAU_127_AIM_120C = {"clsid": "LAU-115_2*LAU-127_AIM-120C", "name": "LAU-115 with 2 x LAU-127 AIM-120C-5 AMRAAM - Active Rdr AAM", "weight": 468}
LAU_115_2_LAU_127_AIM_9L = {"clsid": "LAU-115_2*LAU-127_AIM-9L", "name": "LAU-115 with 2 x LAU-127 AIM-9L Sidewinder IR AAM", "weight": 316.46}
LAU_115_2_LAU_127_AIM_9M = {"clsid": "LAU-115_2*LAU-127_AIM-9M", "name": "LAU-115 with 2 x LAU-127 AIM-9M Sidewinder IR AAM", "weight": 316.46}
LAU_115_2_LAU_127_AIM_9X = {"clsid": "LAU-115_2*LAU-127_AIM-9X", "name": "LAU-115 with 2 x LAU-127 AIM-9X Sidewinder IR AAM", "weight": 313.92}
LAU_115_2_LAU_127_CATM_9M = {"clsid": "LAU-115_2*LAU-127_CATM-9M", "name": "LAU-115 with 2 x LAU-127 Captive AIM-9M for ACM", "weight": 316.46}
LAU_115_LAU_127_AIM_9L = {"clsid": "LAU-115_LAU-127_AIM-9L", "name": "LAU-115 with 1 x LAU-127 AIM-9L Sidewinder IR AAM", "weight": 230.73}
LAU_115_LAU_127_AIM_9L_R = {"clsid": "LAU-115_LAU-127_AIM-9L_R", "name": "LAU-115 with 1 x LAU-127 AIM-9L Sidewinder IR AAM", "weight": 230.73}
LAU_115_LAU_127_AIM_9M = {"clsid": "LAU-115_LAU-127_AIM-9M", "name": "LAU-115 with 1 x LAU-127 AIM-9M Sidewinder IR AAM", "weight": 230.73}
LAU_115_LAU_127_AIM_9M_R = {"clsid": "LAU-115_LAU-127_AIM-9M_R", "name": "LAU-115 with 1 x LAU-127 AIM-9M Sidewinder IR AAM", "weight": 230.73}
LAU_115_LAU_127_AIM_9X = {"clsid": "LAU-115_LAU-127_AIM-9X", "name": "LAU-115 with 1 x LAU-127 AIM-9X Sidewinder IR AAM", "weight": 229.46}
LAU_115_LAU_127_AIM_9X_R = {"clsid": "LAU-115_LAU-127_AIM-9X_R", "name": "LAU-115 with 1 x LAU-127 AIM-9X Sidewinder IR AAM", "weight": 229.46}
LAU_115_LAU_127_CATM_9M = {"clsid": "LAU-115_LAU-127_CATM-9M", "name": "LAU-115 with 1 x LAU-127 Captive AIM-9M for ACM", "weight": 230.73}
LAU_115_LAU_127_CATM_9M_R = {"clsid": "LAU-115_LAU-127_CATM-9M_R", "name": "LAU-115 with 1 x LAU-127 Captive AIM-9M for ACM", "weight": 230.73}
LAU_115_with_1_x_LAU_127_AIM_120B_AMRAAM___Active_Rdr_AAM = {"clsid": "{LAU-115 - AIM-120B}", "name": "LAU-115 with 1 x LAU-127 AIM-120B AMRAAM - Active Rdr AAM", "weight": 301}
LAU_115_with_1_x_LAU_127_AIM_120B_AMRAAM___Active_Rdr_AAM_ = {"clsid": "{LAU-115 - AIM-120B_R}", "name": "LAU-115 with 1 x LAU-127 AIM-120B AMRAAM - Active Rdr AAM", "weight": 301}
LAU_115_with_1_x_LAU_127_AIM_120C_5_AMRAAM___Active_Rdr_AAM = {"clsid": "{LAU-115 - AIM-120C}", "name": "LAU-115 with 1 x LAU-127 AIM-120C-5 AMRAAM - Active Rdr AAM", "weight": 306.5}
LAU_115_with_1_x_LAU_127_AIM_120C_5_AMRAAM___Active_Rdr_AAM_ = {"clsid": "{LAU-115 - AIM-120C_R}", "name": "LAU-115 with 1 x LAU-127 AIM-120C-5 AMRAAM - Active Rdr AAM", "weight": 306.5}
LAU_115_with_AIM_7M_Sparrow_Semi_Active_Radar = {"clsid": "{LAU-115 - AIM-7M}", "name": "LAU-115 with AIM-7M Sparrow Semi-Active Radar", "weight": 285.5}
LAU_117_AGM_65A = {"clsid": "LAU_117_AGM_65A", "name": "LAU-117 with AGM-65A - Maverick A (TV Guided)", "weight": 269.5}
LAU_117_AGM_65B = {"clsid": "LAU_117_AGM_65B", "name": "LAU-117 with AGM-65B - Maverick B (TV Guided)", "weight": 269.5}
LAU_117_AGM_65F = {"clsid": "LAU_117_AGM_65F", "name": "LAU-117 with AGM-65F - Maverick F (IIR ASM)", "weight": 360}
LAU_117_AGM_65G = {"clsid": "LAU_117_AGM_65G", "name": "LAU-117 with AGM-65G - Maverick G (IIR ASM - Lg Whd)", "weight": 360}
LAU_117_AGM_65H = {"clsid": "LAU_117_AGM_65H", "name": "LAU-117 with AGM-65H - Maverick H (CCD Imp ASM)", "weight": 267}
LAU_117_AGM_65L = {"clsid": "LAU_117_AGM_65L", "name": "LAU-117 with AGM-65L - Maverick E2/L (CCD Laser ASM)", "weight": 351}
LAU_117_CATM_65K = {"clsid": "LAU_117_CATM_65K", "name": "LAU-117 with CATM-65K - Captive Trg Round for Mav K (CCD)", "weight": 356}
LAU_117_TGM_65D = {"clsid": "LAU_117_TGM_65D", "name": "LAU-117 with TGM-65D - Trg Round for Mav D (IIR)", "weight": 277}
LAU_117_TGM_65G = {"clsid": "LAU_117_TGM_65G", "name": "LAU-117 with TGM-65G - Trg Round for Mav G (IIR)", "weight": 360}
LAU_117_TGM_65H = {"clsid": "LAU_117_TGM_65H", "name": "LAU-117 with TGM-65H - Trg Round for Mav H (CCD)", "weight": 267}
LAU_117_with_AGM_65D___Maverick_D__IIR_ASM_ = {"clsid": "{444BA8AE-82A7-4345-842E-76154EFCCA46}", "name": "LAU-117 with AGM-65D - Maverick D (IIR ASM)", "weight": 277}
LAU_117_with_AGM_65E___Maverick_E__Laser_ASM___Lg_Whd_ = {"clsid": "{F16A4DE0-116C-4A71-97F0-2CF85B0313EC}", "name": "LAU-117 with AGM-65E - Maverick E (Laser ASM - Lg Whd)", "weight": 345}
LAU_117_with_AGM_65K___Maverick_K__CCD_Imp_ASM_ = {"clsid": "{69DC8AE7-8F77-427B-B8AA-B19D3F478B66}", "name": "LAU-117 with AGM-65K - Maverick K (CCD Imp ASM)", "weight": 356}
LAU_118a_with_AGM_45B_Shrike_ARM__Imp_ = {"clsid": "{3E6B632D-65EB-44D2-9501-1C2D04515405}", "name": "LAU-118a with AGM-45B Shrike ARM (Imp)", "weight": 222.4}
LAU_127_AIM_9L = {"clsid": "LAU-127_AIM-9L", "name": "LAU-127 AIM-9L Sidewinder IR AAM", "weight": 131.03}
LAU_127_AIM_9M = {"clsid": "LAU-127_AIM-9M", "name": "LAU-127 AIM-9M Sidewinder IR AAM", "weight": 131.03}
LAU_127_AIM_9X = {"clsid": "LAU-127_AIM-9X", "name": "LAU-127 AIM-9X Sidewinder IR AAM", "weight": 129.76}
LAU_127_CATM_9M = {"clsid": "LAU-127_CATM-9M", "name": "LAU-127 Captive AIM-9M for ACM", "weight": 131.03}
LAU_131x3_HYDRA_70_M151 = {"clsid": "LAU_131x3_HYDRA_70_M151", "name": "BRU-42 with 3 x LAU-131 pods - 21 x 2.75\" Hydra, UnGd Rkts M151, HE", "weight": 406.65}
LAU_131x3_HYDRA_70_M156 = {"clsid": "LAU_131x3_HYDRA_70_M156", "name": "BRU-42 with 3 x LAU-131 pods - 21 x 2.75\" Hydra, UnGd Rkts M156, Wht Phos", "weight": 410.43}
LAU_131x3_HYDRA_70_M257 = {"clsid": "LAU_131x3_HYDRA_70_M257", "name": "BRU-42 with 3 x LAU-131 pods - 21 x 2.75\" Hydra, UnGd Rkts M257, Para Illum", "weight": 423.45}
LAU_131x3_HYDRA_70_M274 = {"clsid": "LAU_131x3_HYDRA_70_M274", "name": "BRU-42 with 3 x LAU-131 pods - 21 x 2.75\" Hydra, UnGd Rkts M274, Practice Smk", "weight": 406.65}
LAU_131x3_HYDRA_70_MK1 = {"clsid": "LAU_131x3_HYDRA_70_MK1", "name": "BRU-42 with 3 x LAU-131 pods - 21 x 2.75\" Hydra, UnGd Rkts Mk1, Practice", "weight": 379.56}
LAU_131x3_HYDRA_70_MK5 = {"clsid": "LAU_131x3_HYDRA_70_MK5", "name": "BRU-42 with 3 x LAU-131 pods - 21 x 2.75\" Hydra, UnGd Rkts Mk5, HEAT", "weight": 373.26}
LAU_131x3_HYDRA_70_MK61 = {"clsid": "LAU_131x3_HYDRA_70_MK61", "name": "BRU-42 with 3 x LAU-131 pods - 21 x 2.75\" Hydra, UnGd Rkts Mk61, Practice", "weight": 379.56}
LAU_131x3_HYDRA_70_WTU1B = {"clsid": "LAU_131x3_HYDRA_70_WTU1B", "name": "BRU-42 with 3 x LAU-131 pods - 21 x 2.75\" Hydra, UnGd Rkts WTU-1/B, Practice", "weight": 406.65}
LAU_131_pod___7_x_2_75_Hydra__Laser_Guided_Rkts_M151__HE_APKWS = {"clsid": "{LAU-131 - 7 AGR-20A}", "name": "LAU-131 pod - 7 x 2.75\" Hydra, Laser Guided Rkts M151, HE APKWS", "weight": 134.5}
LAU_131_pod___7_x_2_75_Hydra__Laser_Guided_Rkts_M282__MPP_APKWS = {"clsid": "{LAU-131 - 7 AGR-20 M282}", "name": "LAU-131 pod - 7 x 2.75\" Hydra, Laser Guided Rkts M282, MPP APKWS", "weight": 148.5}
LAU_131_pod___7_x_2_75_Hydra__UnGd_Rkts_M151__HE = {"clsid": "{69926055-0DA8-4530-9F2F-C86B157EA9F6}", "name": "LAU-131 pod - 7 x 2.75\" Hydra, UnGd Rkts M151, HE", "weight": 102.3}
LAU_131_pod___7_x_2_75_Hydra__UnGd_Rkts_M156__Wht_Phos = {"clsid": "{2AF2EC3F-9065-4de5-93E1-1739C9A71EF7}", "name": "LAU-131 pod - 7 x 2.75\" Hydra, UnGd Rkts M156, Wht Phos", "weight": 103.56}
LAU_131_pod___7_x_2_75_Hydra__UnGd_Rkts_M257__Para_Illum = {"clsid": "{DAD45FE5-CFF0-4a2b-99D4-5D044D3BC22F}", "name": "LAU-131 pod - 7 x 2.75\" Hydra, UnGd Rkts M257, Para Illum", "weight": 107.9}
LAU_131_pod___7_x_2_75_Hydra__UnGd_Rkts_M274__Practice_Smk = {"clsid": "{6D6D5C07-2A90-4a68-9A74-C5D0CFFB05D9}", "name": "LAU-131 pod - 7 x 2.75\" Hydra, UnGd Rkts M274, Practice Smk", "weight": 102.3}
LAU_131_pod___7_x_2_75_Hydra__UnGd_Rkts_Mk1__Practice = {"clsid": "{D22C2D63-E5C9-4247-94FB-5E8F3DE22B71}", "name": "LAU-131 pod - 7 x 2.75\" Hydra, UnGd Rkts Mk1, Practice", "weight": 93.27}
LAU_131_pod___7_x_2_75_Hydra__UnGd_Rkts_Mk5__HEAT = {"clsid": "{319293F2-392C-4617-8315-7C88C22AF7C4}", "name": "LAU-131 pod - 7 x 2.75\" Hydra, UnGd Rkts Mk5, HEAT", "weight": 91.17}
LAU_131_pod___7_x_2_75_Hydra__UnGd_Rkts_Mk61__Practice = {"clsid": "{1CA5E00B-D545-4ff9-9B53-5970E292F14D}", "name": "LAU-131 pod - 7 x 2.75\" Hydra, UnGd Rkts Mk61, Practice", "weight": 93.27}
LAU_131_pod___7_x_2_75_Hydra__UnGd_Rkts_WTU_1_B__Practice = {"clsid": "{DDCE7D70-5313-4181-8977-F11018681662}", "name": "LAU-131 pod - 7 x 2.75\" Hydra, UnGd Rkts WTU-1/B, Practice", "weight": 102.3}
LAU_138_AIM_9L = {"clsid": "{LAU-138 wtip - AIM-9L}", "name": "LAU-138 AIM-9L", "weight": 85.5}
LAU_138_AIM_9M = {"clsid": "{LAU-138 wtip - AIM-9M}", "name": "LAU-138 AIM-9M", "weight": 86.64}
LAU_3_pod___19_x_2_75_FFAR__UnGd_Rkts_M156__Wht_Phos = {"clsid": "{LAU3_FFAR_WP156}", "name": "LAU-3 pod - 19 x 2.75\" FFAR, UnGd Rkts M156, Wht Phos", "weight": 312.8707256}
LAU_3_pod___19_x_2_75_FFAR__UnGd_Rkts_Mk1__HE = {"clsid": "{LAU3_FFAR_MK1HE}", "name": "LAU-3 pod - 19 x 2.75\" FFAR, UnGd Rkts Mk1, HE", "weight": 285.292332}
LAU_3_pod___19_x_2_75_FFAR__UnGd_Rkts_Mk5__HEAT = {"clsid": "{LAU3_FFAR_MK5HEAT}", "name": "LAU-3 pod - 19 x 2.75\" FFAR, UnGd Rkts Mk5, HEAT", "weight": 286.1541568}
LAU_61R_pod___19_x_2_75_Hydra__UnGd_Rkts_M151__HE = {"clsid": "{LAU_61R}", "name": "LAU-61R pod - 19 x 2.75\" Hydra, UnGd Rkts M151, HE", "weight": 271.5}
LAU_61_pod___19_x_2_75_Hydra__UnGd_Rkts_M151__HE = {"clsid": "{FD90A1DC-9147-49FA-BF56-CB83EF0BD32B}", "name": "LAU-61 pod - 19 x 2.75\" Hydra, UnGd Rkts M151, HE", "weight": 273.4}
LAU_61_pod___19_x_2_75_Hydra__UnGd_Rkts_M156__Wht_Phos = {"clsid": "{3DFB7321-AB0E-11d7-9897-000476191836}", "name": "LAU-61 pod - 19 x 2.75\" Hydra, UnGd Rkts M156, Wht Phos", "weight": 274.92}
LAU_61_pod___19_x_2_75_Hydra__UnGd_Rkts_M282__HEDP = {"clsid": "{LAU_61_M282}", "name": "LAU-61 pod - 19 x 2.75\" Hydra, UnGd Rkts M282, HEDP", "weight": 309.88}
LAU_68_pod___7_x_2_75_FFAR__UnGd_Rkts_M156__Wht_Phos = {"clsid": "{LAU68_FFAR_WP156}", "name": "LAU-68 pod - 7 x 2.75\" FFAR, UnGd Rkts M156, Wht Phos", "weight": 120.1560568}
LAU_68_pod___7_x_2_75_FFAR__UnGd_Rkts_Mk1__HE = {"clsid": "{LAU68_FFAR_MK1HE}", "name": "LAU-68 pod - 7 x 2.75\" FFAR, UnGd Rkts Mk1, HE", "weight": 109.995596}
LAU_68_pod___7_x_2_75_FFAR__UnGd_Rkts_Mk5__HEAT = {"clsid": "{LAU68_FFAR_MK5HEAT}", "name": "LAU-68 pod - 7 x 2.75\" FFAR, UnGd Rkts Mk5, HEAT", "weight": 110.3131104}
LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M151__HE = {"clsid": "{A021F29D-18AB-4d3e-985C-FC9C60E35E9E}", "name": "LAU-68 pod - 7 x 2.75\" Hydra, UnGd Rkts M151, HE", "weight": 113.9}
LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M156__Wht_Phos = {"clsid": "{4F977A2A-CD25-44df-90EF-164BFA2AE72F}", "name": "LAU-68 pod - 7 x 2.75\" Hydra, UnGd Rkts M156, Wht Phos", "weight": 114.46}
LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M257__Para_Illum = {"clsid": "{647C5F26-BDD1-41e6-A371-8DE1E4CC0E94}", "name": "LAU-68 pod - 7 x 2.75\" Hydra, UnGd Rkts M257, Para Illum", "weight": 118.8}
LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M274__Practice_Smk = {"clsid": "{0877B74B-5A00-4e61-BA8A-A56450BA9E27}", "name": "LAU-68 pod - 7 x 2.75\" Hydra, UnGd Rkts M274, Practice Smk", "weight": 113.2}
LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M282__HEDP = {"clsid": "{LAU_68_M282}", "name": "LAU-68 pod - 7 x 2.75\" Hydra, UnGd Rkts M282, HEDP", "weight": 127.34}
LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_Mk1__Practice = {"clsid": "{FC85D2ED-501A-48ce-9863-49D468DDD5FC}", "name": "LAU-68 pod - 7 x 2.75\" Hydra, UnGd Rkts Mk1, Practice", "weight": 104.17}
LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_Mk5__HEAT = {"clsid": "{174C6E6D-0C3D-42ff-BCB3-0853CB371F5C}", "name": "LAU-68 pod - 7 x 2.75\" Hydra, UnGd Rkts Mk5, HEAT", "weight": 102.07}
LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_Mk61__Practice = {"clsid": "{65396399-9F5C-4ec3-A7D2-5A8F4C1D90C4}", "name": "LAU-68 pod - 7 x 2.75\" Hydra, UnGd Rkts Mk61, Practice", "weight": 104.17}
LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_WTU_1_B__Practice = {"clsid": "{1F7136CB-8120-4e77-B97B-945FF01FB67C}", "name": "LAU-68 pod - 7 x 2.75\" Hydra, UnGd Rkts WTU-1/B, Practice", "weight": 113.2}
LAU_7_AIM_9L = {"clsid": "{LAU-7 - AIM-9L}", "name": "LAU-7 AIM-9L", "weight": 100.5}
LAU_7_AIM_9M = {"clsid": "{LAU-7 - AIM-9M}", "name": "LAU-7 AIM-9M", "weight": 101.64}
LAU_7_with_2_x_AIM_9B_Sidewinder_IR_AAM = {"clsid": "{F4-2-AIM9B}", "name": "LAU-7 with 2 x AIM-9B Sidewinder IR AAM", "weight": 178.78}
LAU_7_with_2_x_AIM_9L_Sidewinder_IR_AAM = {"clsid": "{F4-2-AIM9L}", "name": "LAU-7 with 2 x AIM-9L Sidewinder IR AAM", "weight": 201.46}
LAU_7_with_2_x_AIM_9M_Sidewinder_IR_AAM = {"clsid": "{9DDF5297-94B9-42FC-A45E-6E316121CD85}", "name": "LAU-7 with 2 x AIM-9M Sidewinder IR AAM", "weight": 201.46}
LAU_7_with_2_x_AIM_9P5_Sidewinder_IR_AAM = {"clsid": "{F4-2-AIM9P5}", "name": "LAU-7 with 2 x AIM-9P5 Sidewinder IR AAM", "weight": 201}
LAU_7_with_2_x_AIM_9P_Sidewinder_IR_AAM = {"clsid": "{773675AB-7C29-422f-AFD8-32844A7B7F17}", "name": "LAU-7 with 2 x AIM-9P Sidewinder IR AAM", "weight": 202.36}
LAU_7_with_AIM_9B_Sidewinder_IR_AAM = {"clsid": "{GAR-8}", "name": "LAU-7 with AIM-9B Sidewinder IR AAM", "weight": 89.39}
LAU_7_with_AIM_9M_Sidewinder_IR_AAM = {"clsid": "{AIM-9M-ON-ADAPTER}", "name": "LAU-7 with AIM-9M Sidewinder IR AAM", "weight": 100.73}
LAU_7_with_AIM_9P5_Sidewinder_IR_AAM = {"clsid": "{AIM-9P5-ON-ADAPTER}", "name": "LAU-7 with AIM-9P5 Sidewinder IR AAM", "weight": 100.5}
LAU_7_with_AIM_9P_Sidewinder_IR_AAM = {"clsid": "{AIM-9P-ON-ADAPTER}", "name": "LAU-7 with AIM-9P Sidewinder IR AAM", "weight": 101.18}
LAU_7_with_AIM_9X_Sidewinder_IR_AAM = {"clsid": "{AIM-9X-ON-ADAPTER}", "name": "LAU-7 with AIM-9X Sidewinder IR AAM", "weight": 99.46}
LAU_7_with_AN_ASQ_T50_TCTS_Pod___ACMI_Pod = {"clsid": "{LAU-7_AIS_ASQ_T50}", "name": "LAU-7 with AN/ASQ-T50 TCTS Pod - ACMI Pod", "weight": 92.6}
LAU_88_AGM_65D_ONE = {"clsid": "LAU_88_AGM_65D_ONE", "name": "LAU-88 with 1 x AGM-65D - Maverick D (IIR ASM)", "weight": 429}
LAU_88_AGM_65H = {"clsid": "LAU_88_AGM_65H", "name": "LAU-88 with 1 x AGM-65H - Maverick H (CCD Imp ASM)", "weight": 419}
LAU_88_AGM_65H_2_L = {"clsid": "LAU_88_AGM_65H_2_L", "name": "LAU-88 with 2 x AGM-65H - Maverick H (CCD Imp ASM)", "weight": 627}
LAU_88_AGM_65H_2_R = {"clsid": "LAU_88_AGM_65H_2_R", "name": "LAU-88 with 2 x AGM-65H - Maverick H (CCD Imp ASM)", "weight": 627}
LAU_88_AGM_65H_3 = {"clsid": "LAU_88_AGM_65H_3", "name": "LAU-88 with 3 x AGM-65H - Maverick H (CCD Imp ASM)", "weight": 835}
LAU_88_with_2_x_AGM_65D___Maverick_D__IIR_ASM_ = {"clsid": "{E6A6262A-CA08-4B3D-B030-E1A993B98452}", "name": "LAU-88 with 2 x AGM-65D - Maverick D (IIR ASM)", "weight": 647}
LAU_88_with_2_x_AGM_65D___Maverick_D__IIR_ASM__ = {"clsid": "{E6A6262A-CA08-4B3D-B030-E1A993B98453}", "name": "LAU-88 with 2 x AGM-65D - Maverick D (IIR ASM)", "weight": 647}
LAU_88_with_2_x_AGM_65E___Maverick_E__Laser_ASM___Lg_Whd_ = {"clsid": "{2CC29C7A-E863-411C-8A6E-BD6F0E730548}", "name": "LAU-88 with 2 x AGM-65E - Maverick E (Laser ASM - Lg Whd)", "weight": 783}
LAU_88_with_2_x_AGM_65E___Maverick_E__Laser_ASM___Lg_Whd__ = {"clsid": "{2CC29C7A-E863-411C-8A6E-BD6F0E730547}", "name": "LAU-88 with 2 x AGM-65E - Maverick E (Laser ASM - Lg Whd)", "weight": 783}
LAU_88_with_2_x_AGM_65K___Maverick_K__CCD_Imp_ASM_ = {"clsid": "{D7670BC7-881B-4094-906C-73879CF7EB28}", "name": "LAU-88 with 2 x AGM-65K - Maverick K (CCD Imp ASM)", "weight": 805}
LAU_88_with_2_x_AGM_65K___Maverick_K__CCD_Imp_ASM__ = {"clsid": "{D7670BC7-881B-4094-906C-73879CF7EB27}", "name": "LAU-88 with 2 x AGM-65K - Maverick K (CCD Imp ASM)", "weight": 805}
LAU_88_with_3_x_AGM_65D___Maverick_D__IIR_ASM_ = {"clsid": "{DAC53A2F-79CA-42FF-A77A-F5649B601308}", "name": "LAU-88 with 3 x AGM-65D - Maverick D (IIR ASM)", "weight": 865}
LAU_88_with_3_x_AGM_65E___Maverick_E__Laser_ASM___Lg_Whd_ = {"clsid": "{71AAB9B8-81C1-4925-BE50-1EF8E9899271}", "name": "LAU-88 with 3 x AGM-65E - Maverick E (Laser ASM - Lg Whd)", "weight": 1069}
LAU_88_with_3_x_AGM_65K___Maverick_K__CCD_Imp_ASM_ = {"clsid": "{907D835F-E650-4154-BAFD-C656882555C0}", "name": "LAU-88 with 3 x AGM-65K - Maverick K (CCD Imp ASM)", "weight": 1102}
LAU_SNEB68G___8xSNEB68_EAP = {"clsid": "{LAU_SNEB68G}", "name": "LAU_SNEB68G - 8xSNEB68_EAP", "weight": 50.08}
LAU_SNEB68G___8xSNEB68_WP = {"clsid": "{LAU_SNEB68_WP}", "name": "LAU_SNEB68G - 8xSNEB68_WP", "weight": 50.08}
Lantirn_F_16 = {"clsid": "{CAAC1CFD-6745-416B-AFA4-CB57414856D0}", "name": "Lantirn F-16", "weight": 445}
Lantirn_Target_Pod = {"clsid": "{D1744B93-2A8A-4C4D-B004-7A09CD8C8F3F}", "name": "Lantirn Target Pod", "weight": 200}
LR_25___25_x_ARF_8_M3_API = {"clsid": "{LR25_ARF8M3_API}", "name": "LR-25 - 25 x ARF-8/M3 API", "weight": 141}
LR_25___25_x_ARF_8_M3_HEI = {"clsid": "{LR25_ARF8M3_HEI}", "name": "LR-25 - 25 x ARF-8/M3 HEI", "weight": 161}
LR_25___25_x_ARF_8_M3_TP_SM = {"clsid": "{LR25_ARF8M3_TPSM}", "name": "LR-25 - 25 x ARF-8/M3 TP-SM", "weight": 141}
L_081_Fantasmagoria_ELINT_pod = {"clsid": "{0519A264-0AB6-11d6-9193-00A0249B6F00}", "name": "L-081 Fantasmagoria ELINT pod", "weight": 300}
M10_Smoke_Tank___blue = {"clsid": "{US_M10_SMOKE_TANK_BLUE}", "name": "M10 Smoke Tank - blue", "weight": 266.7}
M10_Smoke_Tank___green = {"clsid": "{US_M10_SMOKE_TANK_GREEN}", "name": "M10 Smoke Tank - green", "weight": 266.7}
M10_Smoke_Tank___orange = {"clsid": "{US_M10_SMOKE_TANK_ORANGE}", "name": "M10 Smoke Tank - orange", "weight": 266.7}
M10_Smoke_Tank___red = {"clsid": "{US_M10_SMOKE_TANK_RED}", "name": "M10 Smoke Tank - red", "weight": 266.7}
M10_Smoke_Tank___white = {"clsid": "{US_M10_SMOKE_TANK_WHITE}", "name": "M10 Smoke Tank - white", "weight": 266.7}
M10_Smoke_Tank___yellow = {"clsid": "{US_M10_SMOKE_TANK_YELLOW}", "name": "M10 Smoke Tank - yellow", "weight": 266.7}
M117___750lb_GP_Bomb_LD = {"clsid": "{00F5DAC4-0466-4122-998F-B1A298E34113}", "name": "M117 - 750lb GP Bomb LD", "weight": 340}
M134_L = {"clsid": "M134_L", "name": "M134 - 6 x 7.62mm MiniGun left", "weight": 146.4}
M134_R = {"clsid": "M134_R", "name": "M134 - 6 x 7.62mm MiniGun right", "weight": 146.4}
M134_SIDE_L = {"clsid": "M134_SIDE_L", "name": "M134 - 6 x 7.62mm MiniGun left door", "weight": 270.4}
M134_SIDE_R = {"clsid": "M134_SIDE_R", "name": "M134 - 6 x 7.62mm MiniGun right door", "weight": 270.4}
M2000_Fuel_tank = {"clsid": "{414DA830-B61A-4F9E-B71B-C2F6832E1D7A}", "name": "M2000 Fuel tank", "weight": 1050}
M260_HYDRA = {"clsid": "M260_HYDRA", "name": "M260 pod - 7 x 2.75\" Hydra, UnGd Rkts Mk5, HEAT", "weight": 112}
M260_HYDRA_WP = {"clsid": "M260_HYDRA_WP", "name": "M260 pod - 7 x 2.75\" Hydra, UnGd Rkts M156, Wht Phos", "weight": 112}
M261_MK151 = {"clsid": "M261_MK151", "name": "M261 pod - 19 x 2.75\" Hydra, UnGd Rkts M151, HE", "weight": 234}
M261_MK156 = {"clsid": "M261_MK156", "name": "M261 pod - 19 x 2.75\" Hydra, UnGd Rkts M156, Wht Phos", "weight": 234}
M60_SIDE_L = {"clsid": "M60_SIDE_L", "name": "M60 - 7.62mm MG left door", "weight": 141.4}
M60_SIDE_R = {"clsid": "M60_SIDE_R", "name": "M60 - 7.62mm MG right door", "weight": 141.4}
MAK79_2_MK_20 = {"clsid": "{MAK79_MK20 2L}", "name": "MAK79 2 MK-20", "weight": 464}
MAK79_2_MK_20_ = {"clsid": "{MAK79_MK20 2R}", "name": "MAK79 2 MK-20", "weight": 464}
MAK79_3_BDU_33 = {"clsid": "{MAK79_BDU33 3L}", "name": "MAK79 3 BDU-33", "weight": 63}
MAK79_3_BDU_33_ = {"clsid": "{MAK79_BDU33 3R}", "name": "MAK79 3 BDU-33", "weight": 63}
MAK79_3_BDU_45 = {"clsid": "{MAK79_BDU45 3L}", "name": "MAK79 3 BDU-45", "weight": 726}
MAK79_3_BDU_45B = {"clsid": "{MAK79_BDU45B 3L}", "name": "MAK79 3 BDU-45B", "weight": 726}
MAK79_3_BDU_45B_ = {"clsid": "{MAK79_BDU45B 3R}", "name": "MAK79 3 BDU-45B", "weight": 726}
MAK79_3_BDU_45_ = {"clsid": "{MAK79_BDU45 3R}", "name": "MAK79 3 BDU-45", "weight": 726}
MAK79_3_Mk_81 = {"clsid": "{MAK79_MK81 3L}", "name": "MAK79 3 Mk-81", "weight": 384}
MAK79_3_Mk_81_ = {"clsid": "{MAK79_MK81 3R}", "name": "MAK79 3 Mk-81", "weight": 384}
MAK79_3_Mk_82 = {"clsid": "{MAK79_MK82 3L}", "name": "MAK79 3 Mk-82", "weight": 753}
MAK79_3_Mk_82AIR = {"clsid": "{MAK79_MK82AIR 3L}", "name": "MAK79 3 Mk-82AIR", "weight": 753}
MAK79_3_Mk_82AIR_ = {"clsid": "{MAK79_MK82AIR 3R}", "name": "MAK79 3 Mk-82AIR", "weight": 753}
MAK79_3_Mk_82_ = {"clsid": "{MAK79_MK82 3R}", "name": "MAK79 3 Mk-82", "weight": 753}
MAK79_3_Mk_82_SnakeEye = {"clsid": "{MAK79_MK82SE 3L}", "name": "MAK79 3 Mk-82 SnakeEye", "weight": 753}
MAK79_3_Mk_82_SnakeEye_ = {"clsid": "{MAK79_MK82SE 3R}", "name": "MAK79 3 Mk-82 SnakeEye", "weight": 753}
MAK79_3_Mk_83 = {"clsid": "{MAK79_MK83 3L}", "name": "MAK79 3 Mk-83", "weight": 1371}
MAK79_3_Mk_83_ = {"clsid": "{MAK79_MK83 3R}", "name": "MAK79 3 Mk-83", "weight": 1371}
MAK79_4_BDU_33 = {"clsid": "{MAK79_BDU33 4}", "name": "MAK79 4 BDU-33", "weight": 84}
MAK79_4_BDU_45 = {"clsid": "{MAK79_BDU45 4}", "name": "MAK79 4 BDU-45", "weight": 968}
MAK79_4_BDU_45B = {"clsid": "{MAK79_BDU45B 4}", "name": "MAK79 4 BDU-45B", "weight": 968}
MAK79_4_Mk_81 = {"clsid": "{MAK79_MK81 4}", "name": "MAK79 4 Mk-81", "weight": 512}
MAK79_4_Mk_82 = {"clsid": "{MAK79_MK82 4}", "name": "MAK79 4 Mk-82", "weight": 1004}
MAK79_4_Mk_82AIR = {"clsid": "{MAK79_MK82AIR 4}", "name": "MAK79 4 Mk-82AIR", "weight": 1004}
MAK79_4_Mk_82_SnakeEye = {"clsid": "{MAK79_MK82SE 4}", "name": "MAK79 4 Mk-82 SnakeEye", "weight": 1004}
MAK79_MK_20 = {"clsid": "{MAK79_MK20 1R}", "name": "MAK79 MK-20", "weight": 232}
MAK79_MK_20_ = {"clsid": "{MAK79_MK20 1L}", "name": "MAK79 MK-20", "weight": 232}
MAK79_Mk_83 = {"clsid": "{MAK79_MK83 1R}", "name": "MAK79 Mk-83", "weight": 457}
MAK79_Mk_83_ = {"clsid": "{MAK79_MK83 1L}", "name": "MAK79 Mk-83", "weight": 457}
Matra_Magic_II = {"clsid": "{MMagicII}", "name": "Matra Magic II", "weight": 85}
Matra_Super_530D = {"clsid": "{Matra_S530D}", "name": "Matra Super 530D", "weight": 350}
Matra_Type_155_Rocket_Pod = {"clsid": "{Matra155RocketPod}", "name": "Matra Type 155 Rocket Pod", "weight": 190}
MBD2_67U_with_4_x_FAB_100___100kg_GP_Bombs_LD = {"clsid": "{5A1AC2B4-CA4B-4D09-A1AF-AC52FBC4B60B}", "name": "MBD2-67U with 4 x FAB-100 - 100kg GP Bombs LD", "weight": 465}
MBD2_67U_with_4_x_FAB_100___100kg_GP_Bombs_LD_ = {"clsid": "{29A828E2-C6BB-11d8-9897-000476191836}", "name": "MBD2-67U with 4 x FAB-100 - 100kg GP Bombs LD", "weight": 465}
MBD3_U2T_with_2_x_FAB_1500_M_54___1500kg_GP_Bombs_LD = {"clsid": "{7C5F0F5F-0A0B-46E8-937C-8922303E39A8}", "name": "MBD3-U2T with 2 x FAB-1500 M-54 - 1500kg GP Bombs LD", "weight": 3100}
MBD3_U4T_with_4_x_FAB_250___250kg_GP_Bombs_LD = {"clsid": "{6A367BB4-327F-4A04-8D9E-6D86BDC98E7E}", "name": "MBD3-U4T with 4 x FAB-250 - 250kg GP Bombs LD", "weight": 1060}
MBD3_U4T_with_4_x_RBK_250___42_x_PTAB_2_5M__250kg_CBUs_Medium_HEAT_AP = {"clsid": "{02B81892-7E24-4795-84F9-B8110C641AF0}", "name": "MBD3-U4T with 4 x RBK-250 - 42 x PTAB-2.5M, 250kg CBUs Medium HEAT/AP", "weight": 1126.4}
MBD3_U6_68_with_2_x_FAB_250___250kg_GP_Bombs_LD = {"clsid": "{E659C4BE-2CD8-4472-8C08-3F28ACB61A8A}", "name": "MBD3-U6-68 with 2 x FAB-250 - 250kg GP Bombs LD", "weight": 550}
MBD3_U6_68_with_3_x_FAB_250___250kg_GP_Bombs_LD = {"clsid": "{MBD3_U6_3*FAB-250_fwd}", "name": "MBD3-U6-68 with 3 x FAB-250 - 250kg GP Bombs LD", "weight": 810}
MBD3_U6_68_with_4_x_FAB_250___250kg_GP_Bombs_LD = {"clsid": "{3E35F8C1-052D-11d6-9191-00A0249B6F00}", "name": "MBD3-U6-68 with 4 x FAB-250 - 250kg GP Bombs LD", "weight": 1060}
MBD3_U6_68_with_4_x_FAB_250___250kg_GP_Bombs_LD_ = {"clsid": "{MBD3_U6_4*FAB-250_fwd}", "name": "MBD3-U6-68 with 4 x FAB-250 - 250kg GP Bombs LD", "weight": 1060}
MBD3_U6_68_with_5_x_FAB_250___250kg_GP_Bombs_LD = {"clsid": "{MBD3_U6_5*FAB-250}", "name": "MBD3-U6-68 with 5 x FAB-250 - 250kg GP Bombs LD", "weight": 1310}
MBD3_U6_68_with_6_x_BetAB_500ShP___500kg_Concrete_Piercing_HD_w_booster_Bombs = {"clsid": "{E96E1EDD-FF3F-47CF-A959-576C3B682955}", "name": "MBD3-U6-68 with 6 x BetAB-500ShP - 500kg Concrete Piercing HD w booster Bombs", "weight": 3060}
MBD3_U6_68_with_6_x_BetAB_500___500kg_Concrete_Piercing_Bombs_LD = {"clsid": "{436C6FB9-8BF2-46B6-9DC4-F55ABF3CD1EC}", "name": "MBD3-U6-68 with 6 x BetAB-500 - 500kg Concrete Piercing Bombs LD", "weight": 3060}
MBD3_U6_68_with_6_x_FAB_100___100kg_GP_Bombs_LD = {"clsid": "{F99BEC1A-869D-4AC7-9730-FBA0E3B1F5FC}", "name": "MBD3-U6-68 with 6 x FAB-100 - 100kg GP Bombs LD", "weight": 660}
MBD3_U6_68_with_6_x_FAB_250___250kg_GP_Bombs_LD = {"clsid": "{53BE25A4-C86C-4571-9BC0-47D668349595}", "name": "MBD3-U6-68 with 6 x FAB-250 - 250kg GP Bombs LD", "weight": 1560}
MBD3_U6_68_with_6_x_FAB_500_M_62___500kg_GP_Bombs_LD = {"clsid": "{FA673F4C-D9E4-4993-AA7A-019A92F3C005}", "name": "MBD3-U6-68 with 6 x FAB-500 M-62 - 500kg GP Bombs LD", "weight": 3060}
MBD3_U6_68_with_6_x_FAB_500_M_62___500kg_GP_Bombs_LD_ = {"clsid": "{0D945D78-542C-4E9B-9A17-9B5008CC8D39}", "name": "MBD3-U6-68 with 6 x FAB-500 M-62 - 500kg GP Bombs LD", "weight": 3060}
MBD3_U6_68_with_6_x_RBK_500_255___30_x_PTAB_10_5__500kg_CBUs_Heavy_HEAT_AP = {"clsid": "{F503C276-FE15-4C54-B310-17B50B735A84}", "name": "MBD3-U6-68 with 6 x RBK-500-255 - 30 x PTAB-10-5, 500kg CBUs Heavy HEAT/AP", "weight": 3060}
MBD3_U6_68_with_6_x_RBK_500_255___30_x_PTAB_10_5__500kg_CBUs_Heavy_HEAT_AP_ = {"clsid": "{4D459A95-59C0-462F-8A57-34E80697F38B}", "name": "MBD3-U6-68 with 6 x RBK-500-255 - 30 x PTAB-10-5, 500kg CBUs Heavy HEAT/AP", "weight": 3060}
MBD3_U9M_with_9_x_FAB_100___100kg_GP_Bombs_LD = {"clsid": "{5F1C54C0-0ABD-4868-A883-B52FF9FCB422}", "name": "MBD3-U9M with 9 x FAB-100 - 100kg GP Bombs LD", "weight": 960}
MBD3_U9M_with_9_x_FAB_250___250kg_GP_Bombs_LD = {"clsid": "{E1AAE713-5FC3-4CAA-9FF5-3FDCFB899E33}", "name": "MBD3-U9M with 9 x FAB-250 - 250kg GP Bombs LD", "weight": 2310}
MBD3_U9M_with_9_x_RBK_250___42_x_PTAB_2_5M__250kg_CBUs_Medium_HEAT_AP = {"clsid": "{BF83E8FD-E7A2-40D2-9608-42E13AFE2193}", "name": "MBD3-U9M with 9 x RBK-250 - 42 x PTAB-2.5M, 250kg CBUs Medium HEAT/AP", "weight": 2535}
MBD3_with_3_x_BetAB_500___500kg_Concrete_Piercing_Bombs_LD = {"clsid": "{005E70F5-C3EA-4E95-A148-C1044C42D845}", "name": "MBD3 with 3 x BetAB-500 - 500kg Concrete Piercing Bombs LD", "weight": 1566}
MBD3_with_3_x_FAB_100___100kg_GP_Bombs_LD = {"clsid": "{CEE04106-B9AA-46B4-9CD1-CD3FDCF0CE78}", "name": "MBD3 with 3 x FAB-100 - 100kg GP Bombs LD", "weight": 360}
MBD3_with_3_x_FAB_250___250kg_GP_Bombs_LD = {"clsid": "{D109EE9C-A1B7-4F1C-8D87-631C293A1D26}", "name": "MBD3 with 3 x FAB-250 - 250kg GP Bombs LD", "weight": 810}
MBD3_with_3_x_FAB_500_M_62___500kg_GP_Bombs_LD = {"clsid": "{A1E85991-B58E-4E92-AE91-DED6DC85B2E7}", "name": "MBD3 with 3 x FAB-500 M-62 - 500kg GP Bombs LD", "weight": 1560}
MBD3_with_3_x_RBK_250___42_x_PTAB_2_5M__250kg_CBUs_Medium_HEAT_AP = {"clsid": "{EAD9B2C1-F3BA-4A7B-A2A5-84E2AF8A1975}", "name": "MBD3 with 3 x RBK-250 - 42 x PTAB 2.5M, 250kg CBUs Medium HEAT/AP", "weight": 885}
MBD3_with_3_x_RBK_500_255___30_x_PTAB_10_5__500kg_CBUs_Heavy_HEAT_AP = {"clsid": "{919CE839-9390-4629-BAF7-229DE19B8523}", "name": "MBD3 with 3 x RBK-500-255 - 30 x PTAB-10-5, 500kg CBUs Heavy HEAT/AP", "weight": 1560}
MER12_with_12_x_M117___750lb_GP_Bombs_LD = {"clsid": "{574EDEDF-20DE-4942-B2A2-B2EDFD621562}", "name": "MER12 with 12 x M117 - 750lb GP Bombs LD", "weight": 4250}
MER12_with_12_x_Mk_82___500lb_GP_Bombs_LD = {"clsid": "{585D626E-7F42-4073-AB70-41E728C333E2}", "name": "MER12 with 12 x Mk-82 - 500lb GP Bombs LD", "weight": 3000}
MER2_with_2_x_Mk_20_Rockeye___490lbs_CBUs__247_x_HEAT_Bomblets = {"clsid": "{0B9ABA77-93B8-45FC-9C63-82AFB2CB50A4}", "name": "MER2 with 2 x Mk-20 Rockeye - 490lbs CBUs, 247 x HEAT Bomblets", "weight": 553}
MER2_with_2_x_Mk_82___500lb_GP_Bombs_LD = {"clsid": "{D5D51E24-348C-4702-96AF-97A714E72697}", "name": "MER2 with 2 x Mk-82 - 500lb GP Bombs LD", "weight": 565}
MER2_with_2_x_Mk_83___1000lb_GP_Bombs_LD = {"clsid": "{18617C93-78E7-4359-A8CE-D754103EDF63}", "name": "MER2 with 2 x Mk-83 - 1000lb GP Bombs LD", "weight": 1017}
MER3_with_3_x_M117___750lb_GP_Bombs_LD = {"clsid": "{82F90BEC-0E2E-4CE5-A66E-1E4ADA2B5D1E}", "name": "MER3 with 3 x M117 - 750lb GP Bombs LD", "weight": 1060}
MER6_with_6_x_BLU_107___440lb_Anti_Runway_Penetrator_Bombs = {"clsid": "{752B9782-F962-11d5-9190-00A0249B6F00}", "name": "MER6 with 6 x BLU-107 - 440lb Anti-Runway Penetrator Bombs", "weight": 1800}
MER6_with_6_x_M117___750lb_GP_Bombs_LD = {"clsid": "{6CDB6B36-7165-47D0-889F-6625FB333561}", "name": "MER6 with 6 x M117 - 750lb GP Bombs LD", "weight": 2100}
MER6_with_6_x_Mk_20_Rockeye___490lbs_CBUs__247_x_HEAT_Bomblets = {"clsid": "{3C7CD675-7D39-41C5-8735-0F4F537818A8}", "name": "MER6 with 6 x Mk-20 Rockeye - 490lbs CBUs, 247 x HEAT Bomblets", "weight": 1392}
MER6_with_6_x_Mk_82___500lb_GP_Bombs_LD = {"clsid": "{1C97B4A0-AA3B-43A8-8EE7-D11071457185}", "name": "MER6 with 6 x Mk-82 - 500lb GP Bombs LD", "weight": 1506}
Mercury_LLTV_Pod = {"clsid": "{B1EF6B0E-3D91-4047-A7A5-A99E7D8B4A8B}", "name": "Mercury LLTV Pod", "weight": 230}
MICA_IR = {"clsid": "{0DA03783-61E4-40B2-8FAE-6AEE0A5C5AAE}", "name": "MICA IR", "weight": 110}
MICA_RF = {"clsid": "{6D778860-7BB8-4ACB-9E95-BA772C6BBC2C}", "name": "MICA RF", "weight": 110}
MIM_104 = {"clsid": "MIM_104", "name": "M901 PATRIOT", "weight": None}
MIM_72 = {"clsid": "MIM_72", "name": "M48 CHAPARRAL", "weight": None}
Mistral = {"clsid": "{MBDA_MistralG}", "name": "Mistral", "weight": 27.2}
Mistral_ = {"clsid": "{MBDA_MistralD}", "name": "Mistral", "weight": 27.2}
MK_82_28 = {"clsid": "MK_82*28", "name": "28 x Mk-82 - 500lb GP Bombs LD", "weight": 6748}
Mk_20 = {"clsid": "{BRU-32 MK-20}", "name": "Mk-20", "weight": 279.38}
Mk_20_18 = {"clsid": "{ACADB374-6D6C-45A0-BA7C-B22B2E108AE4}", "name": "Mk 20*18", "weight": 3996}
Mk_20_Rockeye___490lbs_CBU__247_x_HEAT_Bomblets = {"clsid": "{ADD3FAE1-EBF6-4EF9-8EFC-B36B5DDF1E6B}", "name": "Mk-20 Rockeye - 490lbs CBU, 247 x HEAT Bomblets", "weight": 222}
Mk_81___250lb_GP_Bomb_LD = {"clsid": "{90321C8E-7ED1-47D4-A160-E074D5ABD902}", "name": "Mk-81 - 250lb GP Bomb LD", "weight": 118}
Mk_82 = {"clsid": "{BRU-32 MK-82}", "name": "Mk-82", "weight": 298.38}
Mk_82AIR = {"clsid": "{BRU-32 MK-82AIR}", "name": "Mk-82AIR", "weight": 298.38}
Mk_82Y___500lb_GP_Chute_Retarded_HD = {"clsid": "{Mk_82Y}", "name": "Mk-82Y - 500lb GP Chute Retarded HD", "weight": 232}
Mk_82_AIR_Ballute___500lb_GP_Bomb_HD = {"clsid": "{Mk82AIR}", "name": "Mk-82 AIR Ballute - 500lb GP Bomb HD", "weight": 242}
Mk_82_SnakeEye = {"clsid": "{BRU-32 MK-82SE}", "name": "Mk-82 SnakeEye", "weight": 298.38}
Mk_82_Snakeye___500lb_GP_Bomb_HD = {"clsid": "{Mk82SNAKEYE}", "name": "Mk-82 Snakeye - 500lb GP Bomb HD", "weight": 249.5}
Mk_82___500lb_GP_Bomb_LD = {"clsid": "{BCE4E030-38E9-423E-98ED-24BE3DA87C32}", "name": "Mk-82 - 500lb GP Bomb LD", "weight": 228}
Mk_83 = {"clsid": "{BRU-32 MK-83}", "name": "Mk-83", "weight": 504.38}
Mk_83CT = {"clsid": "{Mk_83CT}", "name": "Mk-83CT", "weight": 454}
Mk_83_ = {"clsid": "{BRU42_MK83 RS}", "name": "Mk-83", "weight": 575}
Mk_83__ = {"clsid": "{BRU3242_MK83 RS}", "name": "Mk-83", "weight": 632.38}
Mk_83___ = {"clsid": "{PHXBRU3242_MK83 RS}", "name": "Mk-83", "weight": 632.38}
Mk_83___1000lb_GP_Bomb_LD = {"clsid": "{7A44FF09-527C-4B7E-B42B-3F111CFE50FB}", "name": "Mk-83 - 1000lb GP Bomb LD", "weight": 454}
Mk_83____ = {"clsid": "{BRU42_MK83 LS}", "name": "Mk-83", "weight": 575}
Mk_83_____ = {"clsid": "{BRU3242_MK83 LS}", "name": "Mk-83", "weight": 632.38}
Mk_83______ = {"clsid": "{PHXBRU3242_MK83 LS}", "name": "Mk-83", "weight": 632.38}
Mk_84 = {"clsid": "{BRU-32 MK-84}", "name": "Mk-84", "weight": 951.38}
Mk_84_18 = {"clsid": "{F092B80C-BB54-477E-9408-66DEEF740008}", "name": "Mk 84*18", "weight": 16092}
Mk_84_28 = {"clsid": "{D3ABF208-FA56-4D56-BB31-E0D931D57AE3}", "name": "Mk 84*28", "weight": 25032}
Mk_84___2000lb_GP_Bomb_LD = {"clsid": "{AB8B8299-F1CC-4359-89B5-2172E0CF4A5A}", "name": "Mk-84 - 2000lb GP Bomb LD", "weight": 894}
MPS_410 = {"clsid": "{44EE8698-89F9-48EE-AF36-5FD31896A82D}", "name": "MPS-410", "weight": 150}
MPS_410_ = {"clsid": "{44EE8698-89F9-48EE-AF36-5FD31896A82C}", "name": "MPS-410", "weight": 150}
MXU_648_TP = {"clsid": "MXU-648-TP", "name": "MXU-648 Travel Pod", "weight": 300}
ODAB_500PM___525_kg__bomb__parachute__simulated_aerosol = {"clsid": "{ODAB-500PM}", "name": "ODAB-500PM - 525 kg, bomb, parachute, simulated aerosol", "weight": 520}
OFAB_100_120_TU_x_4 = {"clsid": "{OFAB-100-120-TU}", "name": "OFAB-100-120-TU x 4", "weight": 557}
OFAB_100_Jupiter___100kg_GP_Bomb_LD = {"clsid": "{OFAB_100_Jupiter}", "name": "OFAB-100 Jupiter - 100kg GP Bomb LD", "weight": 121}
ORO_57K___S_5M1_HE_FRAG_FFAR_x_8 = {"clsid": "{ORO57K_S5M1_HEFRAG}", "name": "ORO-57K - S-5M1 HE-FRAG FFAR x 8", "weight": 63.88}
ORO_57K___S_5MO_HE_FRAG_FFAR_x_8 = {"clsid": "{ORO57K_S5MO_HEFRAG}", "name": "ORO-57K - S-5MO HE-FRAG FFAR x 8", "weight": 63.88}
ORO_57K___S_5M_x_8 = {"clsid": "{ORO57K_S5M_HEFRAG}", "name": "ORO-57K - S-5M x 8", "weight": 64.92}
oh_58_brauning = {"clsid": "oh-58-brauning", "name": "OH-58D Brauning", "weight": 290}
Pavetack_F_111 = {"clsid": "{199D6D51-1764-497E-9AE5-7D07C8D4D87E}", "name": "Pavetack F-111", "weight": 200}
PKT_7_62 = {"clsid": "PKT_7_62", "name": "PKT 7.62mm MMG", "weight": 90}
PK_3___7_62mm_GPMG = {"clsid": "{PK-3}", "name": "PK-3 - 7.62mm GPMG", "weight": 218}
PTB300_MIG15 = {"clsid": "PTB300_MIG15", "name": "Fuel Tank 300 liters", "weight": 271}
PTB400_MIG15 = {"clsid": "PTB400_MIG15", "name": "Fuel Tank 400 liters", "weight": 364}
PTB400_MIG19 = {"clsid": "PTB400_MIG19", "name": "Fuel Tank 400 liters", "weight": 364}
PTB600_MIG15 = {"clsid": "PTB600_MIG15", "name": "Fuel Tank 600 liters", "weight": 531}
PTB760_MIG19 = {"clsid": "PTB760_MIG19", "name": "Fuel Tank 760 liters", "weight": 663.8}
P_50T___50kg_Practice_Bomb_LD = {"clsid": "{P-50T}", "name": "P-50T - 50kg Practice Bomb LD", "weight": 50}
RBK_250_275___150_x_AO_1SCh__250kg_CBU_HE_Frag = {"clsid": "{RBK_250_275_AO_1SCH}", "name": "RBK-250-275 - 150 x AO-1SCh, 250kg CBU HE/Frag", "weight": 244.6}
RBK_250___42_x_PTAB_2_5M__250kg_CBU_Medium_HEAT_AP = {"clsid": "{4203753F-8198-4E85-9924-6F8FF679F9FF}", "name": "RBK-250 - 42 x PTAB-2.5M, 250kg CBU Medium HEAT/AP", "weight": 244.6}
RBK_500U___126_x_OAB_2_5RT__500kg_CBU_HE_Frag = {"clsid": "{RBK_500U_OAB_2_5RT}", "name": "RBK-500U - 126 x OAB-2.5RT, 500kg CBU HE/Frag", "weight": 427}
RBK_500_255___30_x_PTAB_10_5__500kg_CBU_Heavy_HEAT_AP = {"clsid": "{D5435F26-F120-4FA3-9867-34ACE562EF1B}", "name": "RBK-500-255 - 30 x PTAB-10-5, 500kg CBU Heavy HEAT/AP", "weight": 253}
RBK_500___268_x_PTAB_1M__500kg_CBU_Light_HEAT_AP = {"clsid": "{7AEC222D-C523-425e-B714-719C0D1EB14D}", "name": "RBK-500 - 268 x PTAB-1M, 500kg CBU Light HEAT/AP", "weight": 427}
RB_04E__for_A_I___with_launcher = {"clsid": "{Rb04AI}", "name": "RB-04E (for A.I.) with launcher", "weight": 661}
RB_15F__for_A_I___with_launcher = {"clsid": "{Rb15AI}", "name": "RB-15F (for A.I.) with launcher", "weight": 610}
Rb_04E_Anti_ship_Missile = {"clsid": "{Rb04}", "name": "Rb-04E Anti-ship Missile", "weight": 661}
Rb_05A_MCLOS_ASM_AShM_AAM = {"clsid": "{Robot05}", "name": "Rb-05A MCLOS ASM/AShM/AAM", "weight": 341}
Rb_15F_Programmable_Anti_ship_Missile = {"clsid": "{Rb15}", "name": "Rb-15F Programmable Anti-ship Missile", "weight": 610}
Rb_24J__AIM_9P__Sidewinder_IR_AAM = {"clsid": "{Robot24J}", "name": "Rb-24J (AIM-9P) Sidewinder IR AAM", "weight": 140}
Rb_24__AIM_9B__Sidewinder_IR_AAM = {"clsid": "{Robot24}", "name": "Rb-24 (AIM-9B) Sidewinder IR AAM", "weight": 132}
Rb_74__AIM_9L__Sidewinder_IR_AAM = {"clsid": "{Robot74}", "name": "Rb-74 (AIM-9L) Sidewinder IR AAM", "weight": 144}
Rb_75A__AGM_65A_Maverick___TV_ASM_ = {"clsid": "{RB75}", "name": "Rb-75A (AGM-65A Maverick) (TV ASM)", "weight": 269.5}
Rb_75B__AGM_65B_Maverick___TV_ASM_ = {"clsid": "{RB75B}", "name": "Rb-75B (AGM-65B Maverick) (TV ASM)", "weight": 269.5}
Rb_75T__AGM_65A_Maverick___TV_ASM_Lg_HE_Whd_ = {"clsid": "{RB75T}", "name": "Rb-75T (AGM-65A Maverick) (TV ASM Lg HE Whd)", "weight": 354}
REFLEX_9M119 = {"clsid": "REFLEX_9M119", "name": "AT-11 SNIPER (Reflex)", "weight": None}
RKL609_ECM_Pod__Left_ = {"clsid": "{RKL609_L}", "name": "RKL609 ECM Pod (Left)", "weight": 150}
RKL609_ECM_Pod__Right_ = {"clsid": "{RKL609_R}", "name": "RKL609 ECM Pod (Right)", "weight": 150}
RN_24___470kg__nuclear_bomb__free_fall = {"clsid": "{RN-24}", "name": "RN-24 - 470kg, nuclear bomb, free fall", "weight": 470}
RN_28___260_kg__nuclear_bomb__free_fall = {"clsid": "{RN-28}", "name": "RN-28 - 260 kg, nuclear bomb, free fall", "weight": 260}
ROLAND = {"clsid": "ROLAND", "name": "ROLAND", "weight": None}
RPL_522_1300_liters_Fuel_Tank = {"clsid": "{M2KC_RPL_522}", "name": "RPL 522 1300 liters Fuel Tank", "weight": 1170}
RPL_522_1300_liters_Fuel_Tank__Empty_ = {"clsid": "{M2KC_RPL_522_EMPTY}", "name": "RPL 522 1300 liters Fuel Tank (Empty)", "weight": 180}
RPL_541_2000_liters_Fuel_Tank_ = {"clsid": "{M2KC_02_RPL541}", "name": "RPL 541 2000 liters Fuel Tank ", "weight": 1837}
RPL_541_2000_liters_Fuel_Tank__ = {"clsid": "{M2KC_08_RPL541}", "name": "RPL 541 2000 liters Fuel Tank ", "weight": 1837}
RPL_541_2000_liters_Fuel_Tank__Empty_ = {"clsid": "{M2KC_02_RPL541_EMPTY}", "name": "RPL 541 2000 liters Fuel Tank (Empty)", "weight": 257}
RPL_541_2000_liters_Fuel_Tank__Empty__ = {"clsid": "{M2KC_08_RPL541_EMPTY}", "name": "RPL 541 2000 liters Fuel Tank (Empty)", "weight": 257}
RP_3_25lb_AP_Mk_I = {"clsid": "{British_AP_25LBNo1_3INCHNo1}", "name": "RP-3 25lb AP Mk.I", "weight": 22}
RP_3_60lb_F_No1_Mk_I = {"clsid": "{British_HE_60LBFNo1_3INCHNo1}", "name": "RP-3 60lb F No1 Mk.I", "weight": 31.6}
RP_3_60lb_SAP_No2_Mk_I = {"clsid": "{British_HE_60LBSAPNo2_3INCHNo1}", "name": "RP-3 60lb SAP No2 Mk.I", "weight": 38.1}
RS2US___AAM__beam_rider = {"clsid": "{RS-2US}", "name": "RS2US - AAM, beam-rider", "weight": 105.2}
R_13M1___AAM__IR_guided = {"clsid": "{R-13M1}", "name": "R-13M1 - AAM, IR guided", "weight": 122.4}
R_13M___AAM__IR_guided = {"clsid": "{R-13M}", "name": "R-13M - AAM, IR guided", "weight": 119.7}
R_24R__AA_7_Apex_SA____Semi_Act_Rdr = {"clsid": "{CCF898C9-5BC7-49A4-9D1E-C3ED3D5166A1}", "name": "R-24R (AA-7 Apex SA) - Semi-Act Rdr", "weight": 215}
R_24T__AA_7_Apex_IR____Infra_Red = {"clsid": "{6980735A-44CC-4BB9-A1B5-591532F1DC69}", "name": "R-24T (AA-7 Apex IR) - Infra Red", "weight": 215}
R_27ER__AA_10_Alamo_C____Semi_Act_Extended_Range = {"clsid": "{E8069896-8435-4B90-95C0-01A03AE6E400}", "name": "R-27ER (AA-10 Alamo C) - Semi-Act Extended Range", "weight": 350}
R_27ET__AA_10_Alamo_D____IR_Extended_Range = {"clsid": "{B79C379A-9E87-4E50-A1EE-7F7E29C2E87A}", "name": "R-27ET (AA-10 Alamo D) - IR Extended Range", "weight": 343}
R_27R__AA_10_Alamo_A____Semi_Act_Rdr = {"clsid": "{9B25D316-0434-4954-868F-D51DB1A38DF0}", "name": "R-27R (AA-10 Alamo A) - Semi-Act Rdr", "weight": 253}
R_27T__AA_10_Alamo_B____Infra_Red = {"clsid": "{88DAC840-9F75-4531-8689-B46E64E42E53}", "name": "R-27T (AA-10 Alamo B) - Infra Red", "weight": 254}
R_33__AA_9_Amos____Semi_Act_Rdr = {"clsid": "{F1243568-8EF0-49D4-9CB5-4DA90D92BC1D}", "name": "R-33 (AA-9 Amos) - Semi-Act Rdr", "weight": 490}
R_3R___AAM__radar_guided = {"clsid": "{R-3R}", "name": "R-3R - AAM, radar guided", "weight": 111.5}
R_3S___AAM__IR_guided = {"clsid": "{R-3S}", "name": "R-3S - AAM, IR guided", "weight": 103.3}
R_40R__AA_6_Acrid____Semi_Act_Rdr = {"clsid": "{4EDBA993-2E34-444C-95FB-549300BF7CAF}", "name": "R-40R (AA-6 Acrid) - Semi-Act Rdr", "weight": 475}
R_40T__AA_6_Acrid____Infra_Red = {"clsid": "{5F26DBC2-FB43-4153-92DE-6BBCE26CB0FF}", "name": "R-40T (AA-6 Acrid) - Infra Red", "weight": 475}
R_550_Magic_2 = {"clsid": "{FC23864E-3B80-48E3-9C03-4DA8B1D7497B}", "name": "R.550 Magic 2", "weight": 89}
R_55___AAM__IR_guided = {"clsid": "{R-55}", "name": "R-55 - AAM, IR guided", "weight": 113}
R_60 = {"clsid": "{R-60}", "name": "R-60", "weight": 58.5}
R_60M = {"clsid": "{R-60M}", "name": "R-60M", "weight": 58.5}
R_60M_x_2 = {"clsid": "{R-60M 2L}", "name": "R-60M x 2", "weight": 122}
R_60M_x_2_ = {"clsid": "{R-60M 2R}", "name": "R-60M x 2", "weight": 122}
R_60M__AA_8_Aphid____Infra_Red = {"clsid": "{682A481F-0CB5-4693-A382-D00DD4A156D7}", "name": "R-60M (AA-8 Aphid) - Infra Red", "weight": 44}
R_60_x_2 = {"clsid": "{R-60 2L}", "name": "R-60 x 2", "weight": 122}
R_60_x_2_ = {"clsid": "{R-60 2R}", "name": "R-60 x 2", "weight": 122}
R_73__AA_11_Archer____Infra_Red = {"clsid": "{FBC29BFE-3D24-4C64-B81D-941239D12249}", "name": "R-73 (AA-11 Archer) - Infra Red", "weight": 110}
R_73__AA_11_Archer____Infra_Red_ = {"clsid": "{CBC29BFE-3D24-4C64-B81D-941239D12249}", "name": "R-73 (AA-11 Archer) - Infra Red", "weight": 110}
R_77__AA_12_Adder____Active_Rdr = {"clsid": "{B4C01D60-A8A3-4237-BD72-CA7655BC0FE9}", "name": "R-77 (AA-12 Adder) - Active Rdr", "weight": 175}
R_77__AA_12_Adder____Active_Rdr_ = {"clsid": "{B4C01D60-A8A3-4237-BD72-CA7655BC0FEC}", "name": "R-77 (AA-12 Adder) - Active Rdr", "weight": 250}
SAB_100___100kg_flare_illumination_Bomb = {"clsid": "{0511E528-EA28-4caf-A212-00D1408DF10A}", "name": "SAB-100 - 100kg flare/illumination Bomb", "weight": 100}
Sand_Filter = {"clsid": "{FAS}", "name": "Sand Filter", "weight": 15}
SC_250_Type_1_L2___250kg_GP_Bomb_LD = {"clsid": "{SC_250_T1_L2}", "name": "SC 250 Type 1 L2 - 250kg GP Bomb LD", "weight": 250}
SC_250_Type_3_J___250kg_GP_Bomb_LD = {"clsid": "{Schloss500XIIC1_SC_250_T3_J}", "name": "SC 250 Type 3 J - 250kg GP Bomb LD", "weight": 270}
SC_500_L2___500kg_GP_Bomb_LD = {"clsid": "{SC_500_L2}", "name": "SC 500 L2 - 500kg GP Bomb LD", "weight": 500}
SC_501_SC250 = {"clsid": "SC_501_SC250", "name": "SC 250 Type 3 J - 250kg GP Bomb LD", "weight": 250}
SC_501_SC500 = {"clsid": "SC_501_SC500", "name": "SC 500 J - 500kg GP Bomb LD", "weight": 500}
SC_50___50kg_GP_Bomb_LD = {"clsid": "{SC_50}", "name": "SC 50 - 50kg GP Bomb LD", "weight": 50}
SD_250_Stg___250kg_GP_Bomb_LD = {"clsid": "{SD_250_Stg}", "name": "SD 250 Stg - 250kg GP Bomb LD", "weight": 250}
SD_500_A___500kg_GP_Bomb_LD = {"clsid": "{SD_500_A}", "name": "SD 500 A - 500kg GP Bomb LD", "weight": 500}
SEASPARROW = {"clsid": "SEASPARROW", "name": "SEASPARROW", "weight": None}
Sea_Eagle___ASM = {"clsid": "{1461CD18-429A-42A9-A21F-4C621ECD4573}", "name": "Sea Eagle - ASM", "weight": 600}
Shpil_2_Laser_Recon__Intel_Pod = {"clsid": "{0519A263-0AB6-11d6-9193-00A0249B6F00}", "name": "Shpil-2 Laser Recon & Intel Pod", "weight": 200}
Sky_Shadow_ECM_Pod = {"clsid": "{8C3F26A2-FA0F-11d5-9190-00A0249B6F00}", "name": "Sky-Shadow ECM Pod", "weight": 200}
SM2 = {"clsid": "SM2", "name": "SM2", "weight": None}
Smokewinder___blue = {"clsid": "{A4BCC903-06C8-47bb-9937-A30FEDB4E743}", "name": "Smokewinder - blue", "weight": 200}
Smokewinder___green = {"clsid": "{A4BCC903-06C8-47bb-9937-A30FEDB4E742}", "name": "Smokewinder - green", "weight": 200}
Smokewinder___orange = {"clsid": "{A4BCC903-06C8-47bb-9937-A30FEDB4E746}", "name": "Smokewinder - orange", "weight": 200}
Smokewinder___red = {"clsid": "{A4BCC903-06C8-47bb-9937-A30FEDB4E741}", "name": "Smokewinder - red", "weight": 200}
Smokewinder___white = {"clsid": "{A4BCC903-06C8-47bb-9937-A30FEDB4E744}", "name": "Smokewinder - white", "weight": 200}
Smokewinder___yellow = {"clsid": "{A4BCC903-06C8-47bb-9937-A30FEDB4E745}", "name": "Smokewinder - yellow", "weight": 200}
Smoke_for_Christen_Eagle_II__white = {"clsid": "{CE2_SMOKE_WHITE}", "name": "Smoke for Christen Eagle II, white", "weight": 7}
Smoke_Generator___blue = {"clsid": "{D3F65166-1AB8-490f-AF2F-2FB6E22568B3}", "name": "Smoke Generator - blue", "weight": 220}
Smoke_Generator___blue_ = {"clsid": "{INV-SMOKE-BLUE}", "name": "Smoke Generator - blue", "weight": 0}
Smoke_Generator___green = {"clsid": "{D3F65166-1AB8-490f-AF2F-2FB6E22568B2}", "name": "Smoke Generator - green", "weight": 220}
Smoke_Generator___green_ = {"clsid": "{INV-SMOKE-GREEN}", "name": "Smoke Generator - green", "weight": 0}
Smoke_Generator___orange = {"clsid": "{D3F65166-1AB8-490f-AF2F-2FB6E22568B6}", "name": "Smoke Generator - orange", "weight": 220}
Smoke_Generator___orange_ = {"clsid": "{INV-SMOKE-ORANGE}", "name": "Smoke Generator - orange", "weight": 0}
Smoke_Generator___red = {"clsid": "{D3F65166-1AB8-490f-AF2F-2FB6E22568B1}", "name": "Smoke Generator - red", "weight": 220}
Smoke_Generator___red_ = {"clsid": "{INV-SMOKE-RED}", "name": "Smoke Generator - red", "weight": 0}
Smoke_Generator___white = {"clsid": "{D3F65166-1AB8-490f-AF2F-2FB6E22568B4}", "name": "Smoke Generator - white", "weight": 220}
Smoke_Generator___white_ = {"clsid": "{INV-SMOKE-WHITE}", "name": "Smoke Generator - white", "weight": 0}
Smoke_Generator___yellow = {"clsid": "{D3F65166-1AB8-490f-AF2F-2FB6E22568B5}", "name": "Smoke Generator - yellow", "weight": 220}
Smoke_Generator___yellow_ = {"clsid": "{INV-SMOKE-YELLOW}", "name": "Smoke Generator - yellow", "weight": 0}
Smoke_System_red_colorant = {"clsid": "{SMOKE-RED-AVIOJET}", "name": "Smoke System red colorant", "weight": 32.6}
Smoke_System_yellow_colorant = {"clsid": "{SMOKE-YELLOW-AVIOJET}", "name": "Smoke System yellow colorant", "weight": 32.6}
Smoke_System__White_Smoke_ = {"clsid": "{SMOKE-SYSTEM-AVIOJET}", "name": "Smoke System (White Smoke)", "weight": 1}
Smoke___red___21__t = {"clsid": "{MIG21_SMOKE_RED}", "name": "Smoke - red - 21 /t", "weight": 30}
Smoke___white___21 = {"clsid": "{SMOKE_WHITE}", "name": "Smoke - white - 21", "weight": 30}
Smoke___white___21_ = {"clsid": "{MIG21_SMOKE_WHITE}", "name": "Smoke - white - 21", "weight": 30}
SPITFIRE_45GAL_SLIPPER_TANK = {"clsid": "SPITFIRE_45GAL_SLIPPER_TANK", "name": "45 gal. Slipper Tank", "weight": 138.647}
SPITFIRE_45GAL_TORPEDO_TANK = {"clsid": "SPITFIRE_45GAL_TORPEDO_TANK", "name": "45 gal. Torpedo Tank", "weight": 144.647}
SPPU_22_1___2_x_23mm__GSh_23L_Autocannon_Pod = {"clsid": "{E92CBFE5-C153-11d8-9897-000476191836}", "name": "SPPU-22-1 - 2 x 23mm GSh-23L Autocannon Pod", "weight": 290}
SPRD_99_takeoff_rocket = {"clsid": "{SPRD}", "name": "SPRD-99 takeoff rocket", "weight": 500}
SPS_141_100__21____jamming_and_countermeasures_pod = {"clsid": "{SPS-141-100}", "name": "SPS-141-100 (21) - jamming and countermeasures pod", "weight": 150}
SPS_141___ECM_Jamming_Pod = {"clsid": "{F75187EF-1D9E-4DA9-84B4-1A1A14A3973A}", "name": "SPS-141 - ECM Jamming Pod", "weight": 150}
SUU_25_x_8_LUU_2___Target_Marker_Flares = {"clsid": "{CAE48299-A294-4bad-8EE6-89EFC5DCDF00}", "name": "SUU-25 x 8 LUU-2 - Target Marker Flares", "weight": 130}
SUU_25___8_LUU_2 = {"clsid": "{BRU42_SUU25}", "name": "SUU-25 * 8 LUU-2", "weight": 258}
SUU_25___8_LUU_2_ = {"clsid": "{BRU3242_SUU25}", "name": "SUU-25 * 8 LUU-2", "weight": 315.38}
Super_530D = {"clsid": "{FD21B13E-57F3-4C2A-9F78-C522D0B5BCE1}", "name": "Super 530D", "weight": 270}
SVIR_9M119 = {"clsid": "SVIR_9M119", "name": "AT-11 SNIPER (Svir')", "weight": None}
S_24A__21____180_kg__cumulative_unguided_rocket = {"clsid": "{S-24A}", "name": "S-24A (21) - 180 kg, cumulative unguided rocket", "weight": 235}
S_24B__21____180_kg__fragmented_unguided_rocket = {"clsid": "{S-24B}", "name": "S-24B (21) - 180 kg, fragmented unguided rocket", "weight": 235}
S_24B___240mm_UnGd_Rkt__235kg__HE_Frag___Low_Smk_ = {"clsid": "{1FA14DEA-8CDB-45AD-88A8-EC068DF1E65A}", "name": "S-24B - 240mm UnGd Rkt, 235kg, HE/Frag, (Low Smk)", "weight": 235}
S_24B___240mm_UnGd_Rkt__235kg__HE_Frag___Low_Smk__ = {"clsid": "{3858707D-F5D5-4bbb-BDD8-ABB0530EBC7C}", "name": "S-24B - 240mm UnGd Rkt, 235kg, HE/Frag, (Low Smk)", "weight": 295}
S_25L___320Kg__340mm_Laser_Guided_Rkt = {"clsid": "{0180F983-C14A-11d8-9897-000476191836}", "name": "S-25L - 320Kg, 340mm Laser Guided Rkt", "weight": 500}
S_25_OFM___340mm_UnGd_Rkt__480kg_Penetrator = {"clsid": "{A0648264-4BC0-4EE8-A543-D119F6BA4257}", "name": "S-25-OFM - 340mm UnGd Rkt, 480kg Penetrator", "weight": 495}
S_25_O___420mm_UnGd_Rkt__380kg_Frag = {"clsid": "{S_25_O}", "name": "S-25-O - 420mm UnGd Rkt, 380kg Frag", "weight": 445}
Tangazh_ELINT_pod = {"clsid": "{0519A262-0AB6-11d6-9193-00A0249B6F00}", "name": "Tangazh ELINT pod", "weight": 200}
TER_9A_with_2_x_CBU_87___202_x_CEM_Cluster_Bomb = {"clsid": "{TER_9A_2L*CBU-87}", "name": "TER-9A with 2 x CBU-87 - 202 x CEM Cluster Bomb", "weight": 913}
TER_9A_with_2_x_CBU_87___202_x_CEM_Cluster_Bomb_ = {"clsid": "{TER_9A_2R*CBU-87}", "name": "TER-9A with 2 x CBU-87 - 202 x CEM Cluster Bomb", "weight": 913}
TER_9A_with_2_x_CBU_97___10_x_SFW_Cluster_Bomb = {"clsid": "{TER_9A_2L*CBU-97}", "name": "TER-9A with 2 x CBU-97 - 10 x SFW Cluster Bomb", "weight": 887}
TER_9A_with_2_x_CBU_97___10_x_SFW_Cluster_Bomb_ = {"clsid": "{TER_9A_2R*CBU-97}", "name": "TER-9A with 2 x CBU-97 - 10 x SFW Cluster Bomb", "weight": 887}
TER_9A_with_2_x_GBU_12___500lb_Laser_Guided_Bomb = {"clsid": "{TER_9A_2L*GBU-12}", "name": "TER-9A with 2 x GBU-12 - 500lb Laser Guided Bomb", "weight": 607}
TER_9A_with_2_x_GBU_12___500lb_Laser_Guided_Bomb_ = {"clsid": "{TER_9A_2R*GBU-12}", "name": "TER-9A with 2 x GBU-12 - 500lb Laser Guided Bomb", "weight": 607}
TER_9A_with_2_x_Mk_82_AIR_Ballute___500lb_GP_Bomb_HD = {"clsid": "{TER_9A_2L*MK-82AIR}", "name": "TER-9A with 2 x Mk-82 AIR Ballute - 500lb GP Bomb HD", "weight": 537}
TER_9A_with_2_x_Mk_82_AIR_Ballute___500lb_GP_Bomb_HD_ = {"clsid": "{TER_9A_2R*MK-82AIR}", "name": "TER-9A with 2 x Mk-82 AIR Ballute - 500lb GP Bomb HD", "weight": 537}
TER_9A_with_2_x_Mk_82_Snakeye___500lb_GP_Bomb_HD = {"clsid": "{TER_9A_2L*MK-82_Snakeye}", "name": "TER-9A with 2 x Mk-82 Snakeye - 500lb GP Bomb HD", "weight": 552}
TER_9A_with_2_x_Mk_82_Snakeye___500lb_GP_Bomb_HD_ = {"clsid": "{TER_9A_2R*MK-82_Snakeye}", "name": "TER-9A with 2 x Mk-82 Snakeye - 500lb GP Bomb HD", "weight": 552}
TER_9A_with_2_x_Mk_82___500lb_GP_Bomb_LD = {"clsid": "{TER_9A_2L*MK-82}", "name": "TER-9A with 2 x Mk-82 - 500lb GP Bomb LD", "weight": 509}
TER_9A_with_2_x_Mk_82___500lb_GP_Bomb_LD_ = {"clsid": "{TER_9A_2R*MK-82}", "name": "TER-9A with 2 x Mk-82 - 500lb GP Bomb LD", "weight": 509}
TER_9A_with_3_x_BDU_33___25lb_Practice_Bomb_LD = {"clsid": "{TER_9A_3*BDU-33}", "name": "TER-9A with 3 x BDU-33 - 25lb Practice Bomb LD", "weight": 86.9}
TER_9A_with_3_x_CBU_87___202_x_CEM_Cluster_Bomb = {"clsid": "{TER_9A_3*CBU-87}", "name": "TER-9A with 3 x CBU-87 - 202 x CEM Cluster Bomb", "weight": 1343}
TER_9A_with_3_x_CBU_97___10_x_SFW_Cluster_Bomb = {"clsid": "{TER_9A_3*CBU-97}", "name": "TER-9A with 3 x CBU-97 - 10 x SFW Cluster Bomb", "weight": 1304}
TER_9A_with_3_x_Mk_82_AIR_Ballute___500lb_GP_Bomb_HD = {"clsid": "{TER_9A_3*MK-82AIR}", "name": "TER-9A with 3 x Mk-82 AIR Ballute - 500lb GP Bomb HD", "weight": 779}
TER_9A_with_3_x_Mk_82_Snakeye___500lb_GP_Bomb_HD = {"clsid": "{TER_9A_3*MK-82_Snakeye}", "name": "TER-9A with 3 x Mk-82 Snakeye - 500lb GP Bomb HD", "weight": 801.5}
TER_9A_with_3_x_Mk_82___500lb_GP_Bomb_LD = {"clsid": "{TER_9A_3*MK-82}", "name": "TER-9A with 3 x Mk-82 - 500lb GP Bomb LD", "weight": 737}
TEST_ROTARY_LAUNCHER_MK82 = {"clsid": "TEST_ROTARY_LAUNCHER_MK82", "name": "TEST ROTARY LAUNCHER MK82", "weight": 6748}
TGM_65H = {"clsid": "TGM_65H", "name": "TGM-65H - Trg Round for Mav H (CCD)", "weight": 208}
TORNADO_Fuel_tank = {"clsid": "{EF124821-F9BB-4314-A153-E0E2FE1162C4}", "name": "TORNADO Fuel tank", "weight": 1275}
TOW = {"clsid": "TOW", "name": "BGM-71D TOW ATGM", "weight": None}
U22_A_Jammer = {"clsid": "{U22A}", "name": "U22/A Jammer", "weight": 348}
UB_16UM_pod___16_x_S_5KO__57mm_UnGd_Rkts__HEAT_Frag = {"clsid": "{UB-16-57UMP}", "name": "UB-16UM pod - 16 x S-5KO, 57mm UnGd Rkts, HEAT/Frag", "weight": 138}
UB_16UM___16_S_5M = {"clsid": "{UB-16_S5M}", "name": "UB-16UM - 16 S-5M", "weight": 119.76}
UB_32A_24_pod___32_x_S_5KO = {"clsid": "{UB-32A-24}", "name": "UB-32A-24 pod - 32 x S-5KO", "weight": 275}
UB_32A_pod___32_x_S_5KO__57mm_UnGd_Rkts__HEAT_Frag = {"clsid": "{637334E4-AB5A-47C0-83A6-51B7F1DF3CD5}", "name": "UB-32A pod - 32 x S-5KO, 57mm UnGd Rkts, HEAT/Frag", "weight": 275}
UB_32M___32_S_5M = {"clsid": "{UB-32_S5M}", "name": "UB-32M - 32 S-5M", "weight": 228.52}
UPK_23_250___2_x_23mm__GSh_23L_Autocannon_Pod = {"clsid": "{05544F1A-C39C-466b-BC37-5BD1D52E57BB}", "name": "UPK-23-250 - 2 x 23mm GSh-23L Autocannon Pod", "weight": 218}
UPK_23_250___gun_pod = {"clsid": "{UPK-23-250 MiG-21}", "name": "UPK-23-250 - gun pod", "weight": 218}
U_22_Jammer_pod = {"clsid": "{U22}", "name": "U/22 Jammer pod", "weight": 348}
Werfer_Granate_21___21_cm_UnGd_air_to_air_rocket = {"clsid": "{WGr21}", "name": "Werfer-Granate 21 - 21 cm UnGd air-to-air rocket", "weight": 121}
XM158_M151 = {"clsid": "XM158_M151", "name": "XM158 pod - 7 x 2.75\" Hydra, UnGd Rkts M151, HE", "weight": 112}
XM158_M156 = {"clsid": "XM158_M156", "name": "XM158 pod - 7 x 2.75\" Hydra, UnGd Rkts M156, Wht Phos", "weight": 112}
XM158_M257 = {"clsid": "XM158_M257", "name": "XM158 pod - 7 x 2.75\" Hydra, UnGd Rkts M257, Para Illum", "weight": 112}
XM158_M274 = {"clsid": "XM158_M274", "name": "XM158 pod - 7 x 2.75\" Hydra, UnGd Rkts M274, Practice Smk", "weight": 112}
XM158_MK1 = {"clsid": "XM158_MK1", "name": "XM158 pod - 7 x 2.75\" Hydra, UnGd Rkts Mk1, Practice", "weight": 112}
XM158_MK5 = {"clsid": "XM158_MK5", "name": "XM158 pod - 7 x 2.75\" Hydra, UnGd Rkts Mk5, HEAT", "weight": 112}
_100_gal__Drop_Tank = {"clsid": "{MOSQUITO_100GAL_SLIPPER_TANK}", "name": "100 gal. Drop Tank", "weight": 375.3}
_108_US_gal__Paper_Fuel_Tank = {"clsid": "{US_108GAL_PAPER_FUEL_TANK}", "name": "108 US gal. Paper Fuel Tank", "weight": 319}
_110_US_gal__Fuel_Tank = {"clsid": "{US_110GAL_FUEL_TANK}", "name": "110 US gal. Fuel Tank", "weight": 349}
_12_AN_M64___500lb_GP_Bomb_LD = {"clsid": "{12xM64}", "name": "12 AN-M64 - 500lb GP Bomb LD", "weight": 2744}
_12_x_BetAB_500___500kg_Concrete_Piercing_Bombs_LD = {"clsid": "{D6A0441E-6794-4FEB-87F7-E68E2290DFAB}", "name": "12 x BetAB-500 - 500kg Concrete Piercing Bombs LD", "weight": 478}
_12_x_FAB_500_M_62___500kg_GP_Bombs_LD = {"clsid": "{E70446B7-C7E6-4B95-B685-DEA10CAD1A0E}", "name": "12 x FAB-500 M-62 - 500kg GP Bombs LD", "weight": 6000}
_13_R4M_3_2kg_UnGd_air_to_air_rocket = {"clsid": "{FW_190_R4M_LEFT_WING}", "name": "13 R4M 3.2kg UnGd air-to-air rocket", "weight": 70.05}
_13_R4M_3_2kg_UnGd_air_to_air_rocket_ = {"clsid": "{FW_190_R4M_RGHT_WING}", "name": "13 R4M 3.2kg UnGd air-to-air rocket", "weight": 70.05}
_150_US_gal__Fuel_Tank = {"clsid": "{US_150GAL_FUEL_TANK}", "name": "150 US gal. Fuel Tank", "weight": 458.8}
_20_x_AGM_86C_ALCM = {"clsid": "{22906569-A97F-404B-BA4F-D96DBF94D05E}", "name": "20 x AGM-86C ALCM", "weight": 39000}
_24_x_FAB_250___250kg_GP_Bombs_LD = {"clsid": "{B0241BD2-5628-47E0-954C-A8675B7E698E}", "name": "24 x FAB-250 - 250kg GP Bombs LD", "weight": 6000}
_250_lb_GP_Mk_I = {"clsid": "{British_GP_250LB_Bomb_Mk1}", "name": "250 lb GP Mk.I", "weight": 104.326}
_250_lb_GP_Mk_IV = {"clsid": "{British_GP_250LB_Bomb_Mk4}", "name": "250 lb GP Mk.IV", "weight": 104.326}
_250_lb_GP_Mk_IV_ = {"clsid": "{British_GP_250LB_Bomb_Mk4_on_Handley_Page_Type_B_Cut_Bar}", "name": "250 lb GP Mk.IV", "weight": 109.626}
_250_lb_GP_Mk_V = {"clsid": "{British_GP_250LB_Bomb_Mk5}", "name": "250 lb GP Mk.V", "weight": 104.326}
_250_lb_GP_Mk_V_ = {"clsid": "{British_GP_250LB_Bomb_Mk5_on_Handley_Page_Type_B_Cut_Bar}", "name": "250 lb GP Mk.V", "weight": 109.626}
_250_lb_MC_Mk_I = {"clsid": "{British_MC_250LB_Bomb_Mk1}", "name": "250 lb MC Mk.I", "weight": 102}
_250_lb_MC_Mk_II = {"clsid": "{British_MC_250LB_Bomb_Mk2}", "name": "250 lb MC Mk.II", "weight": 102}
_250_lb_MC_Mk_II_ = {"clsid": "{British_MC_250LB_Bomb_Mk2_on_Handley_Page_Type_B_Cut_Bar}", "name": "250 lb MC Mk.II", "weight": 107.3}
_250_lb_MC_Mk_I_ = {"clsid": "{British_MC_250LB_Bomb_Mk1_on_Handley_Page_Type_B_Cut_Bar}", "name": "250 lb MC Mk.I", "weight": 107.3}
_250_lb_S_A_P_ = {"clsid": "{British_SAP_250LB_Bomb_Mk5}", "name": "250 lb S.A.P.", "weight": 111.13}
_250_lb_S_A_P__ = {"clsid": "{British_SAP_250LB_Bomb_Mk5_on_Handley_Page_Type_B_Cut_Bar}", "name": "250 lb S.A.P.", "weight": 116.43}
_27_x_M117___750lb_GP_Bombs_LD = {"clsid": "{B58F99BA-5480-4572-8602-28B0449F5260}", "name": "27 x M117 - 750lb GP Bombs LD", "weight": 9180}
_27_x_Mk_82___500lb_GP_Bombs_LD = {"clsid": "{6C47D097-83FF-4FB2-9496-EAB36DDF0B05}", "name": "27 x Mk-82 - 500lb GP Bombs LD", "weight": 6507}
_2xGBU_12___500lb_Laser_Guided_Bomb = {"clsid": "{89D000B0-0360-461A-AD83-FB727E2ABA98}", "name": "2xGBU-12 - 500lb Laser Guided Bomb", "weight": 610.25}
_2xGBU_12___500lb_Laser_Guided_Bomb_ = {"clsid": "{BRU-42_2xGBU-12_right}", "name": "2xGBU-12 - 500lb Laser Guided Bomb", "weight": 610.25}
_2x_80kg_LYSB_71_Illumination_Bomb = {"clsid": "{LYSBOMB}", "name": "2x 80kg LYSB-71 Illumination Bomb", "weight": 220}
_2_BDU_45 = {"clsid": "{BRU42_2*BDU45 RS}", "name": "2 BDU-45", "weight": 592}
_2_BDU_45B = {"clsid": "{BRU42_2*BDU45B RS}", "name": "2 BDU-45B", "weight": 592}
_2_BDU_45B_ = {"clsid": "{BRU3242_2*BDU45B RS}", "name": "2 BDU-45B", "weight": 649.38}
_2_BDU_45B__ = {"clsid": "{PHXBRU3242_2*BDU45B RS}", "name": "2 BDU-45B", "weight": 649.38}
_2_BDU_45B___ = {"clsid": "{BRU42_2*BDU45B LS}", "name": "2 BDU-45B", "weight": 592}
_2_BDU_45B____ = {"clsid": "{BRU3242_2*BDU45B LS}", "name": "2 BDU-45B", "weight": 649.38}
_2_BDU_45B_____ = {"clsid": "{PHXBRU3242_2*BDU45B LS}", "name": "2 BDU-45B", "weight": 649.38}
_2_BDU_45_ = {"clsid": "{BRU3242_2*BDU45 RS}", "name": "2 BDU-45", "weight": 649.38}
_2_BDU_45__ = {"clsid": "{PHXBRU3242_2*BDU45 RS}", "name": "2 BDU-45", "weight": 649.38}
_2_BDU_45___ = {"clsid": "{BRU42_2*BDU45 LS}", "name": "2 BDU-45", "weight": 592}
_2_BDU_45____ = {"clsid": "{BRU3242_2*BDU45 LS}", "name": "2 BDU-45", "weight": 649.38}
_2_BDU_45_____ = {"clsid": "{PHXBRU3242_2*BDU45 LS}", "name": "2 BDU-45", "weight": 649.38}
_2_CBU_99 = {"clsid": "{BRU-70_2*CBU-99_LEFT}", "name": "2 CBU-99", "weight": 541}
_2_CBU_99_ = {"clsid": "{BRU-70_2*CBU-99_RIGHT}", "name": "2 CBU-99", "weight": 541}
_2_GBU_12 = {"clsid": "{BRU-42_2*GBU-12_LEFT}", "name": "2 GBU-12", "weight": 547}
_2_GBU_12_ = {"clsid": "{BRU-42_2*GBU-12_RIGHT}", "name": "2 GBU-12", "weight": 547}
_2_GBU_16 = {"clsid": "{BRU-42_2*GBU-16_LEFT}", "name": "2 GBU-16", "weight": 1005}
_2_GBU_16_ = {"clsid": "{BRU-42_2*GBU-16_RIGHT}", "name": "2 GBU-16", "weight": 1005}
_2_GBU_38 = {"clsid": "{BRU-42_2*GBU-38_LEFT}", "name": "2 GBU-38", "weight": 579}
_2_GBU_38_ = {"clsid": "{BRU-42_2*GBU-38_RIGHT}", "name": "2 GBU-38", "weight": 579}
_2_GBU_54_V_1_B = {"clsid": "{BRU-70A_2*GBU-54_LEFT}", "name": "2 GBU-54(V)1/B", "weight": 603}
_2_GBU_54_V_1_B_ = {"clsid": "{BRU-70A_2*GBU-54_RIGHT}", "name": "2 GBU-54(V)1/B", "weight": 603}
_2_LAU_10___4_ZUNI_MK_71 = {"clsid": "{BRU42_2*LAU10 L}", "name": "2 LAU-10 - 4 ZUNI MK 71", "weight": 1008}
_2_LAU_10___4_ZUNI_MK_71_ = {"clsid": "{BRU3242_2*LAU10 L}", "name": "2 LAU-10 - 4 ZUNI MK 71", "weight": 1065.38}
_2_LAU_10___4_ZUNI_MK_71__ = {"clsid": "{BRU42_2*LAU10 R}", "name": "2 LAU-10 - 4 ZUNI MK 71", "weight": 1008}
_2_LAU_10___4_ZUNI_MK_71___ = {"clsid": "{BRU3242_2*LAU10 R}", "name": "2 LAU-10 - 4 ZUNI MK 71", "weight": 1065.38}
_2_LAU_10___4_ZUNI_MK_71____ = {"clsid": "{BRU42_2*LAU10 RS}", "name": "2 LAU-10 - 4 ZUNI MK 71", "weight": 1008}
_2_LAU_10___4_ZUNI_MK_71_____ = {"clsid": "{BRU3242_2*LAU10 RS}", "name": "2 LAU-10 - 4 ZUNI MK 71", "weight": 1065.38}
_2_LAU_10___4_ZUNI_MK_71______ = {"clsid": "{PHXBRU3242_2*LAU10 RS}", "name": "2 LAU-10 - 4 ZUNI MK 71", "weight": 1065.38}
_2_LAU_10___4_ZUNI_MK_71_______ = {"clsid": "{BRU42_2*LAU10 LS}", "name": "2 LAU-10 - 4 ZUNI MK 71", "weight": 1008}
_2_LAU_10___4_ZUNI_MK_71________ = {"clsid": "{BRU3242_2*LAU10 LS}", "name": "2 LAU-10 - 4 ZUNI MK 71", "weight": 1065.38}
_2_LAU_10___4_ZUNI_MK_71_________ = {"clsid": "{PHXBRU3242_2*LAU10 LS}", "name": "2 LAU-10 - 4 ZUNI MK 71", "weight": 1065.38}
_2_LUU_2 = {"clsid": "{BRU42_2*LUU2 R}", "name": "2 LUU-2", "weight": 155.2}
_2_LUU_2_ = {"clsid": "{BRU3242_2*LUU2 R}", "name": "2 LUU-2", "weight": 212.58}
_2_LUU_2__ = {"clsid": "{BRU42_2*LUU2 L}", "name": "2 LUU-2", "weight": 155.2}
_2_LUU_2___ = {"clsid": "{BRU3242_2*LUU2 L}", "name": "2 LUU-2", "weight": 212.58}
_2_MK_20 = {"clsid": "{BRU42_2*MK20 RS}", "name": "2 MK-20", "weight": 572}
_2_MK_20_ = {"clsid": "{BRU3242_2*MK20 RS}", "name": "2 MK-20", "weight": 629.38}
_2_MK_20__ = {"clsid": "{PHXBRU3242_2*MK20 RS}", "name": "2 MK-20", "weight": 629.38}
_2_MK_20___ = {"clsid": "{BRU42_2*MK20 LS}", "name": "2 MK-20", "weight": 572}
_2_MK_20____ = {"clsid": "{BRU3242_2*MK20 LS}", "name": "2 MK-20", "weight": 629.38}
_2_MK_20_____ = {"clsid": "{PHXBRU3242_2*MK20 LS}", "name": "2 MK-20", "weight": 629.38}
_2_Mk_20_Rockeye = {"clsid": "{BRU-42_2*MK-20_LEFT}", "name": "2 Mk-20 Rockeye", "weight": 541}
_2_Mk_20_Rockeye_ = {"clsid": "{BRU-42_2*MK-20_RIGHT}", "name": "2 Mk-20 Rockeye", "weight": 541}
_2_Mk_81 = {"clsid": "{BRU42_2*MK81 RS}", "name": "2 Mk-81", "weight": 364}
_2_Mk_81_ = {"clsid": "{BRU3242_2*MK81 RS}", "name": "2 Mk-81", "weight": 421.38}
_2_Mk_81__ = {"clsid": "{PHXBRU3242_2*MK81 RS}", "name": "2 Mk-81", "weight": 421.38}
_2_Mk_81___ = {"clsid": "{BRU42_2*MK81 LS}", "name": "2 Mk-81", "weight": 364}
_2_Mk_81____ = {"clsid": "{BRU3242_2*MK81 LS}", "name": "2 Mk-81", "weight": 421.38}
_2_Mk_81_____ = {"clsid": "{PHXBRU3242_2*MK81 LS}", "name": "2 Mk-81", "weight": 421.38}
_2_Mk_82 = {"clsid": "{BRU-42_2*Mk-82_LEFT}", "name": "2 Mk-82", "weight": 579}
_2_Mk_82AIR = {"clsid": "{BRU42_2*MK82AIR RS}", "name": "2 Mk-82AIR", "weight": 610}
_2_Mk_82AIR_ = {"clsid": "{BRU3242_2*MK82AIR RS}", "name": "2 Mk-82AIR", "weight": 667.38}
_2_Mk_82AIR__ = {"clsid": "{PHXBRU3242_2*MK82AIR RS}", "name": "2 Mk-82AIR", "weight": 667.38}
_2_Mk_82AIR___ = {"clsid": "{BRU42_2*MK82AIR LS}", "name": "2 Mk-82AIR", "weight": 610}
_2_Mk_82AIR____ = {"clsid": "{BRU3242_2*MK82AIR LS}", "name": "2 Mk-82AIR", "weight": 667.38}
_2_Mk_82AIR_____ = {"clsid": "{PHXBRU3242_2*MK82AIR LS}", "name": "2 Mk-82AIR", "weight": 667.38}
_2_Mk_82_ = {"clsid": "{BRU-42_2*Mk-82_RIGHT}", "name": "2 Mk-82", "weight": 579}
_2_Mk_82_AIR = {"clsid": "{BRU-42_2*Mk-82AIR_LEFT}", "name": "2 Mk-82 AIR", "weight": 579}
_2_Mk_82_AIR_ = {"clsid": "{BRU-42_2*Mk-82AIR_RIGHT}", "name": "2 Mk-82 AIR", "weight": 579}
_2_Mk_82_SnakeEye = {"clsid": "{BRU42_2*MK82SE RS}", "name": "2 Mk-82 SnakeEye", "weight": 610}
_2_Mk_82_SnakeEye_ = {"clsid": "{BRU3242_2*MK82SE RS}", "name": "2 Mk-82 SnakeEye", "weight": 667.38}
_2_Mk_82_SnakeEye__ = {"clsid": "{PHXBRU3242_2*MK82SE RS}", "name": "2 Mk-82 SnakeEye", "weight": 667.38}
_2_Mk_82_SnakeEye___ = {"clsid": "{BRU42_2*MK82SE LS}", "name": "2 Mk-82 SnakeEye", "weight": 610}
_2_Mk_82_SnakeEye____ = {"clsid": "{BRU3242_2*MK82SE LS}", "name": "2 Mk-82 SnakeEye", "weight": 667.38}
_2_Mk_82_SnakeEye_____ = {"clsid": "{PHXBRU3242_2*MK82SE LS}", "name": "2 Mk-82 SnakeEye", "weight": 667.38}
_2_Mk_82_Snakeye = {"clsid": "{BRU-42_2*Mk-82SNAKEYE_LEFT}", "name": "2 Mk-82 Snakeye", "weight": 579}
_2_Mk_82_Snakeye_ = {"clsid": "{BRU-42_2*Mk-82SNAKEYE_RIGHT}", "name": "2 Mk-82 Snakeye", "weight": 579}
_2_Mk_82__ = {"clsid": "{BRU42_2*MK82 RS}", "name": "2 Mk-82", "weight": 610}
_2_Mk_82___ = {"clsid": "{BRU3242_2*MK82 RS}", "name": "2 Mk-82", "weight": 667.38}
_2_Mk_82____ = {"clsid": "{PHXBRU3242_2*MK82 RS}", "name": "2 Mk-82", "weight": 667.38}
_2_Mk_82_____ = {"clsid": "{BRU42_2*MK82 LS}", "name": "2 Mk-82", "weight": 610}
_2_Mk_82______ = {"clsid": "{BRU3242_2*MK82 LS}", "name": "2 Mk-82", "weight": 667.38}
_2_Mk_82_______ = {"clsid": "{PHXBRU3242_2*MK82 LS}", "name": "2 Mk-82", "weight": 667.38}
_2_Mk_83 = {"clsid": "{BRU-42_2*Mk-83_LEFT}", "name": "2 Mk-83", "weight": 991}
_2_Mk_83_ = {"clsid": "{BRU-42_2*Mk-83_RIGHT}", "name": "2 Mk-83", "weight": 991}
_2_SUU_25___8_LUU_2 = {"clsid": "{BRU42_2*SUU25 L}", "name": "2 SUU-25 * 8 LUU-2", "weight": 388}
_2_SUU_25___8_LUU_2_ = {"clsid": "{BRU3242_2*SUU25 L}", "name": "2 SUU-25 * 8 LUU-2", "weight": 445.38}
_2_SUU_25___8_LUU_2__ = {"clsid": "{BRU42_2*SUU25 R}", "name": "2 SUU-25 * 8 LUU-2", "weight": 388}
_2_SUU_25___8_LUU_2___ = {"clsid": "{BRU3242_2*SUU25 R}", "name": "2 SUU-25 * 8 LUU-2", "weight": 445.38}
_2_x_9M120F_Ataka__AT_9_Spiral_2____AGM__SACLOS__HE = {"clsid": "{2x9M120F_Ataka_V}", "name": "2 x 9M120F Ataka (AT-9 Spiral-2) - AGM, SACLOS, HE", "weight": 112}
_2_x_9M120_Ataka__AT_9_Spiral_2____ATGM__SACLOS__Tandem_HEAT = {"clsid": "{2x9M120_Ataka_V}", "name": "2 x 9M120 Ataka (AT-9 Spiral-2) - ATGM, SACLOS, Tandem HEAT", "weight": 112}
_2_x_9M220O_Ataka__AT_9_Spiral_2____AAM__SACLOS__Frag = {"clsid": "{2x9M220_Ataka_V}", "name": "2 x 9M220O Ataka (AT-9 Spiral-2) - AAM, SACLOS, Frag", "weight": 112}
_2_x_ALARM = {"clsid": "{07BE2D19-0E48-4B0B-91DA-5F6C8F9E3C75}", "name": "2 x ALARM", "weight": 530}
_2_x_BL_755_CBUs___450kg__147_Frag_Pen_bomblets = {"clsid": "{C535596E-F7D2-4301-8BB4-B1658BB87ED7}", "name": "2 x BL-755 CBUs - 450kg, 147 Frag/Pen bomblets", "weight": 554}
_2_x_B_13L_pods___10_x_S_13_OF__122mm_UnGd_Rkts__Blast_Frag = {"clsid": "{TWIN_B13L_5OF}", "name": "2 x B-13L pods - 10 x S-13-OF, 122mm UnGd Rkts, Blast/Frag", "weight": 1042}
_2_x_B_13L___5_S_13_OF = {"clsid": "{B13_5_S13OF_DUAL_L}", "name": "2 x B-13L - 5 S-13 OF", "weight": 1042}
_2_x_B_13L___5_S_13_OF_ = {"clsid": "{B13_5_S13OF_DUAL_R}", "name": "2 x B-13L - 5 S-13 OF", "weight": 1042}
_2_x_B_8M1_pods___40_x_S_8KOM__80mm_UnGd_Rkts__HEAT_AP = {"clsid": "{TWIN_B_8M1_S_8KOM}", "name": "2 x B-8M1 pods - 40 x S-8KOM, 80mm UnGd Rkts, HEAT/AP", "weight": 759}
_2_x_B_8M1___20_S_8KOM = {"clsid": "{B8M1_20_S8KOM_DUAL_L}", "name": "2 x B-8M1 - 20 S-8KOM", "weight": 975}
_2_x_B_8M1___20_S_8KOM_ = {"clsid": "{B8M1_20_S8KOM_DUAL_R}", "name": "2 x B-8M1 - 20 S-8KOM", "weight": 975}
_2_x_B_8M1___20_S_8OFP2 = {"clsid": "{B8M1_20_S8OFP2_DUAL_L}", "name": "2 x B-8M1 - 20 S-8OFP2", "weight": 975}
_2_x_B_8M1___20_S_8OFP2_ = {"clsid": "{B8M1_20_S8OFP2_DUAL_R}", "name": "2 x B-8M1 - 20 S-8OFP2", "weight": 975}
_2_x_B_8M1___20_S_8TsM = {"clsid": "{B8M1_20_S8TsM_DUAL_L}", "name": "2 x B-8M1 - 20 S-8TsM", "weight": 751}
_2_x_B_8M1___20_S_8TsM_ = {"clsid": "{B8M1_20_S8TsM_DUAL_R}", "name": "2 x B-8M1 - 20 S-8TsM", "weight": 751}
_2_x_B_8V20A_pods___40_x_S_8OFP2__80mm_UnGd_Rkts__HE_Frag_AP = {"clsid": "{TWIN_B_8M1_S_8_OFP2}", "name": "2 x B-8V20A pods - 40 x S-8OFP2, 80mm UnGd Rkts, HE/Frag/AP", "weight": 975}
_2_x_B_8V20A_pods___40_x_S_8TsM__80mm_UnGd_Rkts__Smk = {"clsid": "{TWIN_B_8M1_S_8TsM}", "name": "2 x B-8V20A pods - 40 x S-8TsM, 80mm UnGd Rkts, Smk", "weight": 751}
_2_x_FAB_250 = {"clsid": "{FAB_250_DUAL_L}", "name": "2 x FAB-250", "weight": 532}
_2_x_FAB_250_ = {"clsid": "{FAB_250_DUAL_R}", "name": "2 x FAB-250", "weight": 532}
_2_x_FAB_500 = {"clsid": "{FAB_500_DUAL_L}", "name": "2 x FAB-500", "weight": 1044}
_2_x_FAB_500_ = {"clsid": "{FAB_500_DUAL_R}", "name": "2 x FAB-500", "weight": 1044}
_2_x_HVAR__UnGd_Rkts = {"clsid": "{HVARx2}", "name": "2 x HVAR, UnGd Rkts", "weight": 128}
_2_x_OFAB_100_Jupiter___100kg_GP_Bombs_LD = {"clsid": "{FAB-100x2}", "name": "2 x OFAB-100 Jupiter - 100kg GP Bombs LD", "weight": 342}
_2_x_RBK_250_PTAB_2_5M = {"clsid": "{RBK_250_PTAB25M_DUAL_L}", "name": "2 x RBK-250 PTAB-2.5M", "weight": 578}
_2_x_RBK_250_PTAB_2_5M_ = {"clsid": "{RBK_250_PTAB25M_DUAL_R}", "name": "2 x RBK-250 PTAB-2.5M", "weight": 578}
_2_x_RBK_500_255_PTAB_10_5 = {"clsid": "{RBK_500_PTAB105_DUAL_L}", "name": "2 x RBK-500-255 PTAB-10-5", "weight": 538}
_2_x_RBK_500_255_PTAB_10_5_ = {"clsid": "{RBK_500_PTAB105_DUAL_R}", "name": "2 x RBK-500-255 PTAB-10-5", "weight": 538}
_2_x_RP_3_25lb_AP_Mk_I = {"clsid": "{MOSSIE_2_British_AP_25LBNo1_3INCHNo1_ON_LEFT_WING_RAILS}", "name": "2 x RP-3 25lb AP Mk.I", "weight": 174}
_2_x_RP_3_25lb_AP_Mk_I_ = {"clsid": "{MOSSIE_2_British_AP_25LBNo1_3INCHNo1_ON_RIGHT_WING_RAILS}", "name": "2 x RP-3 25lb AP Mk.I", "weight": 174}
_2_x_RP_3_60lb_F_No1_Mk_I = {"clsid": "{MOSSIE_2_British_HE_60LBFNo1_3INCHNo1_ON_LEFT_WING_RAILS}", "name": "2 x RP-3 60lb F No1 Mk.I", "weight": 193.2}
_2_x_RP_3_60lb_F_No1_Mk_I_ = {"clsid": "{MOSSIE_2_British_HE_60LBFNo1_3INCHNo1_ON_RIGHT_WING_RAILS}", "name": "2 x RP-3 60lb F No1 Mk.I", "weight": 193.2}
_2_x_RP_3_60lb_SAP_No2_Mk_I = {"clsid": "{MOSSIE_2_British_HE_60LBSAPNo2_3INCHNo1_ON_LEFT_WING_RAILS}", "name": "2 x RP-3 60lb SAP No2 Mk.I", "weight": 206.2}
_2_x_RP_3_60lb_SAP_No2_Mk_I_ = {"clsid": "{MOSSIE_2_British_HE_60LBSAPNo2_3INCHNo1_ON_RIGHT_WING_RAILS}", "name": "2 x RP-3 60lb SAP No2 Mk.I", "weight": 206.2}
_2_x_S_25 = {"clsid": "{S25_DUAL_L}", "name": "2 x S-25", "weight": 902}
_2_x_S_25_ = {"clsid": "{S25_DUAL_R}", "name": "2 x S-25", "weight": 902}
_2_x_S_25_OFM___340mm_UnGdrocket__480kg_Penetrator = {"clsid": "{TWIN_S25}", "name": "2 x S-25-OFM - 340mm UnGd Rkt, 480kg Penetrator", "weight": 902}  # display name normalized from "UnGdrocket" in the source data
_2_x_S_25_O___420mm_UnGd_Rkt__380kg_Frag = {"clsid": "{TWIN_S25_O}", "name": "2 x S-25-O - 420mm UnGd Rkt, 380kg Frag", "weight": 922}
_33_x_FAB_250___250kg_GP_Bombs_LD = {"clsid": "{BDAD04AA-4D4A-4E51-B958-180A89F963CF}", "name": "33 x FAB-250 - 250kg GP Bombs LD", "weight": 8250}
_33_x_FAB_500_M_62___500kg_GP_Bombs_LD = {"clsid": "{AD5E5863-08FC-4283-B92C-162E2B2BD3FF}", "name": "33 x FAB-500 M-62 - 500kg GP Bombs LD", "weight": 16500}
_3M45 = {"clsid": "3M45", "name": "SS-N-19 SHIPWRECK", "weight": None}
_3_BDU_33 = {"clsid": "{BRU42_3*BDU33}", "name": "3 BDU-33", "weight": 161}
_3_BDU_33_ = {"clsid": "{BRU3242_3*BDU33}", "name": "3 BDU-33", "weight": 218.38}
_3_BDU_33__ = {"clsid": "{BRU42_3*BDU33_N}", "name": "3 BDU-33", "weight": 161}
_3_BDU_33___ = {"clsid": "{BRU3242_3*BDU33_N}", "name": "3 BDU-33", "weight": 218.38}
_3_BDU_33____ = {"clsid": "{PHXBRU3242_BDU33}", "name": "3 BDU-33", "weight": 218.38}
_3_GBU_12 = {"clsid": "{BRU-42A_3*GBU-12}", "name": "3 GBU-12", "weight": 772}
_3_GBU_16 = {"clsid": "{BRU-42A_3*GBU-16}", "name": "3 GBU-16", "weight": 1459}
_3_GBU_38 = {"clsid": "{BRU-42_3*GBU-38}", "name": "3 GBU-38", "weight": 820}
_3_GBU_54_V_1_B = {"clsid": "{BRU-70A_3*GBU-54}", "name": "3 GBU-54(V)1/B", "weight": 856}
_3_Mk_81 = {"clsid": "{BRU-42_3*Mk-81LD}", "name": "3 Mk-81", "weight": 451}
_3_Mk_82 = {"clsid": "{BRU-42_3*Mk-82LD}", "name": "3 Mk-82", "weight": 820}
_3_Mk_82_AIR = {"clsid": "{BRU-42_3_MK82AIR}", "name": "3 Mk-82 AIR", "weight": 820}
_3_Mk_82_Snakeye = {"clsid": "{BRU-42_3*Mk-82SNAKEYE}", "name": "3 Mk-82 Snakeye", "weight": 820}
_3_Mk_83 = {"clsid": "{BRU-42_3*Mk-83}", "name": "3 Mk-83", "weight": 1438}
_3_x_4_5_inch_M8_UnGd_Rocket = {"clsid": "{3xM8_ROCKETS_IN_TUBES}", "name": "3 x 4.5 inch M8 UnGd Rocket", "weight": 71.72}
_3_x_FAB_1500_M_54___1500kg_GP_Bombs_LD = {"clsid": "{639DB5DD-CB7E-4E42-AC75-2112BC397B97}", "name": "3 x FAB-1500 M-54 - 1500kg GP Bombs LD", "weight": 4500}
_3_x_LAU_61_pods___57_x_2_75_Hydra__UnGd_Rkts_M151__HE = {"clsid": "{A76344EB-32D2-4532-8FA2-0C1BDC00747E}", "name": "3 x LAU-61 pods - 57 x 2.75\" Hydra, UnGd Rkts M151, HE", "weight": 876.45}
_48N6E2 = {"clsid": "48N6E2", "name": "48N6E2 S-300F (SA-N-6 Grumble)", "weight": None}
_4M80 = {"clsid": "_4M80", "name": "SS-N-12 SANDBOX", "weight": None}
_4x_SB_M_71_120kg_GP_Bomb_High_drag = {"clsid": "{M71BOMBD}", "name": "4x SB M/71 120kg GP Bomb High-drag", "weight": 609}
_4x_SB_M_71_120kg_GP_Bomb_Low_drag = {"clsid": "{M71BOMB}", "name": "4x SB M/71 120kg GP Bomb Low-drag", "weight": 609}
_4_x_AGM_154C___JSOW_Unitary_BROACH = {"clsid": "{AABA1A14-78A1-4E85-94DD-463CF75BD9E4}", "name": "4 x AGM-154C - JSOW Unitary BROACH", "weight": 2560}
_4_x_AN_M64___500lb_GP_Bomb_LD = {"clsid": "{4xAN-M64_on_InvCountedAttachmentPoints}", "name": "4 x AN-M64 - 500lb GP Bomb LD", "weight": 908}
_4_x_BGM_71D_TOW_ATGM = {"clsid": "{3EA17AB0-A805-4D9E-8732-4CE00CB00F17}", "name": "4 x BGM-71D TOW ATGM", "weight": 250}
_4_x_GBU_27___2000lb_Laser_Guided_Penetrator_Bombs = {"clsid": "{B8C99F40-E486-4040-B547-6639172A5D57}", "name": "4 x GBU-27 - 2000lb Laser Guided Penetrator Bombs", "weight": 3936}
_4_x_RP_3_25lb_AP_Mk_I = {"clsid": "{MOSSIE_4_British_AP_25LBNo1_3INCHNo1_ON_LEFT_WING_RAILS}", "name": "4 x RP-3 25lb AP Mk.I", "weight": 218}
_4_x_RP_3_25lb_AP_Mk_I_ = {"clsid": "{MOSSIE_4_British_AP_25LBNo1_3INCHNo1_ON_RIGHT_WING_RAILS}", "name": "4 x RP-3 25lb AP Mk.I", "weight": 218}
_4_x_RP_3_60lb_F_No1_Mk_I = {"clsid": "{MOSSIE_4_British_HE_60LBFNo1_3INCHNo1_ON_LEFT_WING_RAILS}", "name": "4 x RP-3 60lb F No1 Mk.I", "weight": 256.4}
_4_x_RP_3_60lb_F_No1_Mk_I_ = {"clsid": "{MOSSIE_4_British_HE_60LBFNo1_3INCHNo1_ON_RIGHT_WING_RAILS}", "name": "4 x RP-3 60lb F No1 Mk.I", "weight": 256.4}
_4_x_RP_3_60lb_SAP_No2_Mk_I = {"clsid": "{MOSSIE_4_British_HE_60LBSAPNo2_3INCHNo1_ON_LEFT_WING_RAILS}", "name": "4 x RP-3 60lb SAP No2 Mk.I", "weight": 282.4}
_4_x_RP_3_60lb_SAP_No2_Mk_I_ = {"clsid": "{MOSSIE_4_British_HE_60LBSAPNo2_3INCHNo1_ON_RIGHT_WING_RAILS}", "name": "4 x RP-3 60lb SAP No2 Mk.I", "weight": 282.4}
_500_lb_GP_Mk_I = {"clsid": "{British_GP_500LB_Bomb_Mk1}", "name": "500 lb GP Mk.I", "weight": 213.188}
_500_lb_GP_Mk_IV = {"clsid": "{British_GP_500LB_Bomb_Mk4}", "name": "500 lb GP Mk.IV", "weight": 213.188}
_500_lb_GP_Mk_V = {"clsid": "{British_GP_500LB_Bomb_Mk5}", "name": "500 lb GP Mk.V", "weight": 213.188}
_500_lb_GP_Short_tail = {"clsid": "{British_GP_500LB_Bomb_Mk4_Short}", "name": "500 lb GP Short tail", "weight": 207.7}
_500_lb_GP_Short_tail_ = {"clsid": "{British_GP_500LB_Bomb_Mk4_Short_on_Handley_Page_Type_B_Cut_Bar}", "name": "500 lb GP Short tail", "weight": 213}
_500_lb_MC_Mk_II = {"clsid": "{British_MC_500LB_Bomb_Mk2}", "name": "500 lb MC Mk.II", "weight": 231.8}
_500_lb_MC_Short_tail = {"clsid": "{British_MC_500LB_Bomb_Mk1_Short}", "name": "500 lb MC Short tail", "weight": 226.3}
_500_lb_MC_Short_tail_ = {"clsid": "{British_MC_500LB_Bomb_Mk1_Short_on_Handley_Page_Type_B_Cut_Bar}", "name": "500 lb MC Short tail", "weight": 231.6}
_500_lb_S_A_P_ = {"clsid": "{British_SAP_500LB_Bomb_Mk5}", "name": "500 lb S.A.P.", "weight": 222.26}
_50_gal__Drop_Tank = {"clsid": "{MOSQUITO_50GAL_SLIPPER_TANK}", "name": "50 gal. Drop Tank", "weight": 187.7}
_51_x_M117___750lb_GP_Bombs_LD = {"clsid": "{72CAC282-AE18-490B-BD4D-35E7EE969E73}", "name": "51 x M117 - 750lb GP Bombs LD", "weight": 17340}
_51_x_Mk_82___500lb_GP_Bombs_LD = {"clsid": "{B84DFE16-6AC7-4854-8F6D-34137892E166}", "name": "51 x Mk-82 - 500lb GP Bombs LD", "weight": 12291}
_5V55 = {"clsid": "5V55", "name": "5V55 S-300PS (SA-10B Grumble)", "weight": None}
_5_x_HVAR__UnGd_Rkt = {"clsid": "{P47_5_HVARS_ON_LEFT_WING_RAILS}", "name": "5 x HVAR, UnGd Rkt", "weight": 330}
_5_x_HVAR__UnGd_Rkt_ = {"clsid": "{P47_5_HVARS_ON_RIGHT_WING_RAILS}", "name": "5 x HVAR, UnGd Rkt", "weight": 330}
_5_x_Mk_82_Snakeye___500lb_GP_Bomb_HD = {"clsid": "{MER-5E_Mk82SNAKEYEx5}", "name": "5 x Mk-82 Snakeye - 500lb GP Bomb HD", "weight": 1250.7}
_5_x_Mk_82___500lb_GP_Bombs_LD = {"clsid": "{MER-5E_MK82x5}", "name": "5 x Mk-82 - 500lb GP Bombs LD", "weight": 1295.7}
_6_x_AGM_86C_ALCM_on_MER = {"clsid": "{45447F82-01B5-4029-A572-9AAD28AF0275}", "name": "6 x AGM-86C ALCM on MER", "weight": 11760}
_6_x_BetAB_500___500kg_Concrete_Piercing_Bombs_LD = {"clsid": "{2B7BDB38-4F45-43F9-BE02-E7B3141F3D24}", "name": "6 x BetAB-500 - 500kg Concrete Piercing Bombs LD", "weight": 2868}
_6_x_FAB_1500_M_54___1500kg_GP_Bombs_LD = {"clsid": "{D9179118-E42F-47DE-A483-A6C2EA7B4F38}", "name": "6 x FAB-1500 M-54 - 1500kg GP Bombs LD", "weight": 9000}
_6_x_FAB_500_M_62___500kg_GP_Bombs_LD = {"clsid": "{26D2AF37-B0DF-4AB6-9D61-A150FF58A37B}", "name": "6 x FAB-500 M-62 - 500kg GP Bombs LD", "weight": 3000}
_6_x_Kh_35__AS_20_Kayak____520kg__AShM__IN__Act_Rdr = {"clsid": "{C42EE4C3-355C-4B83-8B22-B39430B8F4AE}", "name": "6 x Kh-35 (AS-20 Kayak) - 520kg, AShM, IN & Act Rdr", "weight": 2880}
_6_x_Kh_65__AS_15B_Kent____1250kg__ASM__IN__MCC = {"clsid": "{0290F5DE-014A-4BB1-9843-D717749B1DED}", "name": "6 x Kh-65 (AS-15B Kent) - 1250kg, ASM, IN & MCC", "weight": 7500}
_6_x_Mk_20_Rockeye___490lbs_CBUs__247_x_HEAT_Bomblets = {"clsid": "{E79759F7-C622-4AA4-B1EF-37639A34D924}", "name": "6 x Mk-20 Rockeye - 490lbs CBUs, 247 x HEAT Bomblets", "weight": 1332}
_6_x_Mk_82___500lb_GP_Bombs_LD = {"clsid": "{027563C9-D87E-4A85-B317-597B510E3F03}", "name": "6 x Mk-82 - 500lb GP Bombs LD", "weight": 1446}
_75_US_gal__Fuel_Tank = {"clsid": "{DT75GAL}", "name": "75 US gal. Fuel Tank", "weight": 227.048087675}
_8_x_AGM_84A_Harpoon_ASM = {"clsid": "{46ACDCF8-5451-4E26-BDDB-E78D5830E93C}", "name": "8 x AGM-84A Harpoon ASM", "weight": 5292}
_8_x_AGM_86C_ALCM = {"clsid": "{8DCAF3A3-7FCF-41B8-BB88-58DEDA878EDE}", "name": "8 x AGM-86C ALCM", "weight": 15600}
_8_x_Kh_65__AS_15B_Kent____1250kg__ASM__IN__MCC = {"clsid": "{CD9417DF-455F-4176-A5A2-8C58D61AA00B}", "name": "8 x Kh-65 (AS-15B Kent) - 1250kg, ASM, IN & MCC", "weight": 10000}
_9M111 = {"clsid": "_9M111", "name": "AT-4 SPIGOT", "weight": None}
_9M114_Shturm_V_2_Rack = {"clsid": "{9M114 Shturm-V-2 Rack}", "name": "9M114 Shturm-V-2 Rack", "weight": 13}
_9M114_Shturm_V_2__AT_6_Spiral____ATGM__SACLOS = {"clsid": "{B919B0F4-7C25-455E-9A02-CEA51DB895E3}", "name": "9M114 Shturm-V-2 (AT-6 Spiral) - ATGM, SACLOS", "weight": 105}
_9M114_Shturm_V_8__AT_6_Spiral____ATGM__SACLOS = {"clsid": "{57232979-8B0F-4db7-8D9A-55197E06B0F5}", "name": "9M114 Shturm-V-8 (AT-6 Spiral) - ATGM, SACLOS", "weight": 422}
_9M117 = {"clsid": "_9M117", "name": "AT-10 SABBER", "weight": None}
_9M133 = {"clsid": "9M133", "name": "AT-14 KORNET", "weight": None}
_9M14 = {"clsid": "9M14", "name": "AT-3 SAGGER", "weight": None}
_9M31 = {"clsid": "9M31", "name": "SA-9 GASKIN", "weight": None}
_9M311 = {"clsid": "9M311", "name": "SA-19 GRISON", "weight": None}
_9M33 = {"clsid": "9M33", "name": "SA-8 GECKO", "weight": None}
_9M331 = {"clsid": "_9M331", "name": "SA-15 GAUNTLET", "weight": None}
_9M37 = {"clsid": "_9M37", "name": "SA-13 GOPHER", "weight": None}
_9M38 = {"clsid": "_9M38", "name": "SA-11 GADFLY", "weight": None}
_9M39 = {"clsid": "_9M39", "name": "SA-18 GROUSE", "weight": None}
_9S846_Strelets___2_x_Igla = {"clsid": "{9S846_2xIGLA}", "name": "9S846 Strelets - 2 x Igla", "weight": 71}
_NiteHawk_FLIR = {"clsid": "_NiteHawk_FLIR", "name": "AN/AAS-38 \"Nite hawk\" FLIR, Laser designator & Laser spot tracker pod", "weight": 200}
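
# The attribute names above are generated from each weapon's display name;
# trailing underscores disambiguate entries that share a display name but
# differ by clsid (e.g. the left/right rack variants of "2 Mk-82").
#
# weapon_ids below inverts those definitions, keying each weapon's DCS clsid
# string to its dict so that a clsid read from a mission file can be resolved
# back to a weapon entry. A minimal lookup sketch follows; weapon_for_clsid is
# an illustrative helper, not part of the generated API:

def weapon_for_clsid(clsid):
    """Return the weapon dict for a DCS clsid string, or None if unknown.

    weapon_ids is defined below and is resolved when this helper is called,
    e.g. weapon_for_clsid("{U22}") returns the U_22_Jammer_pod entry
    ({"clsid": "{U22}", "name": "U/22 Jammer pod", "weight": 348}).
    """
    return weapon_ids.get(clsid)
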
weapon_ids = {
"{AB_250_2_SD_2}": Weapons.AB_250_2___144_x_SD_2__250kg_CBU_with_HE_submunitions,
"{AB_250_2_SD_10A}": Weapons.AB_250_2___17_x_SD_10A__250kg_CBU_with_10kg_Frag_HE_submunitions,
"{AB_500_1_SD_10A}": Weapons.AB_500_1___34_x_SD_10A__500kg_CBU_with_10kg_Frag_HE_submunitions,
"{ADEN_GUNPOD}": Weapons.ADEN_GUNPOD,
"{BRU42_ADM141}": Weapons.ADM_141A,
"{BRU3242_ADM141}": Weapons.ADM_141A_,
"{ADM_141A}": Weapons.ADM_141A_TALD,
"{ADM_141B}": Weapons.ADM_141B_TALD,
"{AV8BNA_AERO1D}": Weapons.AERO_1D_300_Gallons_Fuel_Tank_,
"{AV8BNA_AERO1D_EMPTY}": Weapons.AERO_1D_300_Gallons_Fuel_Tank__Empty_,
"AGM114x2_OH_58": Weapons.AGM114x2_OH_58,
"{ee368869-c35a-486a-afe7-284beb7c5d52}": Weapons.AGM_114K,
"{88D18A5E-99C8-4B04-B40B-1C02F2018B6E}": Weapons.AGM_114K___4,
"{7B8DCEB4-820B-4015-9B48-1028A4195692}": Weapons.AGM_119B_Penguin_ASM,
"{AGM_122_SIDEARM}": Weapons.AGM_122_Sidearm,
"{LAU_7_AGM_122_SIDEARM}": Weapons.AGM_122_Sidearm_,
"{AGM_122}": Weapons.AGM_122_Sidearm___light_ARM,
"{AGM-154A}": Weapons.AGM_154A___JSOW_CEB__CBU_type_,
"{AGM-154B}": Weapons.AGM_154B___JSOW_Anti_Armour,
"{9BCC2A2B-5708-4860-B1F1-053A18442067}": Weapons.AGM_154C___JSOW_Unitary_BROACH,
"{AGM_45A}": Weapons.AGM_45A_Shrike_ARM,
"{3E6B632D-65EB-44D2-9501-1C2D04515404}": Weapons.AGM_45B_Shrike_ARM__Imp_,
"{C40A1E3A-DD05-40D9-85A4-217729E37FAE}": Weapons.AGM_62_Walleye_II___Guided_Weapon_Mk_5__TV_Guided_,
"{444BA8AE-82A7-4345-842E-76154EFCCA47}": Weapons.AGM_65D___Maverick_D__IIR_ASM_,
"{F16A4DE0-116C-4A71-97F0-2CF85B0313EF}": Weapons.AGM_65E___Maverick_E__Laser_ASM___Lg_Whd_,
"{69DC8AE7-8F77-427B-B8AA-B19D3F478B65}": Weapons.AGM_65K___Maverick_K__CCD_Imp_ASM_,
"AGM_84": Weapons.AGM_84,
"{8B7CADF9-4954-46B3-8CFB-93F2F5B90B03}": Weapons.AGM_84A_Harpoon_ASM,
"{AGM_84D}": Weapons.AGM_84D_Harpoon_AShM,
"{AF42E6DF-9A60-46D8-A9A0-1708B241AADB}": Weapons.AGM_84E_Harpoon_SLAM__Stand_Off_Land_Attack_Missile_,
"{AGM_84E}": Weapons.AGM_84E_Harpoon_SLAM__Stand_Off_Land_Attack_Missile__,
"{AGM_84H}": Weapons.AGM_84H_SLAM_ER__Expanded_Response_,
"{769A15DF-6AFB-439F-9B24-5B7A45C59D16}": Weapons.AGM_86C_ALCM,
"{B06DD79A-F21E-4EB9-BD9D-AB3844618C9C}": Weapons.AGM_88C_HARM___High_Speed_Anti_Radiation_Missile,
"{B06DD79A-F21E-4EB9-BD9D-AB3844618C93}": Weapons.AGM_88C_HARM___High_Speed_Anti_Radiation_Missile_,
"{C8E06185-7CD6-4C90-959F-044679E90751}": Weapons.AIM_120B_AMRAAM___Active_Rdr_AAM,
"{40EF17B7-F508-45de-8566-6FFECC0C1AB8}": Weapons.AIM_120C_5_AMRAAM___Active_Rdr_AAM,
"{AIM_54A_Mk47}": Weapons.AIM_54A_Mk47,
"{SHOULDER AIM_54A_Mk47 L}": Weapons.AIM_54A_Mk47_,
"{SHOULDER AIM_54A_Mk47 R}": Weapons.AIM_54A_Mk47__,
"{AIM_54A_Mk60}": Weapons.AIM_54A_Mk60,
"{SHOULDER AIM_54A_Mk60 L}": Weapons.AIM_54A_Mk60_,
"{SHOULDER AIM_54A_Mk60 R}": Weapons.AIM_54A_Mk60__,
"{AIM_54C_Mk47}": Weapons.AIM_54C_Mk47,
"{SHOULDER AIM_54C_Mk47 L}": Weapons.AIM_54C_Mk47_,
"{7575BA0B-7294-4844-857B-031A144B2595}": Weapons.AIM_54C_Mk47_Phoenix_IN__Semi_Active_Radar,
"{SHOULDER AIM_54C_Mk47 R}": Weapons.AIM_54C_Mk47__,
"{AIM-7E}": Weapons.AIM_7E_Sparrow_Semi_Active_Radar,
"{SHOULDER AIM-7F}": Weapons.AIM_7F,
"{BELLY AIM-7F}": Weapons.AIM_7F_,
"{AIM-7F}": Weapons.AIM_7F_Sparrow_Semi_Active_Radar,
"{SHOULDER AIM-7M}": Weapons.AIM_7M,
"{SHOULDER AIM-7MH}": Weapons.AIM_7MH,
"{BELLY AIM-7MH}": Weapons.AIM_7MH_,
"{AIM-7H}": Weapons.AIM_7MH_Sparrow_Semi_Active_Radar,
"{BELLY AIM-7M}": Weapons.AIM_7M_,
"{8D399DDA-FF81-4F14-904D-099B34FE7918}": Weapons.AIM_7M_Sparrow_Semi_Active_Radar,
"{AIM-9B}": Weapons.AIM_9B_Sidewinder_IR_AAM,
"{AIM-9L}": Weapons.AIM_9L_Sidewinder_IR_AAM,
"{6CEB49FC-DED8-4DED-B053-E1F033FF72D3}": Weapons.AIM_9M_Sidewinder_IR_AAM,
"{AIM-9P5}": Weapons.AIM_9P5_Sidewinder_IR_AAM,
"{9BFD8C90-F7AE-4e90-833B-BFD0CED0E536}": Weapons.AIM_9P_Sidewinder_IR_AAM,
"{5CE2FF2A-645A-4197-B48D-8720AC69394F}": Weapons.AIM_9X_Sidewinder_IR_AAM,
"{VIGGEN_X-TANK}": Weapons.AJS_External_tank_1013kg_fuel,
"{AKAN}": Weapons.AKAN_M_55_Gunpod__150_rnds_MINGR55_HE,
"{E6747967-B1F0-4C77-977B-AB2E6EB0C102}": Weapons.ALARM,
"{6D21ECEA-F85B-4E8D-9D51-31DC9B8AA4EF}": Weapons.ALQ_131___ECM_Pod,
"ALQ_184": Weapons.ALQ_184,
"ALQ_184_Long": Weapons.ALQ_184_Long,
"{A111396E-D3E8-4b9c-8AC9-2432489304D5}": Weapons.AN_AAQ_28_LITENING___Targeting_Pod,
"{AAQ-28_LEFT}": Weapons.AN_AAQ_28_LITENING___Targeting_Pod_,
"{ALQ_164_RF_Jammer}": Weapons.AN_ALQ_164_DECM_Pod,
"{1C2B16EB-8EB0-43de-8788-8EBB2D70B8BC}": Weapons.AN_ASQ_173_Laser_Spot_Tracker_Strike_CAMera__LST_SCAM_,
"{AN_ASQ_213}": Weapons.AN_ASQ_213_HTS___HARM_Targeting_System,
"{AN_ASQ_228}": Weapons.AN_ASQ_228_ATFLIR___Targeting_Pod,
"{AIS_ASQ_T50}": Weapons.AN_ASQ_T50_TCTS_Pod___ACMI_Pod,
"{AN_M30A1}": Weapons.AN_M30A1___100lb_GP_Bomb_LD,
"{AN-M3}": Weapons.AN_M3___2_Browning_Machine_Guns_12_7mm,
"{AN_M57}": Weapons.AN_M57___250lb_GP_Bomb_LD,
"{AN-M64}": Weapons.AN_M64___500lb_GP_Bomb_LD,
"{F86ANM64}": Weapons.AN_M64___500lb_GP_Bomb_LD_,
"{AN_M65}": Weapons.AN_M65___1000lb_GP_Bomb_LD,
"{AN_M66}": Weapons.AN_M66___2000lb_GP_Bomb_LD,
"{APU-60-1_R_60M}": Weapons.APU_60_1M_with_R_60M__AA_8_Aphid____Infra_Red,
"{B0DBC591-0F52-4F7D-AD7B-51E67725FB81}": Weapons.APU_60_2M_with_2_x_R_60M__AA_8_Aphid____Infra_Red,
"{275A2855-4A79-4B2D-B082-91EA2ADF4691}": Weapons.APU_60_2M_with_2_x_R_60M__AA_8_Aphid____Infra_Red_,
"{APU_68_S-24}": Weapons.APU_68___S_24B,
"{A6FD14D3-6D30-4C85-88A7-8D17BEE120E2}": Weapons.APU_6___6_9A4172_Vikhr,
"{F789E86A-EE2E-4E6B-B81E-D5E5F903B6ED}": Weapons.APU_8___8_9A4172_Vikhr,
"{ARAKM70BAP}": Weapons.ARAK_M_70B_AP_6x_135mm_UnGd_Rkts__Pshu70_HEAT,
"{ARAKM70BHE}": Weapons.ARAK_M_70B_HE_6x_135mm_UnGd_Rkts__Shu70_HE_FRAG,
"{ASO-2}": Weapons.ASO_2___countermeasures_pod,
"{M2KC_RAFAUT_BLG66}": Weapons.AUF2_BLG_66_AC_x_2,
"{M2KC_RAFAUT_GBU12}": Weapons.AUF2_GBU_12_x_2,
"{M2KC_RAFAUT_MK82A}": Weapons.AUF2_MK_82_Air_x_2,
"{M2KC_RAFAUT_MK82S}": Weapons.AUF2_MK_82_Snakeyes_x_2,
"{M2KC_RAFAUT_MK82}": Weapons.AUF2_MK_82_x_2,
"{M2KC_RAFAUT_ROCKEYE}": Weapons.AUF2_ROCKEYE_x_2,
"{AWW-13}": Weapons.AWW_13_DATALINK_POD,
"{M2KC_AAF}": Weapons.A_A_Training,
"{M2KC_AGF}": Weapons.A_G_Training,
"{BAP_100}": Weapons.BAP_100_Anti_Runway,
"{M2KC_BAP100_12_RACK}": Weapons.BAP_100_x_12,
"{M2KC_BAP100_18_RACK}": Weapons.BAP_100_x_18,
"{M2KC_BAP100_6_RACK}": Weapons.BAP_100_x_6,
"{BDU-33}": Weapons.BDU_33___25lb_Practice_Bomb_LD,
"{BDU_45}": Weapons.BDU_45,
"{BDU_45B}": Weapons.BDU_45B,
"{BRU-32 BDU-45B}": Weapons.BDU_45B_,
"{BRU-32 BDU-45}": Weapons.BDU_45_,
"{BDU_45LG}": Weapons.BDU_45_LG,
"{BDU-50HD}": Weapons.BDU_50HD___500lb_Inert_Practice_Bomb_HD,
"{BDU-50LD}": Weapons.BDU_50LD___500lb_Inert_Practice_Bomb_LD,
"{BDU-50LGB}": Weapons.BDU_50LGB___500lb_Laser_Guided_Inert_Practice_Bomb_LD,
"{BETAB-500M}": Weapons.BETAB_500M___479_kg__bomb__penetrating,
"{BETAB-500S}": Weapons.BETAB_500S___425_kg__bomb__penetrating,
"{BEER_BOMB}": Weapons.Beer_Bomb,
"Beer_Bomb_(D)_on_LH_Spitfire_Wing_Carrier": Weapons.Beer_Bomb__D__on_LH_Spitfire_Wing_Carrier,
"Beer_Bomb_(D)_on_RH_Spitfire_Wing_Carrier": Weapons.Beer_Bomb__D__on_RH_Spitfire_Wing_Carrier,
"Beer_Bomb_(L)_on_LH_Spitfire_Wing_Carrier": Weapons.Beer_Bomb__L__on_LH_Spitfire_Wing_Carrier,
"Beer_Bomb_(L)_on_RH_Spitfire_Wing_Carrier": Weapons.Beer_Bomb__L__on_RH_Spitfire_Wing_Carrier,
"{BLG66_BELOUGA}": Weapons.Belouga,
"{BD289E34-DF84-4C5E-9220-4B14C346E79D}": Weapons.BetAB_500ShP___500kg_Concrete_Piercing_HD_w_booster_Bomb,
"{35B698AC-9FEF-4EC4-AD29-484A0085F62B}": Weapons.BetAB_500___500kg_Concrete_Piercing_Bomb_LD,
"BF109K_4_FUEL_TANK": Weapons.BF109K_4_FUEL_TANK,
"BGM_109": Weapons.BGM_109,
"BGM-109B": Weapons.BGM_109B,
"BIN_200": Weapons.BIN_200,
"{BKF_AO2_5RT}": Weapons.BKF___12_x_AO_2_5RT,
"{BKF_PTAB2_5KO}": Weapons.BKF___12_x_PTAB_2_5KO,
"{BK90}": Weapons.BK_90_MJ12__12x_MJ2_HEAT___36x_MJ1_HE_FRAG_Bomblets_,
"{BK90MJ1}": Weapons.BK_90_MJ1__72_x_MJ1_HE_FRAG_Bomblets_,
"{BK90MJ2}": Weapons.BK_90_MJ2__24_x_MJ2_HEAT_Bomblets_,
"{BLG66_BELOUGA_AC}": Weapons.BLG_66_AC_Belouga,
"{BLG66_AC}": Weapons.BLG_66_Belouga___290kg_CBU__151_Frag_Pen_bomblets,
"{752B9781-F962-11d5-9190-00A0249B6F00}": Weapons.BLU_107___440lb_Anti_Runway_Penetrator_Bomb,
"{08164777-5E9C-4B08-B48E-5AA7AFB246E2}": Weapons.BL_755_CBU___450kg__147_Frag_Pen_bomblets,
"{8C3F26A1-FA0F-11d5-9190-00A0249B6F00}": Weapons.BOZ_107___Countermeasure_Dispenser,
"{BRU33_LAU10}": Weapons.BRU_33_with_1_x_LAU_10_pod___4_x_127mm_ZUNI__UnGd_Rkts_Mk71__HE_FRAG,
"{BRU33_LAU61}": Weapons.BRU_33_with_1_x_LAU_61_pod___19_x_2_75_Hydra__UnGd_Rkts_M151__HE,
"{BRU33_LAU61_M282}": Weapons.BRU_33_with_1_x_LAU_61_pod___19_x_2_75_Hydra__UnGd_Rkts_M282__HEDP,
"{BRU33_LAU68}": Weapons.BRU_33_with_1_x_LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M151__HE,
"{BRU33_LAU68_M282}": Weapons.BRU_33_with_1_x_LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M282__HEDP,
"{BRU33_LAU68_MK5}": Weapons.BRU_33_with_1_x_LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_Mk5__HEAT,
"{BRU33_2X_BDU-45B}": Weapons.BRU_33_with_2_x_BDU_45B___500lb_Practice_Bomb,
"{BRU33_2X_BDU_45LG}": Weapons.BRU_33_with_2_x_BDU_45_LG_500lb_Practice_Laser_Guided_Bomb,
"{BRU33_2X_BDU-45}": Weapons.BRU_33_with_2_x_BDU_45___500lb_Practice_Bomb,
"{BRU33_2X_CBU-99}": Weapons.BRU_33_with_2_x_CBU_99___490lbs__247_x_HEAT_Bomblets,
"{BRU33_2X_GBU-12}": Weapons.BRU_33_with_2_x_GBU_12___500lb_Laser_Guided_Bomb,
"{BRU33_2X_GBU-16}": Weapons.BRU_33_with_2_x_GBU_16___1000lb_Laser_Guided_Bomb,
"{BRU33_2*LAU10}": Weapons.BRU_33_with_2_x_LAU_10_pod___4_x_127mm_ZUNI__UnGd_Rkts_Mk71__HE_FRAG,
"{BRU33_2*LAU61}": Weapons.BRU_33_with_2_x_LAU_61_pod___19_x_2_75_Hydra__UnGd_Rkts_M151__HE,
"{BRU33_2*LAU61_M282}": Weapons.BRU_33_with_2_x_LAU_61_pod___19_x_2_75_Hydra__UnGd_Rkts_M282__HEDP,
"{BRU33_2*LAU68}": Weapons.BRU_33_with_2_x_LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M151__HE,
"{BRU33_2*LAU68_M282}": Weapons.BRU_33_with_2_x_LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M282__HEDP,
"{BRU33_2*LAU68_MK5}": Weapons.BRU_33_with_2_x_LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_Mk5__HEAT,
"{BRU33_2X_ROCKEYE}": Weapons.BRU_33_with_2_x_Mk_20_Rockeye___490lbs_CBU__247_x_HEAT_Bomblets,
"{BRU33_2X_MK-82Y}": Weapons.BRU_33_with_2_x_Mk_82Y___500lb_GP_Chute_Retarded_HD,
"{BRU33_2X_MK-82_Snakeye}": Weapons.BRU_33_with_2_x_Mk_82_Snakeye___500lb_GP_Bomb_HD,
"{BRU33_2X_MK-82}": Weapons.BRU_33_with_2_x_Mk_82___500lb_GP_Bomb_LD,
"{BRU33_2X_MK-83}": Weapons.BRU_33_with_2_x_Mk_83___1000lb_GP_Bomb_LD,
"{BRU41_6X_BDU-33}": Weapons.BRU_41A_with_6_x_BDU_33___25lb_Practice_Bomb_LD,
"{BRU41_6X_MK-82}": Weapons.BRU_41A_with_6_x_Mk_82___500lb_GP_Bomb_LD,
"BRU-42_3*BDU-33": Weapons.BRU_42_3_BDU_33,
"BRU-42_3*GBU-12": Weapons.BRU_42_3_GBU_12,
"BRU-42_LS": Weapons.BRU_42_LS,
"{62BE78B1-9258-48AE-B882-279534C0D278}": Weapons.BRU_42_with_2_x_GBU_10___2000lb_Laser_Guided_Bombs,
"{EB969276-1922-4ED1-A5CB-18590F45D7FE}": Weapons.BRU_42_with_2_x_GBU_27___2000lb_Laser_Guided_Penetrator_Bombs,
"{88D49E04-78DF-4F08-B47E-B81247A9E3C5}": Weapons.BRU_42_with_3_x_GBU_16___1000lb_Laser_Guided_Bombs,
"{LAU-131x3 - 7 AGR-20A}": Weapons.BRU_42_with_3_x_LAU_131_pods___7_x_2_75_Hydra__Laser_Guided_Rkts_M151__HE_APKWS,
"{LAU-131x3 - 7 AGR-20 M282}": Weapons.BRU_42_with_3_x_LAU_131_pods___7_x_2_75_Hydra__Laser_Guided_Rkts_M282__MPP_APKWS,
"{64329ED9-B14C-4c0b-A923-A3C911DA1527}": Weapons.BRU_42_with_3_x_LAU_68_pods___21_x_2_75_Hydra__UnGd_Rkts_M151__HE,
"{C2593383-3CA8-4b18-B73D-0E750BCA1C85}": Weapons.BRU_42_with_3_x_LAU_68_pods___21_x_2_75_Hydra__UnGd_Rkts_M156__Wht_Phos,
"{E6966004-A525-4f47-AF94-BCFEDF8FDBDA}": Weapons.BRU_42_with_3_x_LAU_68_pods___21_x_2_75_Hydra__UnGd_Rkts_M257__Para_Illum,
"{4C044B08-886B-46c8-9B1F-AB05B3ED9C1D}": Weapons.BRU_42_with_3_x_LAU_68_pods___21_x_2_75_Hydra__UnGd_Rkts_M274__Practice_Smk,
"{443364AE-D557-488e-9499-45EDB3BA6730}": Weapons.BRU_42_with_3_x_LAU_68_pods___21_x_2_75_Hydra__UnGd_Rkts_Mk1__Practice,
"{9BC82B3D-FE70-4910-B2B7-3E54EFE73262}": Weapons.BRU_42_with_3_x_LAU_68_pods___21_x_2_75_Hydra__UnGd_Rkts_Mk5__HEAT,
"{C0FA251E-B645-4ce5-926B-F4BC20822F8B}": Weapons.BRU_42_with_3_x_LAU_68_pods___21_x_2_75_Hydra__UnGd_Rkts_Mk61__Practice,
"{A1853B38-2160-4ffe-B7E9-9BF81E6C3D77}": Weapons.BRU_42_with_3_x_LAU_68_pods___21_x_2_75_Hydra__UnGd_Rkts_WTU_1_B__Practice,
"{BRU_42_3xLAU68_M282}": Weapons.BRU_42_with_3_x_LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M282__HEDP,
"{B83CB620-5BBE-4BEA-910C-EB605A327EF9}": Weapons.BRU_42_with_3_x_Mk_20_Rockeye___490lbs_CBUs__247_x_HEAT_Bomblets,
"{7B34E0BB-E427-4C2A-A61A-8407CE18B54D}": Weapons.BRU_42_with_3_x_Mk_81___250lb_GP_Bombs_LD,
"{BRU-42_3*Mk-82AIR}": Weapons.BRU_42_with_3_x_Mk_82_AIR_Ballute___500lb_GP_Bombs_HD,
"{60CC734F-0AFA-4E2E-82B8-93B941AB11CF}": Weapons.BRU_42_with_3_x_Mk_82___500lb_GP_Bombs_LD,
"{BRU-42_LS_3*SUU-25_8*LUU-2}": Weapons.BRU_42_with_3_x_SUU_25_x_8_LUU_2___Target_Marker_Flares,
"{BRU55_2*AGM-154A}": Weapons.BRU_55_with_2_x_AGM_154A___JSOW_CEB__CBU_type_,
"{BRU55_2*AGM-154C}": Weapons.BRU_55_with_2_x_AGM_154C___JSOW_Unitary_BROACH,
"{BRU55_2*GBU-38}": Weapons.BRU_55_with_2_x_GBU_38___JDAM__500lb_GPS_Guided_Bomb,
"{BRU57_2*AGM-154A}": Weapons.BRU_57_with_2_x_AGM_154A___JSOW_CEB__CBU_type_,
"{BRU57_2*AGM-154B}": Weapons.BRU_57_with_2_x_AGM_154B___JSOW_Anti_Armour,
"{BRU57_2*CBU-103}": Weapons.BRU_57_with_2_x_CBU_103___202_x_CEM__CBU_with_WCMD,
"{BRU57_2*CBU-105}": Weapons.BRU_57_with_2_x_CBU_105___10_x_SFW__CBU_with_WCMD,
"{BRU57_2*GBU-38}": Weapons.BRU_57_with_2_x_GBU_38___JDAM__500lb_GPS_Guided_Bomb,
"BR_250": Weapons.BR_250,
"BR_500": Weapons.BR_500,
"British_GP_250LBS_Bomb_MK4_on_LH_Spitfire_Wing_Carrier": Weapons.British_GP_250LBS_Bomb_MK4_on_LH_Spitfire_Wing_Carrier,
"British_GP_250LBS_Bomb_MK4_on_RH_Spitfire_Wing_Carrier": Weapons.British_GP_250LBS_Bomb_MK4_on_RH_Spitfire_Wing_Carrier,
"British_GP_500LBS_Bomb_MK4_on_British_UniversalBC_MK3": Weapons.British_GP_500LBS_Bomb_MK4_on_British_UniversalBC_MK3,
"{FC56DF80-9B09-44C5-8976-DCFAFF219062}": Weapons.B_13L_pod___5_x_S_13_OF__122mm_UnGd_Rkts__Blast_Frag,
"B-1B_Mk-84*8": Weapons.B_1B_Mk_84_8,
"{F72F47E5-C83A-4B85-96ED-D3E46671EE9A}": Weapons.B_8M1_pod___20_x_S_8KOM__80mm_UnGd_Rkts__HEAT_AP,
"{3DFB7320-AB0E-11d7-9897-000476191836}": Weapons.B_8M1_pod___20_x_S_8TsM__80mm_UnGd_Rkts__Smk,
"B-8M1 - 20 S-8OFP2": Weapons.B_8M1___20_S_8OFP2,
"B_8V20A_CM": Weapons.B_8V20A_CM,
"B_8V20A_CM_BU": Weapons.B_8V20A_CM_BU,
"B_8V20A_CM_GN": Weapons.B_8V20A_CM_GN,
"B_8V20A_CM_RD": Weapons.B_8V20A_CM_RD,
"B_8V20A_CM_VT": Weapons.B_8V20A_CM_VT,
"B_8V20A_CM_WH": Weapons.B_8V20A_CM_WH,
"B_8V20A_CM_YE": Weapons.B_8V20A_CM_YE,
"B_8V20A_OFP2": Weapons.B_8V20A_OFP2,
"B_8V20A_OM": Weapons.B_8V20A_OM,
"{6A4B9E69-64FE-439a-9163-3A87FB6A4D81}": Weapons.B_8V20A_pod___20_x_S_8KOM__80mm_UnGd_Rkts__HEAT_AP,
"CATM-9M": Weapons.CATM_9M,
"CBLS-200": Weapons.CBLS_200,
"CBU87*10": Weapons.CBU87_10,
"CBU97*10": Weapons.CBU97_10,
"{CBU_103}": Weapons.CBU_103___202_x_CEM__CBU_with_WCMD,
"{CBU_105}": Weapons.CBU_105___10_x_SFW__CBU_with_WCMD,
"{CBU-52B}": Weapons.CBU_52B___220_x_HE_Frag_bomblets,
"{CBU-87}": Weapons.CBU_87___202_x_CEM_Cluster_Bomb,
"{5335D97A-35A5-4643-9D9B-026C75961E52}": Weapons.CBU_97___10_x_SFW_Cluster_Bomb,
"{CBU_99}": Weapons.CBU_99___490lbs__247_x_HEAT_Bomblets,
"{CM_802AKG}": Weapons.CM_802AKG,
"{C_802AK}": Weapons.C_802AK,
"{C-101-DEFA553}": Weapons.DEFA_553___30mm_Revolver_Cannon,
"DIS_AKD-10": Weapons.DIS_AKD_10,
"DIS_AKG_DLPOD": Weapons.DIS_AKG_DLPOD,
"DIS_BOMB_250_2": Weapons.DIS_BOMB_250_2,
"DIS_BOMB_250_3": Weapons.DIS_BOMB_250_3,
"DIS_BRM1_90": Weapons.DIS_BRM1_90,
"DIS_CM-802AKG": Weapons.DIS_CM_802AKG,
"DIS_C-701IR": Weapons.DIS_C_701IR,
"DIS_C-701T": Weapons.DIS_C_701T,
"DIS_C-802AK": Weapons.DIS_C_802AK,
"DIS_DF4A_KD20": Weapons.DIS_DF4A_KD20,
"DIS_DF4B_YJ12": Weapons.DIS_DF4B_YJ12,
"DIS_GB6": Weapons.DIS_GB6,
"DIS_GB6_HE": Weapons.DIS_GB6_HE,
"DIS_GB6_TSP": Weapons.DIS_GB6_TSP,
"DIS_GBU_10": Weapons.DIS_GBU_10,
"DIS_GBU_12": Weapons.DIS_GBU_12,
"DIS_GBU_12_DUAL_GDJ_II19_L": Weapons.DIS_GBU_12_DUAL_GDJ_II19_L,
"DIS_GBU_12_DUAL_GDJ_II19_R": Weapons.DIS_GBU_12_DUAL_GDJ_II19_R,
"DIS_GBU_16": Weapons.DIS_GBU_16,
"DIS_GDJ_KD63": Weapons.DIS_GDJ_KD63,
"DIS_GDJ_KD63B": Weapons.DIS_GDJ_KD63B,
"DIS_GDJ_YJ83K": Weapons.DIS_GDJ_YJ83K,
"DIS_H6_250_2_N12": Weapons.DIS_H6_250_2_N12,
"DIS_H6_250_2_N24": Weapons.DIS_H6_250_2_N24,
"DIS_KD20": Weapons.DIS_KD20,
"DIS_KD63": Weapons.DIS_KD63,
"DIS_KD63B": Weapons.DIS_KD63B,
"DIS_LAU68_MK5_DUAL_GDJ_II19_L": Weapons.DIS_LAU68_MK5_DUAL_GDJ_II19_L,
"DIS_LAU68_MK5_DUAL_GDJ_II19_R": Weapons.DIS_LAU68_MK5_DUAL_GDJ_II19_R,
"DIS_LD-10": Weapons.DIS_LD_10,
"DIS_LD-10_DUAL_L": Weapons.DIS_LD_10_DUAL_L,
"DIS_LD-10_DUAL_R": Weapons.DIS_LD_10_DUAL_R,
"DIS_LS_6_500": Weapons.DIS_LS_6_500,
"DIS_MER6_250_3_N6": Weapons.DIS_MER6_250_3_N6,
"DIS_MK_20": Weapons.DIS_MK_20,
"DIS_MK_20_DUAL_GDJ_II19_L": Weapons.DIS_MK_20_DUAL_GDJ_II19_L,
"DIS_MK_20_DUAL_GDJ_II19_R": Weapons.DIS_MK_20_DUAL_GDJ_II19_R,
"DIS_MK_82S_DUAL_GDJ_II19_L": Weapons.DIS_MK_82S_DUAL_GDJ_II19_L,
"DIS_MK_82S_DUAL_GDJ_II19_R": Weapons.DIS_MK_82S_DUAL_GDJ_II19_R,
"DIS_MK_82_DUAL_GDJ_II19_L": Weapons.DIS_MK_82_DUAL_GDJ_II19_L,
"DIS_MK_82_DUAL_GDJ_II19_R": Weapons.DIS_MK_82_DUAL_GDJ_II19_R,
"DIS_PL-12": Weapons.DIS_PL_12,
"DIS_PL-5EII": Weapons.DIS_PL_5EII,
"DIS_PL-8A": Weapons.DIS_PL_8A,
"DIS_PL-8B": Weapons.DIS_PL_8B,
"DIS_RKT_90_UG": Weapons.DIS_RKT_90_UG,
"DIS_SD-10": Weapons.DIS_SD_10,
"DIS_SD-10_DUAL_L": Weapons.DIS_SD_10_DUAL_L,
"DIS_SD-10_DUAL_R": Weapons.DIS_SD_10_DUAL_R,
"DIS_SMOKE_GENERATOR_B": Weapons.DIS_SMOKE_GENERATOR_B,
"DIS_SMOKE_GENERATOR_G": Weapons.DIS_SMOKE_GENERATOR_G,
"DIS_SMOKE_GENERATOR_O": Weapons.DIS_SMOKE_GENERATOR_O,
"DIS_SMOKE_GENERATOR_R": Weapons.DIS_SMOKE_GENERATOR_R,
"DIS_SMOKE_GENERATOR_W": Weapons.DIS_SMOKE_GENERATOR_W,
"DIS_SMOKE_GENERATOR_Y": Weapons.DIS_SMOKE_GENERATOR_Y,
"DIS_SPJ_POD": Weapons.DIS_SPJ_POD,
"DIS_TANK1100": Weapons.DIS_TANK1100,
"DIS_TANK1100_EMPTY": Weapons.DIS_TANK1100_EMPTY,
"DIS_TANK800": Weapons.DIS_TANK800,
"DIS_TANK800_EMPTY": Weapons.DIS_TANK800_EMPTY,
"DIS_TYPE200": Weapons.DIS_TYPE200,
"DIS_TYPE200_DUAL_L": Weapons.DIS_TYPE200_DUAL_L,
"DIS_TYPE200_DUAL_R": Weapons.DIS_TYPE200_DUAL_R,
"DIS_WMD7": Weapons.DIS_WMD7,
"DIS_YJ12": Weapons.DIS_YJ12,
"DIS_YJ83K": Weapons.DIS_YJ83K,
"{DWS39_MJ1}": Weapons.DWS39_MJ1,
"{DWS39_MJ1_MJ2}": Weapons.DWS39_MJ1_MJ2,
"{DWS39_MJ2}": Weapons.DWS39_MJ2,
"{Eclair}": Weapons.Eclair,
"ER_4_SC50": Weapons.ER_4_SC50,
"{0519A261-0AB6-11d6-9193-00A0249B6F00}": Weapons.ETHER,
"FAB_100M": Weapons.FAB_100M,
"FAB_100M": Weapons.FAB_100M_,
"{FAB-100-4}": Weapons.FAB_100_x_4,
"{FB3CE165-BF07-4979-887C-92B87F13276B}": Weapons.FAB_100___100kg_GP_Bomb_LD,
"{40AA4ABE-D6EB-4CD6-AEFE-A1A0477B24AB}": Weapons.FAB_1500_M_54___1500kg_GP_Bomb_LD,
"{FAB-250-M54-TU}": Weapons.FAB_250_M54_TU___235_kg__bomb__parachute,
"{FAB-250-M54}": Weapons.FAB_250_M54___235_kg__bomb__parachute,
"{FAB_250_M62}": Weapons.FAB_250_M62___250kg_GP_Bomb_LD,
"{3C612111-C7AD-476E-8A8E-2485812F4E5C}": Weapons.FAB_250___250kg_GP_Bomb_LD,
"FAB_50": Weapons.FAB_50,
"{FAB-500-M54-TU}": Weapons.FAB_500_M54_TU___480_kg__bomb__parachute,
"{FAB-500-M54}": Weapons.FAB_500_M54___474_kg__bomb__free_fall,
"{37DCC01E-9E02-432F-B61D-10C166CA2798}": Weapons.FAB_500_M_62___500kg_GP_Bomb_LD,
"{FAB-500-SL}": Weapons.FAB_500_SL___515_kg__bomb__parachute,
"{FAB-500-TA}": Weapons.FAB_500_TA___477_kg__bomb__free_fall,
"FAB_50": Weapons.FAB_50_,
"FIM_92": Weapons.FIM_92,
"{FPU_8A_FUEL_TANK}": Weapons.FPU_8A_Fuel_Tank_330_gallons,
"{PTB_120_F86F35}": Weapons.Fuel_Tank_120_gallons,
"{PTB_150L_L39}": Weapons.Fuel_Tank_150_liters,
"{PTB_200_F86F35}": Weapons.Fuel_Tank_200_gallons,
"{PTB_350L_L39}": Weapons.Fuel_Tank_350_liters,
"{PTB_490C_MIG21}": Weapons.Fuel_Tank_490_L_Central__21_,
"{PTB_490_MIG21}": Weapons.Fuel_Tank_490_L__21_,
"{PTB_800_MIG21}": Weapons.Fuel_Tank_800_L__21_,
"Fuel_Tank_FT600": Weapons.Fuel_Tank_FT600,
"{414E383A-59EB-41BC-8566-2B5E0788ED1F}": Weapons.Fuel_tank_1150L,
"{C0FF4842-FBAC-11d5-9190-00A0249B6F00}": Weapons.Fuel_tank_1150L_MiG_29,
"{2BEC576B-CDF5-4B7F-961F-B0FA4312B841}": Weapons.Fuel_tank_1400L,
"{16602053-4A12-40A2-B214-AB60D481B20E}": Weapons.Fuel_tank_2000L,
"{7D7EC917-05F6-49D4-8045-61FC587DD019}": Weapons.Fuel_tank_3000L,
"{8A0BE8AE-58D4-4572-9263-3144C0D06364}": Weapons.Fuel_tank_300_gal,
"{F14-300gal}": Weapons.Fuel_tank_300_gal_,
"{F14-300gal-empty}": Weapons.Fuel_tank_300_gal__empty_,
"{EFEC8200-B922-11d7-9897-000476191836}": Weapons.Fuel_tank_330_gal,
"{EFEC8201-B922-11d7-9897-000476191836}": Weapons.Fuel_tank_330_gal_,
"{82364E69-5564-4043-A866-E13032926C3E}": Weapons.Fuel_tank_367_gal,
"{F376DBEE-4CAE-41BA-ADD9-B2910AC95DEC}": Weapons.Fuel_tank_370_gal,
"{0855A3A1-FA50-4C89-BDBB-5D5360ABA071}": Weapons.Fuel_tank_5000L,
"{E1F29B21-F291-4589-9FD8-3272EEC69506}": Weapons.Fuel_tank_610_gal,
"{A5BAEAB7-6FAF-4236-AF72-0FD900F493F9}": Weapons.Fuel_tank_800L,
"{E8D4652F-FD48-45B7-BA5B-2AE05BB5A9CF}": Weapons.Fuel_tank_800L_Wing,
"{B99EE8A8-99BC-4a8d-89AC-A26831920DCE}": Weapons.Fuel_tank_PTB_450,
"{PTB_450}": Weapons.Fuel_tank_PTB_450_,
"{A504D93B-4E80-4B4F-A533-0D9B65F2C55F}": Weapons.Fuel_tank_S_3,
"FW109_FUEL_TANK": Weapons.FW109_FUEL_TANK,
"{8B9E3FD0-F034-4A07-B6CE-C269884CC71B}": Weapons.F_4_Fuel_tank_C,
"{7B4B122D-C12C-4DB4-834E-4D8BB4D863A8}": Weapons.F_4_Fuel_tank_W,
"{PTB-150GAL}": Weapons.F_5_150Gal_Fuel_tank,
"{0395076D-2F77-4420-9D33-087A4398130B}": Weapons.F_5_275Gal_Fuel_tank,
"{GAU_12_Equalizer_AP}": Weapons.GAU_12_Gunpod_w_AP_M79,
"{GAU_12_Equalizer_HE}": Weapons.GAU_12_Gunpod_w_HE_M792,
"{GAU_12_Equalizer}": Weapons.GAU_12_Gunpod_w_SAPHEI_T,
"{BRU-32 GBU-10}": Weapons.GBU_10,
"{51F9AAE5-964F-4D21-83FB-502E3BFE5F8A}": Weapons.GBU_10___2000lb_Laser_Guided_Bomb,
"{BRU-32 GBU-12}": Weapons.GBU_12,
"{DB769D48-67D7-42ED-A2BE-108D566C8B1E}": Weapons.GBU_12___500lb_Laser_Guided_Bomb,
"{BRU-32 GBU-16}": Weapons.GBU_16,
"{0D33DDAE-524F-4A4E-B5B8-621754FE3ADE}": Weapons.GBU_16___1000lb_Laser_Guided_Bomb,
"{BRU-32 GBU-24}": Weapons.GBU_24,
"{34759BBC-AF1E-4AEE-A581-498FF7A6EBCE}": Weapons.GBU_24_Paveway_III___2000lb_Laser_Guided_Bomb,
"{GBU-24}": Weapons.GBU_24_Paveway_III___2000lb_Laser_Guided_Bomb_,
"{EF0A9419-01D6-473B-99A3-BEBDB923B14D}": Weapons.GBU_27___2000lb_Laser_Guided_Penetrator_Bomb,
"{F06B775B-FC70-44B5-8A9F-5B5E2EB839C7}": Weapons.GBU_28___5000lb_Laser_Guided_Penetrator_Bomb,
"GBU-31V3B*8": Weapons.GBU_31V3B_8,
"GBU-31*8": Weapons.GBU_31_8,
"{GBU-31}": Weapons.GBU_31_V_1_B___JDAM__2000lb_GPS_Guided_Bomb,
"{GBU_31_V_2B}": Weapons.GBU_31_V_2_B___JDAM__2000lb_GPS_Guided_Bomb,
"{GBU-31V3B}": Weapons.GBU_31_V_3_B___JDAM__2000lb_GPS_Guided_Penetrator_Bomb,
"{GBU_31_V_4B}": Weapons.GBU_31_V_4_B___JDAM__2000lb_GPS_Guided_Penetrator_Bomb,
"{GBU_32_V_2B}": Weapons.GBU_32_V_2_B___JDAM__1000lb_GPS_Guided_Bomb,
"GBU-38*16": Weapons.GBU_38_16,
"{GBU-38}": Weapons.GBU_38___JDAM__500lb_GPS_Guided_Bomb,
"{GBU_54_V_1B}": Weapons.GBU_54B___LJDAM__500lb_Laser__GPS_Guided_Bomb_LD,
"GUV_VOG": Weapons.GUV_VOG,
"GUV_YakB_GSHP": Weapons.GUV_YakB_GSHP,
"{HOT3G}": Weapons.HOT3,
"{HOT3D}": Weapons.HOT3_,
"{4CD2BB0F-5493-44EF-A927-9760350F7BA1}": Weapons.HSAB_with_9_x_Mk_20_Rockeye___490lbs_CBUs__247_x_HEAT_Bomblets,
"{696CFFC4-0BDE-42A8-BE4B-0BE3D9DD723C}": Weapons.HSAB_with_9_x_Mk_83___1000lb_GP_Bombs_LD,
"{HVAR_SMOKE_2}": Weapons.HVAR_SMOKE__UnGd_Rkt,
"{HVAR_SMOKE_GENERATOR}": Weapons.HVAR_Smoke_Generator,
"{HVAR}": Weapons.HVAR__UnGd_Rkt,
"I16_DROP_FUEL_TANK": Weapons.I16_DROP_FUEL_TANK,
"I16_FAB_100SV": Weapons.I16_FAB_100SV,
"I16_RS_82": Weapons.I16_RS_82,
"{IAB-500}": Weapons.IAB_500___470_kg__bomb__free_fall,
"{IR_Deflector}": Weapons.IR_Deflector,
"{KAB_1500Kr_LOADOUT}": Weapons.KAB_1500Kr___1500kg_TV_Guided_Bomb,
"{KAB_1500LG_LOADOUT}": Weapons.KAB_1500LG_Pr___1500kg_Laser_Guided_Penetrator_Bomb,
"{39821727-F6E2-45B3-B1F0-490CC8921D1E}": Weapons.KAB_1500L___1500kg_Laser_Guided_Bomb,
"{E2C426E3-8B10-4E09-B733-9CDC26520F48}": Weapons.KAB_500Kr___500kg_TV_Guided_Bomb,
"{BA565F89-2373-4A84-9502-A0E017D3A44A}": Weapons.KAB_500LG___500kg_Laser_Guided_Bomb,
"{KAB_500S_LOADOUT}": Weapons.KAB_500S___500kg_GPS_Guided_Bomb,
"{KB}": Weapons.KB_Flare_Chaff_dispenser_pod,
"{12429ECF-03F0-4DF6-BCBD-5D38B6343DE1}": Weapons.Kh_22__AS_4_Kitchen____1000kg__AShM__IN__Act_Pas_Rdr,
"{9F390892-E6F9-42C9-B84E-1136A881DCB2}": Weapons.Kh_23L_Grom__AS_7_Kerry____286kg__ASM__Laser_Guided,
"{6DADF342-D4BA-4D8A-B081-BA928C4AF86D}": Weapons.Kh_25ML__AS_10_Karen____300kg__ASM__Semi_Act_Laser,
"{79D73885-0801-45a9-917F-C90FE1CE3DFC}": Weapons.Kh_25ML__AS_10_Karen____300kg__ASM__Semi_Act_Laser_,
"{X-25ML}": Weapons.Kh_25ML__AS_10_Karen____300kg__ASM__Semi_Act_Laser__,
"{E86C5AA5-6D49-4F00-AD2E-79A62D6DDE26}": Weapons.Kh_25MPU__Updated_AS_12_Kegler____320kg__ARM__IN__Pas_Rdr,
"{752AF1D2-EBCC-4bd7-A1E7-2357F5601C70}": Weapons.Kh_25MPU__Updated_AS_12_Kegler____320kg__ARM__IN__Pas_Rdr_,
"{X-25MPU}": Weapons.Kh_25MPU__Updated_AS_12_Kegler____320kg__ARM__IN__Pas_Rdr__,
"{Kh-25MP}": Weapons.Kh_25MP__AS_12_Kegler____320kg__ARM__Pas_Rdr,
"{292960BB-6518-41AC-BADA-210D65D5073C}": Weapons.Kh_25MR__AS_10_Karen____300kg__ASM__10km__RC_Guided,
"{X-25MR}": Weapons.Kh_25MR__AS_10_Karen____300kg__ASM__RC_Guided,
"{Kh-28}": Weapons.Kh_28__AS_9_Kyle____720kg__ARM__Pas_Rdr,
"{3468C652-E830-4E73-AFA9-B5F260AB7C3D}": Weapons.Kh_29L__AS_14_Kedge____657kg__ASM__Semi_Act_Laser,
"{D4A8D9B9-5C45-42e7-BBD2-0E54F8308432}": Weapons.Kh_29L__AS_14_Kedge____657kg__ASM__Semi_Act_Laser_,
"{X-29L}": Weapons.Kh_29L__AS_14_Kedge____657kg__ASM__Semi_Act_Laser__,
"{B4FC81C9-B861-4E87-BBDC-A1158E648EBF}": Weapons.Kh_29T__AS_14_Kedge____670kg__ASM__TV_Guided,
"{601C99F7-9AF3-4ed7-A565-F8B8EC0D7AAC}": Weapons.Kh_29T__AS_14_Kedge____670kg__ASM__TV_Guided_,
"{X-29T}": Weapons.Kh_29T__AS_14_Kedge____670kg__ASM__TV_Guided__,
"{4D13E282-DF46-4B23-864A-A9423DFDE504}": Weapons.Kh_31A__AS_17_Krypton____610kg__AShM__IN__Act_Rdr,
"{4D13E282-DF46-4B23-864A-A9423DFDE50A}": Weapons.Kh_31A__AS_17_Krypton____610kg__AShM__IN__Act_Rdr_,
"{X-31A}": Weapons.Kh_31A__AS_17_Krypton____610kg__AShM__IN__Act_Rdr__,
"{D8F2C90B-887B-4B9E-9FE2-996BC9E9AF03}": Weapons.Kh_31P__AS_17_Krypton____600kg__ARM__IN__Pas_Rdr,
"{D8F2C90B-887B-4B9E-9FE2-996BC9E9AF0A}": Weapons.Kh_31P__AS_17_Krypton____600kg__ARM__IN__Pas_Rdr_,
"{X-31P}": Weapons.Kh_31P__AS_17_Krypton____600kg__ARM__IN__Pas_Rdr__,
"{2234F529-1D57-4496-8BB0-0150F9BDBBD2}": Weapons.Kh_35__AS_20_Kayak____520kg__AShM__IN__Act_Rdr,
"{2234F529-1D57-4496-8BB0-0150F9BDBBD3}": Weapons.Kh_35__AS_20_Kayak____520kg__AShM__IN__Act_Rdr_,
"{3F26D9C5-5CC3-4E42-BC79-82FAA54E9F26}": Weapons.Kh_41__SS_N_22_Sunburn____4500kg__AShM__IN__Act_Rdr,
"{FE382A68-8620-4AC0-BDF5-709BFE3977D7}": Weapons.Kh_58U__AS_11_Kilter____640kg__ARM__IN__Pas_Rdr,
"{B5CA9846-776E-4230-B4FD-8BCC9BFB1676}": Weapons.Kh_58U__AS_11_Kilter____640kg__ARM__IN__Pas_Rdr_,
"{40AB87E8-BEFB-4D85-90D9-B2753ACF9514}": Weapons.Kh_59M__AS_18_Kazoo____930kg__ASM__IN,
"{BADAF2DE-68B5-472A-8AAC-35BAEFF6B4A1}": Weapons.Kh_65__AS_15B_Kent____1250kg__ASM__IN__MCC,
"{Kh-66_Grom}": Weapons.Kh_66_Grom__21____AGM__radar_guided_APU_68,
"{96A7F676-F956-404A-AD04-F33FB2C74884}": Weapons.KMGU_2___96_x_AO_2_5RT_Dispenser__CBU__HE_Frag,
"{96A7F676-F956-404A-AD04-F33FB2C74881}": Weapons.KMGU_2___96_x_PTAB_2_5KO_Dispenser__CBU__HEAT_AP,
"KORD_12_7": Weapons.KORD_12_7,
"{F4920E62-A99A-11d8-9897-000476191836}": Weapons.Kopyo_radar_pod,
"{7210496B-7B81-4B52-80D6-8529ECF847CD}": Weapons.Kormoran___ASM,
"{K-13A}": Weapons.K_13A,
"{44EE8698-89F9-48EE-AF36-5FD31896A82F}": Weapons.L005_Sorbtsiya_ECM_pod__left_,
"{44EE8698-89F9-48EE-AF36-5FD31896A82A}": Weapons.L005_Sorbtsiya_ECM_pod__right_,
"{ECM_POD_L_175V}": Weapons.L175V_Khibiny_ECM_pod,
"{F14-LANTIRN-TP}": Weapons.LANTIRN_Targeting_Pod,
"LAU3_HE151": Weapons.LAU3_HE151,
"LAU3_HE5": Weapons.LAU3_HE5,
"LAU3_WP156": Weapons.LAU3_WP156,
"LAU3_WP1B": Weapons.LAU3_WP1B,
"LAU3_WP61": Weapons.LAU3_WP61,
"LAU-105": Weapons.LAU_105,
"LAU-105_1*AIM-9L_L": Weapons.LAU_105_1_AIM_9L_L,
"LAU-105_1*AIM-9L_R": Weapons.LAU_105_1_AIM_9L_R,
"LAU-105_1*AIM-9M_L": Weapons.LAU_105_1_AIM_9M_L,
"LAU-105_1*AIM-9M_R": Weapons.LAU_105_1_AIM_9M_R,
"LAU-105_1*CATM-9M_L": Weapons.LAU_105_1_CATM_9M_L,
"LAU-105_1*CATM-9M_R": Weapons.LAU_105_1_CATM_9M_R,
"LAU-105_2*AIM-9L": Weapons.LAU_105_2_AIM_9L,
"LAU-105_2*AIM-9P5": Weapons.LAU_105_2_AIM_9P5,
"LAU-105_2*CATM-9M": Weapons.LAU_105_2_CATM_9M,
"LAU-105_AIS_ASQ_T50_L": Weapons.LAU_105_AIS_ASQ_T50_L,
"LAU-105_AIS_ASQ_T50_R": Weapons.LAU_105_AIS_ASQ_T50_R,
"{DB434044-F5D0-4F1F-9BA9-B73027E18DD3}": Weapons.LAU_105_with_2_x_AIM_9M_Sidewinder_IR_AAM,
"{3C0745ED-8B0B-42eb-B907-5BD5C1717447}": Weapons.LAU_105_with_2_x_AIM_9P_Sidewinder_IR_AAM,
"{LAU_10R}": Weapons.LAU_10R_pod___4_x_127mm_ZUNI__UnGd_Rkts_Mk71__HE_FRAG,
"{F3EFE0AB-E91A-42D8-9CA2-B63C91ED570A}": Weapons.LAU_10_pod___4_x_127mm_ZUNI__UnGd_Rkts_Mk71__HE_FRAG,
"{BRU42_LAU10}": Weapons.LAU_10___4_ZUNI_MK_71,
"{BRU3242_LAU10}": Weapons.LAU_10___4_ZUNI_MK_71_,
"{LAU-115 - AIM-7E}": Weapons.LAU_115C_with_AIM_7E_Sparrow_Semi_Active_Radar,
"{LAU-115 - AIM-7F}": Weapons.LAU_115C_with_AIM_7F_Sparrow_Semi_Active_Radar,
"{LAU-115 - AIM-7H}": Weapons.LAU_115C_with_AIM_7MH_Sparrow_Semi_Active_Radar,
"LAU-115_2*LAU-127_AIM-120B": Weapons.LAU_115_2_LAU_127_AIM_120B,
"LAU-115_2*LAU-127_AIM-120C": Weapons.LAU_115_2_LAU_127_AIM_120C,
"LAU-115_2*LAU-127_AIM-9L": Weapons.LAU_115_2_LAU_127_AIM_9L,
"LAU-115_2*LAU-127_AIM-9M": Weapons.LAU_115_2_LAU_127_AIM_9M,
"LAU-115_2*LAU-127_AIM-9X": Weapons.LAU_115_2_LAU_127_AIM_9X,
"LAU-115_2*LAU-127_CATM-9M": Weapons.LAU_115_2_LAU_127_CATM_9M,
"LAU-115_LAU-127_AIM-9L": Weapons.LAU_115_LAU_127_AIM_9L,
"LAU-115_LAU-127_AIM-9L_R": Weapons.LAU_115_LAU_127_AIM_9L_R,
"LAU-115_LAU-127_AIM-9M": Weapons.LAU_115_LAU_127_AIM_9M,
"LAU-115_LAU-127_AIM-9M_R": Weapons.LAU_115_LAU_127_AIM_9M_R,
"LAU-115_LAU-127_AIM-9X": Weapons.LAU_115_LAU_127_AIM_9X,
"LAU-115_LAU-127_AIM-9X_R": Weapons.LAU_115_LAU_127_AIM_9X_R,
"LAU-115_LAU-127_CATM-9M": Weapons.LAU_115_LAU_127_CATM_9M,
"LAU-115_LAU-127_CATM-9M_R": Weapons.LAU_115_LAU_127_CATM_9M_R,
"{LAU-115 - AIM-120B}": Weapons.LAU_115_with_1_x_LAU_127_AIM_120B_AMRAAM___Active_Rdr_AAM,
"{LAU-115 - AIM-120B_R}": Weapons.LAU_115_with_1_x_LAU_127_AIM_120B_AMRAAM___Active_Rdr_AAM_,
"{LAU-115 - AIM-120C}": Weapons.LAU_115_with_1_x_LAU_127_AIM_120C_5_AMRAAM___Active_Rdr_AAM,
"{LAU-115 - AIM-120C_R}": Weapons.LAU_115_with_1_x_LAU_127_AIM_120C_5_AMRAAM___Active_Rdr_AAM_,
"{LAU-115 - AIM-7M}": Weapons.LAU_115_with_AIM_7M_Sparrow_Semi_Active_Radar,
"LAU_117_AGM_65A": Weapons.LAU_117_AGM_65A,
"LAU_117_AGM_65B": Weapons.LAU_117_AGM_65B,
"LAU_117_AGM_65F": Weapons.LAU_117_AGM_65F,
"LAU_117_AGM_65G": Weapons.LAU_117_AGM_65G,
"LAU_117_AGM_65H": Weapons.LAU_117_AGM_65H,
"LAU_117_AGM_65L": Weapons.LAU_117_AGM_65L,
"LAU_117_CATM_65K": Weapons.LAU_117_CATM_65K,
"LAU_117_TGM_65D": Weapons.LAU_117_TGM_65D,
"LAU_117_TGM_65G": Weapons.LAU_117_TGM_65G,
"LAU_117_TGM_65H": Weapons.LAU_117_TGM_65H,
"{444BA8AE-82A7-4345-842E-76154EFCCA46}": Weapons.LAU_117_with_AGM_65D___Maverick_D__IIR_ASM_,
"{F16A4DE0-116C-4A71-97F0-2CF85B0313EC}": Weapons.LAU_117_with_AGM_65E___Maverick_E__Laser_ASM___Lg_Whd_,
"{69DC8AE7-8F77-427B-B8AA-B19D3F478B66}": Weapons.LAU_117_with_AGM_65K___Maverick_K__CCD_Imp_ASM_,
"{3E6B632D-65EB-44D2-9501-1C2D04515405}": Weapons.LAU_118a_with_AGM_45B_Shrike_ARM__Imp_,
"LAU-127_AIM-9L": Weapons.LAU_127_AIM_9L,
"LAU-127_AIM-9M": Weapons.LAU_127_AIM_9M,
"LAU-127_AIM-9X": Weapons.LAU_127_AIM_9X,
"LAU-127_CATM-9M": Weapons.LAU_127_CATM_9M,
"LAU_131x3_HYDRA_70_M151": Weapons.LAU_131x3_HYDRA_70_M151,
"LAU_131x3_HYDRA_70_M156": Weapons.LAU_131x3_HYDRA_70_M156,
"LAU_131x3_HYDRA_70_M257": Weapons.LAU_131x3_HYDRA_70_M257,
"LAU_131x3_HYDRA_70_M274": Weapons.LAU_131x3_HYDRA_70_M274,
"LAU_131x3_HYDRA_70_MK1": Weapons.LAU_131x3_HYDRA_70_MK1,
"LAU_131x3_HYDRA_70_MK5": Weapons.LAU_131x3_HYDRA_70_MK5,
"LAU_131x3_HYDRA_70_MK61": Weapons.LAU_131x3_HYDRA_70_MK61,
"LAU_131x3_HYDRA_70_WTU1B": Weapons.LAU_131x3_HYDRA_70_WTU1B,
"{LAU-131 - 7 AGR-20A}": Weapons.LAU_131_pod___7_x_2_75_Hydra__Laser_Guided_Rkts_M151__HE_APKWS,
"{LAU-131 - 7 AGR-20 M282}": Weapons.LAU_131_pod___7_x_2_75_Hydra__Laser_Guided_Rkts_M282__MPP_APKWS,
"{69926055-0DA8-4530-9F2F-C86B157EA9F6}": Weapons.LAU_131_pod___7_x_2_75_Hydra__UnGd_Rkts_M151__HE,
"{2AF2EC3F-9065-4de5-93E1-1739C9A71EF7}": Weapons.LAU_131_pod___7_x_2_75_Hydra__UnGd_Rkts_M156__Wht_Phos,
"{DAD45FE5-CFF0-4a2b-99D4-5D044D3BC22F}": Weapons.LAU_131_pod___7_x_2_75_Hydra__UnGd_Rkts_M257__Para_Illum,
"{6D6D5C07-2A90-4a68-9A74-C5D0CFFB05D9}": Weapons.LAU_131_pod___7_x_2_75_Hydra__UnGd_Rkts_M274__Practice_Smk,
"{D22C2D63-E5C9-4247-94FB-5E8F3DE22B71}": Weapons.LAU_131_pod___7_x_2_75_Hydra__UnGd_Rkts_Mk1__Practice,
"{319293F2-392C-4617-8315-7C88C22AF7C4}": Weapons.LAU_131_pod___7_x_2_75_Hydra__UnGd_Rkts_Mk5__HEAT,
"{1CA5E00B-D545-4ff9-9B53-5970E292F14D}": Weapons.LAU_131_pod___7_x_2_75_Hydra__UnGd_Rkts_Mk61__Practice,
"{DDCE7D70-5313-4181-8977-F11018681662}": Weapons.LAU_131_pod___7_x_2_75_Hydra__UnGd_Rkts_WTU_1_B__Practice,
"{LAU-138 wtip - AIM-9L}": Weapons.LAU_138_AIM_9L,
"{LAU-138 wtip - AIM-9M}": Weapons.LAU_138_AIM_9M,
"{LAU3_FFAR_WP156}": Weapons.LAU_3_pod___19_x_2_75_FFAR__UnGd_Rkts_M156__Wht_Phos,
"{LAU3_FFAR_MK1HE}": Weapons.LAU_3_pod___19_x_2_75_FFAR__UnGd_Rkts_Mk1__HE,
"{LAU3_FFAR_MK5HEAT}": Weapons.LAU_3_pod___19_x_2_75_FFAR__UnGd_Rkts_Mk5__HEAT,
"{LAU_61R}": Weapons.LAU_61R_pod___19_x_2_75_Hydra__UnGd_Rkts_M151__HE,
"{FD90A1DC-9147-49FA-BF56-CB83EF0BD32B}": Weapons.LAU_61_pod___19_x_2_75_Hydra__UnGd_Rkts_M151__HE,
"{3DFB7321-AB0E-11d7-9897-000476191836}": Weapons.LAU_61_pod___19_x_2_75_Hydra__UnGd_Rkts_M156__Wht_Phos,
"{LAU_61_M282}": Weapons.LAU_61_pod___19_x_2_75_Hydra__UnGd_Rkts_M282__HEDP,
"{LAU68_FFAR_WP156}": Weapons.LAU_68_pod___7_x_2_75_FFAR__UnGd_Rkts_M156__Wht_Phos,
"{LAU68_FFAR_MK1HE}": Weapons.LAU_68_pod___7_x_2_75_FFAR__UnGd_Rkts_Mk1__HE,
"{LAU68_FFAR_MK5HEAT}": Weapons.LAU_68_pod___7_x_2_75_FFAR__UnGd_Rkts_Mk5__HEAT,
"{A021F29D-18AB-4d3e-985C-FC9C60E35E9E}": Weapons.LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M151__HE,
"{4F977A2A-CD25-44df-90EF-164BFA2AE72F}": Weapons.LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M156__Wht_Phos,
"{647C5F26-BDD1-41e6-A371-8DE1E4CC0E94}": Weapons.LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M257__Para_Illum,
"{0877B74B-5A00-4e61-BA8A-A56450BA9E27}": Weapons.LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M274__Practice_Smk,
"{LAU_68_M282}": Weapons.LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_M282__HEDP,
"{FC85D2ED-501A-48ce-9863-49D468DDD5FC}": Weapons.LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_Mk1__Practice,
"{174C6E6D-0C3D-42ff-BCB3-0853CB371F5C}": Weapons.LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_Mk5__HEAT,
"{65396399-9F5C-4ec3-A7D2-5A8F4C1D90C4}": Weapons.LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_Mk61__Practice,
"{1F7136CB-8120-4e77-B97B-945FF01FB67C}": Weapons.LAU_68_pod___7_x_2_75_Hydra__UnGd_Rkts_WTU_1_B__Practice,
"{LAU-7 - AIM-9L}": Weapons.LAU_7_AIM_9L,
"{LAU-7 - AIM-9M}": Weapons.LAU_7_AIM_9M,
"{F4-2-AIM9B}": Weapons.LAU_7_with_2_x_AIM_9B_Sidewinder_IR_AAM,
"{F4-2-AIM9L}": Weapons.LAU_7_with_2_x_AIM_9L_Sidewinder_IR_AAM,
"{9DDF5297-94B9-42FC-A45E-6E316121CD85}": Weapons.LAU_7_with_2_x_AIM_9M_Sidewinder_IR_AAM,
"{F4-2-AIM9P5}": Weapons.LAU_7_with_2_x_AIM_9P5_Sidewinder_IR_AAM,
"{773675AB-7C29-422f-AFD8-32844A7B7F17}": Weapons.LAU_7_with_2_x_AIM_9P_Sidewinder_IR_AAM,
"{GAR-8}": Weapons.LAU_7_with_AIM_9B_Sidewinder_IR_AAM,
"{AIM-9M-ON-ADAPTER}": Weapons.LAU_7_with_AIM_9M_Sidewinder_IR_AAM,
"{AIM-9P5-ON-ADAPTER}": Weapons.LAU_7_with_AIM_9P5_Sidewinder_IR_AAM,
"{AIM-9P-ON-ADAPTER}": Weapons.LAU_7_with_AIM_9P_Sidewinder_IR_AAM,
"{AIM-9X-ON-ADAPTER}": Weapons.LAU_7_with_AIM_9X_Sidewinder_IR_AAM,
"{LAU-7_AIS_ASQ_T50}": Weapons.LAU_7_with_AN_ASQ_T50_TCTS_Pod___ACMI_Pod,
"LAU_88_AGM_65D_ONE": Weapons.LAU_88_AGM_65D_ONE,
"LAU_88_AGM_65H": Weapons.LAU_88_AGM_65H,
"LAU_88_AGM_65H_2_L": Weapons.LAU_88_AGM_65H_2_L,
"LAU_88_AGM_65H_2_R": Weapons.LAU_88_AGM_65H_2_R,
"LAU_88_AGM_65H_3": Weapons.LAU_88_AGM_65H_3,
"{E6A6262A-CA08-4B3D-B030-E1A993B98452}": Weapons.LAU_88_with_2_x_AGM_65D___Maverick_D__IIR_ASM_,
"{E6A6262A-CA08-4B3D-B030-E1A993B98453}": Weapons.LAU_88_with_2_x_AGM_65D___Maverick_D__IIR_ASM__,
"{2CC29C7A-E863-411C-8A6E-BD6F0E730548}": Weapons.LAU_88_with_2_x_AGM_65E___Maverick_E__Laser_ASM___Lg_Whd_,
"{2CC29C7A-E863-411C-8A6E-BD6F0E730547}": Weapons.LAU_88_with_2_x_AGM_65E___Maverick_E__Laser_ASM___Lg_Whd__,
"{D7670BC7-881B-4094-906C-73879CF7EB28}": Weapons.LAU_88_with_2_x_AGM_65K___Maverick_K__CCD_Imp_ASM_,
"{D7670BC7-881B-4094-906C-73879CF7EB27}": Weapons.LAU_88_with_2_x_AGM_65K___Maverick_K__CCD_Imp_ASM__,
"{DAC53A2F-79CA-42FF-A77A-F5649B601308}": Weapons.LAU_88_with_3_x_AGM_65D___Maverick_D__IIR_ASM_,
"{71AAB9B8-81C1-4925-BE50-1EF8E9899271}": Weapons.LAU_88_with_3_x_AGM_65E___Maverick_E__Laser_ASM___Lg_Whd_,
"{907D835F-E650-4154-BAFD-C656882555C0}": Weapons.LAU_88_with_3_x_AGM_65K___Maverick_K__CCD_Imp_ASM_,
"{LAU_SNEB68G}": Weapons.LAU_SNEB68G___8xSNEB68_EAP,
"{LAU_SNEB68_WP}": Weapons.LAU_SNEB68G___8xSNEB68_WP,
"{CAAC1CFD-6745-416B-AFA4-CB57414856D0}": Weapons.Lantirn_F_16,
"{D1744B93-2A8A-4C4D-B004-7A09CD8C8F3F}": Weapons.Lantirn_Target_Pod,
"{LR25_ARF8M3_API}": Weapons.LR_25___25_x_ARF_8_M3_API,
"{LR25_ARF8M3_HEI}": Weapons.LR_25___25_x_ARF_8_M3_HEI,
"{LR25_ARF8M3_TPSM}": Weapons.LR_25___25_x_ARF_8_M3_TP_SM,
"{0519A264-0AB6-11d6-9193-00A0249B6F00}": Weapons.L_081_Fantasmagoria_ELINT_pod,
"{US_M10_SMOKE_TANK_BLUE}": Weapons.M10_Smoke_Tank___blue,
"{US_M10_SMOKE_TANK_GREEN}": Weapons.M10_Smoke_Tank___green,
"{US_M10_SMOKE_TANK_ORANGE}": Weapons.M10_Smoke_Tank___orange,
"{US_M10_SMOKE_TANK_RED}": Weapons.M10_Smoke_Tank___red,
"{US_M10_SMOKE_TANK_WHITE}": Weapons.M10_Smoke_Tank___white,
"{US_M10_SMOKE_TANK_YELLOW}": Weapons.M10_Smoke_Tank___yellow,
"{00F5DAC4-0466-4122-998F-B1A298E34113}": Weapons.M117___750lb_GP_Bomb_LD,
"M134_L": Weapons.M134_L,
"M134_R": Weapons.M134_R,
"M134_SIDE_L": Weapons.M134_SIDE_L,
"M134_SIDE_R": Weapons.M134_SIDE_R,
"{414DA830-B61A-4F9E-B71B-C2F6832E1D7A}": Weapons.M2000_Fuel_tank,
"M260_HYDRA": Weapons.M260_HYDRA,
"M260_HYDRA_WP": Weapons.M260_HYDRA_WP,
"M261_MK151": Weapons.M261_MK151,
"M261_MK156": Weapons.M261_MK156,
"M60_SIDE_L": Weapons.M60_SIDE_L,
"M60_SIDE_R": Weapons.M60_SIDE_R,
"{MAK79_MK20 2L}": Weapons.MAK79_2_MK_20,
"{MAK79_MK20 2R}": Weapons.MAK79_2_MK_20_,
"{MAK79_BDU33 3L}": Weapons.MAK79_3_BDU_33,
"{MAK79_BDU33 3R}": Weapons.MAK79_3_BDU_33_,
"{MAK79_BDU45 3L}": Weapons.MAK79_3_BDU_45,
"{MAK79_BDU45B 3L}": Weapons.MAK79_3_BDU_45B,
"{MAK79_BDU45B 3R}": Weapons.MAK79_3_BDU_45B_,
"{MAK79_BDU45 3R}": Weapons.MAK79_3_BDU_45_,
"{MAK79_MK81 3L}": Weapons.MAK79_3_Mk_81,
"{MAK79_MK81 3R}": Weapons.MAK79_3_Mk_81_,
"{MAK79_MK82 3L}": Weapons.MAK79_3_Mk_82,
"{MAK79_MK82AIR 3L}": Weapons.MAK79_3_Mk_82AIR,
"{MAK79_MK82AIR 3R}": Weapons.MAK79_3_Mk_82AIR_,
"{MAK79_MK82 3R}": Weapons.MAK79_3_Mk_82_,
"{MAK79_MK82SE 3L}": Weapons.MAK79_3_Mk_82_SnakeEye,
"{MAK79_MK82SE 3R}": Weapons.MAK79_3_Mk_82_SnakeEye_,
"{MAK79_MK83 3L}": Weapons.MAK79_3_Mk_83,
"{MAK79_MK83 3R}": Weapons.MAK79_3_Mk_83_,
"{MAK79_BDU33 4}": Weapons.MAK79_4_BDU_33,
"{MAK79_BDU45 4}": Weapons.MAK79_4_BDU_45,
"{MAK79_BDU45B 4}": Weapons.MAK79_4_BDU_45B,
"{MAK79_MK81 4}": Weapons.MAK79_4_Mk_81,
"{MAK79_MK82 4}": Weapons.MAK79_4_Mk_82,
"{MAK79_MK82AIR 4}": Weapons.MAK79_4_Mk_82AIR,
"{MAK79_MK82SE 4}": Weapons.MAK79_4_Mk_82_SnakeEye,
"{MAK79_MK20 1R}": Weapons.MAK79_MK_20,
"{MAK79_MK20 1L}": Weapons.MAK79_MK_20_,
"{MAK79_MK83 1R}": Weapons.MAK79_Mk_83,
"{MAK79_MK83 1L}": Weapons.MAK79_Mk_83_,
"{MMagicII}": Weapons.Matra_Magic_II,
"{Matra_S530D}": Weapons.Matra_Super_530D,
"{Matra155RocketPod}": Weapons.Matra_Type_155_Rocket_Pod,
"{5A1AC2B4-CA4B-4D09-A1AF-AC52FBC4B60B}": Weapons.MBD2_67U_with_4_x_FAB_100___100kg_GP_Bombs_LD,
"{29A828E2-C6BB-11d8-9897-000476191836}": Weapons.MBD2_67U_with_4_x_FAB_100___100kg_GP_Bombs_LD_,
"{7C5F0F5F-0A0B-46E8-937C-8922303E39A8}": Weapons.MBD3_U2T_with_2_x_FAB_1500_M_54___1500kg_GP_Bombs_LD,
"{6A367BB4-327F-4A04-8D9E-6D86BDC98E7E}": Weapons.MBD3_U4T_with_4_x_FAB_250___250kg_GP_Bombs_LD,
"{02B81892-7E24-4795-84F9-B8110C641AF0}": Weapons.MBD3_U4T_with_4_x_RBK_250___42_x_PTAB_2_5M__250kg_CBUs_Medium_HEAT_AP,
"{E659C4BE-2CD8-4472-8C08-3F28ACB61A8A}": Weapons.MBD3_U6_68_with_2_x_FAB_250___250kg_GP_Bombs_LD,
"{MBD3_U6_3*FAB-250_fwd}": Weapons.MBD3_U6_68_with_3_x_FAB_250___250kg_GP_Bombs_LD,
"{3E35F8C1-052D-11d6-9191-00A0249B6F00}": Weapons.MBD3_U6_68_with_4_x_FAB_250___250kg_GP_Bombs_LD,
"{MBD3_U6_4*FAB-250_fwd}": Weapons.MBD3_U6_68_with_4_x_FAB_250___250kg_GP_Bombs_LD_,
"{MBD3_U6_5*FAB-250}": Weapons.MBD3_U6_68_with_5_x_FAB_250___250kg_GP_Bombs_LD,
"{E96E1EDD-FF3F-47CF-A959-576C3B682955}": Weapons.MBD3_U6_68_with_6_x_BetAB_500ShP___500kg_Concrete_Piercing_HD_w_booster_Bombs,
"{436C6FB9-8BF2-46B6-9DC4-F55ABF3CD1EC}": Weapons.MBD3_U6_68_with_6_x_BetAB_500___500kg_Concrete_Piercing_Bombs_LD,
"{F99BEC1A-869D-4AC7-9730-FBA0E3B1F5FC}": Weapons.MBD3_U6_68_with_6_x_FAB_100___100kg_GP_Bombs_LD,
"{53BE25A4-C86C-4571-9BC0-47D668349595}": Weapons.MBD3_U6_68_with_6_x_FAB_250___250kg_GP_Bombs_LD,
"{FA673F4C-D9E4-4993-AA7A-019A92F3C005}": Weapons.MBD3_U6_68_with_6_x_FAB_500_M_62___500kg_GP_Bombs_LD,
"{0D945D78-542C-4E9B-9A17-9B5008CC8D39}": Weapons.MBD3_U6_68_with_6_x_FAB_500_M_62___500kg_GP_Bombs_LD_,
"{F503C276-FE15-4C54-B310-17B50B735A84}": Weapons.MBD3_U6_68_with_6_x_RBK_500_255___30_x_PTAB_10_5__500kg_CBUs_Heavy_HEAT_AP,
"{4D459A95-59C0-462F-8A57-34E80697F38B}": Weapons.MBD3_U6_68_with_6_x_RBK_500_255___30_x_PTAB_10_5__500kg_CBUs_Heavy_HEAT_AP_,
"{5F1C54C0-0ABD-4868-A883-B52FF9FCB422}": Weapons.MBD3_U9M_with_9_x_FAB_100___100kg_GP_Bombs_LD,
"{E1AAE713-5FC3-4CAA-9FF5-3FDCFB899E33}": Weapons.MBD3_U9M_with_9_x_FAB_250___250kg_GP_Bombs_LD,
"{BF83E8FD-E7A2-40D2-9608-42E13AFE2193}": Weapons.MBD3_U9M_with_9_x_RBK_250___42_x_PTAB_2_5M__250kg_CBUs_Medium_HEAT_AP,
"{005E70F5-C3EA-4E95-A148-C1044C42D845}": Weapons.MBD3_with_3_x_BetAB_500___500kg_Concrete_Piercing_Bombs_LD,
"{CEE04106-B9AA-46B4-9CD1-CD3FDCF0CE78}": Weapons.MBD3_with_3_x_FAB_100___100kg_GP_Bombs_LD,
"{D109EE9C-A1B7-4F1C-8D87-631C293A1D26}": Weapons.MBD3_with_3_x_FAB_250___250kg_GP_Bombs_LD,
"{A1E85991-B58E-4E92-AE91-DED6DC85B2E7}": Weapons.MBD3_with_3_x_FAB_500_M_62___500kg_GP_Bombs_LD,
"{EAD9B2C1-F3BA-4A7B-A2A5-84E2AF8A1975}": Weapons.MBD3_with_3_x_RBK_250___42_x_PTAB_2_5M__250kg_CBUs_Medium_HEAT_AP,
"{919CE839-9390-4629-BAF7-229DE19B8523}": Weapons.MBD3_with_3_x_RBK_500_255___30_x_PTAB_10_5__500kg_CBUs_Heavy_HEAT_AP,
"{574EDEDF-20DE-4942-B2A2-B2EDFD621562}": Weapons.MER12_with_12_x_M117___750lb_GP_Bombs_LD,
"{585D626E-7F42-4073-AB70-41E728C333E2}": Weapons.MER12_with_12_x_Mk_82___500lb_GP_Bombs_LD,
"{0B9ABA77-93B8-45FC-9C63-82AFB2CB50A4}": Weapons.MER2_with_2_x_Mk_20_Rockeye___490lbs_CBUs__247_x_HEAT_Bomblets,
"{D5D51E24-348C-4702-96AF-97A714E72697}": Weapons.MER2_with_2_x_Mk_82___500lb_GP_Bombs_LD,
"{18617C93-78E7-4359-A8CE-D754103EDF63}": Weapons.MER2_with_2_x_Mk_83___1000lb_GP_Bombs_LD,
"{82F90BEC-0E2E-4CE5-A66E-1E4ADA2B5D1E}": Weapons.MER3_with_3_x_M117___750lb_GP_Bombs_LD,
"{752B9782-F962-11d5-9190-00A0249B6F00}": Weapons.MER6_with_6_x_BLU_107___440lb_Anti_Runway_Penetrator_Bombs,
"{6CDB6B36-7165-47D0-889F-6625FB333561}": Weapons.MER6_with_6_x_M117___750lb_GP_Bombs_LD,
"{3C7CD675-7D39-41C5-8735-0F4F537818A8}": Weapons.MER6_with_6_x_Mk_20_Rockeye___490lbs_CBUs__247_x_HEAT_Bomblets,
"{1C97B4A0-AA3B-43A8-8EE7-D11071457185}": Weapons.MER6_with_6_x_Mk_82___500lb_GP_Bombs_LD,
"{B1EF6B0E-3D91-4047-A7A5-A99E7D8B4A8B}": Weapons.Mercury_LLTV_Pod,
"{0DA03783-61E4-40B2-8FAE-6AEE0A5C5AAE}": Weapons.MICA_IR,
"{6D778860-7BB8-4ACB-9E95-BA772C6BBC2C}": Weapons.MICA_RF,
"MIM_104": Weapons.MIM_104,
"MIM_72": Weapons.MIM_72,
"{MBDA_MistralG}": Weapons.Mistral,
"{MBDA_MistralD}": Weapons.Mistral_,
"MK_82*28": Weapons.MK_82_28,
"{BRU-32 MK-20}": Weapons.Mk_20,
"{ACADB374-6D6C-45A0-BA7C-B22B2E108AE4}": Weapons.Mk_20_18,
"{ADD3FAE1-EBF6-4EF9-8EFC-B36B5DDF1E6B}": Weapons.Mk_20_Rockeye___490lbs_CBU__247_x_HEAT_Bomblets,
"{90321C8E-7ED1-47D4-A160-E074D5ABD902}": Weapons.Mk_81___250lb_GP_Bomb_LD,
"{BRU-32 MK-82}": Weapons.Mk_82,
"{BRU-32 MK-82AIR}": Weapons.Mk_82AIR,
"{Mk_82Y}": Weapons.Mk_82Y___500lb_GP_Chute_Retarded_HD,
"{Mk82AIR}": Weapons.Mk_82_AIR_Ballute___500lb_GP_Bomb_HD,
"{BRU-32 MK-82SE}": Weapons.Mk_82_SnakeEye,
"{Mk82SNAKEYE}": Weapons.Mk_82_Snakeye___500lb_GP_Bomb_HD,
"{BCE4E030-38E9-423E-98ED-24BE3DA87C32}": Weapons.Mk_82___500lb_GP_Bomb_LD,
"{BRU-32 MK-83}": Weapons.Mk_83,
"{Mk_83CT}": Weapons.Mk_83CT,
"{BRU42_MK83 RS}": Weapons.Mk_83_,
"{BRU3242_MK83 RS}": Weapons.Mk_83__,
"{PHXBRU3242_MK83 RS}": Weapons.Mk_83___,
"{7A44FF09-527C-4B7E-B42B-3F111CFE50FB}": Weapons.Mk_83___1000lb_GP_Bomb_LD,
"{BRU42_MK83 LS}": Weapons.Mk_83____,
"{BRU3242_MK83 LS}": Weapons.Mk_83_____,
"{PHXBRU3242_MK83 LS}": Weapons.Mk_83______,
"{BRU-32 MK-84}": Weapons.Mk_84,
"{F092B80C-BB54-477E-9408-66DEEF740008}": Weapons.Mk_84_18,
"{D3ABF208-FA56-4D56-BB31-E0D931D57AE3}": Weapons.Mk_84_28,
"{AB8B8299-F1CC-4359-89B5-2172E0CF4A5A}": Weapons.Mk_84___2000lb_GP_Bomb_LD,
"{44EE8698-89F9-48EE-AF36-5FD31896A82D}": Weapons.MPS_410,
"{44EE8698-89F9-48EE-AF36-5FD31896A82C}": Weapons.MPS_410_,
"MXU-648-TP": Weapons.MXU_648_TP,
"{ODAB-500PM}": Weapons.ODAB_500PM___525_kg__bomb__parachute__simulated_aerosol,
"{OFAB-100-120-TU}": Weapons.OFAB_100_120_TU_x_4,
"{OFAB_100_Jupiter}": Weapons.OFAB_100_Jupiter___100kg_GP_Bomb_LD,
"{ORO57K_S5M1_HEFRAG}": Weapons.ORO_57K___S_5M1_HE_FRAG_FFAR_x_8,
"{ORO57K_S5MO_HEFRAG}": Weapons.ORO_57K___S_5MO_HE_FRAG_FFAR_x_8,
"{ORO57K_S5M_HEFRAG}": Weapons.ORO_57K___S_5M_x_8,
"oh-58-brauning": Weapons.oh_58_brauning,
"{199D6D51-1764-497E-9AE5-7D07C8D4D87E}": Weapons.Pavetack_F_111,
"PKT_7_62": Weapons.PKT_7_62,
"{PK-3}": Weapons.PK_3___7_62mm_GPMG,
"PTB300_MIG15": Weapons.PTB300_MIG15,
"PTB400_MIG15": Weapons.PTB400_MIG15,
"PTB400_MIG19": Weapons.PTB400_MIG19,
"PTB600_MIG15": Weapons.PTB600_MIG15,
"PTB760_MIG19": Weapons.PTB760_MIG19,
"{P-50T}": Weapons.P_50T___50kg_Practice_Bomb_LD,
"{RBK_250_275_AO_1SCH}": Weapons.RBK_250_275___150_x_AO_1SCh__250kg_CBU_HE_Frag,
"{4203753F-8198-4E85-9924-6F8FF679F9FF}": Weapons.RBK_250___42_x_PTAB_2_5M__250kg_CBU_Medium_HEAT_AP,
"{RBK_500U_OAB_2_5RT}": Weapons.RBK_500U___126_x_OAB_2_5RT__500kg_CBU_HE_Frag,
"{D5435F26-F120-4FA3-9867-34ACE562EF1B}": Weapons.RBK_500_255___30_x_PTAB_10_5__500kg_CBU_Heavy_HEAT_AP,
"{7AEC222D-C523-425e-B714-719C0D1EB14D}": Weapons.RBK_500___268_x_PTAB_1M__500kg_CBU_Light_HEAT_AP,
"{Rb04AI}": Weapons.RB_04E__for_A_I___with_launcher,
"{Rb15AI}": Weapons.RB_15F__for_A_I___with_launcher,
"{Rb04}": Weapons.Rb_04E_Anti_ship_Missile,
"{Robot05}": Weapons.Rb_05A_MCLOS_ASM_AShM_AAM,
"{Rb15}": Weapons.Rb_15F_Programmable_Anti_ship_Missile,
"{Robot24J}": Weapons.Rb_24J__AIM_9P__Sidewinder_IR_AAM,
"{Robot24}": Weapons.Rb_24__AIM_9B__Sidewinder_IR_AAM,
"{Robot74}": Weapons.Rb_74__AIM_9L__Sidewinder_IR_AAM,
"{RB75}": Weapons.Rb_75A__AGM_65A_Maverick___TV_ASM_,
"{RB75B}": Weapons.Rb_75B__AGM_65B_Maverick___TV_ASM_,
"{RB75T}": Weapons.Rb_75T__AGM_65A_Maverick___TV_ASM_Lg_HE_Whd_,
"REFLEX_9M119": Weapons.REFLEX_9M119,
"{RKL609_L}": Weapons.RKL609_ECM_Pod__Left_,
"{RKL609_R}": Weapons.RKL609_ECM_Pod__Right_,
"{RN-24}": Weapons.RN_24___470kg__nuclear_bomb__free_fall,
"{RN-28}": Weapons.RN_28___260_kg__nuclear_bomb__free_fall,
"ROLAND": Weapons.ROLAND,
"{M2KC_RPL_522}": Weapons.RPL_522_1300_liters_Fuel_Tank,
"{M2KC_RPL_522_EMPTY}": Weapons.RPL_522_1300_liters_Fuel_Tank__Empty_,
"{M2KC_02_RPL541}": Weapons.RPL_541_2000_liters_Fuel_Tank_,
"{M2KC_08_RPL541}": Weapons.RPL_541_2000_liters_Fuel_Tank__,
"{M2KC_02_RPL541_EMPTY}": Weapons.RPL_541_2000_liters_Fuel_Tank__Empty_,
"{M2KC_08_RPL541_EMPTY}": Weapons.RPL_541_2000_liters_Fuel_Tank__Empty__,
"{British_AP_25LBNo1_3INCHNo1}": Weapons.RP_3_25lb_AP_Mk_I,
"{British_HE_60LBFNo1_3INCHNo1}": Weapons.RP_3_60lb_F_No1_Mk_I,
"{British_HE_60LBSAPNo2_3INCHNo1}": Weapons.RP_3_60lb_SAP_No2_Mk_I,
"{RS-2US}": Weapons.RS2US___AAM__beam_rider,
"{R-13M1}": Weapons.R_13M1___AAM__IR_guided,
"{R-13M}": Weapons.R_13M___AAM__IR_guided,
"{CCF898C9-5BC7-49A4-9D1E-C3ED3D5166A1}": Weapons.R_24R__AA_7_Apex_SA____Semi_Act_Rdr,
"{6980735A-44CC-4BB9-A1B5-591532F1DC69}": Weapons.R_24T__AA_7_Apex_IR____Infra_Red,
"{E8069896-8435-4B90-95C0-01A03AE6E400}": Weapons.R_27ER__AA_10_Alamo_C____Semi_Act_Extended_Range,
"{B79C379A-9E87-4E50-A1EE-7F7E29C2E87A}": Weapons.R_27ET__AA_10_Alamo_D____IR_Extended_Range,
"{9B25D316-0434-4954-868F-D51DB1A38DF0}": Weapons.R_27R__AA_10_Alamo_A____Semi_Act_Rdr,
"{88DAC840-9F75-4531-8689-B46E64E42E53}": Weapons.R_27T__AA_10_Alamo_B____Infra_Red,
"{F1243568-8EF0-49D4-9CB5-4DA90D92BC1D}": Weapons.R_33__AA_9_Amos____Semi_Act_Rdr,
"{R-3R}": Weapons.R_3R___AAM__radar_guided,
"{R-3S}": Weapons.R_3S___AAM__IR_guided,
"{4EDBA993-2E34-444C-95FB-549300BF7CAF}": Weapons.R_40R__AA_6_Acrid____Semi_Act_Rdr,
"{5F26DBC2-FB43-4153-92DE-6BBCE26CB0FF}": Weapons.R_40T__AA_6_Acrid____Infra_Red,
"{FC23864E-3B80-48E3-9C03-4DA8B1D7497B}": Weapons.R_550_Magic_2,
"{R-55}": Weapons.R_55___AAM__IR_guided,
"{R-60}": Weapons.R_60,
"{R-60M}": Weapons.R_60M,
"{R-60M 2L}": Weapons.R_60M_x_2,
"{R-60M 2R}": Weapons.R_60M_x_2_,
"{682A481F-0CB5-4693-A382-D00DD4A156D7}": Weapons.R_60M__AA_8_Aphid____Infra_Red,
"{R-60 2L}": Weapons.R_60_x_2,
"{R-60 2R}": Weapons.R_60_x_2_,
"{FBC29BFE-3D24-4C64-B81D-941239D12249}": Weapons.R_73__AA_11_Archer____Infra_Red,
"{CBC29BFE-3D24-4C64-B81D-941239D12249}": Weapons.R_73__AA_11_Archer____Infra_Red_,
"{B4C01D60-A8A3-4237-BD72-CA7655BC0FE9}": Weapons.R_77__AA_12_Adder____Active_Rdr,
"{B4C01D60-A8A3-4237-BD72-CA7655BC0FEC}": Weapons.R_77__AA_12_Adder____Active_Rdr_,
"{0511E528-EA28-4caf-A212-00D1408DF10A}": Weapons.SAB_100___100kg_flare_illumination_Bomb,
"{FAS}": Weapons.Sand_Filter,
"{SC_250_T1_L2}": Weapons.SC_250_Type_1_L2___250kg_GP_Bomb_LD,
"{Schloss500XIIC1_SC_250_T3_J}": Weapons.SC_250_Type_3_J___250kg_GP_Bomb_LD,
"{SC_500_L2}": Weapons.SC_500_L2___500kg_GP_Bomb_LD,
"SC_501_SC250": Weapons.SC_501_SC250,
"SC_501_SC500": Weapons.SC_501_SC500,
"{SC_50}": Weapons.SC_50___50kg_GP_Bomb_LD,
"{SD_250_Stg}": Weapons.SD_250_Stg___250kg_GP_Bomb_LD,
"{SD_500_A}": Weapons.SD_500_A___500kg_GP_Bomb_LD,
"SEASPARROW": Weapons.SEASPARROW,
"{1461CD18-429A-42A9-A21F-4C621ECD4573}": Weapons.Sea_Eagle___ASM,
"{0519A263-0AB6-11d6-9193-00A0249B6F00}": Weapons.Shpil_2_Laser_Recon__Intel_Pod,
"{8C3F26A2-FA0F-11d5-9190-00A0249B6F00}": Weapons.Sky_Shadow_ECM_Pod,
"SM2": Weapons.SM2,
"{A4BCC903-06C8-47bb-9937-A30FEDB4E743}": Weapons.Smokewinder___blue,
"{A4BCC903-06C8-47bb-9937-A30FEDB4E742}": Weapons.Smokewinder___green,
"{A4BCC903-06C8-47bb-9937-A30FEDB4E746}": Weapons.Smokewinder___orange,
"{A4BCC903-06C8-47bb-9937-A30FEDB4E741}": Weapons.Smokewinder___red,
"{A4BCC903-06C8-47bb-9937-A30FEDB4E744}": Weapons.Smokewinder___white,
"{A4BCC903-06C8-47bb-9937-A30FEDB4E745}": Weapons.Smokewinder___yellow,
"{CE2_SMOKE_WHITE}": Weapons.Smoke_for_Christen_Eagle_II__white,
"{D3F65166-1AB8-490f-AF2F-2FB6E22568B3}": Weapons.Smoke_Generator___blue,
"{INV-SMOKE-BLUE}": Weapons.Smoke_Generator___blue_,
"{D3F65166-1AB8-490f-AF2F-2FB6E22568B2}": Weapons.Smoke_Generator___green,
"{INV-SMOKE-GREEN}": Weapons.Smoke_Generator___green_,
"{D3F65166-1AB8-490f-AF2F-2FB6E22568B6}": Weapons.Smoke_Generator___orange,
"{INV-SMOKE-ORANGE}": Weapons.Smoke_Generator___orange_,
"{D3F65166-1AB8-490f-AF2F-2FB6E22568B1}": Weapons.Smoke_Generator___red,
"{INV-SMOKE-RED}": Weapons.Smoke_Generator___red_,
"{D3F65166-1AB8-490f-AF2F-2FB6E22568B4}": Weapons.Smoke_Generator___white,
"{INV-SMOKE-WHITE}": Weapons.Smoke_Generator___white_,
"{D3F65166-1AB8-490f-AF2F-2FB6E22568B5}": Weapons.Smoke_Generator___yellow,
"{INV-SMOKE-YELLOW}": Weapons.Smoke_Generator___yellow_,
"{SMOKE-RED-AVIOJET}": Weapons.Smoke_System_red_colorant,
"{SMOKE-YELLOW-AVIOJET}": Weapons.Smoke_System_yellow_colorant,
"{SMOKE-SYSTEM-AVIOJET}": Weapons.Smoke_System__White_Smoke_,
"{MIG21_SMOKE_RED}": Weapons.Smoke___red___21__t,
"{SMOKE_WHITE}": Weapons.Smoke___white___21,
"{MIG21_SMOKE_WHITE}": Weapons.Smoke___white___21_,
"SPITFIRE_45GAL_SLIPPER_TANK": Weapons.SPITFIRE_45GAL_SLIPPER_TANK,
"SPITFIRE_45GAL_TORPEDO_TANK": Weapons.SPITFIRE_45GAL_TORPEDO_TANK,
"{E92CBFE5-C153-11d8-9897-000476191836}": Weapons.SPPU_22_1___2_x_23mm__GSh_23L_Autocannon_Pod,
"{SPRD}": Weapons.SPRD_99_takeoff_rocket,
"{SPS-141-100}": Weapons.SPS_141_100__21____jamming_and_countermeasures_pod,
"{F75187EF-1D9E-4DA9-84B4-1A1A14A3973A}": Weapons.SPS_141___ECM_Jamming_Pod,
"{CAE48299-A294-4bad-8EE6-89EFC5DCDF00}": Weapons.SUU_25_x_8_LUU_2___Target_Marker_Flares,
"{BRU42_SUU25}": Weapons.SUU_25___8_LUU_2,
"{BRU3242_SUU25}": Weapons.SUU_25___8_LUU_2_,
"{FD21B13E-57F3-4C2A-9F78-C522D0B5BCE1}": Weapons.Super_530D,
"SVIR_9M119": Weapons.SVIR_9M119,
"{S-24A}": Weapons.S_24A__21____180_kg__cumulative_unguided_rocket,
"{S-24B}": Weapons.S_24B__21____180_kg__fragmented_unguided_rocket,
"{1FA14DEA-8CDB-45AD-88A8-EC068DF1E65A}": Weapons.S_24B___240mm_UnGd_Rkt__235kg__HE_Frag___Low_Smk_,
"{3858707D-F5D5-4bbb-BDD8-ABB0530EBC7C}": Weapons.S_24B___240mm_UnGd_Rkt__235kg__HE_Frag___Low_Smk__,
"{0180F983-C14A-11d8-9897-000476191836}": Weapons.S_25L___320Kg__340mm_Laser_Guided_Rkt,
"{A0648264-4BC0-4EE8-A543-D119F6BA4257}": Weapons.S_25_OFM___340mm_UnGd_Rkt__480kg_Penetrator,
"{S_25_O}": Weapons.S_25_O___420mm_UnGd_Rkt__380kg_Frag,
"{0519A262-0AB6-11d6-9193-00A0249B6F00}": Weapons.Tangazh_ELINT_pod,
"{TER_9A_2L*CBU-87}": Weapons.TER_9A_with_2_x_CBU_87___202_x_CEM_Cluster_Bomb,
"{TER_9A_2R*CBU-87}": Weapons.TER_9A_with_2_x_CBU_87___202_x_CEM_Cluster_Bomb_,
"{TER_9A_2L*CBU-97}": Weapons.TER_9A_with_2_x_CBU_97___10_x_SFW_Cluster_Bomb,
"{TER_9A_2R*CBU-97}": Weapons.TER_9A_with_2_x_CBU_97___10_x_SFW_Cluster_Bomb_,
"{TER_9A_2L*GBU-12}": Weapons.TER_9A_with_2_x_GBU_12___500lb_Laser_Guided_Bomb,
"{TER_9A_2R*GBU-12}": Weapons.TER_9A_with_2_x_GBU_12___500lb_Laser_Guided_Bomb_,
"{TER_9A_2L*MK-82AIR}": Weapons.TER_9A_with_2_x_Mk_82_AIR_Ballute___500lb_GP_Bomb_HD,
"{TER_9A_2R*MK-82AIR}": Weapons.TER_9A_with_2_x_Mk_82_AIR_Ballute___500lb_GP_Bomb_HD_,
"{TER_9A_2L*MK-82_Snakeye}": Weapons.TER_9A_with_2_x_Mk_82_Snakeye___500lb_GP_Bomb_HD,
"{TER_9A_2R*MK-82_Snakeye}": Weapons.TER_9A_with_2_x_Mk_82_Snakeye___500lb_GP_Bomb_HD_,
"{TER_9A_2L*MK-82}": Weapons.TER_9A_with_2_x_Mk_82___500lb_GP_Bomb_LD,
"{TER_9A_2R*MK-82}": Weapons.TER_9A_with_2_x_Mk_82___500lb_GP_Bomb_LD_,
"{TER_9A_3*BDU-33}": Weapons.TER_9A_with_3_x_BDU_33___25lb_Practice_Bomb_LD,
"{TER_9A_3*CBU-87}": Weapons.TER_9A_with_3_x_CBU_87___202_x_CEM_Cluster_Bomb,
"{TER_9A_3*CBU-97}": Weapons.TER_9A_with_3_x_CBU_97___10_x_SFW_Cluster_Bomb,
"{TER_9A_3*MK-82AIR}": Weapons.TER_9A_with_3_x_Mk_82_AIR_Ballute___500lb_GP_Bomb_HD,
"{TER_9A_3*MK-82_Snakeye}": Weapons.TER_9A_with_3_x_Mk_82_Snakeye___500lb_GP_Bomb_HD,
"{TER_9A_3*MK-82}": Weapons.TER_9A_with_3_x_Mk_82___500lb_GP_Bomb_LD,
"TEST_ROTARY_LAUNCHER_MK82": Weapons.TEST_ROTARY_LAUNCHER_MK82,
"TGM_65H": Weapons.TGM_65H,
"{EF124821-F9BB-4314-A153-E0E2FE1162C4}": Weapons.TORNADO_Fuel_tank,
"TOW": Weapons.TOW,
"{U22A}": Weapons.U22_A_Jammer,
"{UB-16-57UMP}": Weapons.UB_16UM_pod___16_x_S_5KO__57mm_UnGd_Rkts__HEAT_Frag,
"{UB-16_S5M}": Weapons.UB_16UM___16_S_5M,
"{UB-32A-24}": Weapons.UB_32A_24_pod___32_x_S_5KO,
"{637334E4-AB5A-47C0-83A6-51B7F1DF3CD5}": Weapons.UB_32A_pod___32_x_S_5KO__57mm_UnGd_Rkts__HEAT_Frag,
"{UB-32_S5M}": Weapons.UB_32M___32_S_5M,
"{05544F1A-C39C-466b-BC37-5BD1D52E57BB}": Weapons.UPK_23_250___2_x_23mm__GSh_23L_Autocannon_Pod,
"{UPK-23-250 MiG-21}": Weapons.UPK_23_250___gun_pod,
"{U22}": Weapons.U_22_Jammer_pod,
"{WGr21}": Weapons.Werfer_Granate_21___21_cm_UnGd_air_to_air_rocket,
"XM158_M151": Weapons.XM158_M151,
"XM158_M156": Weapons.XM158_M156,
"XM158_M257": Weapons.XM158_M257,
"XM158_M274": Weapons.XM158_M274,
"XM158_MK1": Weapons.XM158_MK1,
"XM158_MK5": Weapons.XM158_MK5,
"{MOSQUITO_100GAL_SLIPPER_TANK}": Weapons._100_gal__Drop_Tank,
"{US_108GAL_PAPER_FUEL_TANK}": Weapons._108_US_gal__Paper_Fuel_Tank,
"{US_110GAL_FUEL_TANK}": Weapons._110_US_gal__Fuel_Tank,
"{12xM64}": Weapons._12_AN_M64___500lb_GP_Bomb_LD,
"{D6A0441E-6794-4FEB-87F7-E68E2290DFAB}": Weapons._12_x_BetAB_500___500kg_Concrete_Piercing_Bombs_LD,
"{E70446B7-C7E6-4B95-B685-DEA10CAD1A0E}": Weapons._12_x_FAB_500_M_62___500kg_GP_Bombs_LD,
"{FW_190_R4M_LEFT_WING}": Weapons._13_R4M_3_2kg_UnGd_air_to_air_rocket,
"{FW_190_R4M_RGHT_WING}": Weapons._13_R4M_3_2kg_UnGd_air_to_air_rocket_,
"{US_150GAL_FUEL_TANK}": Weapons._150_US_gal__Fuel_Tank,
"{22906569-A97F-404B-BA4F-D96DBF94D05E}": Weapons._20_x_AGM_86C_ALCM,
"{B0241BD2-5628-47E0-954C-A8675B7E698E}": Weapons._24_x_FAB_250___250kg_GP_Bombs_LD,
"{British_GP_250LB_Bomb_Mk1}": Weapons._250_lb_GP_Mk_I,
"{British_GP_250LB_Bomb_Mk4}": Weapons._250_lb_GP_Mk_IV,
"{British_GP_250LB_Bomb_Mk4_on_Handley_Page_Type_B_Cut_Bar}": Weapons._250_lb_GP_Mk_IV_,
"{British_GP_250LB_Bomb_Mk5}": Weapons._250_lb_GP_Mk_V,
"{British_GP_250LB_Bomb_Mk5_on_Handley_Page_Type_B_Cut_Bar}": Weapons._250_lb_GP_Mk_V_,
"{British_MC_250LB_Bomb_Mk1}": Weapons._250_lb_MC_Mk_I,
"{British_MC_250LB_Bomb_Mk2}": Weapons._250_lb_MC_Mk_II,
"{British_MC_250LB_Bomb_Mk2_on_Handley_Page_Type_B_Cut_Bar}": Weapons._250_lb_MC_Mk_II_,
"{British_MC_250LB_Bomb_Mk1_on_Handley_Page_Type_B_Cut_Bar}": Weapons._250_lb_MC_Mk_I_,
"{British_SAP_250LB_Bomb_Mk5}": Weapons._250_lb_S_A_P_,
"{British_SAP_250LB_Bomb_Mk5_on_Handley_Page_Type_B_Cut_Bar}": Weapons._250_lb_S_A_P__,
"{B58F99BA-5480-4572-8602-28B0449F5260}": Weapons._27_x_M117___750lb_GP_Bombs_LD,
"{6C47D097-83FF-4FB2-9496-EAB36DDF0B05}": Weapons._27_x_Mk_82___500lb_GP_Bombs_LD,
"{89D000B0-0360-461A-AD83-FB727E2ABA98}": Weapons._2xGBU_12___500lb_Laser_Guided_Bomb,
"{BRU-42_2xGBU-12_right}": Weapons._2xGBU_12___500lb_Laser_Guided_Bomb_,
"{LYSBOMB}": Weapons._2x_80kg_LYSB_71_Illumination_Bomb,
"{BRU42_2*BDU45 RS}": Weapons._2_BDU_45,
"{BRU42_2*BDU45B RS}": Weapons._2_BDU_45B,
"{BRU3242_2*BDU45B RS}": Weapons._2_BDU_45B_,
"{PHXBRU3242_2*BDU45B RS}": Weapons._2_BDU_45B__,
"{BRU42_2*BDU45B LS}": Weapons._2_BDU_45B___,
"{BRU3242_2*BDU45B LS}": Weapons._2_BDU_45B____,
"{PHXBRU3242_2*BDU45B LS}": Weapons._2_BDU_45B_____,
"{BRU3242_2*BDU45 RS}": Weapons._2_BDU_45_,
"{PHXBRU3242_2*BDU45 RS}": Weapons._2_BDU_45__,
"{BRU42_2*BDU45 LS}": Weapons._2_BDU_45___,
"{BRU3242_2*BDU45 LS}": Weapons._2_BDU_45____,
"{PHXBRU3242_2*BDU45 LS}": Weapons._2_BDU_45_____,
"{BRU-70_2*CBU-99_LEFT}": Weapons._2_CBU_99,
"{BRU-70_2*CBU-99_RIGHT}": Weapons._2_CBU_99_,
"{BRU-42_2*GBU-12_LEFT}": Weapons._2_GBU_12,
"{BRU-42_2*GBU-12_RIGHT}": Weapons._2_GBU_12_,
"{BRU-42_2*GBU-16_LEFT}": Weapons._2_GBU_16,
"{BRU-42_2*GBU-16_RIGHT}": Weapons._2_GBU_16_,
"{BRU-42_2*GBU-38_LEFT}": Weapons._2_GBU_38,
"{BRU-42_2*GBU-38_RIGHT}": Weapons._2_GBU_38_,
"{BRU-70A_2*GBU-54_LEFT}": Weapons._2_GBU_54_V_1_B,
"{BRU-70A_2*GBU-54_RIGHT}": Weapons._2_GBU_54_V_1_B_,
"{BRU42_2*LAU10 L}": Weapons._2_LAU_10___4_ZUNI_MK_71,
"{BRU3242_2*LAU10 L}": Weapons._2_LAU_10___4_ZUNI_MK_71_,
"{BRU42_2*LAU10 R}": Weapons._2_LAU_10___4_ZUNI_MK_71__,
"{BRU3242_2*LAU10 R}": Weapons._2_LAU_10___4_ZUNI_MK_71___,
"{BRU42_2*LAU10 RS}": Weapons._2_LAU_10___4_ZUNI_MK_71____,
"{BRU3242_2*LAU10 RS}": Weapons._2_LAU_10___4_ZUNI_MK_71_____,
"{PHXBRU3242_2*LAU10 RS}": Weapons._2_LAU_10___4_ZUNI_MK_71______,
"{BRU42_2*LAU10 LS}": Weapons._2_LAU_10___4_ZUNI_MK_71_______,
"{BRU3242_2*LAU10 LS}": Weapons._2_LAU_10___4_ZUNI_MK_71________,
"{PHXBRU3242_2*LAU10 LS}": Weapons._2_LAU_10___4_ZUNI_MK_71_________,
"{BRU42_2*LUU2 R}": Weapons._2_LUU_2,
"{BRU3242_2*LUU2 R}": Weapons._2_LUU_2_,
"{BRU42_2*LUU2 L}": Weapons._2_LUU_2__,
"{BRU3242_2*LUU2 L}": Weapons._2_LUU_2___,
"{BRU42_2*MK20 RS}": Weapons._2_MK_20,
"{BRU3242_2*MK20 RS}": Weapons._2_MK_20_,
"{PHXBRU3242_2*MK20 RS}": Weapons._2_MK_20__,
"{BRU42_2*MK20 LS}": Weapons._2_MK_20___,
"{BRU3242_2*MK20 LS}": Weapons._2_MK_20____,
"{PHXBRU3242_2*MK20 LS}": Weapons._2_MK_20_____,
"{BRU-42_2*MK-20_LEFT}": Weapons._2_Mk_20_Rockeye,
"{BRU-42_2*MK-20_RIGHT}": Weapons._2_Mk_20_Rockeye_,
"{BRU42_2*MK81 RS}": Weapons._2_Mk_81,
"{BRU3242_2*MK81 RS}": Weapons._2_Mk_81_,
"{PHXBRU3242_2*MK81 RS}": Weapons._2_Mk_81__,
"{BRU42_2*MK81 LS}": Weapons._2_Mk_81___,
"{BRU3242_2*MK81 LS}": Weapons._2_Mk_81____,
"{PHXBRU3242_2*MK81 LS}": Weapons._2_Mk_81_____,
"{BRU-42_2*Mk-82_LEFT}": Weapons._2_Mk_82,
"{BRU42_2*MK82AIR RS}": Weapons._2_Mk_82AIR,
"{BRU3242_2*MK82AIR RS}": Weapons._2_Mk_82AIR_,
"{PHXBRU3242_2*MK82AIR RS}": Weapons._2_Mk_82AIR__,
"{BRU42_2*MK82AIR LS}": Weapons._2_Mk_82AIR___,
"{BRU3242_2*MK82AIR LS}": Weapons._2_Mk_82AIR____,
"{PHXBRU3242_2*MK82AIR LS}": Weapons._2_Mk_82AIR_____,
"{BRU-42_2*Mk-82_RIGHT}": Weapons._2_Mk_82_,
"{BRU-42_2*Mk-82AIR_LEFT}": Weapons._2_Mk_82_AIR,
"{BRU-42_2*Mk-82AIR_RIGHT}": Weapons._2_Mk_82_AIR_,
"{BRU42_2*MK82SE RS}": Weapons._2_Mk_82_SnakeEye,
"{BRU3242_2*MK82SE RS}": Weapons._2_Mk_82_SnakeEye_,
"{PHXBRU3242_2*MK82SE RS}": Weapons._2_Mk_82_SnakeEye__,
"{BRU42_2*MK82SE LS}": Weapons._2_Mk_82_SnakeEye___,
"{BRU3242_2*MK82SE LS}": Weapons._2_Mk_82_SnakeEye____,
"{PHXBRU3242_2*MK82SE LS}": Weapons._2_Mk_82_SnakeEye_____,
"{BRU-42_2*Mk-82SNAKEYE_LEFT}": Weapons._2_Mk_82_Snakeye,
"{BRU-42_2*Mk-82SNAKEYE_RIGHT}": Weapons._2_Mk_82_Snakeye_,
"{BRU42_2*MK82 RS}": Weapons._2_Mk_82__,
"{BRU3242_2*MK82 RS}": Weapons._2_Mk_82___,
"{PHXBRU3242_2*MK82 RS}": Weapons._2_Mk_82____,
"{BRU42_2*MK82 LS}": Weapons._2_Mk_82_____,
"{BRU3242_2*MK82 LS}": Weapons._2_Mk_82______,
"{PHXBRU3242_2*MK82 LS}": Weapons._2_Mk_82_______,
"{BRU-42_2*Mk-83_LEFT}": Weapons._2_Mk_83,
"{BRU-42_2*Mk-83_RIGHT}": Weapons._2_Mk_83_,
"{BRU42_2*SUU25 L}": Weapons._2_SUU_25___8_LUU_2,
"{BRU3242_2*SUU25 L}": Weapons._2_SUU_25___8_LUU_2_,
"{BRU42_2*SUU25 R}": Weapons._2_SUU_25___8_LUU_2__,
"{BRU3242_2*SUU25 R}": Weapons._2_SUU_25___8_LUU_2___,
"{2x9M120F_Ataka_V}": Weapons._2_x_9M120F_Ataka__AT_9_Spiral_2____AGM__SACLOS__HE,
"{2x9M120_Ataka_V}": Weapons._2_x_9M120_Ataka__AT_9_Spiral_2____ATGM__SACLOS__Tandem_HEAT,
"{2x9M220_Ataka_V}": Weapons._2_x_9M220O_Ataka__AT_9_Spiral_2____AAM__SACLOS__Frag,
"{07BE2D19-0E48-4B0B-91DA-5F6C8F9E3C75}": Weapons._2_x_ALARM,
"{C535596E-F7D2-4301-8BB4-B1658BB87ED7}": Weapons._2_x_BL_755_CBUs___450kg__147_Frag_Pen_bomblets,
"{TWIN_B13L_5OF}": Weapons._2_x_B_13L_pods___10_x_S_13_OF__122mm_UnGd_Rkts__Blast_Frag,
"{B13_5_S13OF_DUAL_L}": Weapons._2_x_B_13L___5_S_13_OF,
"{B13_5_S13OF_DUAL_R}": Weapons._2_x_B_13L___5_S_13_OF_,
"{TWIN_B_8M1_S_8KOM}": Weapons._2_x_B_8M1_pods___40_x_S_8KOM__80mm_UnGd_Rkts__HEAT_AP,
"{B8M1_20_S8KOM_DUAL_L}": Weapons._2_x_B_8M1___20_S_8KOM,
"{B8M1_20_S8KOM_DUAL_R}": Weapons._2_x_B_8M1___20_S_8KOM_,
"{B8M1_20_S8OFP2_DUAL_L}": Weapons._2_x_B_8M1___20_S_8OFP2,
"{B8M1_20_S8OFP2_DUAL_R}": Weapons._2_x_B_8M1___20_S_8OFP2_,
"{B8M1_20_S8TsM_DUAL_L}": Weapons._2_x_B_8M1___20_S_8TsM,
"{B8M1_20_S8TsM_DUAL_R}": Weapons._2_x_B_8M1___20_S_8TsM_,
"{TWIN_B_8M1_S_8_OFP2}": Weapons._2_x_B_8V20A_pods___40_x_S_8OFP2__80mm_UnGd_Rkts__HE_Frag_AP,
"{TWIN_B_8M1_S_8TsM}": Weapons._2_x_B_8V20A_pods___40_x_S_8TsM__80mm_UnGd_Rkts__Smk,
"{FAB_250_DUAL_L}": Weapons._2_x_FAB_250,
"{FAB_250_DUAL_R}": Weapons._2_x_FAB_250_,
"{FAB_500_DUAL_L}": Weapons._2_x_FAB_500,
"{FAB_500_DUAL_R}": Weapons._2_x_FAB_500_,
"{HVARx2}": Weapons._2_x_HVAR__UnGd_Rkts,
"{FAB-100x2}": Weapons._2_x_OFAB_100_Jupiter___100kg_GP_Bombs_LD,
"{RBK_250_PTAB25M_DUAL_L}": Weapons._2_x_RBK_250_PTAB_2_5M,
"{RBK_250_PTAB25M_DUAL_R}": Weapons._2_x_RBK_250_PTAB_2_5M_,
"{RBK_500_PTAB105_DUAL_L}": Weapons._2_x_RBK_500_255_PTAB_10_5,
"{RBK_500_PTAB105_DUAL_R}": Weapons._2_x_RBK_500_255_PTAB_10_5_,
"{MOSSIE_2_British_AP_25LBNo1_3INCHNo1_ON_LEFT_WING_RAILS}": Weapons._2_x_RP_3_25lb_AP_Mk_I,
"{MOSSIE_2_British_AP_25LBNo1_3INCHNo1_ON_RIGHT_WING_RAILS}": Weapons._2_x_RP_3_25lb_AP_Mk_I_,
"{MOSSIE_2_British_HE_60LBFNo1_3INCHNo1_ON_LEFT_WING_RAILS}": Weapons._2_x_RP_3_60lb_F_No1_Mk_I,
"{MOSSIE_2_British_HE_60LBFNo1_3INCHNo1_ON_RIGHT_WING_RAILS}": Weapons._2_x_RP_3_60lb_F_No1_Mk_I_,
"{MOSSIE_2_British_HE_60LBSAPNo2_3INCHNo1_ON_LEFT_WING_RAILS}": Weapons._2_x_RP_3_60lb_SAP_No2_Mk_I,
"{MOSSIE_2_British_HE_60LBSAPNo2_3INCHNo1_ON_RIGHT_WING_RAILS}": Weapons._2_x_RP_3_60lb_SAP_No2_Mk_I_,
"{S25_DUAL_L}": Weapons._2_x_S_25,
"{S25_DUAL_R}": Weapons._2_x_S_25_,
"{TWIN_S25}": Weapons._2_x_S_25_OFM___340mm_UnGdrocket__480kg_Penetrator,
"{TWIN_S25_O}": Weapons._2_x_S_25_O___420mm_UnGd_Rkt__380kg_Frag,
"{BDAD04AA-4D4A-4E51-B958-180A89F963CF}": Weapons._33_x_FAB_250___250kg_GP_Bombs_LD,
"{AD5E5863-08FC-4283-B92C-162E2B2BD3FF}": Weapons._33_x_FAB_500_M_62___500kg_GP_Bombs_LD,
"3M45": Weapons._3M45,
"{BRU42_3*BDU33}": Weapons._3_BDU_33,
"{BRU3242_3*BDU33}": Weapons._3_BDU_33_,
"{BRU42_3*BDU33_N}": Weapons._3_BDU_33__,
"{BRU3242_3*BDU33_N}": Weapons._3_BDU_33___,
"{PHXBRU3242_BDU33}": Weapons._3_BDU_33____,
"{BRU-42A_3*GBU-12}": Weapons._3_GBU_12,
"{BRU-42A_3*GBU-16}": Weapons._3_GBU_16,
"{BRU-42_3*GBU-38}": Weapons._3_GBU_38,
"{BRU-70A_3*GBU-54}": Weapons._3_GBU_54_V_1_B,
"{BRU-42_3*Mk-81LD}": Weapons._3_Mk_81,
"{BRU-42_3*Mk-82LD}": Weapons._3_Mk_82,
"{BRU-42_3_MK82AIR}": Weapons._3_Mk_82_AIR,
"{BRU-42_3*Mk-82SNAKEYE}": Weapons._3_Mk_82_Snakeye,
"{BRU-42_3*Mk-83}": Weapons._3_Mk_83,
"{3xM8_ROCKETS_IN_TUBES}": Weapons._3_x_4_5_inch_M8_UnGd_Rocket,
"{639DB5DD-CB7E-4E42-AC75-2112BC397B97}": Weapons._3_x_FAB_1500_M_54___1500kg_GP_Bombs_LD,
"{A76344EB-32D2-4532-8FA2-0C1BDC00747E}": Weapons._3_x_LAU_61_pods___57_x_2_75_Hydra__UnGd_Rkts_M151__HE,
"48N6E2": Weapons._48N6E2,
"_4M80": Weapons._4M80,
"{M71BOMBD}": Weapons._4x_SB_M_71_120kg_GP_Bomb_High_drag,
"{M71BOMB}": Weapons._4x_SB_M_71_120kg_GP_Bomb_Low_drag,
"{AABA1A14-78A1-4E85-94DD-463CF75BD9E4}": Weapons._4_x_AGM_154C___JSOW_Unitary_BROACH,
"{4xAN-M64_on_InvCountedAttachmentPoints}": Weapons._4_x_AN_M64___500lb_GP_Bomb_LD,
"{3EA17AB0-A805-4D9E-8732-4CE00CB00F17}": Weapons._4_x_BGM_71D_TOW_ATGM,
"{B8C99F40-E486-4040-B547-6639172A5D57}": Weapons._4_x_GBU_27___2000lb_Laser_Guided_Penetrator_Bombs,
"{MOSSIE_4_British_AP_25LBNo1_3INCHNo1_ON_LEFT_WING_RAILS}": Weapons._4_x_RP_3_25lb_AP_Mk_I,
"{MOSSIE_4_British_AP_25LBNo1_3INCHNo1_ON_RIGHT_WING_RAILS}": Weapons._4_x_RP_3_25lb_AP_Mk_I_,
"{MOSSIE_4_British_HE_60LBFNo1_3INCHNo1_ON_LEFT_WING_RAILS}": Weapons._4_x_RP_3_60lb_F_No1_Mk_I,
"{MOSSIE_4_British_HE_60LBFNo1_3INCHNo1_ON_RIGHT_WING_RAILS}": Weapons._4_x_RP_3_60lb_F_No1_Mk_I_,
"{MOSSIE_4_British_HE_60LBSAPNo2_3INCHNo1_ON_LEFT_WING_RAILS}": Weapons._4_x_RP_3_60lb_SAP_No2_Mk_I,
"{MOSSIE_4_British_HE_60LBSAPNo2_3INCHNo1_ON_RIGHT_WING_RAILS}": Weapons._4_x_RP_3_60lb_SAP_No2_Mk_I_,
"{British_GP_500LB_Bomb_Mk1}": Weapons._500_lb_GP_Mk_I,
"{British_GP_500LB_Bomb_Mk4}": Weapons._500_lb_GP_Mk_IV,
"{British_GP_500LB_Bomb_Mk5}": Weapons._500_lb_GP_Mk_V,
"{British_GP_500LB_Bomb_Mk4_Short}": Weapons._500_lb_GP_Short_tail,
"{British_GP_500LB_Bomb_Mk4_Short_on_Handley_Page_Type_B_Cut_Bar}": Weapons._500_lb_GP_Short_tail_,
"{British_MC_500LB_Bomb_Mk2}": Weapons._500_lb_MC_Mk_II,
"{British_MC_500LB_Bomb_Mk1_Short}": Weapons._500_lb_MC_Short_tail,
"{British_MC_500LB_Bomb_Mk1_Short_on_Handley_Page_Type_B_Cut_Bar}": Weapons._500_lb_MC_Short_tail_,
"{British_SAP_500LB_Bomb_Mk5}": Weapons._500_lb_S_A_P_,
"{MOSQUITO_50GAL_SLIPPER_TANK}": Weapons._50_gal__Drop_Tank,
"{72CAC282-AE18-490B-BD4D-35E7EE969E73}": Weapons._51_x_M117___750lb_GP_Bombs_LD,
"{B84DFE16-6AC7-4854-8F6D-34137892E166}": Weapons._51_x_Mk_82___500lb_GP_Bombs_LD,
"5V55": Weapons._5V55,
"{P47_5_HVARS_ON_LEFT_WING_RAILS}": Weapons._5_x_HVAR__UnGd_Rkt,
"{P47_5_HVARS_ON_RIGHT_WING_RAILS}": Weapons._5_x_HVAR__UnGd_Rkt_,
"{MER-5E_Mk82SNAKEYEx5}": Weapons._5_x_Mk_82_Snakeye___500lb_GP_Bomb_HD,
"{MER-5E_MK82x5}": Weapons._5_x_Mk_82___500lb_GP_Bombs_LD,
"{45447F82-01B5-4029-A572-9AAD28AF0275}": Weapons._6_x_AGM_86C_ALCM_on_MER,
"{2B7BDB38-4F45-43F9-BE02-E7B3141F3D24}": Weapons._6_x_BetAB_500___500kg_Concrete_Piercing_Bombs_LD,
"{D9179118-E42F-47DE-A483-A6C2EA7B4F38}": Weapons._6_x_FAB_1500_M_54___1500kg_GP_Bombs_LD,
"{26D2AF37-B0DF-4AB6-9D61-A150FF58A37B}": Weapons._6_x_FAB_500_M_62___500kg_GP_Bombs_LD,
"{C42EE4C3-355C-4B83-8B22-B39430B8F4AE}": Weapons._6_x_Kh_35__AS_20_Kayak____520kg__AShM__IN__Act_Rdr,
"{0290F5DE-014A-4BB1-9843-D717749B1DED}": Weapons._6_x_Kh_65__AS_15B_Kent____1250kg__ASM__IN__MCC,
"{E79759F7-C622-4AA4-B1EF-37639A34D924}": Weapons._6_x_Mk_20_Rockeye___490lbs_CBUs__247_x_HEAT_Bomblets,
"{027563C9-D87E-4A85-B317-597B510E3F03}": Weapons._6_x_Mk_82___500lb_GP_Bombs_LD,
"{DT75GAL}": Weapons._75_US_gal__Fuel_Tank,
"{46ACDCF8-5451-4E26-BDDB-E78D5830E93C}": Weapons._8_x_AGM_84A_Harpoon_ASM,
"{8DCAF3A3-7FCF-41B8-BB88-58DEDA878EDE}": Weapons._8_x_AGM_86C_ALCM,
"{CD9417DF-455F-4176-A5A2-8C58D61AA00B}": Weapons._8_x_Kh_65__AS_15B_Kent____1250kg__ASM__IN__MCC,
"_9M111": Weapons._9M111,
"{9M114 Shturm-V-2 Rack}": Weapons._9M114_Shturm_V_2_Rack,
"{B919B0F4-7C25-455E-9A02-CEA51DB895E3}": Weapons._9M114_Shturm_V_2__AT_6_Spiral____ATGM__SACLOS,
"{57232979-8B0F-4db7-8D9A-55197E06B0F5}": Weapons._9M114_Shturm_V_8__AT_6_Spiral____ATGM__SACLOS,
"_9M117": Weapons._9M117,
"9M133": Weapons._9M133,
"9M14": Weapons._9M14,
"9M31": Weapons._9M31,
"9M311": Weapons._9M311,
"9M33": Weapons._9M33,
"_9M331": Weapons._9M331,
"_9M37": Weapons._9M37,
"_9M38": Weapons._9M38,
"_9M39": Weapons._9M39,
"{9S846_2xIGLA}": Weapons._9S846_Strelets___2_x_Igla,
"_NiteHawk_FLIR": Weapons._NiteHawk_FLIR
}
|
pydcs/dcs
|
dcs/weapons_data.py
|
Python
|
lgpl-3.0
| 212,933 | 0.006115 |
#!/usr/bin/python
import os
import sys
import grp
import pwd
import traceback
import utils
import hooking
DEV_MAPPER_PATH = "/dev/mapper"
DEV_DIRECTLUN_PATH = '/dev/directlun'
def createdirectory(dirpath):
    # we don't use os.mkdir/os.chmod because we need sudo
command = ['/bin/mkdir', '-p', dirpath]
retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
if retcode != 0:
sys.stderr.write('directlun: error mkdir %s, err = %s\n' % (dirpath, err))
sys.exit(2)
    mode = '755'
    command = ['/bin/chmod', mode, dirpath]
    retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
    if retcode != 0:
        sys.stderr.write('directlun: error chmod %s %s, err = %s\n' % (dirpath, mode, err))
        sys.exit(2)
def cloneDeviceNode(srcpath, devpath):
"""Clone a device node into a temporary private location."""
# we don't use os.remove/mknod/chmod/chown because we need sudo
command = ['/bin/rm', '-f', devpath]
retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
if retcode != 0:
sys.stderr.write('directlun: error rm -f %s, err = %s\n' % (devpath, err))
sys.exit(2)
stat = os.stat(srcpath)
major = os.major(stat.st_rdev)
minor = os.minor(stat.st_rdev)
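    # recreate the source node as a block device ('b') with identical
    # major/minor numbers, giving the VM a private clone of the LUN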
command = ['/bin/mknod', devpath, 'b', str(major), str(minor)]
retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
if retcode != 0:
sys.stderr.write('directlun: error mknod %s, err = %s\n' % (devpath, err))
sys.exit(2)
mode = '660'
command = ['/bin/chmod', mode, devpath]
retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
if retcode != 0:
sys.stderr.write('directlun: error chmod %s to %s, err = %s\n' % (devpath, mode, err))
sys.exit(2)
group = grp.getgrnam('qemu')
gid = group.gr_gid
user = pwd.getpwnam('qemu')
uid = user.pw_uid
owner = str(uid) + ':' + str(gid)
command = ['/bin/chown', owner, devpath]
retcode, out, err = utils.execCmd(command, sudo=True, raw=True)
if retcode != 0:
sys.stderr.write('directlun: error chown %s to %s, err = %s\n' % (devpath, owner, err))
sys.exit(2)
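# Hook entry point: vdsm runs this script with the VM's custom properties
# exported in the environment and the libvirt domain XML on stdin (read
# via hooking.read_domxml()). Judging from the parsing below, the
# 'directlun' property is a comma-separated list of device-mapper LUN
# names, each optionally suffixed with ':option;option;...'.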
if 'directlun' in os.environ:
try:
luns = os.environ['directlun']
domxml = hooking.read_domxml()
createdirectory(DEV_DIRECTLUN_PATH)
for lun in luns.split(','):
try:
lun, options = lun.split(':')
except ValueError:
options = ''
options = options.split(';')
srcpath = DEV_MAPPER_PATH + '/' + lun
if not os.path.exists(srcpath):
sys.stderr.write('directlun before_vm_migration_destination: device not found %s\n' % srcpath)
sys.exit(2)
uuid = domxml.getElementsByTagName('uuid')[0]
uuid = uuid.childNodes[0].nodeValue
devpath = DEV_DIRECTLUN_PATH + '/' + lun + '-' + uuid
cloneDeviceNode(srcpath, devpath)
hooking.write_domxml(domxml)
except:
sys.stderr.write('directlun before_vm_migration_destination: [unexpected error]: %s\n' % traceback.format_exc())
sys.exit(2)
|
DragonRoman/rhevm-utils
|
3.0/hooks/directlun/before_vm_migrate_destination.py
|
Python
|
gpl-3.0
| 3,145 | 0.00318 |
import numpy as np
from PyQt5.QtWidgets import (QWidget, QProgressBar, QLabel, QCheckBox,
QPushButton, QSpinBox, QVBoxLayout, QFormLayout)
from .matplotlib import QMatplotlib
from qtgui.utils import QObserver
from tools.train import Training, TrainingController
from dltb.network import Network
class QTrainingBox(QWidget, QObserver, qobservables={
# FIXME[hack]: check what we are really interested in ...
Training: Training.Change.all(),
Network: Network.Change.all()}):
"""
Attributes
----------
range: numpy.ndarray
trainingLoss: numpy.ndarray
validationLoss: numpy.ndarray
rangeIndex: int
"""
_training: TrainingController = None
_network: Network = None
def __init__(self, training: TrainingController=None,
network: Network=None, parent=None):
"""Initialization of the QTrainingBox.
"""
super().__init__(parent)
self._initUI()
self._layoutComponents()
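        # ring buffer for the loss plot: the last 100 loss values are
        # written round-robin at _rangeIndex and plotted against _range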
self._range = np.arange(100, dtype=np.float32)
self._trainingLoss = np.zeros(100, dtype=np.float32)
self._validationLoss = np.zeros(100, dtype=np.float32)
self._rangeIndex = 0
self.setTraining(training)
self.setNetwork(network)
def _initUI(self):
def slot(checked: bool):
if self._training.ready:
self._training.start()
elif self._training.running:
self._training.stop()
self._buttonTrainModel = QPushButton("Train")
self._buttonTrainModel.clicked.connect(slot)
self._plotLoss = QMatplotlib()
self._checkboxPlot = QCheckBox()
self._progressEpoch = QProgressBar()
self._progressEpoch.setFormat("%v/%m")
self._progressBatch = QProgressBar()
self._progressBatch.setFormat("%v (%p%)")
self._labelBatch = QLabel()
self._labelEpoch = QLabel()
self._labelLoss = QLabel()
self._labelAccuracy = QLabel()
self._labelDuration = QLabel()
self._labelNetwork = QLabel()
def slot(value: int):
self._training.epochs = value
self._spinboxEpochs = QSpinBox()
self._spinboxEpochs.valueChanged.connect(slot)
def slot(value: int):
self._training.batch_size = value
self._spinboxBatchSize = QSpinBox()
self._spinboxBatchSize.valueChanged.connect(slot)
def _layoutComponents(self):
form = QFormLayout()
form.addRow("Network:", self._labelNetwork)
form.addRow("Batch:", self._labelBatch)
form.addRow("Epoch:", self._labelEpoch)
form.addRow("Loss:", self._labelLoss)
form.addRow("Accuracy:", self._labelAccuracy)
form.addRow("Duration:", self._labelDuration)
form.addRow("Plot:", self._checkboxPlot)
layout = QVBoxLayout()
layout.addWidget(self._plotLoss)
layout.addLayout(form)
layout.addWidget(self._progressBatch)
layout.addWidget(self._progressEpoch)
layout.addWidget(self._buttonTrainModel)
layout.addWidget(self._spinboxEpochs)
layout.addWidget(self._spinboxBatchSize)
self.setLayout(layout)
def _enableComponents(self):
enabled = (self._network is not None and
self._training is not None and self._training.ready)
self._buttonTrainModel.setEnabled(enabled)
enabled = enabled and not self._training.running
self._spinboxEpochs.setEnabled(enabled)
self._spinboxBatchSize.setEnabled(enabled)
def setTraining(self, training: TrainingController):
self._exchangeView('_training', training)
# FIXME[test]: should be notified by the observable
self._enableComponents()
def network_changed(self, network, change):
self._network(network)
self._labelNetwork.setText(str(network))
def training_changed(self, training, change):
self._training(training)
self._enableComponents()
return
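        # FIXME[hack]: the early return above makes everything below
        # unreachable; the detailed change handling is kept for reference.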
if 'network_changed' in change:
self._enableComponents()
if 'training_changed' in change:
if self._training.epochs:
self._progressEpoch.setRange(0, self._training.epochs)
if self._training.batches:
self._progressBatch.setRange(0, self._training.batches)
if self._training is not None:
if self._training.running:
self._buttonTrainModel.setText("Stop")
else:
self._buttonTrainModel.setText("Train")
self._enableComponents()
if 'epoch_changed' in change:
if self._training.epoch is None:
self._labelEpoch.setText("")
self._progressEpoch.setValue(0)
else:
self._labelEpoch.setText(str(self._training.epoch))
self._progressEpoch.setValue(self._training.epoch+1)
if 'batch_changed' in change:
if self._training.batch is not None:
self._labelBatch.setText(f"{self._training.batch}/"
f"{self._training.batches}")
self._labelDuration.setText(str(self._training.batch_duration))
self._progressBatch.setValue(self._training.batch)
if 'parameter_changed' in change:
self._spinboxEpochs.setRange(*self._training.epochs_range)
self._spinboxEpochs.setValue(self._training.epochs)
self._spinboxBatchSize.setRange(*self._training.batch_size_range)
self._spinboxBatchSize.setValue(self._training.batch_size)
if self._training.loss is not None:
self._labelLoss.setText(str(self._training.loss))
self._trainingLoss[self._rangeIndex] = self._training.loss
if self._checkboxPlot.checkState():
self._plotLoss.plot(self._range, self._trainingLoss)
# self._plotLoss.plot(self._validationLoss)
if self._training.accuracy is not None:
self._labelAccuracy.setText(str(self._training.accuracy))
self._rangeIndex = (self._rangeIndex + 1) % len(self._range)
|
Petr-By/qtpyvis
|
qtgui/widgets/training.py
|
Python
|
mit
| 6,290 | 0.00159 |
from setuptools import setup, find_packages
setup(
name='pulp_ostree_common',
version='1.0.0a1',
packages=find_packages(),
url='http://www.pulpproject.org',
license='GPLv2+',
author='Pulp Team',
author_email='pulp-list@redhat.com',
description='common code for pulp\'s ostree support',
)
|
dkliban/pulp_ostree
|
common/setup.py
|
Python
|
gpl-2.0
| 321 | 0 |
# -*- coding: utf-8 -*-
"""
A resolver to query top-level domains via publicsuffix.org.
"""
from __future__ import absolute_import
NAME = "publicsuffix"
HELP = "a resolver to query top-level domains via publicsuffix.org"
DESC = """
This resolver returns a PTR record pointing to the top-level domain of the
hostname in question. When the --txt option is given, it will also return
additional informational TXT records.
The list of current top-level domains can be explicitly downloaded upon startup
via the --fetch argument.
"""
import dns.message
import logging
import sys
# remove current directory from path to load a module with the same name as us
oldpath, sys.path = sys.path, sys.path[1:]
import publicsuffix
sys.path = oldpath
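# Illustrative example (assumed behavior, based on DESC and the
# publicsuffix package's get_public_suffix()): a PTR query for
# "www.example.co.uk" is answered with a PTR record whose target is
# "example.co.uk.".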
"""
Module-level configuration
"""
TTL = 14400 # serve all records with this TTL
SERVE_TXT = True # serve additional TXT records
LIST_FETCH = False # download fresh copy of public suffix list
LIST_URL = "http://mxr.mozilla.org/mozilla-central/source/netwerk/dns/effective_tld_names.dat?raw=1"
log = logging.getLogger(__name__)
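# module-level suffix list, loaded once at import time from the data
# bundled with the publicsuffix package (--fetch is parsed below, but
# fetching itself is not implemented yet)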
psl = publicsuffix.PublicSuffixList()
def configure_parser(parser):
"""
Configure provided argparse subparser with module-level options.
Use the set_defaults() construct as a callback for storing the parsed arguments.
"""
def set_defaults(args):
global TTL, SERVE_TXT, LIST_FETCH, LIST_URL
TTL = args.publicsuffix_ttl
SERVE_TXT = args.publicsuffix_txt
if args.publicsuffix_fetch in (True, False):
LIST_FETCH = args.publicsuffix_fetch
else:
LIST_FETCH = True
LIST_URL = args.publicsuffix_fetch
        # download TLD list (not implemented yet; the list bundled with
        # the publicsuffix package is used regardless)
        if LIST_FETCH:
            pass
parser.set_defaults(func=set_defaults)
parser.add_argument("--ttl", dest="publicsuffix_ttl", type=int,
default=TTL, metavar="TTL",
                        help="TTL to use for all records")
parser.add_argument("--fetch", dest="publicsuffix_fetch", nargs="?",
default=LIST_FETCH, const=True, metavar="URL",
help="fetch new list on start, from given URL if provided")
parser.add_argument("--notxt", dest="publicsuffix_txt", action="store_false",
default=SERVE_TXT,
help="do not serve additional TXT records")
return parser
def validate(msg):
"""
Filter messages that are bad or we can't handle.
Return a DNS rcode describing the problem.
"""
opcode = msg.opcode()
# we only support queries
if opcode != dns.opcode.QUERY:
return dns.rcode.NOTIMP
# # we do not allow recursion
# if msg.flags & dns.flags.RD:
# return dns.rcode.REFUSED
# only allow single question (qdcount=1)
# @TODO: allow multiple questions?
if len(msg.question) != 1:
return dns.rcode.FORMERR
return dns.rcode.NOERROR
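# query() builds the response message directly: a PTR record whose target
# is the public suffix of the queried name and, when SERVE_TXT is set, an
# additional TXT record on that suffix pointing at the Wikipedia page for
# its last label (e.g. ".uk").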
def query(msg):
"""
Return answer to provided DNS question.
Create appropriate skeleton response message via dns.message.make_response(msg).
"""
res = dns.message.make_response(msg)
# validate query
rcode = validate(msg)
res.set_rcode(rcode)
# stop here if didn't validate
if rcode != dns.rcode.NOERROR:
return res
    # in practice this is a single question, but let's not assume that
for query in msg.question:
name = query.name.to_unicode(omit_final_dot=True)
# only deal with PTR queries
if query.rdtype not in (dns.rdatatype.PTR, dns.rdatatype.ANY):
res.set_rcode(dns.rcode.NXDOMAIN)
log.info("Skipping query type %d", query.rdtype)
continue
try:
suffix = psl.get_public_suffix(name)
        except Exception:
res.set_rcode(dns.rcode.SERVFAIL)
log.exception("Oddness while looking up suffix")
# don't process further questions since we've set rcode
break
if suffix:
suffix += "."
# answer section
rdata = suffix
# https://github.com/rthalley/dnspython/issues/44
try:
# dnspython3
rrset = dns.rrset.from_text(query.name, TTL,
dns.rdataclass.IN, dns.rdatatype.PTR,
rdata)
except AttributeError:
# dnspython2
rrset = dns.rrset.from_text(query.name, TTL,
dns.rdataclass.IN, dns.rdatatype.PTR,
rdata.encode("idna"))
res.answer.append(rrset)
if SERVE_TXT:
# additional section
tld = query.name.split(2)[-1].to_text(omit_final_dot=True)
rdata = '"see: http://en.wikipedia.org/wiki/.{}"'.format(tld)
# https://github.com/rthalley/dnspython/issues/44
try:
# python3
rrset = dns.rrset.from_text(suffix, TTL,
dns.rdataclass.IN, dns.rdatatype.TXT,
rdata)
            except Exception:
# python2
rrset = dns.rrset.from_text(suffix, TTL,
dns.rdataclass.IN, dns.rdatatype.TXT,
rdata.encode("latin1"))
res.additional.append(rrset)
return res
|
skion/junkdns
|
src/resolvers/publicsuffix.py
|
Python
|
mit
| 5,429 | 0.004421 |
#
# Copyright (C) 2003-2006 Greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for fingerprinting
"""
import unittest
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem.Fingerprints import FingerprintMols
class TestCase(unittest.TestCase):
def test1(self):
# FIX: test HashAtom
pass
def test2(self):
# FIX: test HashBond
pass
def test3(self):
# FIX: test HashPath
pass
def test4(self):
""" check containing mols, no Hs, no valence """
tgts = [('CCC(O)C(=O)O', ('CCC', 'OCC', 'OCC=O', 'OCCO', 'CCCC', 'OC=O', 'CC(O)C')), ]
for smi, matches in tgts:
m = Chem.MolFromSmiles(smi)
fp1 = Chem.RDKFingerprint(m, 2, 7, 9192, 4, 0)
_ = fp1.GetOnBits()
for match in matches:
m2 = Chem.MolFromSmiles(match)
fp2 = Chem.RDKFingerprint(m2, 2, 7, 9192, 4, 0)
v1, _ = DataStructs.OnBitProjSimilarity(fp2, fp1)
        self.assertAlmostEqual(v1, 1, msg='substruct %s not properly contained in %s' % (match, smi))
def test5(self):
""" check containing mols, use Hs, no valence """
tgts = [('CCC(O)C(=O)O', ('O[CH-][CH2-]', 'O[CH-][C-]=O')), ]
for smi, matches in tgts:
m = Chem.MolFromSmiles(smi)
fp1 = Chem.RDKFingerprint(m, 2, 7, 9192, 4, 1)
_ = fp1.GetOnBits()
for match in matches:
m2 = Chem.MolFromSmiles(match)
fp2 = Chem.RDKFingerprint(m2, 2, 7, 9192, 4, 1)
v1, _ = DataStructs.OnBitProjSimilarity(fp2, fp1)
        self.assertAlmostEqual(v1, 1, msg='substruct %s not properly contained in %s' % (match, smi))
def test6(self):
""" check that the bits in a signature of size N which has been folded in half
are the same as those in a signature of size N/2 """
smis = ['CCC(O)C(=O)O', 'c1ccccc1', 'C1CCCCC1', 'C1NCCCC1', 'CNCNCNC']
for smi in smis:
m = Chem.MolFromSmiles(smi)
fp1 = Chem.RDKFingerprint(m, 2, 7, 4096)
fp2 = DataStructs.FoldFingerprint(fp1, 2)
fp3 = Chem.RDKFingerprint(m, 2, 7, 2048)
self.assertEqual(tuple(fp2.GetOnBits()), tuple(fp3.GetOnBits()))
fp2 = DataStructs.FoldFingerprint(fp2, 2)
fp3 = Chem.RDKFingerprint(m, 2, 7, 1024)
self.assertEqual(tuple(fp2.GetOnBits()), tuple(fp3.GetOnBits()))
fp2 = DataStructs.FoldFingerprint(fp1, 4)
self.assertEqual(tuple(fp2.GetOnBits()), tuple(fp3.GetOnBits()))
def testGithub1747(self):
""" test github #1747: deprecated apply() function causes GetRDKFingerprint
to fail in Python 3 """
fp = FingerprintMols.GetRDKFingerprint(Chem.MolFromSmiles('CCO'))
self.assertNotEqual(0,fp.GetNumOnBits())
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
greglandrum/rdkit
|
rdkit/Chem/Fingerprints/UnitTestFingerprints.py
|
Python
|
bsd-3-clause
| 2,918 | 0.011309 |
from otp.ai.AIBaseGlobal import *
import DistributedCCharBaseAI
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.task import Task
import random
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
import CharStateDatasAI
class DistributedGoofySpeedwayAI(DistributedCCharBaseAI.DistributedCCharBaseAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGoofySpeedwayAI')
def __init__(self, air):
DistributedCCharBaseAI.DistributedCCharBaseAI.__init__(self, air, TTLocalizer.Goofy)
self.fsm = ClassicFSM.ClassicFSM('DistributedGoofySpeedwayAI', [State.State('Off', self.enterOff, self.exitOff, ['Lonely', 'TransitionToCostume', 'Walk']),
State.State('Lonely', self.enterLonely, self.exitLonely, ['Chatty', 'Walk', 'TransitionToCostume']),
State.State('Chatty', self.enterChatty, self.exitChatty, ['Lonely', 'Walk', 'TransitionToCostume']),
State.State('Walk', self.enterWalk, self.exitWalk, ['Lonely', 'Chatty', 'TransitionToCostume']),
State.State('TransitionToCostume', self.enterTransitionToCostume, self.exitTransitionToCostume, ['Off'])], 'Off', 'Off')
self.fsm.enterInitialState()
self.handleHolidays()
def delete(self):
self.fsm.requestFinalState()
DistributedCCharBaseAI.DistributedCCharBaseAI.delete(self)
self.lonelyDoneEvent = None
self.lonely = None
self.chattyDoneEvent = None
self.chatty = None
self.walkDoneEvent = None
self.walk = None
return
def generate(self):
DistributedCCharBaseAI.DistributedCCharBaseAI.generate(self)
name = self.getName()
self.lonelyDoneEvent = self.taskName(name + '-lonely-done')
self.lonely = CharStateDatasAI.CharLonelyStateAI(self.lonelyDoneEvent, self)
self.chattyDoneEvent = self.taskName(name + '-chatty-done')
self.chatty = CharStateDatasAI.CharChattyStateAI(self.chattyDoneEvent, self)
self.walkDoneEvent = self.taskName(name + '-walk-done')
        if self.diffPath is None:
self.walk = CharStateDatasAI.CharWalkStateAI(self.walkDoneEvent, self)
else:
self.walk = CharStateDatasAI.CharWalkStateAI(self.walkDoneEvent, self, self.diffPath)
return
def walkSpeed(self):
return ToontownGlobals.GoofySpeed
def start(self):
self.fsm.request('Lonely')
def __decideNextState(self, doneStatus):
if self.transitionToCostume == 1:
curWalkNode = self.walk.getDestNode()
if simbase.air.holidayManager:
if ToontownGlobals.HALLOWEEN_COSTUMES in simbase.air.holidayManager.currentHolidays and simbase.air.holidayManager.currentHolidays[ToontownGlobals.HALLOWEEN_COSTUMES]:
simbase.air.holidayManager.currentHolidays[ToontownGlobals.HALLOWEEN_COSTUMES].triggerSwitch(curWalkNode, self)
self.fsm.request('TransitionToCostume')
elif ToontownGlobals.APRIL_FOOLS_COSTUMES in simbase.air.holidayManager.currentHolidays and simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES]:
simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES].triggerSwitch(curWalkNode, self)
self.fsm.request('TransitionToCostume')
else:
self.notify.warning('transitionToCostume == 1 but no costume holiday')
else:
self.notify.warning('transitionToCostume == 1 but no holiday Manager')
if doneStatus['state'] == 'lonely' and doneStatus['status'] == 'done':
self.fsm.request('Walk')
elif doneStatus['state'] == 'chatty' and doneStatus['status'] == 'done':
self.fsm.request('Walk')
elif doneStatus['state'] == 'walk' and doneStatus['status'] == 'done':
if len(self.nearbyAvatars) > 0:
self.fsm.request('Chatty')
else:
self.fsm.request('Lonely')
def enterOff(self):
pass
def exitOff(self):
DistributedCCharBaseAI.DistributedCCharBaseAI.exitOff(self)
def enterLonely(self):
self.lonely.enter()
self.acceptOnce(self.lonelyDoneEvent, self.__decideNextState)
def exitLonely(self):
self.ignore(self.lonelyDoneEvent)
self.lonely.exit()
def __goForAWalk(self, task):
self.notify.debug('going for a walk')
self.fsm.request('Walk')
return Task.done
def enterChatty(self):
self.chatty.enter()
self.acceptOnce(self.chattyDoneEvent, self.__decideNextState)
def exitChatty(self):
self.ignore(self.chattyDoneEvent)
self.chatty.exit()
def enterWalk(self):
self.notify.debug('going for a walk')
self.walk.enter()
self.acceptOnce(self.walkDoneEvent, self.__decideNextState)
def exitWalk(self):
self.ignore(self.walkDoneEvent)
self.walk.exit()
def avatarEnterNextState(self):
if len(self.nearbyAvatars) == 1:
if self.fsm.getCurrentState().getName() != 'Walk':
self.fsm.request('Chatty')
else:
self.notify.debug('avatarEnterNextState: in walk state')
else:
self.notify.debug('avatarEnterNextState: num avatars: ' + str(len(self.nearbyAvatars)))
def avatarExitNextState(self):
if len(self.nearbyAvatars) == 0:
if self.fsm.getCurrentState().getName() != 'Walk':
self.fsm.request('Lonely')
def handleHolidays(self):
DistributedCCharBaseAI.DistributedCCharBaseAI.handleHolidays(self)
if hasattr(simbase.air, 'holidayManager'):
if ToontownGlobals.APRIL_FOOLS_COSTUMES in simbase.air.holidayManager.currentHolidays:
if simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES] != None and simbase.air.holidayManager.currentHolidays[ToontownGlobals.APRIL_FOOLS_COSTUMES].getRunningState():
self.diffPath = TTLocalizer.Donald
return
def getCCLocation(self):
        if self.diffPath is None:
return 1
else:
return 0
return
def enterTransitionToCostume(self):
pass
def exitTransitionToCostume(self):
pass
|
ksmit799/Toontown-Source
|
toontown/classicchars/DistributedGoofySpeedwayAI.py
|
Python
|
mit
| 6,450 | 0.004186 |
# Copyright (c) 2013 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from trove.common import cfg
CONF = cfg.CONF
TIME_OUT = 1200
COUCHBASE_DUMP_DIR = '/tmp/backups'
COUCHBASE_CONF_DIR = '/etc/couchbase'
COUCHBASE_WEBADMIN_PORT = '8091'
COUCHBASE_REST_API = 'http://localhost:' + COUCHBASE_WEBADMIN_PORT
BUCKETS_JSON = '/buckets.json'
SECRET_KEY = '/secret_key'
SERVICE_CANDIDATES = ["couchbase-server"]
cmd_couchbase_status = ('sudo /opt/couchbase/bin/couchbase-cli server-info '
'-c %(IP)s:8091 -u root -p %(PWD)s')
cmd_node_init = ('sudo /opt/couchbase/bin/couchbase-cli node-init '
'-c %(IP)s:8091 --node-init-data-path=%(data_path)s '
'-u root -p %(PWD)s')
cmd_cluster_init = ('sudo /opt/couchbase/bin/couchbase-cli cluster-init '
'-c %(IP)s:8091 --cluster-init-username=root '
'--cluster-init-password=%(PWD)s '
'--cluster-init-port=8091')
cmd_kill = 'sudo pkill -u couchbase'
cmd_rm_old_data_dir = 'sudo rm -rf /opt/couchbase/var/lib/couchbase/data'
""" For optimal couchbase operations, swappiness of vm should be set to 0.
Reference link: http://docs.couchbase.com/couchbase-manual-2
.5/cb-admin/#using-couchbase-in-the-cloud """
cmd_set_swappiness = 'sudo sysctl vm.swappiness=0'
cmd_update_sysctl_conf = ('echo "vm.swappiness = 0" | sudo tee -a '
'/etc/sysctl.conf')
cmd_reset_pwd = 'sudo /opt/couchbase/bin/cbreset_password %(IP)s:8091'
pwd_file = COUCHBASE_CONF_DIR + SECRET_KEY
cmd_get_password_from_config = """sudo /opt/couchbase/bin/erl -noinput -eval \
'case file:read_file("/opt/couchbase/var/lib/couchbase/config/config.dat") \
of {ok, B} -> io:format("~p~n", [binary_to_term(B)]) end.' \
-run init stop | grep '\[{"root",\[{password,' | awk -F\\" '{print $4}'
"""
|
CMSS-BCRDB/RDS
|
trove/guestagent/datastore/experimental/couchbase/system.py
|
Python
|
apache-2.0
| 2,404 | 0.000832 |
#!/usr/bin/env python3
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def faker(name, debug=False):
print("%s: failing" % name)
return (False, {"name": name})
|
cylc/cylc
|
tests/functional/xtriggers/02-persistence/faker_fail.py
|
Python
|
gpl-3.0
| 873 | 0 |
#!/usr/bin/env python3
"""Script to generate unannotated baseline stubs using stubgen.
Basic usage:
$ python3 scripts/create_baseline_stubs.py <project on PyPI>
Run with -h for more help.
"""
import argparse
import os
import re
import shutil
import subprocess
import sys
from typing import Optional, Tuple
PYRIGHT_CONFIG = "pyrightconfig.stricter.json"
def search_pip_freeze_output(project: str, output: str) -> Optional[Tuple[str, str]]:
# Look for lines such as "typed-ast==1.4.2". '-' matches '_' and
# '_' matches '-' in project name, so that "typed_ast" matches
# "typed-ast", and vice versa.
regex = "^(" + re.sub(r"[-_]", "[-_]", project) + ")==(.*)"
m = re.search(regex, output, flags=re.IGNORECASE | re.MULTILINE)
if not m:
return None
return m.group(1), m.group(2)
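
# A quick worked illustration of the fuzzy matching above, using a
# hypothetical pip freeze line; both spellings of the name match:
#
#   >>> search_pip_freeze_output("typed_ast", "typed-ast==1.4.2")
#   ('typed-ast', '1.4.2')
#   >>> search_pip_freeze_output("typed-ast", "typed_ast==1.4.2")
#   ('typed_ast', '1.4.2')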
def get_installed_package_info(project: str) -> Optional[Tuple[str, str]]:
"""Find package information from pip freeze output.
Match project name somewhat fuzzily (case sensitive; '-' matches '_', and
vice versa).
Return (normalized project name, installed version) if successful.
"""
r = subprocess.run(["pip", "freeze"], capture_output=True, text=True, check=True)
return search_pip_freeze_output(project, r.stdout)
def run_stubgen(package: str) -> None:
print(f"Running stubgen: stubgen -p {package}")
subprocess.run(["python", "-m", "mypy.stubgen", "-p", package], check=True)
def copy_stubs(src_base_dir: str, package: str, stub_dir: str) -> None:
"""Copy generated stubs to the target directory under stub_dir/."""
print(f"Copying stubs to {stub_dir}")
if not os.path.isdir(stub_dir):
os.mkdir(stub_dir)
src_dir = os.path.join(src_base_dir, package)
if os.path.isdir(src_dir):
shutil.copytree(src_dir, os.path.join(stub_dir, package))
else:
src_file = os.path.join("out", package + ".pyi")
if not os.path.isfile(src_file):
sys.exit("Error: Cannot find generated stubs")
shutil.copy(src_file, stub_dir)
def run_black(stub_dir: str) -> None:
print(f"Running black: black {stub_dir}")
subprocess.run(["black", stub_dir])
def run_isort(stub_dir: str) -> None:
print(f"Running isort: isort {stub_dir}")
subprocess.run(["python3", "-m", "isort", stub_dir])
def create_metadata(stub_dir: str, version: str) -> None:
"""Create a METADATA.toml file."""
m = re.match(r"[0-9]+.[0-9]+", version)
if m is None:
sys.exit(f"Error: Cannot parse version number: {version}")
fnam = os.path.join(stub_dir, "METADATA.toml")
version = m.group(0)
assert not os.path.exists(fnam)
print(f"Writing {fnam}")
with open(fnam, "w") as f:
f.write(f'version = "{version}.*"\n')
def add_pyright_exclusion(stub_dir: str) -> None:
"""Exclude stub_dir from strict pyright checks."""
with open(PYRIGHT_CONFIG) as f:
lines = f.readlines()
i = 0
while i < len(lines) and not lines[i].strip().startswith('"exclude": ['):
i += 1
assert i < len(lines), f"Error parsing {PYRIGHT_CONFIG}"
while not lines[i].strip().startswith("]"):
i += 1
line_to_add = f' "{stub_dir}",'
initial = i - 1
while lines[i].lower() > line_to_add.lower():
i -= 1
if lines[i + 1].strip().rstrip(",") == line_to_add.strip().rstrip(","):
print(f"{PYRIGHT_CONFIG} already up-to-date")
return
if i == initial:
# Special case: when adding to the end of the list, commas need tweaking
line_to_add = line_to_add.rstrip(",")
lines[i] = lines[i].rstrip() + ",\n"
lines.insert(i + 1, line_to_add + "\n")
print(f"Updating {PYRIGHT_CONFIG}")
with open(PYRIGHT_CONFIG, "w") as f:
f.writelines(lines)
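
# For orientation, given a hypothetical exclude section such as
#
#     "exclude": [
#         "stubs/aiofiles",
#         "stubs/zzz-to-sort",
#     ]
#
# add_pyright_exclusion("stubs/braintree") walks the entries bottom-up and
# inserts the new line between them, keeping the list in case-insensitive
# alphabetical order without touching the existing entries.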
def main() -> None:
parser = argparse.ArgumentParser(
description="""Generate baseline stubs automatically for an installed pip package
using stubgen. Also run black and isort. If the name of
the project is different from the runtime Python package name, you must
also use --package (example: --package yaml PyYAML)."""
)
parser.add_argument("project", help="name of PyPI project for which to generate stubs under stubs/")
parser.add_argument("--package", help="generate stubs for this Python package (defaults to project)")
args = parser.parse_args()
project = args.project
package = args.package
if not re.match(r"[a-zA-Z0-9-_.]+$", project):
sys.exit(f"Invalid character in project name: {project!r}")
if not package:
package = project # TODO: infer from installed files
if not os.path.isdir("stubs") or not os.path.isdir("stdlib"):
sys.exit("Error: Current working directory must be the root of typeshed repository")
# Get normalized project name and version of installed package.
info = get_installed_package_info(project)
if info is None:
print(f'Error: "{project}" is not installed', file=sys.stderr)
print("", file=sys.stderr)
print(f'Suggestion: Run "python3 -m pip install {project}" and try again', file=sys.stderr)
sys.exit(1)
project, version = info
stub_dir = os.path.join("stubs", project)
if os.path.exists(stub_dir):
sys.exit(f"Error: {stub_dir} already exists (delete it first)")
run_stubgen(package)
# Stubs were generated under out/. Copy them to stubs/.
copy_stubs("out", package, stub_dir)
run_isort(stub_dir)
run_black(stub_dir)
create_metadata(stub_dir, version)
# Since the generated stubs won't have many type annotations, we
# have to exclude them from strict pyright checks.
add_pyright_exclusion(stub_dir)
print("\nDone!\n\nSuggested next steps:")
print(f" 1. Manually review the generated stubs in {stub_dir}")
print(f' 2. Run "MYPYPATH={stub_dir} python3 -m mypy.stubtest {package}" to check the stubs against runtime')
print(f' 3. Run "mypy {stub_dir}" to check for errors')
print(f' 4. Run "black {stub_dir}" and "isort {stub_dir}" (if you\'ve made code changes)')
print(f' 5. Run "flake8 {stub_dir}" to check for e.g. unused imports')
print(" 6. Commit the changes on a new branch and create a typeshed PR")
if __name__ == "__main__":
main()
|
google/intellij-community
|
python/helpers/typeshed/scripts/create_baseline_stubs.py
|
Python
|
apache-2.0
| 6,346 | 0.001733 |
"""
.. module:: views.reports.procedure
:synopsis: Indivo view implementations for the procedure report.
.. moduleauthor:: Daniel Haas <daniel.haas@post.harvard.edu>
.. moduleauthor:: Ben Adida <ben@adida.net>
"""
from django.http import HttpResponseBadRequest, HttpResponse
from indivo.lib.view_decorators import marsloader, DEFAULT_ORDERBY
from indivo.lib.query import FactQuery, DATE, STRING, NUMBER
from indivo.models import Procedure
PROCEDURE_FILTERS = {
'procedure_name' : ('name', STRING),
'date_performed': ('date_performed', DATE),
DEFAULT_ORDERBY : ('created_at', DATE)
}
PROCEDURE_TEMPLATE = 'reports/procedure.xml'
def procedure_list(*args, **kwargs):
""" List the procedure data for a given record.
For 1:1 mapping of URLs to views. Just calls
:py:meth:`~indivo.views.reports.procedure._procedure_list`.
"""
return _procedure_list(*args, **kwargs)
def carenet_procedure_list(*args, **kwargs):
""" List the procedure data for a given carenet.
For 1:1 mapping of URLs to views. Just calls
:py:meth:`~indivo.views.reports.procedure._procedure_list`.
"""
return _procedure_list(*args, **kwargs)
@marsloader(query_api_support=True)
def _procedure_list(request, group_by, date_group, aggregate_by,
limit, offset, order_by,
status, date_range, filters,
record=None, carenet=None):
""" List the procedure objects matching the passed query parameters.
See :doc:`/query-api` for a listing of valid parameters.
Will return :http:statuscode:`200` with a list of procedures on success,
:http:statuscode:`400` if any invalid query parameters were passed.
"""
q = FactQuery(Procedure, PROCEDURE_FILTERS,
group_by, date_group, aggregate_by,
limit, offset, order_by,
status, date_range, filters,
record, carenet)
try:
return q.render(PROCEDURE_TEMPLATE)
except ValueError as e:
return HttpResponseBadRequest(str(e))
|
newmediamedicine/indivo_server_1_0
|
indivo/views/reports/procedure.py
|
Python
|
gpl-3.0
| 2,013 | 0.007452 |
""" Management command to link program enrollments and external student_keys to an LMS user """
from uuid import UUID
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
from lms.djangoapps.program_enrollments.api import link_program_enrollments
User = get_user_model()
INCORRECT_PARAMETER_TEMPLATE = (
"incorrectly formatted argument '{}', "
"must be in form <external user key>:<lms username>"
)
DUPLICATE_KEY_TEMPLATE = 'external user key {} provided multiple times'
class Command(BaseCommand):
"""
Management command to manually link ProgramEnrollments without an LMS user to an LMS user by
username.
Usage:
./manage.py lms link_program_enrollments <program_uuid> <user_item>*
where a <user_item> is a string formatted as <external_user_key>:<lms_username>
Normally, program enrollments should be linked by the Django Social Auth post_save signal
handler `lms.djangoapps.program_enrollments.signals.matriculate_learner`, but in the case that
a partner does not have an IDP set up for learners to log in through, we need a way to link
enrollments.
Provided a program uuid and a list of external_user_key:lms_username, this command will look up
the matching program enrollments and users, and update the program enrollments with the matching
user. If the program enrollment has course enrollments, we will enroll the user into their
waiting program courses.
If an external user key is specified twice, an exception will be raised and no enrollments will
be modified.
For each external_user_key:lms_username, if:
- The user is not found
- No enrollment is found for the given program and external_user_key
- The enrollment already has a user
An error message will be logged and the input will be skipped. All other inputs will be
processed and enrollments updated.
If there is an error while enrolling a user in a waiting program course enrollment, the error
will be logged, and we will roll back all transactions for that user so that their db state will
be the same as it was before this command was run. This is to allow the re-running of the same
command again to correctly enroll the user once the issue preventing the enrollment has been
resolved.
No other users will be affected, they will be processed normally.
"""
help = 'Manually links ProgramEnrollment records to LMS users'
def add_arguments(self, parser):
parser.add_argument(
'program_uuid',
help='the program in which we are linking enrollments to users',
)
parser.add_argument(
'user_items',
nargs='*',
help='specify the users to link, in the format <external_student_key>:<lms_username>*',
)
# pylint: disable=arguments-differ
def handle(self, program_uuid, user_items, *args, **options):
try:
parsed_program_uuid = UUID(program_uuid)
except ValueError:
raise CommandError("supplied program_uuid '{}' is not a valid UUID")
ext_keys_to_usernames = self.parse_user_items(user_items)
try:
link_program_enrollments(
parsed_program_uuid, ext_keys_to_usernames
)
except Exception as e:
raise CommandError(str(e))
def parse_user_items(self, user_items):
"""
Params:
list of strings in the format 'external_user_key:lms_username'
Returns:
dict mapping external user keys to lms usernames
Raises:
CommandError
"""
result = {}
for user_item in user_items:
split_args = user_item.split(':')
if len(split_args) != 2:
message = INCORRECT_PARAMETER_TEMPLATE.format(user_item)
raise CommandError(message)
external_user_key = split_args[0].strip()
lms_username = split_args[1].strip()
if not (external_user_key and lms_username):
message = INCORRECT_PARAMETER_TEMPLATE.format(user_item)
raise CommandError(message)
if external_user_key in result:
raise CommandError(DUPLICATE_KEY_TEMPLATE.format(external_user_key))
result[external_user_key] = lms_username
return result
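
# Illustrative only: the expected input/output shape of parse_user_items
# (sample keys and usernames, method shown without its receiver):
#
#   parse_user_items(['learner-01:edx_user_a', 'learner-02:edx_user_b'])
#   => {'learner-01': 'edx_user_a', 'learner-02': 'edx_user_b'}
#
# An item without exactly one ':' or a repeated external user key raises
# CommandError before any enrollment is modified.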
|
cpennington/edx-platform
|
lms/djangoapps/program_enrollments/management/commands/link_program_enrollments.py
|
Python
|
agpl-3.0
| 4,440 | 0.004054 |
from __future__ import absolute_import
from proteus import *
from proteus.default_n import *
try:
from .rdls_p import *
from .vortex2D import *
except:
from rdls_p import *
from vortex2D import *
timeIntegration = NoIntegration
stepController = Newton_controller
# About the nonlinear solver
multilevelNonlinearSolver = Newton
if ct.ELLIPTIC_REDISTANCING > 0:
levelNonlinearSolver = TwoStageNewton
else:
levelNonlinearSolver = Newton
tolFac = 0.0
nl_atol_res = atolRedistance
linTolFac = 0.0
maxNonlinearIts = 100000
maxLineSearches = 0
useEisenstatWalker = True
fullNewtonFlag = True
if useHex:
hex=True
if pDegree_ls==1:
femSpaces = {0:C0_AffineLinearOnCubeWithNodalBasis}
elif pDegree_ls==2:
femSpaces = {0:C0_AffineLagrangeOnCubeWithNodalBasis}
elementQuadrature = CubeGaussQuadrature(nd,vortex_quad_order)
elementBoundaryQuadrature = CubeGaussQuadrature(nd-1,vortex_quad_order)
else:
if pDegree_ls==1:
femSpaces = {0:C0_AffineLinearOnSimplexWithNodalBasis}
elif pDegree_ls==2:
femSpaces = {0:C0_AffineQuadraticOnSimplexWithNodalBasis}
elementQuadrature = SimplexGaussQuadrature(nd,vortex_quad_order)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,vortex_quad_order)
subgridError = HamiltonJacobi_ASGS_opt(coefficients,nd,stabFlag='2',lag=False)
shockCapturing = RDLS.ShockCapturing(coefficients,nd,shockCapturingFactor=shockCapturingFactor_rd,lag=lag_shockCapturing_rd)
numericalFluxType = DoNothing
nonlinearSmoother = None
levelNonlinearSolverConvergenceTest='r'
nonlinearSolverConvergenceTest='r'
matrix = SparseMatrix
if parallel:
multilevelLinearSolver = KSP_petsc4py#PETSc
levelLinearSolver = KSP_petsc4py#PETSc
linear_solver_options_prefix = 'rdls_'
linearSolverConvergenceTest = 'r-true'
else:
multilevelLinearSolver = LU
levelLinearSolver = LU
conservativeFlux = {}
|
erdc/proteus
|
proteus/tests/elliptic_redist/RDLS/rdls_n.py
|
Python
|
mit
| 1,916 | 0.014614 |
#!/usr/bin/python
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
""" Module: itim
============
"""
from __future__ import print_function
from multiprocessing import Process, Queue
import numpy as np
try:
from __builtin__ import zip as builtin_zip
except ImportError:
from builtins import zip as builtin_zip
from scipy.spatial import cKDTree
from . import messages
from . import utilities
from .surface import SurfaceFlatInterface as Surface
from .sanity_check import SanityCheck
from .interface import Interface
from .patches import patchTrajectory, patchOpenMM, patchMDTRAJ
class ITIM(Interface):
""" Identifies interfacial molecules at macroscopically flat interfaces.
*(Pártay, L. B.; Hantal, Gy.; Jedlovszky, P.; Vincze, Á.; Horvai, G., \
J. Comp. Chem. 29, 945, 2008)*
:param Object universe: The MDAnalysis_ Universe, MDTraj_ trajectory
or OpenMM_ Simulation objects.
:param Object group: An AtomGroup, or an array-like object with
the indices of the atoms in the group. Will
identify the interfacial molecules from this
group
:param float alpha: The probe sphere radius
:param str normal: The macroscopic interface normal direction
'x','y', 'z' or 'guess' (default)
:param bool molecular: Switches between search of interfacial
molecules / atoms (default: True)
:param int max_layers: The number of layers to be identified
:param dict radii_dict: Dictionary with the atomic radii of the
elements in the group. If None is supplied,
the default one (from GROMOS 43a1) will be
used.
:param float cluster_cut: Cutoff used for neighbors or density-based
cluster search (default: None disables the
cluster analysis)
:param float cluster_threshold_density: Number density threshold for
the density-based cluster search. 'auto'
determines the threshold automatically.
Default: None uses simple neighbors cluster
search, if cluster_cut is not None
:param Object extra_cluster_groups: Additional groups, to allow for
mixed interfaces
:param bool info: Print additional info
:param bool centered: Center the :py:obj:`group`
:param bool warnings: Print warnings
:param float mesh: The grid spacing used for the testlines
(default 0.4 Angstrom)
:param bool autoassign: If true (default) detect the interface
every time a new frame is selected.
Example:
>>> import MDAnalysis as mda
>>> import numpy as np
>>> import pytim
>>> from pytim.datafiles import *
>>>
>>> u = mda.Universe(WATER_GRO)
>>> oxygens = u.select_atoms("name OW")
>>>
>>> interface = pytim.ITIM(u, alpha=1.5, max_layers=4,molecular=True)
>>> # atoms in the layers can be accesses either through
>>> # the layers array:
>>> print (interface.layers)
[[<AtomGroup with 786 atoms> <AtomGroup with 681 atoms>
<AtomGroup with 663 atoms> <AtomGroup with 651 atoms>]
[<AtomGroup with 786 atoms> <AtomGroup with 702 atoms>
<AtomGroup with 666 atoms> <AtomGroup with 636 atoms>]]
>>> interface.layers[0,0] # upper side, first layer
<AtomGroup with 786 atoms>
>>> interface.layers[1,2] # lower side, third layer
<AtomGroup with 666 atoms>
>>> # or as a whole AtomGroup. This can include all atoms in all layers
>>> interface.atoms
<AtomGroup with 5571 atoms>
>>> selection = interface.atoms.sides == 0
>>> interface.atoms[ selection ] # all atoms in the upper side layer
<AtomGroup with 2781 atoms>
>>> selection = np.logical_and(interface.atoms.layers == 2 , selection)
>>> interface.atoms[ selection ] # upper side, second layer
<AtomGroup with 681 atoms>
>>> # the whole system can be quickly saved to a pdb file
>>> # including the layer information, written in the beta field
>>> # using:
>>> interface.writepdb('system.pdb',centered=True)
>>> # of course, the native interface of MDAnalysis can be used to
>>> # write pdb files, but the centering options are not available.
>>> # Writing to other formats that do not support the beta factor
>>> # will loose the information on the layers.
>>> interface.atoms.write('only_layers.pdb')
>>> # In some cases it might be necessary to compute two interfaces.
>>> # This could be done in the following way:
>>> import MDAnalysis as mda
>>> import pytim
>>> from pytim.datafiles import WATER_GRO, WATER_XTC
>>> u = mda.Universe(WATER_GRO,WATER_XTC)
>>> u2 = mda.Universe(WATER_GRO,WATER_XTC)
>>> inter = pytim.ITIM(u,group=u.select_atoms('resname SOL'))
>>> inter2 = pytim.ITIM(u2,group=u2.select_atoms('resname SOL'))
>>> for ts in u.trajectory[::50]:
... ts2 = u2.trajectory[ts.frame]
>>> # pytim can be used also on top of mdtraj (MDAnalysis must be present,though)
>>> import mdtraj
>>> import pytim
>>> from pytim.datafiles import WATER_GRO, WATER_XTC
>>> t = mdtraj.load_xtc(WATER_XTC,top=WATER_GRO)
>>> inter = pytim.ITIM(t)
.. _MDAnalysis: http://www.mdanalysis.org/
.. _MDTraj: http://www.mdtraj.org/
.. _OpenMM: http://www.openmm.org/
"""
@property
def layers(self):
"""Access the layers as numpy arrays of AtomGroups.
The object can be sliced as usual with numpy arrays, so, for example:
>>> import MDAnalysis as mda
>>> import pytim
>>> from pytim.datafiles import *
>>>
>>> u = mda.Universe(WATER_GRO)
>>> oxygens = u.select_atoms("name OW")
>>>
>>> interface = pytim.ITIM(u, alpha=1.5, max_layers=4,molecular=True)
>>> print(interface.layers[0,:]) # upper side (0), all layers
[<AtomGroup with 786 atoms> <AtomGroup with 681 atoms>
<AtomGroup with 663 atoms> <AtomGroup with 651 atoms>]
>>> repr(interface.layers[1,0]) # lower side (1), first layer (0)
'<AtomGroup with 786 atoms>'
>>> print(interface.layers[:,0:3]) # 1st - 3rd layer (0:3), on both sides
[[<AtomGroup with 786 atoms> <AtomGroup with 681 atoms>
<AtomGroup with 663 atoms>]
[<AtomGroup with 786 atoms> <AtomGroup with 702 atoms>
<AtomGroup with 666 atoms>]]
>>> print(interface.layers[1,0:4:2]) # side 1, layers 1-4 & stride 2 (0:4:2)
[<AtomGroup with 786 atoms> <AtomGroup with 666 atoms>]
"""
return self._layers
def __init__(self,
universe,
group=None,
alpha=1.5,
normal='guess',
molecular=True,
max_layers=1,
radii_dict=None,
cluster_cut=None,
cluster_threshold_density=None,
extra_cluster_groups=None,
info=False,
centered=False,
warnings=False,
mesh=0.4,
autoassign=True,
**kargs):
self.autoassign = autoassign
self.symmetry = 'planar'
self.do_center = centered
sanity = SanityCheck(self, warnings=warnings)
sanity.assign_universe(universe, group)
sanity.assign_alpha(alpha)
sanity.assign_mesh(mesh)
self.max_layers = max_layers
self._layers = np.empty(
[2, max_layers], dtype=self.universe.atoms[0].__class__)
self._surfaces = np.empty(max_layers, dtype=type(Surface))
self.info = info
self.normal = None
self.PDB = {}
self.molecular = molecular
sanity.assign_cluster_params(cluster_cut,
cluster_threshold_density, extra_cluster_groups)
sanity.assign_normal(normal)
sanity.assign_radii(radii_dict=radii_dict)
self.grid = None
self.use_threads = False
patchTrajectory(self.universe.trajectory, self)
self._assign_layers()
def _create_mesh(self):
""" Mesh assignment method
Based on a target value, determine a mesh size for the testlines
that is compatible with the simulation box.
Create the grid and initialize a cKDTree object with it to
facilitate fast searching of the gridpoints touched by molecules.
"""
box = utilities.get_box(self.universe, self.normal)
n, d = utilities.compute_compatible_mesh_params(self.target_mesh, box)
self.mesh_nx = n[0]
self.mesh_ny = n[1]
self.mesh_dx = d[0]
self.mesh_dy = d[1]
_x = np.linspace(0, box[0], num=int(self.mesh_nx), endpoint=False)
_y = np.linspace(0, box[1], num=int(self.mesh_ny), endpoint=False)
_X, _Y = np.meshgrid(_x, _y)
self.meshpoints = np.array([_X.ravel(), _Y.ravel()]).T
self.meshtree = cKDTree(self.meshpoints, boxsize=box[:2])
def _touched_lines(self, atom, _x, _y, _z, _radius):
return self.meshtree.query_ball_point([_x[atom], _y[atom]],
_radius[atom] + self.alpha)
def _append_layers(self, uplow, layer, layers):
inlayer_indices = np.flatnonzero(self._seen[uplow] == layer + 1)
inlayer_group = self.cluster_group[inlayer_indices]
if self.molecular is True:
# we first select the (unique) residues corresponding
# to inlayer_group, and then we create group of the
# atoms belonging to them, with
# inlayer_group.residues.atoms
inlayer_group = inlayer_group.residues.atoms
# now we need the indices within the cluster_group,
# of the atoms in the molecular layer group;
# NOTE that from MDAnalysis 0.16, .ids runs from 1->N
# (was 0->N-1 in 0.15), we use now .indices
indices = np.flatnonzero(
np.in1d(self.cluster_group.atoms.indices,
inlayer_group.atoms.indices))
# and update the tagged, sorted atoms
self._seen[uplow][indices] = layer + 1
# one of the two layers (upper,lower) or both are empty
if not inlayer_group:
raise Exception(messages.EMPTY_LAYER)
layers.append(inlayer_group)
def _assign_one_side(self,
uplow,
sorted_atoms,
_x,
_y,
_z,
_radius,
queue=None):
layers = []
for layer in range(0, self.max_layers):
# this mask tells which lines have been touched.
mask = self.mask[uplow][layer]
# atom here goes to 0 to #sorted_atoms, it is not a MDAnalysis
# index/atom
for atom in sorted_atoms:
if self._seen[uplow][atom] != 0:
continue
touched_lines = self._touched_lines(atom, _x, _y, _z, _radius)
_submask = mask[touched_lines]
if (len(_submask[_submask == 0]) == 0):
# no new contact, let's move to the next atom
continue
# let's mark now: 1) the touched lines
mask[touched_lines] = 1
# 2) the sorted atoms.
self._seen[uplow][atom] = layer + 1
# 3) if all lines have been touched, create a group that
# includes all atoms in this layer
if np.sum(mask) == len(mask):
self._append_layers(uplow, layer, layers)
break
if (queue is None):
return layers
else:
queue.put(layers)
def _prepare_layers_assignment(self):
self._create_mesh()
size = (2, int(self.max_layers), int(self.mesh_nx) * int(self.mesh_ny))
self.mask = np.zeros(size, dtype=int)
self.prepare_box()
def _prelabel_groups(self):
# first we label all atoms in group to be in the gas phase
self.label_group(self.analysis_group.atoms, beta=0.5)
# then all atoms in the largest group are labelled as liquid-like
self.label_group(self.cluster_group.atoms, beta=0.0)
def _assign_layers(self):
""" Determine the ITIM layers.
Note that the multiproc option is mainly for debugging purposes:
>>> import MDAnalysis as mda
>>> import pytim
>>> u = mda.Universe(pytim.datafiles.WATER_GRO)
>>> inter = pytim.ITIM(u,multiproc=True)
>>> test1 = len(inter.layers[0,0])
>>> inter = pytim.ITIM(u,multiproc=False)
>>> test2 = len(inter.layers[0,0])
>>> test1==test2
True
"""
up, low = 0, 1
self.reset_labels()
self._prepare_layers_assignment()
# groups have been checked already in _sanity_checks()
self._define_cluster_group()
# we always (internally) center in ITIM
self.center(planar_to_origin=True)
self._prelabel_groups()
_radius = self.cluster_group.radii
size = len(self.cluster_group.positions)
self._seen = [
np.zeros(size, dtype=np.int8),
np.zeros(size, dtype=np.int8)
]
_x = utilities.get_x(self.cluster_group, self.normal)
_y = utilities.get_y(self.cluster_group, self.normal)
_z = utilities.get_z(self.cluster_group, self.normal)
sort = np.argsort(_z + _radius * np.sign(_z))
# NOTE: np.argsort returns the sorted *indices*
# so far, it justs exploit a simple scheme splitting
# the calculation between the two sides. Would it be
# possible to implement easily 2d domain decomposition?
proc, queue = [None, None], [Queue(), Queue()]
proc[up] = Process(
target=self._assign_one_side,
args=(up, sort[::-1], _x, _y, _z, _radius, queue[up]))
proc[low] = Process(
target=self._assign_one_side,
args=(low, sort[::], _x, _y, _z, _radius, queue[low]))
for p in proc:
p.start()
for uplow in [up, low]:
for index, group in enumerate(queue[uplow].get()):
# cannot use self._layers[uplow][index] = group, otherwise
# info about universe is lost (do not know why yet)
# must use self._layers[uplow][index] =
# self.universe.atoms[group.indices]
self._layers[uplow][index] = self.universe.atoms[group.indices]
for p in proc:
p.join()
for q in queue:
q.close()
self.label_planar_sides()
        for nlayer, layer in enumerate(self._layers[0]):  # TODO should this be moved out of assign_layers?
self._surfaces[nlayer] = Surface(self, options={'layer': nlayer})
if self.do_center is False: # NOTE: do_center requires centering in
# the middle of the box.
# ITIM always centers internally in the
# origin along the normal.
self.universe.atoms.positions = self.original_positions
else:
self._shift_positions_to_middle()
#
|
balazsfabian/pytim
|
pytim/itim.py
|
Python
|
gpl-3.0
| 16,238 | 0.000308 |
from kvmap.code.projections import *
from urllib2 import urlopen
from httplib import HTTPConnection
from threading import Thread
from kivy.logger import Logger
from kivy.loader import Loader
from os.path import join, dirname
import time, os
import hashlib
try:
from pyproj import Proj
from xml.etree import ElementTree as ET
except ImportError:
pass
class WMSOverlayServer(object):
cache = {}
available_maptype = dict(roadmap='Roadmap') # default
type = "wms"
'''Generic WMS server'''
def __init__(self, progress_callback=None):
self.progress_callback = progress_callback
def setProgressCallback(self, progress_callback):
self.progress_callback = progress_callback
def getInfo(self, lat, lon, epsilon):
return None
def get(self, parent, width, height):
self.bl = parent.bottom_left
self.tr = parent.top_right
self.zoom = parent.zoom
url = self.geturl(self.bl[0], self.bl[1], self.tr[0], self.tr[1], self.zoom, width, height)
if not url:
return None
key = hashlib.md5(url).hexdigest()
if key in self.cache:
return self.cache[key]
        try:
            image = Loader.image('http://' + self.provider_host + url, progress_callback=self.progress_callback)
            self.cache[key] = image
        except Exception, e:
            Logger.error('OverlayServer could not find (or read) image %s [%s]' % (url, e))
            image = None
        return image
def getLegendGraphic(self):
if self.legend is None and not self.triedlegend:
self.triedlegend = True
layer = self.layer
if "," in layer:
layer = layer[layer.rindex(",") + 1:]
if self.legendlayer:
layer = self.legendlayer
url = self.baseurl + "?REQUEST=GetLegendGraphic&VERSION=1.0.0&FORMAT=image/png&LAYER=%s&ext=.png" % (layer)
try:
print 'http://' + self.provider_host + url
image = Loader.image('http://' + self.provider_host + url)
self.legend = image
except Exception, e:
Logger.error('OverlayServer could not find LEGENDGRAPHICS for %s %s' % (self.baseurl, layer))
return self.legend
def xy_to_co(self, lat, lon):
if self.customBounds:
x, y = latlon_to_custom(lat, lon, self.bounds)
elif self.isPLatLon: # patch for android - does not require pyproj library
x, y = lon, lat
elif self.isPGoogle: # patch for android - does not require pyproj library
x, y = latlon_to_google (lat, lon)
else:
x, y = transform(pLatlon, self.projection, lon, lat)
return x, y
def co_to_ll(self, x, y):
if self.customBounds:
            u, v = custom_to_unit(x, y, self.bounds)
l, m = unit_to_latlon(u, v)
elif self.isPLatLon: # patch for android - does not require pyproj library
l, m = y, x
elif self.isPGoogle: # patch for android - does not require pyproj library
l, m = google_to_latlon (y, x)
else:
l, m = transform(self.projection, pLatlon, y, x)
return l, m
def geturl(self, lat1, lon1, lat2, lon2, zoom, w, h):
try:
x1, y1 = self.xy_to_co(lat1, lon1)
x2, y2 = self.xy_to_co(lat2, lon2)
return self.url + "&BBOX=%f,%f,%f,%f&WIDTH=%i&HEIGHT=%i&ext=.png" % (x1, y1, x2, y2, w, h)
except RuntimeError, e:
return None
def parseLayer(self, layer, data):
try:
name = layer.find("Name").text
except:
name = None
srss = layer.findall("SRS")
if name: # and srss:
data[name] = map(lambda x:x.text, srss)
if self.debug:
print "Provider %s provides layer %s in projections %s" % (self.provider_host, name, data[name])
subs = layer.findall("Layer")
for sub in subs:
self.parseLayer(sub, data)
def initFromGetCapabilities(self, host, baseurl, layer=None, index=0, srs=None):
self.debug = (layer == None) and (index == 0)
# GetCapabilities (Layers + SRS)
if layer is None or srs is None:
capabilities = urlopen(host + baseurl + "?SERVICE=WMS&VERSION=1.1.1&Request=GetCapabilities").read().strip()
try:
tree = ET.fromstring(capabilities)
if self.debug:
ET.dump(tree)
layers = tree.findall("Capability/Layer") # TODO: proper parsing of cascading layers and their SRS
data = {}
for l in layers:
self.parseLayer(l, data)
# Choose Layer and SRS by (alphabetical) index
if layer is None:
layer = sorted(data.keys())[index]
if srs is None:
srs = sorted(data[layer])[0]
except:
pass
print "Displaying from %s/%s: layer %s in SRS %s." % (host, baseurl, layer, srs)
# generate tile URL and init projection by EPSG code
self.layer = layer
self.baseurl = baseurl
self.url = baseurl + "?LAYERS=%s&SRS=%s&FORMAT=image/png&TRANSPARENT=TRUE&SERVICE=WMS&VERSION=1.1.1&REQUEST=GetMap&STYLES=" % (layer, srs)
self.isPGoogle = False
self.isPLatLon = False
self.legend = None
self.legendlayer = None
self.triedlegend = False
if srs == "EPSG:4326":
self.isPLatLon = True
elif srs == "EPSG:900913" or srs == "EPSG:3857":
self.isPGoogle = True
try:
self.projection = pGoogle
except:
pass
else:
try:
self.projection = Proj(init=srs)
except:
pass
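
# Illustrative only: typical wiring of this class; host, path, layer and SRS
# are placeholder values, and provider_host is set by the surrounding
# application before get() is called. Passing both layer and srs skips the
# GetCapabilities round-trip.
#
#   overlay = WMSOverlayServer()
#   overlay.provider_host = 'wms.example.org'
#   overlay.initFromGetCapabilities('http://wms.example.org', '/geoserver/wms',
#                                   layer='topp:states', srs='EPSG:4326')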
|
jchome/LocalGuide-Mobile
|
kvmap/overlays/WMSOverlayServer.py
|
Python
|
gpl-2.0
| 5,614 | 0.020485 |
dict([(k, chr(k + 65)) for k in range(10)])
|
asedunov/intellij-community
|
python/testData/intentions/convertDictComp_after.py
|
Python
|
apache-2.0
| 43 | 0.023256 |
# -*- coding: utf-8 -*-
from .api_server import API
app = API()
|
business-factory/captain-hook
|
hooks/app.py
|
Python
|
mit
| 66 | 0 |
# Copyright 2017 Okia SPRL (https://okia.be)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests.common import TransactionCase
from odoo.exceptions import ValidationError
class TestCreditControlPolicyLevel(TransactionCase):
post_install = True
at_install = False
def test_check_credit_policy(self):
"""
        Test the constraints on res.partner.
        First we try to assign an account and a policy where the policy is
        wrong (it does not contain the partner's receivable account).
        After that we add the previous account to the policy and
        retry assigning this policy and this account on the partner.
:return:
"""
policy = self.env.ref('account_credit_control.credit_control_3_time')
partner = self.env['res.partner'].create({
'name': 'Partner 1',
})
account = partner.property_account_receivable_id
with self.assertRaises(ValidationError):
partner.write({
'credit_policy_id': policy.id,
})
policy.write({
'account_ids': [(6, 0, [account.id])]
})
partner.property_account_receivable_id = account.id
partner.credit_policy_id = policy.id
|
ddico/account-financial-tools
|
account_credit_control/tests/test_res_partner.py
|
Python
|
agpl-3.0
| 1,262 | 0 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import psycopg2
from psycopg2.extras import Json
def con():
psycopg2.extensions.register_adapter(dict, Json)
psycopg2.extensions.register_adapter(list, Json)
conn = psycopg2.connect(dbname='cy', host='localhost', user='postgres', password='postgres')
return conn
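
# Illustrative only: with the dict/list adapters registered in con(), JSON
# values can be passed straight through as query parameters. The table and
# column names below are hypothetical.
#
#   conn = con()
#   cur = conn.cursor()
#   cur.execute("INSERT INTO property (data) VALUES (%s)", ({'key': 'value'},))
#   conn.commit()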
|
g0v/sunshine.cy
|
parser/property/db_settings.py
|
Python
|
cc0-1.0
| 337 | 0.005935 |
import os
import urllib.request, urllib.error, urllib.parse
import shutil
import tempfile
import zipfile
import re
import logging
logger = logging.getLogger()
import xml.etree.cElementTree as ET
from util import stripInvalidXmlEntities
import awsSecrets
ICTRP_SECRETS = awsSecrets.getSecrets()
def nctList():
logger.info("Getting NCT ID list")
url = 'https://clinicaltrials.gov/ct2/results/download?flds=k&down_stds=all&down_typ=fields&down_flds=shown&down_fmt=xml&show_down=Y'
request = urllib.request.urlopen(url)
logger.info('Request complete')
tmpfile = tempfile.TemporaryFile()
shutil.copyfileobj(request, tmpfile)
request.close()
logger.info('Copied to temporary file')
z = zipfile.ZipFile(tmpfile, 'r')
xml = z.open('study_fields.xml', 'r')
logger.info('Opened ZIP contents')
root = ET.parse(xml)
logger.info('Parsed XML')
xml.close()
z.close()
tmpfile.close()
ids = [e.text for e in root.findall('.//nct_id')]
logger.info('NCT IDs listed: {} IDs found'.format(len(ids)))
return ids
def ictrpList():
logger.info("Getting ICTRP ID list")
url = 'http://apps.who.int/trialsearch/TrialService.asmx/GetTrials?Title=&username={username}&password={password}'.format(username=ICTRP_SECRETS['ICTRP_LIST_USERNAME'], password=ICTRP_SECRETS['ICTRP_LIST_PASSWORD'])
logger.info(url)
request = urllib.request.urlopen(url)
logger.info('Request complete')
xml = request.read().decode('utf-8')
request.close()
logger.info('Captured XML string')
root = ET.fromstring(stripInvalidXmlEntities(xml))
logger.info('Parsed XML')
ids = [e.text for e in root.findall('.//TrialID')]
logger.info('ICTRP IDs listed: {} IDs found'.format(len(ids)))
return ids
def crawlList():
baseUrl = "http://apps.who.int/trialsearch/crawl/"
authinfo = urllib.request.HTTPPasswordMgrWithDefaultRealm()
authinfo.add_password(None, baseUrl, ICTRP_SECRETS['ICTRP_CRAWL_USERNAME'], ICTRP_SECRETS['ICTRP_CRAWL_PASSWORD'])
handler = urllib.request.HTTPBasicAuthHandler(authinfo)
opener = urllib.request.build_opener(handler)
urllib.request.install_opener(opener)
def crawl(page):
response = urllib.request.urlopen(baseUrl + page)
body = response.read().decode('utf-8')
response.close()
return body
    pages = re.findall(r'href="(crawl[0-9]+\.aspx)"', crawl("crawl0.aspx"))
logging.info("Crawl - got index, {} pages".format(len(pages)))
ids = []
for page in pages:
        data = re.findall(r'trialid=([A-Za-z0-9\-/]+)', crawl(page))
logging.info("Crawl - got {}, {} IDs".format(page, len(data)))
ids.extend(data)
return ids
def allList():
il = frozenset(ictrpList())
nl = frozenset(nctList())
return il.union(nl)
#cl = frozenset(crawlList())
#al = sorted(cl.union(il, nl))
#logging.info("From Crawl but not listing: {}".format(sorted(cl.difference(il, nl))))
#logging.info("From list but not Crawl: {}".format(sorted(il.difference(cl))))
#logging.info("From ClinicalTrials.gov but not Crawl: {}".format(sorted(nl.difference(cl))))
#return al
|
gertvv/ictrp-retrieval
|
listRecords.py
|
Python
|
mit
| 3,167 | 0.007578 |
from __future__ import annotations
import contextlib
import os.path
import shutil
import sys
import pytest
from pre_commit import parse_shebang
from pre_commit.envcontext import envcontext
from pre_commit.envcontext import Var
from pre_commit.util import make_executable
def _echo_exe() -> str:
exe = shutil.which('echo')
assert exe is not None
return exe
def test_file_doesnt_exist():
assert parse_shebang.parse_filename('herp derp derp') == ()
def test_simple_case(tmpdir):
x = tmpdir.join('f')
x.write('#!/usr/bin/env echo')
make_executable(x.strpath)
assert parse_shebang.parse_filename(x.strpath) == ('echo',)
def test_find_executable_full_path():
assert parse_shebang.find_executable(sys.executable) == sys.executable
def test_find_executable_on_path():
assert parse_shebang.find_executable('echo') == _echo_exe()
def test_find_executable_not_found_none():
assert parse_shebang.find_executable('not-a-real-executable') is None
def write_executable(shebang, filename='run'):
os.mkdir('bin')
path = os.path.join('bin', filename)
with open(path, 'w') as f:
f.write(f'#!{shebang}')
make_executable(path)
return path
@contextlib.contextmanager
def bin_on_path():
bindir = os.path.join(os.getcwd(), 'bin')
with envcontext((('PATH', (bindir, os.pathsep, Var('PATH'))),)):
yield
def test_find_executable_path_added(in_tmpdir):
path = os.path.abspath(write_executable('/usr/bin/env sh'))
assert parse_shebang.find_executable('run') is None
with bin_on_path():
assert parse_shebang.find_executable('run') == path
def test_find_executable_path_ext(in_tmpdir):
"""Windows exports PATHEXT as a list of extensions to automatically add
to executables when doing PATH searching.
"""
exe_path = os.path.abspath(
write_executable('/usr/bin/env sh', filename='run.myext'),
)
env_path = {'PATH': os.path.dirname(exe_path)}
env_path_ext = dict(env_path, PATHEXT=os.pathsep.join(('.exe', '.myext')))
assert parse_shebang.find_executable('run') is None
assert parse_shebang.find_executable('run', _environ=env_path) is None
ret = parse_shebang.find_executable('run.myext', _environ=env_path)
assert ret == exe_path
ret = parse_shebang.find_executable('run', _environ=env_path_ext)
assert ret == exe_path
def test_normexe_does_not_exist():
with pytest.raises(OSError) as excinfo:
parse_shebang.normexe('i-dont-exist-lol')
assert excinfo.value.args == ('Executable `i-dont-exist-lol` not found',)
def test_normexe_does_not_exist_sep():
with pytest.raises(OSError) as excinfo:
parse_shebang.normexe('./i-dont-exist-lol')
assert excinfo.value.args == ('Executable `./i-dont-exist-lol` not found',)
@pytest.mark.xfail(os.name == 'nt', reason='posix only')
def test_normexe_not_executable(tmpdir): # pragma: win32 no cover
tmpdir.join('exe').ensure()
with tmpdir.as_cwd(), pytest.raises(OSError) as excinfo:
parse_shebang.normexe('./exe')
assert excinfo.value.args == ('Executable `./exe` is not executable',)
def test_normexe_is_a_directory(tmpdir):
with tmpdir.as_cwd():
tmpdir.join('exe').ensure_dir()
exe = os.path.join('.', 'exe')
with pytest.raises(OSError) as excinfo:
parse_shebang.normexe(exe)
msg, = excinfo.value.args
assert msg == f'Executable `{exe}` is a directory'
def test_normexe_already_full_path():
assert parse_shebang.normexe(sys.executable) == sys.executable
def test_normexe_gives_full_path():
assert parse_shebang.normexe('echo') == _echo_exe()
assert os.sep in _echo_exe()
def test_normalize_cmd_trivial():
cmd = (_echo_exe(), 'hi')
assert parse_shebang.normalize_cmd(cmd) == cmd
def test_normalize_cmd_PATH():
cmd = ('echo', '--version')
expected = (_echo_exe(), '--version')
assert parse_shebang.normalize_cmd(cmd) == expected
def test_normalize_cmd_shebang(in_tmpdir):
echo = _echo_exe().replace(os.sep, '/')
path = write_executable(echo)
assert parse_shebang.normalize_cmd((path,)) == (echo, path)
def test_normalize_cmd_PATH_shebang_full_path(in_tmpdir):
echo = _echo_exe().replace(os.sep, '/')
path = write_executable(echo)
with bin_on_path():
ret = parse_shebang.normalize_cmd(('run',))
assert ret == (echo, os.path.abspath(path))
def test_normalize_cmd_PATH_shebang_PATH(in_tmpdir):
echo = _echo_exe()
path = write_executable('/usr/bin/env echo')
with bin_on_path():
ret = parse_shebang.normalize_cmd(('run',))
assert ret == (echo, os.path.abspath(path))
|
pre-commit/pre-commit
|
tests/parse_shebang_test.py
|
Python
|
mit
| 4,687 | 0 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'FilterListItemCopy.date_created'
db.alter_column('api_filterlistitemcopy', 'date_created', self.gf('django.db.models.fields.DateTimeField')())
def backwards(self, orm):
# Changing field 'FilterListItemCopy.date_created'
db.alter_column('api_filterlistitemcopy', 'date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now=True))
models = {
'api.blacklistitem': {
'Meta': {'object_name': 'BlackListItem', '_ormbases': ['api.FilterListItem']},
'filterlistitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['api.FilterListItem']", 'unique': 'True', 'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'blacklist'", 'max_length': '40'})
},
'api.eyehistory': {
'Meta': {'object_name': 'EyeHistory'},
'domain': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}),
'end_event': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40'}),
'end_time': ('django.db.models.fields.DateTimeField', [], {}),
'favIconUrl': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}),
'humanize_time': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'src': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40'}),
'start_event': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2000'}),
'total_time': ('django.db.models.fields.IntegerField', [], {}),
'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'api.filterlistitem': {
'Meta': {'object_name': 'FilterListItem'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 12, 8, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'api.filterlistitemcopy': {
'Meta': {'object_name': 'FilterListItemCopy'},
'date_created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '2000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'api.whitelistitem': {
'Meta': {'object_name': 'WhiteListItem', '_ormbases': ['api.FilterListItem']},
'filterlistitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['api.FilterListItem']", 'unique': 'True', 'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'whitelist'", 'max_length': '40'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['api']
|
haystack/eyebrowse-server
|
api/migrations/0003_auto__chg_field_filterlistitemcopy_date_created.py
|
Python
|
mit
| 6,965 | 0.007466 |
from __future__ import absolute_import
import logging
try:
from redis import Redis
from rq import Queue
except ImportError:
Redis = None
Queue = None
from kaneda.exceptions import ImproperlyConfigured
from .base import BaseQueue
class RQQueue(BaseQueue):
"""
RQ queue
:param queue: queue instance of RQ class.
:param redis_url: Redis connection url where RQ will attend the async reporting requests.
:param queue_name: name of the queue being used by the RQ worker process.
"""
settings_namespace = 'RQ'
def __init__(self, queue=None, redis_url=None, queue_name='kaneda'):
if not Redis:
raise ImproperlyConfigured('You need to install redis to use the RQ queue.')
if not Queue:
raise ImproperlyConfigured('You need to install rq library to use the RQ queue.')
if queue:
if not isinstance(queue, Queue):
raise ImproperlyConfigured('"queue" parameter is not an instance of RQ queue.')
self.queue = queue
elif redis_url:
self.queue = Queue(queue_name, connection=Redis.from_url(redis_url))
else:
self.queue = Queue(queue_name, connection=Redis())
def report(self, name, metric, value, tags, id_):
try:
return self.queue.enqueue('kaneda.tasks.rq.report', name, metric, value, tags, id_)
except Exception as e:
logger = logging.getLogger(__name__)
logger.exception(e)
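# A minimal usage sketch, not part of the original module: it assumes a Redis
# server reachable at the hypothetical URL below and an RQ worker consuming
# the 'kaneda' queue; it only illustrates the reporting call.
if __name__ == '__main__':
    queue = RQQueue(redis_url='redis://localhost:6379/0')
    # Enqueues kaneda.tasks.rq.report with the metric payload for the worker.
    queue.report(name='requests', metric='gauge', value=42, tags=['web'], id_=None)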
|
APSL/kaneda
|
kaneda/queues/rq.py
|
Python
|
mit
| 1,504 | 0.003989 |
from .persistent_homology import PersistentHomology
__name__ = 'OpenTDA'
__version__ = '0.1'
__all__ = ['PersistentHomology']
|
outlace/OpenTDA
|
tda/__init__.py
|
Python
|
apache-2.0
| 126 | 0 |
import requests
def login(output_file):
    """Log in to Hue on the Cloudera quickstart VM and save the response.

    Fetches the login page to read the CSRF token out of the form markup,
    posts the default quickstart credentials, and writes the page returned
    after login to output_file.
    """
    r = requests.get('http://quickstart.cloudera:8888/accounts/login/?next=/')
    # Extract the CSRF token by splitting around the hidden form input.
    tmp = r.text.split('csrfmiddlewaretoken')
    tmp = tmp[1].split("value='")
    tmp = tmp[1].split("'")
    token = tmp[0]
    cookie = r.cookies
    data = {'username': 'cloudera', 'password': 'cloudera', 'csrfmiddlewaretoken': token}
    r = requests.post('http://quickstart.cloudera:8888/accounts/login/?next=/variants/api/variants/ulb|0|1|10177|A/',
                      data=data, cookies=cookie)
    with open(output_file, 'w') as f:
        f.write(r.text)

login('curl-results.txt')
|
BRiDGEIris/cgs-apps
|
code/apps/variants/tests/local_login.py
|
Python
|
apache-2.0
| 561 | 0.046346 |
from datetime import datetime, timedelta
import logging
from urllib import urlencode
from django.http import Http404
from django.utils import html
from django.utils.safestring import mark_safe
import pytz
from corehq import Domain
from corehq.apps import reports
from corehq.apps.app_manager.models import get_app, Form, RemoteApp
from corehq.apps.app_manager.util import get_case_properties
from corehq.apps.cachehq.mixins import CachedCouchDocumentMixin
from corehq.apps.domain.middleware import CCHQPRBACMiddleware
from corehq.apps.export.models import FormQuestionSchema
from corehq.apps.reports.display import xmlns_to_name
from dimagi.ext.couchdbkit import *
from corehq.apps.reports.exportfilters import form_matches_users, is_commconnect_form, default_form_filter, \
default_case_filter
from corehq.apps.users.models import WebUser, CommCareUser, CouchUser
from corehq.util.view_utils import absolute_reverse
from couchexport.models import SavedExportSchema, GroupExportConfiguration, FakeSavedExportSchema, SplitColumn
from couchexport.transforms import couch_to_excel_datetime, identity
from couchexport.util import SerializableFunction
import couchforms
from dimagi.utils.couch.cache import cache_core
from dimagi.utils.couch.database import get_db
from dimagi.utils.decorators.memoized import memoized
from django.conf import settings
from django.core.validators import validate_email
from corehq.apps.reports.dispatcher import ProjectReportDispatcher, CustomProjectReportDispatcher
import json
import calendar
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from dimagi.utils.logging import notify_exception
from django_prbac.exceptions import PermissionDenied
class HQUserType(object):
REGISTERED = 0
DEMO_USER = 1
ADMIN = 2
UNKNOWN = 3
COMMTRACK = 4
human_readable = [settings.COMMCARE_USER_TERM,
ugettext_noop("demo_user"),
ugettext_noop("admin"),
ugettext_noop("Unknown Users"),
ugettext_noop("CommCare Supply")]
toggle_defaults = (True, False, False, False, False)
count = len(human_readable)
included_defaults = (True, True, True, True, False)
@classmethod
def use_defaults(cls):
return cls._get_manual_filterset(cls.included_defaults, cls.toggle_defaults)
@classmethod
def all_but_users(cls):
no_users = [True] * cls.count
no_users[cls.REGISTERED] = False
return cls._get_manual_filterset(cls.included_defaults, no_users)
@classmethod
def commtrack_defaults(cls):
        # this is just a convenience method for clarity on commtrack projects
return cls.all()
@classmethod
def all(cls):
defaults = (True,) * cls.count
return cls._get_manual_filterset(defaults, cls.toggle_defaults)
@classmethod
def _get_manual_filterset(cls, included, defaults):
"""
manually construct a filter set. included and defaults should both be
arrays of booleans mapping to values in human_readable and whether they should be
included and defaulted, respectively.
"""
return [HQUserToggle(i, defaults[i]) for i in range(cls.count) if included[i]]
@classmethod
def use_filter(cls, ufilter):
return [HQUserToggle(i, unicode(i) in ufilter) for i in range(cls.count)]
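# Illustrative sketch (not in the original source): `use_filter` takes the
# string values of the selected user types, e.g. from request GET parameters,
# so HQUserType.use_filter(['0', '2']) yields one HQUserToggle per type with
# `show` True only for REGISTERED (0) and ADMIN (2).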
class HQToggle(object):
type = None
show = False
name = None
def __init__(self, type, show, name):
self.type = type
self.name = name
self.show = show
def __repr__(self):
return "%(klass)s[%(type)s:%(show)s:%(name)s]" % dict(
klass = self.__class__.__name__,
type=self.type,
name=self.name,
show=self.show
)
class HQUserToggle(HQToggle):
def __init__(self, type, show):
name = _(HQUserType.human_readable[type])
super(HQUserToggle, self).__init__(type, show, name)
class TempCommCareUser(CommCareUser):
filter_flag = IntegerProperty()
def __init__(self, domain, username, uuid):
if username == HQUserType.human_readable[HQUserType.DEMO_USER]:
filter_flag = HQUserType.DEMO_USER
elif username == HQUserType.human_readable[HQUserType.ADMIN]:
filter_flag = HQUserType.ADMIN
else:
filter_flag = HQUserType.UNKNOWN
super(TempCommCareUser, self).__init__(
domain=domain,
username=username,
_id=uuid,
date_joined=datetime.utcnow(),
is_active=False,
user_data={},
first_name='',
last_name='',
filter_flag=filter_flag
)
def save(self, **params):
raise NotImplementedError
@property
def userID(self):
return self._id
@property
def username_in_report(self):
if self.filter_flag == HQUserType.UNKNOWN:
final = mark_safe('%s <strong>[unregistered]</strong>' % html.escape(self.username))
elif self.filter_flag == HQUserType.DEMO_USER:
final = mark_safe('<strong>%s</strong>' % html.escape(self.username))
else:
final = mark_safe('<strong>%s</strong> (%s)' % tuple(map(html.escape, [self.username, self.user_id])))
return final
@property
def raw_username(self):
return self.username
class Meta:
app_label = 'reports'
DATE_RANGE_CHOICES = ['last7', 'last30', 'lastn', 'lastmonth', 'since', 'range', '']
class ReportConfig(CachedCouchDocumentMixin, Document):
domain = StringProperty()
# the prefix of the report dispatcher class for this report, used to
# get route name for url reversing, and report names
report_type = StringProperty()
report_slug = StringProperty()
subreport_slug = StringProperty(default=None)
name = StringProperty()
description = StringProperty()
owner_id = StringProperty()
filters = DictProperty()
date_range = StringProperty(choices=DATE_RANGE_CHOICES)
days = IntegerProperty(default=None)
start_date = DateProperty(default=None)
end_date = DateProperty(default=None)
datespan_slug = StringProperty(default=None)
def delete(self, *args, **kwargs):
notifications = self.view('reportconfig/notifications_by_config',
reduce=False, include_docs=True, key=self._id).all()
for n in notifications:
n.config_ids.remove(self._id)
if n.config_ids:
n.save()
else:
n.delete()
return super(ReportConfig, self).delete(*args, **kwargs)
@classmethod
def by_domain_and_owner(cls, domain, owner_id, report_slug=None,
stale=True, skip=None, limit=None):
if stale:
#kwargs['stale'] = settings.COUCH_STALE_QUERY
pass
if report_slug is not None:
key = ["name slug", domain, owner_id, report_slug]
else:
key = ["name", domain, owner_id]
db = cls.get_db()
kwargs = {}
if skip is not None:
kwargs['skip'] = skip
if limit is not None:
kwargs['limit'] = limit
result = cache_core.cached_view(
db,
"reportconfig/configs_by_domain",
reduce=False,
include_docs=True,
startkey=key,
endkey=key + [{}],
wrapper=cls.wrap,
**kwargs
)
return result
@classmethod
    def default(cls):
return {
'name': '',
'description': '',
#'date_range': 'last7',
'days': None,
'start_date': None,
'end_date': None,
'filters': {}
}
def to_complete_json(self):
result = super(ReportConfig, self).to_json()
result.update({
'url': self.url,
'report_name': self.report_name,
'date_description': self.date_description,
'datespan_filters': self.datespan_filters,
'has_ucr_datespan': self.has_ucr_datespan,
})
return result
@property
@memoized
def _dispatcher(self):
from corehq.apps.userreports.reports.view import ConfigurableReport
dispatchers = [
ProjectReportDispatcher,
CustomProjectReportDispatcher,
ConfigurableReport,
]
for dispatcher in dispatchers:
if dispatcher.prefix == self.report_type:
return dispatcher()
raise Exception("Unknown dispatcher: %s" % self.report_type)
def get_date_range(self):
"""Duplicated in reports.config.js"""
date_range = self.date_range
# allow old report email notifications to represent themselves as a
# report config by leaving the default date range up to the report
# dispatcher
if not date_range:
return {}
import datetime
from dateutil.relativedelta import relativedelta
today = datetime.date.today()
if date_range == 'since':
start_date = self.start_date
end_date = today
elif date_range == 'range':
start_date = self.start_date
end_date = self.end_date
elif date_range == 'lastmonth':
end_date = today
start_date = today - relativedelta(months=1) + timedelta(days=1) # add one day to handle inclusiveness
else:
end_date = today
if date_range == 'last7':
days = 7
elif date_range == 'last30':
days = 30
elif date_range == 'lastn':
days = self.days
else:
raise Exception("Invalid date range")
start_date = today - datetime.timedelta(days=days)
if start_date is None or end_date is None:
# this is due to bad validation. see: http://manage.dimagi.com/default.asp?110906
logging.error('scheduled report %s is in a bad state (no startdate or enddate)' % self._id)
return {}
dates = {
'startdate': start_date.isoformat(),
'enddate': end_date.isoformat(),
}
if self.is_configurable_report:
filter_slug = self.datespan_slug
if filter_slug:
return {
'%s-start' % filter_slug: start_date.isoformat(),
'%s-end' % filter_slug: end_date.isoformat(),
filter_slug: '%(startdate)s to %(enddate)s' % dates,
}
return dates
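    # Worked example (hypothetical config): with date_range='last7' and today
    # being 2015-06-08, get_date_range() returns
    # {'startdate': '2015-06-01', 'enddate': '2015-06-08'}; a configurable
    # (UCR) report instead keys the same dates by its datespan filter slug.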
@property
@memoized
def query_string(self):
params = {}
if self._id != 'dummy':
params['config_id'] = self._id
if not self.is_configurable_report:
params.update(self.filters)
params.update(self.get_date_range())
return urlencode(params, True)
@property
@memoized
def view_kwargs(self):
kwargs = {'domain': self.domain,
'report_slug': self.report_slug}
if self.subreport_slug:
kwargs['subreport_slug'] = self.subreport_slug
return kwargs
@property
@memoized
def url(self):
try:
from django.core.urlresolvers import reverse
from corehq.apps.userreports.reports.view import ConfigurableReport
if self.is_configurable_report:
url_base = reverse(ConfigurableReport.slug, args=[self.domain, self.subreport_slug])
else:
url_base = reverse(self._dispatcher.name(), kwargs=self.view_kwargs)
return url_base + '?' + self.query_string
except Exception:
return "#"
@property
@memoized
def report(self):
"""
Returns None if no report is found for that report slug, which happens
when a report is no longer available. All callers should handle this
case.
"""
return self._dispatcher.get_report(
self.domain, self.report_slug, self.subreport_slug
)
@property
def report_name(self):
try:
if self.report is None:
return _("Deleted Report")
else:
return _(self.report.name)
except Exception:
return _("Unsupported Report")
@property
def full_name(self):
if self.name:
return "%s (%s)" % (self.name, self.report_name)
else:
return self.report_name
@property
def date_description(self):
if self.date_range == 'lastmonth':
return "Last Month"
elif self.days and not self.start_date:
day = 'day' if self.days == 1 else 'days'
return "Last %d %s" % (self.days, day)
elif self.end_date:
return "From %s to %s" % (self.start_date, self.end_date)
elif self.start_date:
return "Since %s" % self.start_date
else:
return ''
@property
@memoized
def owner(self):
try:
return WebUser.get_by_user_id(self.owner_id)
except CouchUser.AccountTypeError:
return CommCareUser.get_by_user_id(self.owner_id)
def get_report_content(self, lang, attach_excel=False):
"""
Get the report's HTML content as rendered by the static view format.
"""
try:
if self.report is None:
return _("The report used to create this scheduled report is no"
" longer available on CommCare HQ. Please delete this"
" scheduled report and create a new one using an available"
" report."), None
except Exception:
pass
from django.http import HttpRequest, QueryDict
request = HttpRequest()
request.couch_user = self.owner
request.user = self.owner.get_django_user()
request.domain = self.domain
request.couch_user.current_domain = self.domain
request.couch_user.language = lang
request.GET = QueryDict(
self.query_string
+ '&filterSet=true'
+ ('&'
+ urlencode(self.filters, True)
+ '&'
+ urlencode(self.get_date_range(), True)
if self.is_configurable_report else '')
)
# Make sure the request gets processed by PRBAC Middleware
CCHQPRBACMiddleware.apply_prbac(request)
try:
if self.is_configurable_report:
response = self._dispatcher.dispatch(
request,
self.subreport_slug,
render_as='email',
**self.view_kwargs
)
else:
response = self._dispatcher.dispatch(
request,
render_as='email',
permissions_check=self._dispatcher.permissions_check,
**self.view_kwargs
)
if attach_excel is True:
if self.is_configurable_report:
file_obj = self._dispatcher.dispatch(
request, self.subreport_slug,
render_as='excel',
**self.view_kwargs
)
else:
file_obj = self._dispatcher.dispatch(
request,
render_as='excel',
permissions_check=self._dispatcher.permissions_check,
**self.view_kwargs
)
else:
file_obj = None
return json.loads(response.content)['report'], file_obj
except PermissionDenied:
return _(
"We are sorry, but your saved report '%(config_name)s' "
"is no longer accessible because your subscription does "
"not allow Custom Reporting. Please talk to your Project "
"Administrator about enabling Custom Reports. If you "
"want CommCare HQ to stop sending this message, please "
"visit %(saved_reports_url)s to remove this "
"Emailed Report."
) % {
'config_name': self.name,
'saved_reports_url': absolute_reverse('saved_reports',
args=[request.domain]),
}, None
except Http404:
            return _("We are sorry, but your saved report '%(config_name)s' "
                     "cannot be generated since you do not have the correct permissions. "
                     "Please talk to your Project Administrator about getting permissions for this "
                     "report.") % {'config_name': self.name}, None
except Exception:
notify_exception(None, "Error generating report: {}".format(self.report_slug), details={
'domain': self.domain,
'user': self.owner.username,
'report': self.report_slug,
'report config': self.get_id
})
return _("An error occurred while generating this report."), None
@property
def is_configurable_report(self):
from corehq.apps.userreports.reports.view import ConfigurableReport
return isinstance(self._dispatcher, ConfigurableReport)
@property
@memoized
def languages(self):
if self.is_configurable_report:
return self.report.spec.get_languages()
return set()
@property
@memoized
def configurable_report(self):
from corehq.apps.userreports.reports.view import ConfigurableReport
return ConfigurableReport.get_report(
self.domain, self.report_slug, self.subreport_slug
)
@property
def datespan_filters(self):
return (self.configurable_report.datespan_filters
if self.is_configurable_report else [])
@property
def has_ucr_datespan(self):
return self.is_configurable_report and self.datespan_filters
class UnsupportedScheduledReportError(Exception):
pass
class ReportNotification(CachedCouchDocumentMixin, Document):
domain = StringProperty()
owner_id = StringProperty()
recipient_emails = StringListProperty()
config_ids = StringListProperty()
send_to_owner = BooleanProperty()
attach_excel = BooleanProperty()
# language is only used if some of the config_ids refer to UCRs.
language = StringProperty()
hour = IntegerProperty(default=8)
minute = IntegerProperty(default=0)
day = IntegerProperty(default=1)
interval = StringProperty(choices=["daily", "weekly", "monthly"])
@property
def is_editable(self):
try:
self.report_slug
return False
except AttributeError:
return True
@classmethod
def by_domain_and_owner(cls, domain, owner_id, stale=True, **kwargs):
if stale:
kwargs['stale'] = settings.COUCH_STALE_QUERY
key = [domain, owner_id]
db = cls.get_db()
result = cache_core.cached_view(db, "reportconfig/user_notifications", reduce=False,
include_docs=True, startkey=key, endkey=key + [{}],
wrapper=cls.wrap, **kwargs)
return result
@property
def all_recipient_emails(self):
# handle old documents
if not self.owner_id:
return [self.owner.get_email()]
emails = []
if self.send_to_owner:
if self.owner.is_web_user():
emails.append(self.owner.username)
else:
email = self.owner.get_email()
try:
validate_email(email)
emails.append(email)
except Exception:
pass
emails.extend(self.recipient_emails)
return emails
@property
@memoized
def owner(self):
id = self.owner_id
try:
return WebUser.get_by_user_id(id)
except CouchUser.AccountTypeError:
return CommCareUser.get_by_user_id(id)
@property
@memoized
def configs(self):
"""
Access the notification's associated configs as a list, transparently
returning an appropriate dummy for old notifications which have
`report_slug` instead of `config_ids`.
"""
if self.config_ids:
configs = ReportConfig.view('_all_docs', keys=self.config_ids,
include_docs=True).all()
configs = [c for c in configs if not hasattr(c, 'deleted')]
elif self.report_slug == 'admin_domains':
raise UnsupportedScheduledReportError("admin_domains is no longer "
"supported as a schedulable report for the time being")
else:
# create a new ReportConfig object, useful for its methods and
# calculated properties, but don't save it
class ReadonlyReportConfig(ReportConfig):
def save(self, *args, **kwargs):
pass
config = ReadonlyReportConfig()
object.__setattr__(config, '_id', 'dummy')
config.report_type = ProjectReportDispatcher.prefix
config.report_slug = self.report_slug
config.domain = self.domain
config.owner_id = self.owner_id
configs = [config]
return configs
@property
def day_name(self):
if self.interval == 'weekly':
return calendar.day_name[self.day]
        return {
            "daily": _("Every day"),
            "monthly": _("Day %s of every month") % self.day,
        }[self.interval]
@classmethod
def day_choices(cls):
"""Tuples for day of week number and human-readable day of week"""
return tuple([(val, calendar.day_name[val]) for val in range(7)])
@classmethod
def hour_choices(cls):
"""Tuples for hour number and human-readable hour"""
return tuple([(val, "%s:00" % val) for val in range(24)])
def send(self):
from dimagi.utils.django.email import send_HTML_email
from corehq.apps.reports.views import get_scheduled_report_response
# Scenario: user has been removed from the domain that they
# have scheduled reports for. Delete this scheduled report
if not self.owner.is_member_of(self.domain):
self.delete()
return
if self.all_recipient_emails:
title = "Scheduled report from CommCare HQ"
if hasattr(self, "attach_excel"):
attach_excel = self.attach_excel
else:
attach_excel = False
body, excel_files = get_scheduled_report_response(self.owner, self.domain, self._id, attach_excel=attach_excel)
for email in self.all_recipient_emails:
send_HTML_email(title, email, body.content, email_from=settings.DEFAULT_FROM_EMAIL, file_attachments=excel_files)
class AppNotFound(Exception):
pass
class HQExportSchema(SavedExportSchema):
doc_type = 'SavedExportSchema'
domain = StringProperty()
transform_dates = BooleanProperty(default=True)
@property
def global_transform_function(self):
if self.transform_dates:
return couch_to_excel_datetime
else:
return identity
@classmethod
def wrap(cls, data):
if 'transform_dates' not in data:
data['transform_dates'] = False
self = super(HQExportSchema, cls).wrap(data)
if not self.domain:
self.domain = self.index[0]
return self
class FormExportSchema(HQExportSchema):
doc_type = 'SavedExportSchema'
app_id = StringProperty()
include_errors = BooleanProperty(default=False)
split_multiselects = BooleanProperty(default=False)
def update_schema(self):
super(FormExportSchema, self).update_schema()
if self.split_multiselects:
self.update_question_schema()
for column in [column for table in self.tables for column in table.columns]:
if isinstance(column, SplitColumn):
question = self.question_schema.question_schema.get(column.index)
column.options = question.options
column.ignore_extras = True
def update_question_schema(self):
schema = self.question_schema
schema.update_schema()
@property
def question_schema(self):
return FormQuestionSchema.get_or_create(self.domain, self.app_id, self.xmlns)
@property
@memoized
def app(self):
if self.app_id:
try:
return get_app(self.domain, self.app_id, latest=True)
except Http404:
logging.error('App %s in domain %s not found for export %s' % (
self.app_id,
self.domain,
self.get_id
))
raise AppNotFound()
else:
return None
@classmethod
def wrap(cls, data):
self = super(FormExportSchema, cls).wrap(data)
if self.filter_function == 'couchforms.filters.instances':
# grandfather in old custom exports
self.include_errors = False
self.filter_function = None
return self
@property
def filter(self):
user_ids = set(CouchUser.ids_by_domain(self.domain))
user_ids.update(CouchUser.ids_by_domain(self.domain, is_active=False))
user_ids.add('demo_user')
def _top_level_filter(form):
# careful, closures used
return form_matches_users(form, user_ids) or is_commconnect_form(form)
f = SerializableFunction(_top_level_filter)
if self.app_id is not None:
f.add(reports.util.app_export_filter, app_id=self.app_id)
if not self.include_errors:
f.add(couchforms.filters.instances)
actual = SerializableFunction(default_form_filter, filter=f)
return actual
@property
def domain(self):
return self.index[0]
@property
def xmlns(self):
return self.index[1]
@property
def formname(self):
return xmlns_to_name(self.domain, self.xmlns, app_id=self.app_id)
@property
@memoized
def question_order(self):
try:
if not self.app:
return []
except AppNotFound:
if settings.DEBUG:
return []
raise
else:
questions = self.app.get_questions(self.xmlns)
order = []
for question in questions:
if not question['value']: # question probably belongs to a broken form
continue
index_parts = question['value'].split('/')
assert index_parts[0] == ''
index_parts[1] = 'form'
index = '.'.join(index_parts[1:])
order.append(index)
return order
def get_default_order(self):
return {'#': self.question_order}
def uses_cases(self):
if not self.app or isinstance(self.app, RemoteApp):
return False
form = self.app.get_form_by_xmlns(self.xmlns)
if form and isinstance(form, Form):
return bool(form.active_actions())
return False
class FormDeidExportSchema(FormExportSchema):
@property
def transform(self):
return SerializableFunction()
@classmethod
def get_case(cls, doc, case_id):
pass
class CaseExportSchema(HQExportSchema):
doc_type = 'SavedExportSchema'
@property
def filter(self):
return SerializableFunction(default_case_filter)
@property
def domain(self):
return self.index[0]
@property
def domain_obj(self):
return Domain.get_by_name(self.domain)
@property
def case_type(self):
return self.index[1]
@property
def applications(self):
return self.domain_obj.full_applications(include_builds=False)
@property
def case_properties(self):
props = set([])
for app in self.applications:
prop_map = get_case_properties(app, [self.case_type], defaults=("name",))
props |= set(prop_map[self.case_type])
return props
class FakeFormExportSchema(FakeSavedExportSchema):
def remap_tables(self, tables):
# kill the weird confusing stuff, and rename the main table to something sane
tables = _apply_removal(tables, ('#|#export_tag|#', '#|location_|#', '#|history|#'))
return _apply_mapping(tables, {
'#': 'Forms',
})
def _apply_mapping(export_tables, mapping_dict):
def _clean(tabledata):
def _clean_tablename(tablename):
return mapping_dict.get(tablename, tablename)
return (_clean_tablename(tabledata[0]), tabledata[1])
return map(_clean, export_tables)
def _apply_removal(export_tables, removal_list):
return [tabledata for tabledata in export_tables if not tabledata[0] in removal_list]
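# Sketch of the two helpers above on a made-up table list:
#   tables = [('#', rows), ('#|#export_tag|#', other_rows)]
#   _apply_removal(tables, ('#|#export_tag|#',))  ->  [('#', rows)]
#   _apply_mapping(tables, {'#': 'Forms'})        ->  [('Forms', rows), ('#|#export_tag|#', other_rows)]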
class HQGroupExportConfiguration(CachedCouchDocumentMixin, GroupExportConfiguration):
"""
HQ's version of a group export, tagged with a domain
"""
domain = StringProperty()
def get_custom_exports(self):
def _rewrap(export):
# custom wrap if relevant
try:
return {
'form': FormExportSchema,
'case': CaseExportSchema,
}[export.type].wrap(export._doc)
except KeyError:
return export
for custom in list(self.custom_export_ids):
custom_export = self._get_custom(custom)
if custom_export:
yield _rewrap(custom_export)
def exports_of_type(self, type):
return self._saved_exports_from_configs([
config for config, schema in self.all_exports if schema.type == type
])
@property
@memoized
def form_exports(self):
return self.exports_of_type('form')
@property
@memoized
def case_exports(self):
return self.exports_of_type('case')
@classmethod
def by_domain(cls, domain):
return cache_core.cached_view(cls.get_db(), "groupexport/by_domain",
key=domain,
reduce=False,
include_docs=True,
wrapper=cls.wrap,
)
@classmethod
def get_for_domain(cls, domain):
"""
For when we only expect there to be one of these per domain,
which right now is always.
"""
groups = cls.by_domain(domain)
if groups:
if len(groups) > 1:
                logging.error("Domain %s has more than one group export config; expected at most one." % domain)
return groups[0]
return HQGroupExportConfiguration(domain=domain)
@classmethod
def add_custom_export(cls, domain, export_id):
group = cls.get_for_domain(domain)
if export_id not in group.custom_export_ids:
group.custom_export_ids.append(export_id)
group.save()
return group
@classmethod
def remove_custom_export(cls, domain, export_id):
group = cls.get_for_domain(domain)
updated = False
while export_id in group.custom_export_ids:
group.custom_export_ids.remove(export_id)
updated = True
if updated:
group.save()
return group
|
puttarajubr/commcare-hq
|
corehq/apps/reports/models.py
|
Python
|
bsd-3-clause
| 31,850 | 0.001601 |
import sys
import subprocess
MODULES = [
"scipy.cluster",
"scipy.cluster.vq",
"scipy.cluster.hierarchy",
"scipy.constants",
"scipy.fft",
"scipy.fftpack",
"scipy.fftpack.convolve",
"scipy.integrate",
"scipy.interpolate",
"scipy.io",
"scipy.io.arff",
"scipy.io.harwell_boeing",
"scipy.io.idl",
"scipy.io.matlab",
"scipy.io.netcdf",
"scipy.io.wavfile",
"scipy.linalg",
"scipy.linalg.blas",
"scipy.linalg.cython_blas",
"scipy.linalg.lapack",
"scipy.linalg.cython_lapack",
"scipy.linalg.interpolative",
"scipy.misc",
"scipy.ndimage",
"scipy.odr",
"scipy.optimize",
"scipy.signal",
"scipy.signal.windows",
"scipy.sparse",
"scipy.sparse.linalg",
"scipy.sparse.csgraph",
"scipy.spatial",
"scipy.spatial.distance",
"scipy.special",
"scipy.stats",
"scipy.stats.distributions",
"scipy.stats.mstats",
"scipy.stats.contingency"
]
def test_modules_importable():
# Regression test for gh-6793.
# Check that all modules are importable in a new Python process.
# This is not necessarily true if there are import cycles present.
for module in MODULES:
cmd = 'import {}'.format(module)
subprocess.check_call([sys.executable, '-c', cmd])
|
WarrenWeckesser/scipy
|
scipy/_lib/tests/test_import_cycles.py
|
Python
|
bsd-3-clause
| 1,306 | 0 |
"""setup.py"""
from setuptools import setup
with open("README.md") as readme_file:
README = readme_file.read()
test_requirements = ["mock", "pytest", "responses", "testfixtures", "requests", "pyzmq"]
# Async requirements
test_requirements.extend(["pytest-asyncio", "aiohttp", "tornado", "websockets"])
setup(
author="Beau Barker",
author_email="beauinmelbourne@gmail.com",
classifiers=[
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
description="Send JSON-RPC requests",
entry_points={"console_scripts": ["jsonrpc = jsonrpcclient.__main__:main"]},
extras_require={
"aiohttp": ["aiohttp>=3"],
"requests": ["requests"],
"requests_security": ["requests[security]"],
"tornado": ["tornado"],
"unittest": test_requirements,
"websockets": ["websockets"],
"zmq": ["pyzmq"],
},
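    # Hypothetical install command selecting one of the extras above:
    #   pip install "jsonrpcclient[requests]"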
include_package_data=True,
install_requires=["apply_defaults<1", "click<8", "jsonschema<4"],
license="MIT",
long_description=README,
long_description_content_type="text/markdown",
name="jsonrpcclient",
# Be PEP 561 compliant
# https://mypy.readthedocs.io/en/stable/installed_packages.html#making-pep-561-compatible-packages
package_data={"jsonrpcclient": ["response-schema.json", "py.typed"]},
zip_safe=False,
packages=["jsonrpcclient", "jsonrpcclient.clients"],
url="https://github.com/bcb/jsonrpcclient",
version="3.3.6",
)
|
bcb/jsonrpcclient
|
setup.py
|
Python
|
mit
| 1,595 | 0.001881 |
import webapp2, logging
from database import get_feed_source_by_name, store_feed_source, \
get_feed_source_by_url, change_feed_source_url
class AddHandler(webapp2.RequestHandler):
def post(self):
from database import FeedSource
name = self.request.get('name')
url = self.request.get('url')
frequency_ms = self.request.get('frequency_ms')
should_update = self.request.get('should_update')
should_be_added = True
existing_source = get_feed_source_by_url(url)
if existing_source:
should_be_added = False
self.response.write( \
'The URL (' + url + ') already exists (name - ' + \
existing_source.name + ').<br/>')
self.response.write('Forgot you added it already? :O')
else:
existing_source = get_feed_source_by_name(name)
if existing_source:
if should_update:
should_be_added = False
change_feed_source_url(existing_source, url)
self.response.write('Updated.')
else:
should_be_added = False
self.response.write('The name (' + name + ') already exists.<br/>')
self.response.write( \
'Go back and choose a different name, or tick "Update?".<br/>')
if should_be_added and store_feed_source(name, url, int(frequency_ms)):
            self.response.write('Added.')
def get(self):
from database import FeedSource
self.response.write("""<!doctype html><title>Add Feed</title>
<form method="post">
Name - <input name="name"/><br/>
URL - <input name="url"/><br/>
Frequency (milliseconds) -
<input type="number" value="1000" name="frequency_ms"/><br/>
<label>Update?<input type="checkbox" name="should_update" value="1"/></label>
<input type="submit"/>
</form>""")
|
phistuck/FrequentFeedScraper
|
add_handler.py
|
Python
|
mit
| 1,694 | 0.020661 |
#!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2017 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
{
"P1": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("RATIO", FastSerializer.VOLTTYPE_FLOAT)),
"partitions": (),
"indexes": ("ID")
},
"P2": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("RATIO", FastSerializer.VOLTTYPE_FLOAT)),
"partitions": (),
"indexes": ("ID")
},
"P3": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("RATIO", FastSerializer.VOLTTYPE_FLOAT)),
"partitions": (),
"indexes": ("ID")
},
"R1": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("RATIO", FastSerializer.VOLTTYPE_FLOAT)),
"partitions": (),
"indexes": ("ID")
},
"R3": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("RATIO", FastSerializer.VOLTTYPE_FLOAT)),
"partitions": (),
"indexes": ("ID")
}
}
|
deerwalk/voltdb
|
tests/sqlcoverage/schema/joined-matview-string-schema.py
|
Python
|
agpl-3.0
| 3,168 | 0 |
"""Configuration classes."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
from . import types as t
from .util import (
find_python,
generate_pip_command,
ApplicationError,
)
from .util_common import (
docker_qualify_image,
get_docker_completion,
get_remote_completion,
CommonConfig,
)
from .metadata import (
Metadata,
)
from .data import (
data_context,
)
try:
TIntegrationConfig = t.TypeVar('TIntegrationConfig', bound='IntegrationConfig')
except AttributeError:
TIntegrationConfig = None # pylint: disable=invalid-name
class ParsedRemote:
"""A parsed version of a "remote" string."""
def __init__(self, arch, platform, version): # type: (t.Optional[str], str, str) -> None
self.arch = arch
self.platform = platform
self.version = version
@staticmethod
def parse(value): # type: (str) -> t.Optional['ParsedRemote']
"""Return a ParsedRemote from the given value or None if the syntax is invalid."""
parts = value.split('/')
if len(parts) == 2:
arch = None
platform, version = parts
elif len(parts) == 3:
arch, platform, version = parts
else:
return None
return ParsedRemote(arch, platform, version)
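# Illustrative parses (hypothetical values):
#   ParsedRemote.parse('freebsd/12.1')       -> arch=None, platform='freebsd', version='12.1'
#   ParsedRemote.parse('power/freebsd/12.1') -> arch='power', platform='freebsd', version='12.1'
#   ParsedRemote.parse('freebsd')            -> None (unrecognized syntax)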
class EnvironmentConfig(CommonConfig):
"""Configuration common to all commands which execute in an environment."""
def __init__(self, args, command):
"""
:type args: any
:type command: str
"""
super(EnvironmentConfig, self).__init__(args, command)
self.local = args.local is True
self.venv = args.venv
self.venv_system_site_packages = args.venv_system_site_packages
self.python = args.python if 'python' in args else None # type: str
self.docker = docker_qualify_image(args.docker) # type: str
self.docker_raw = args.docker # type: str
self.remote = args.remote # type: str
if self.remote:
self.parsed_remote = ParsedRemote.parse(self.remote)
if not self.parsed_remote or not self.parsed_remote.platform or not self.parsed_remote.version:
raise ApplicationError('Unrecognized remote "%s" syntax. Use "platform/version" or "arch/platform/version".' % self.remote)
else:
self.parsed_remote = None
self.docker_privileged = args.docker_privileged if 'docker_privileged' in args else False # type: bool
self.docker_pull = args.docker_pull if 'docker_pull' in args else False # type: bool
self.docker_keep_git = args.docker_keep_git if 'docker_keep_git' in args else False # type: bool
self.docker_seccomp = args.docker_seccomp if 'docker_seccomp' in args else None # type: str
self.docker_memory = args.docker_memory if 'docker_memory' in args else None
self.docker_terminate = args.docker_terminate if 'docker_terminate' in args else None # type: str
if self.docker_seccomp is None:
self.docker_seccomp = get_docker_completion().get(self.docker_raw, {}).get('seccomp', 'default')
self.remote_stage = args.remote_stage # type: str
self.remote_provider = args.remote_provider # type: str
self.remote_aws_region = args.remote_aws_region # type: str
self.remote_terminate = args.remote_terminate # type: str
if self.remote_provider == 'default':
self.remote_provider = None
self.requirements = args.requirements # type: bool
if self.python == 'default':
self.python = None
actual_major_minor = '.'.join(str(i) for i in sys.version_info[:2])
self.python_version = self.python or actual_major_minor
self.python_interpreter = args.python_interpreter
self.pip_check = args.pip_check
self.delegate = self.docker or self.remote or self.venv
self.delegate_args = [] # type: t.List[str]
if self.delegate:
self.requirements = True
self.inject_httptester = args.inject_httptester if 'inject_httptester' in args else False # type: bool
self.httptester = docker_qualify_image(args.httptester if 'httptester' in args else '') # type: str
if self.get_delegated_completion().get('httptester', 'enabled') == 'disabled':
self.httptester = False
if self.get_delegated_completion().get('pip-check', 'enabled') == 'disabled':
self.pip_check = False
if args.check_python and args.check_python != actual_major_minor:
raise ApplicationError('Running under Python %s instead of Python %s as expected.' % (actual_major_minor, args.check_python))
if self.docker_keep_git:
def git_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
"""Add files from the content root .git directory to the payload file list."""
for dirpath, _dirnames, filenames in os.walk(os.path.join(data_context().content.root, '.git')):
paths = [os.path.join(dirpath, filename) for filename in filenames]
files.extend((path, os.path.relpath(path, data_context().content.root)) for path in paths)
data_context().register_payload_callback(git_callback)
@property
def python_executable(self):
"""
:rtype: str
"""
return find_python(self.python_version)
@property
def pip_command(self):
"""
:rtype: list[str]
"""
return generate_pip_command(self.python_executable)
def get_delegated_completion(self):
"""Returns a dictionary of settings specific to the selected delegation system, if any. Otherwise returns an empty dictionary.
:rtype: dict[str, str]
"""
if self.docker:
return get_docker_completion().get(self.docker_raw, {})
if self.remote:
return get_remote_completion().get(self.remote, {})
return {}
class TestConfig(EnvironmentConfig):
"""Configuration common to all test commands."""
def __init__(self, args, command):
"""
:type args: any
:type command: str
"""
super(TestConfig, self).__init__(args, command)
self.coverage = args.coverage # type: bool
self.coverage_label = args.coverage_label # type: str
self.coverage_check = args.coverage_check # type: bool
self.coverage_config_base_path = None # type: t.Optional[str]
self.include = args.include or [] # type: t.List[str]
self.exclude = args.exclude or [] # type: t.List[str]
self.require = args.require or [] # type: t.List[str]
self.changed = args.changed # type: bool
self.tracked = args.tracked # type: bool
self.untracked = args.untracked # type: bool
self.committed = args.committed # type: bool
self.staged = args.staged # type: bool
self.unstaged = args.unstaged # type: bool
self.changed_from = args.changed_from # type: str
self.changed_path = args.changed_path # type: t.List[str]
self.base_branch = args.base_branch # type: str
self.lint = args.lint if 'lint' in args else False # type: bool
self.junit = args.junit if 'junit' in args else False # type: bool
self.failure_ok = args.failure_ok if 'failure_ok' in args else False # type: bool
self.metadata = Metadata.from_file(args.metadata) if args.metadata else Metadata()
self.metadata_path = None
if self.coverage_check:
self.coverage = True
def metadata_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
"""Add the metadata file to the payload file list."""
config = self
if self.metadata_path:
files.append((os.path.abspath(config.metadata_path), config.metadata_path))
data_context().register_payload_callback(metadata_callback)
class ShellConfig(EnvironmentConfig):
"""Configuration for the shell command."""
def __init__(self, args):
"""
:type args: any
"""
super(ShellConfig, self).__init__(args, 'shell')
self.raw = args.raw # type: bool
if self.raw:
self.httptester = False
class SanityConfig(TestConfig):
"""Configuration for the sanity command."""
def __init__(self, args):
"""
:type args: any
"""
super(SanityConfig, self).__init__(args, 'sanity')
self.test = args.test # type: t.List[str]
self.skip_test = args.skip_test # type: t.List[str]
self.list_tests = args.list_tests # type: bool
self.allow_disabled = args.allow_disabled # type: bool
self.enable_optional_errors = args.enable_optional_errors # type: bool
self.info_stderr = self.lint
class IntegrationConfig(TestConfig):
"""Configuration for the integration command."""
def __init__(self, args, command):
"""
:type args: any
:type command: str
"""
super(IntegrationConfig, self).__init__(args, command)
self.start_at = args.start_at # type: str
self.start_at_task = args.start_at_task # type: str
self.allow_destructive = args.allow_destructive # type: bool
self.allow_root = args.allow_root # type: bool
self.allow_disabled = args.allow_disabled # type: bool
self.allow_unstable = args.allow_unstable # type: bool
self.allow_unstable_changed = args.allow_unstable_changed # type: bool
self.allow_unsupported = args.allow_unsupported # type: bool
self.retry_on_error = args.retry_on_error # type: bool
self.continue_on_error = args.continue_on_error # type: bool
self.debug_strategy = args.debug_strategy # type: bool
self.changed_all_target = args.changed_all_target # type: str
self.changed_all_mode = args.changed_all_mode # type: str
self.list_targets = args.list_targets # type: bool
self.tags = args.tags
self.skip_tags = args.skip_tags
self.diff = args.diff
self.no_temp_workdir = args.no_temp_workdir
self.no_temp_unicode = args.no_temp_unicode
if self.get_delegated_completion().get('temp-unicode', 'enabled') == 'disabled':
self.no_temp_unicode = True
if self.list_targets:
self.explain = True
self.info_stderr = True
def get_ansible_config(self): # type: () -> str
"""Return the path to the Ansible config for the given config."""
ansible_config_relative_path = os.path.join(data_context().content.integration_path, '%s.cfg' % self.command)
ansible_config_path = os.path.join(data_context().content.root, ansible_config_relative_path)
if not os.path.exists(ansible_config_path):
# use the default empty configuration unless one has been provided
ansible_config_path = super(IntegrationConfig, self).get_ansible_config()
return ansible_config_path
class PosixIntegrationConfig(IntegrationConfig):
"""Configuration for the posix integration command."""
def __init__(self, args):
"""
:type args: any
"""
super(PosixIntegrationConfig, self).__init__(args, 'integration')
class WindowsIntegrationConfig(IntegrationConfig):
"""Configuration for the windows integration command."""
def __init__(self, args):
"""
:type args: any
"""
super(WindowsIntegrationConfig, self).__init__(args, 'windows-integration')
self.windows = args.windows # type: t.List[str]
self.inventory = args.inventory # type: str
if self.windows:
self.allow_destructive = True
class NetworkIntegrationConfig(IntegrationConfig):
"""Configuration for the network integration command."""
def __init__(self, args):
"""
:type args: any
"""
super(NetworkIntegrationConfig, self).__init__(args, 'network-integration')
self.platform = args.platform # type: t.List[str]
self.platform_collection = dict(args.platform_collection or []) # type: t.Dict[str, str]
self.platform_connection = dict(args.platform_connection or []) # type: t.Dict[str, str]
self.inventory = args.inventory # type: str
self.testcase = args.testcase # type: str
class UnitsConfig(TestConfig):
"""Configuration for the units command."""
def __init__(self, args):
"""
:type args: any
"""
super(UnitsConfig, self).__init__(args, 'units')
self.collect_only = args.collect_only # type: bool
self.num_workers = args.num_workers # type: int
self.requirements_mode = args.requirements_mode if 'requirements_mode' in args else ''
if self.requirements_mode == 'only':
self.requirements = True
elif self.requirements_mode == 'skip':
self.requirements = False
|
azaghal/ansible
|
test/lib/ansible_test/_internal/config.py
|
Python
|
gpl-3.0
| 13,139 | 0.00274 |
# Copyright (C) Mesosphere, Inc. See LICENSE file for details.
"""MesosDNS mock endpoint"""
import copy
import logging
import re
from exceptions import EndpointException
from mocker.endpoints.recording import (
RecordingHTTPRequestHandler,
RecordingTcpIpEndpoint,
)
# pylint: disable=C0103
log = logging.getLogger(__name__)
# pylint: disable=R0903
class MesosDnsHTTPRequestHandler(RecordingHTTPRequestHandler):
"""Request handler that mimics MesosDNS
Depending on how it was set up, it will respond with different SRV
entries for preset services.
"""
SRV_QUERY_REGEXP = re.compile('^/v1/services/_([^_]+)._tcp.marathon.mesos$')
def _calculate_response(self, base_path, url_args, body_args=None):
"""Reply with the currently set mock-reply for given SRV record query.
Please refer to the description of the BaseHTTPRequestHandler class
for details on the arguments and return value of this method.
Raises:
EndpointException: request URL path is unsupported
"""
if base_path == '/v1/reflect/me':
# A test URI that is used by tests. In some cases it is impossible
# to reuse SRV record path.
return self._reflect_request(base_path, url_args, body_args)
match = self.SRV_QUERY_REGEXP.search(base_path)
if match:
return self.__srv_permissions_request_handler(match.group(1))
raise EndpointException(
code=500,
content="Path `{}` is not supported yet".format(base_path))
def __srv_permissions_request_handler(self, srvid):
"""Calculate reply for given service-ID
Arguments:
srvid (string): service ID to reply to
"""
ctx = self.server.context
if srvid not in ctx.data['services']:
raise EndpointException(
code=500,
content="Service `{}` is unknown".format(srvid))
blob = self._convert_data_to_blob(ctx.data['services'][srvid])
return 200, 'application/json', blob
def create_srv_entry(srv_name, ip, port):
"""Create a SRV entry based on the supplied data
Arguments:
srv_name (string): service ID that the new SRV-entry should represent
        ip (string): IP address that the new agent should pretend to listen on
        port (string): TCP/IP port that the new agent should pretend to listen on
Returns:
SRV entry dict mimicing the one returned by MesosDNS
"""
res = {}
res['service'] = "_{}._tcp.marathon.mesos".format(srv_name)
res['host'] = "{}-74b1w-s1.marathon.mesos.".format(srv_name)
res['ip'] = ip
res['port'] = port
return res
EMPTY_SRV = {
"scheduler-alwaysthere": [
{
"service": "",
"host": "",
"ip": "",
"port": "",
}
],
}
SCHEDULER_SRV_ALWAYSTHERE = {
"scheduler-alwaysthere": [
create_srv_entry("scheduler-alwaysthere", "127.0.0.1", 16000),
create_srv_entry("scheduler-alwaysthere", "127.0.0.1", 16002),
],
}
SCHEDULER_SRV_ALWAYSTHERE_DIFFERENTPORT = {
"scheduler-alwaysthere": [
create_srv_entry("scheduler-alwaysthere", "127.0.0.15", 16001),
create_srv_entry("scheduler-alwaysthere", "127.0.0.1", 16002),
],
}
SCHEDULER_SRV_ALWAYSTHERE_NEST1 = {
"scheduler-alwaysthere.nest1.nest2": [
create_srv_entry("scheduler-alwaysthere.nest1.nest2", "127.0.0.1", 18000),
create_srv_entry("scheduler-alwaysthere.nest1.nest2", "127.0.0.1", 16002),
],
}
SCHEDULER_SRV_ALWAYSTHERE_NEST2 = {
"scheduler-alwaysthere.nest1": [
create_srv_entry("scheduler-alwaysthere.nest1", "127.0.0.1", 17000),
create_srv_entry("scheduler-alwaysthere.nest1", "127.0.0.1", 16002),
],
}
SCHEDULER_SRV_ONLYMESOSDNS_NEST2 = {
"scheduler-onlymesosdns.nest1.nest2": [
create_srv_entry("scheduler-onlymesosdns.nest1.nest2", "127.0.0.1", 18003),
create_srv_entry("scheduler-onlymesosdns.nest1.nest2", "127.0.0.1", 16002),
],
}
INITIAL_SRVDATA = {}
INITIAL_SRVDATA.update(SCHEDULER_SRV_ALWAYSTHERE)
INITIAL_SRVDATA.update(SCHEDULER_SRV_ALWAYSTHERE_NEST1)
INITIAL_SRVDATA.update(SCHEDULER_SRV_ALWAYSTHERE_NEST2)
INITIAL_SRVDATA.update(SCHEDULER_SRV_ONLYMESOSDNS_NEST2)
# pylint: disable=R0903,C0103
class MesosDnsEndpoint(RecordingTcpIpEndpoint):
"""An endpoint that mimics DC/OS MesosDNS"""
def __init__(self, port, ip=''):
super().__init__(port, ip, MesosDnsHTTPRequestHandler)
self.__context_init()
def reset(self, *_):
"""Reset the endpoint to the default/initial state."""
with self._context.lock:
super().reset()
self.__context_init()
def set_srv_response(self, srvs):
"""Change the endpoint output so that it responds with a non-default
MesosDNS srv node.
"""
with self._context.lock:
self._context.data["services"] = srvs
def __context_init(self):
"""Helper function meant to initialize all the data relevant to this
particular type of endpoint"""
self._context.data["services"] = copy.deepcopy(INITIAL_SRVDATA)
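# Illustrative query against this mock (hypothetical host/port): a GET for
# /v1/services/_scheduler-alwaysthere._tcp.marathon.mesos returns the JSON
# SRV entries currently stored under the 'scheduler-alwaysthere' service key.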
|
asridharan/dcos
|
packages/adminrouter/extra/src/test-harness/modules/mocker/endpoints/mesos_dns.py
|
Python
|
apache-2.0
| 5,228 | 0.001148 |
# -*- coding: utf-8 -*-
import sys
import pytest
from textdata.eval import evaluation
_PY2 = sys.version_info[0] == 2
_PY26 = sys.version_info[:2] == (2, 6)
def test_evaluation_natural():
cases = [
(' 1 ', 1),
(' 1.1 \n ', 1.1),
(' gizmo \n\t \n', 'gizmo'),
]
if not _PY26:
cases.append((' 1+4j ', 1+4j))
# PY26 doesn't play nice with complex literals
# Not worth fighting over.
for value, expected in cases:
assert evaluation(value) == expected
assert evaluation(value.strip()) == expected
assert evaluation(value, 'natural') == expected
def test_evaluation_none():
cases = [
(' 1 ', 1),
(' 1.1 \n ', 1.1),
(' gizmo \n\t \n', 'gizmo'),
(' 1+4j ', 1+4j)
]
for value, _ in cases:
assert evaluation(value, None) == value
assert evaluation(value, 'none') == value
def test_evaluation_minimal():
cases = [
(' 1 ', '1'),
(' 1.1 \n ', '1.1'),
(' gizmo \n\t \n', 'gizmo'),
(' 1+4j ', '1+4j')
]
for value, expected in cases:
assert evaluation(value, 'minimal') == expected
assert evaluation(value, False) == expected
def test_evaluation_broken():
cases = [
(' 1 ', '1'),
(' 1.1 \n ', '1.1'),
(' gizmo \n\t \n', 'gizmo'),
(' 1+4j ', '1+4j')
]
for value, expected in cases:
with pytest.raises(ValueError):
assert evaluation(value, 'smork') == expected
with pytest.raises(ValueError):
assert evaluation(value, value) == expected
def test_py23_diff():
if _PY2:
assert evaluation('007', 'natural') == 7
else:
assert evaluation('007', 'natural') == '007'
def test_evaluation_func():
custom = lambda x: x.strip().upper()
def custom2(x):
return x.strip().upper()
assert evaluation(' haPpIly ', custom) == 'HAPPILY'
assert evaluation(' haPpIly ', custom2) == 'HAPPILY'
def test_evaluation_full():
cases = [
(' "1" ', 1),
(' "1.1" \n ', 1.1),
(' gizmo \n\t \n', 'gizmo'),
(' "gizmo" \n\t \n', 'gizmo'),
(' "and space " \n\t \n', 'and space '),
(' "a" ', 'a')
]
if not _PY26:
cases.append((' 1+4j ', 1+4j))
cases.append((' "1+4j" ', 1+4j))
# PY26 doesn't play nice with complex literals
# Not worth fighting over.
for value, expected in cases:
assert evaluation(value.strip(), 'full') == expected
def test_evaluation_exception():
def broken():
raise ValueError
assert evaluation(' mostly ', broken) == 'mostly'
|
jonathaneunice/textdata
|
test/test_eval.py
|
Python
|
apache-2.0
| 2,704 | 0.001109 |
#!/usr/bin/env python
#
# dfu.py: Access USB DFU class devices
# Copyright (C) 2009 Black Sphere Technologies
# Copyright (C) 2012 Transition Robotics Inc.
# Written by Gareth McMullin <gareth@blacksphere.co.nz>
# Modified by Piotr Esden-Tempski <piotr@esden.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import usb
DFU_DETACH_TIMEOUT = 1000
# DFU Requests
DFU_DETACH = 0x00
DFU_DNLOAD = 0x01
DFU_UPLOAD = 0x02
DFU_GETSTATUS = 0x03
DFU_CLRSTATUS = 0x04
DFU_GETSTATE = 0x05
DFU_ABORT = 0x06
# DFU States
STATE_APP_IDLE = 0x00
STATE_APP_DETACH = 0x01
STATE_DFU_IDLE = 0x02
STATE_DFU_DOWNLOAD_SYNC = 0x03
STATE_DFU_DOWNLOAD_BUSY = 0x04
STATE_DFU_DOWNLOAD_IDLE = 0x05
STATE_DFU_MANIFEST_SYNC = 0x06
STATE_DFU_MANIFEST = 0x07
STATE_DFU_MANIFEST_WAIT_RESET = 0x08
STATE_DFU_UPLOAD_IDLE = 0x09
STATE_DFU_ERROR = 0x0a
DFU_STATUS_OK = 0x00
# DFU status codes
DFU_STATUS_ERROR_TARGET = 0x01
DFU_STATUS_ERROR_FILE = 0x02
DFU_STATUS_ERROR_WRITE = 0x03
DFU_STATUS_ERROR_ERASE = 0x04
DFU_STATUS_ERROR_CHECK_ERASED = 0x05
DFU_STATUS_ERROR_PROG = 0x06
DFU_STATUS_ERROR_VERIFY = 0x07
DFU_STATUS_ERROR_ADDRESS = 0x08
DFU_STATUS_ERROR_NOTDONE = 0x09
DFU_STATUS_ERROR_FIRMWARE = 0x0a
DFU_STATUS_ERROR_VENDOR = 0x0b
DFU_STATUS_ERROR_USBR = 0x0c
DFU_STATUS_ERROR_POR = 0x0d
DFU_STATUS_ERROR_UNKNOWN = 0x0e
DFU_STATUS_ERROR_STALLEDPKT = 0x0f
class dfu_status(object):
def __init__(self, buf):
self.bStatus = buf[0]
self.bwPollTimeout = buf[1] + (buf[2]<<8) + (buf[3]<<16)
self.bState = buf[4]
self.iString = buf[5]
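# Decoding sketch: a GETSTATUS buffer of [0x00, 0x10, 0x27, 0x00, 0x05, 0x00]
# yields bStatus=DFU_STATUS_OK, bwPollTimeout=0x002710 (10000 ms, little-endian)
# and bState=STATE_DFU_DOWNLOAD_IDLE (0x05).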
class dfu_device(object):
def __init__(self, dev, conf, iface):
self.dev = dev
self.conf = conf
self.iface = iface
try:
self.handle = self.dev.open()
except:
raise
try:
self.handle.setConfiguration(conf)
except:
pass
try:
self.handle.claimInterface(iface.interfaceNumber)
except:
raise
if type(self.iface) is usb.Interface:
self.index = self.iface.interfaceNumber
else:
self.index = self.iface
def detach(self, wTimeout=255):
self.handle.controlMsg(usb.ENDPOINT_OUT | usb.TYPE_CLASS |
usb.RECIP_INTERFACE, DFU_DETACH,
None, value=wTimeout, index=self.index)
def download(self, wBlockNum, data):
self.handle.controlMsg(usb.ENDPOINT_OUT | usb.TYPE_CLASS |
usb.RECIP_INTERFACE, DFU_DNLOAD,
data, value=wBlockNum, index=self.index)
def upload(self, wBlockNum, length):
return self.handle.controlMsg(usb.ENDPOINT_IN | usb.TYPE_CLASS |
usb.RECIP_INTERFACE, DFU_UPLOAD,
length, value=wBlockNum,
index=self.index)
def get_status(self):
buf = self.handle.controlMsg(usb.ENDPOINT_IN | usb.TYPE_CLASS |
usb.RECIP_INTERFACE, DFU_GETSTATUS,
6, index=self.index, timeout=2000)
return dfu_status(buf)
def clear_status(self):
self.handle.controlMsg(usb.ENDPOINT_OUT | usb.TYPE_CLASS |
usb.RECIP_INTERFACE, DFU_CLRSTATUS,
"", index=0)
def get_state(self):
buf = self.handle.controlMsg(usb.ENDPOINT_IN | usb.TYPE_CLASS |
usb.RECIP_INTERFACE, DFU_GETSTATE,
1, index=self.index)
return buf[0]
def abort(self):
self.handle.controlMsg(usb.ENDPOINT_OUT | usb.TYPE_CLASS |
usb.RECIP_INTERFACE, DFU_ABORT,
None, index=self.index)
def make_idle(self):
retries = 3
while retries:
try:
status = self.get_status()
except:
self.clear_status()
continue
retries -= 1
if status.bState == STATE_DFU_IDLE:
return True
if ((status.bState == STATE_DFU_DOWNLOAD_SYNC) or
(status.bState == STATE_DFU_DOWNLOAD_IDLE) or
(status.bState == STATE_DFU_MANIFEST_SYNC) or
(status.bState == STATE_DFU_UPLOAD_IDLE) or
(status.bState == STATE_DFU_DOWNLOAD_BUSY) or
(status.bState == STATE_DFU_MANIFEST)):
self.abort()
continue
if status.bState == STATE_DFU_ERROR:
self.clear_status()
continue
if status.bState == STATE_APP_IDLE:
self.detach(DFU_DETACH_TIMEOUT)
continue
if ((status.bState == STATE_APP_DETACH) or
(status.bState == STATE_DFU_MANIFEST_WAIT_RESET)):
usb.reset(self.handle)
return False
        raise Exception("Unable to bring DFU device into idle state")
def finddevs():
devs = []
for bus in usb.busses():
for dev in bus.devices:
for conf in dev.configurations:
for ifaces in conf.interfaces:
for iface in ifaces:
if ((iface.interfaceClass == 0xFE) and
(iface.interfaceSubClass == 0x01)):
devs.append((dev, conf, iface))
return devs
if __name__ == "__main__":
devs = finddevs()
if not devs:
print("No devices found!")
exit(-1)
else:
print("Found %i devices." % len(devs))
for dfu in devs:
handle = dfu[0].open()
try:
man = handle.getString(dfu[0].iManufacturer, 30)
product = handle.getString(dfu[0].iProduct, 30)
serial = handle.getString(dfu[0].iSerialNumber, 40)
except Exception as e:
print("Could not access descriptions strings of a DFU device. " +
"Maybe the OS driver is claiming it?")
print("Exception:", e)
continue
print("Device %s: ID %04x:%04x %s - %s - %s" % (dfu[0].filename,
dfu[0].idVendor, dfu[0].idProduct, man, product, serial))
print("%r, %r" % (dfu[1], dfu[2]))
print("Finished scanning for devices.")
|
mmoiozo/mavcourse
|
sw/tools/dfu/dfu.py
|
Python
|
gpl-2.0
| 7,206 | 0.005551 |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import optparse
from webkitpy.layout_tests.port import mac
from webkitpy.layout_tests.port import port_testcase
class MacPortTest(port_testcase.PortTestCase):
os_name = 'mac'
os_version = 'mac10.11'
port_name = 'mac'
full_port_name = 'mac-mac10.11'
port_maker = mac.MacPort
def assert_name(self, port_name, os_version_string, expected):
port = self.make_port(os_version=os_version_string, port_name=port_name)
self.assertEqual(expected, port.name())
def test_operating_system(self):
self.assertEqual('mac', self.make_port().operating_system())
def test_build_path(self):
# Test that optional paths are used regardless of whether they exist.
options = optparse.Values({'configuration': 'Release', 'build_directory': '/foo'})
self.assert_build_path(options, ['/mock-checkout/out/Release'], '/foo/Release')
# Test that optional relative paths are returned unmodified.
options = optparse.Values({'configuration': 'Release', 'build_directory': 'foo'})
self.assert_build_path(options, ['/mock-checkout/out/Release'], 'foo/Release')
# Test that we prefer the legacy dir over the new dir.
options = optparse.Values({'configuration': 'Release', 'build_directory': None})
self.assert_build_path(options, ['/mock-checkout/xcodebuild/Release',
'/mock-checkout/out/Release'], '/mock-checkout/xcodebuild/Release')
def test_build_path_timestamps(self):
options = optparse.Values({'configuration': 'Release', 'build_directory': None})
port = self.make_port(options=options)
port.host.filesystem.maybe_make_directory('/mock-checkout/out/Release')
port.host.filesystem.maybe_make_directory('/mock-checkout/xcodebuild/Release')
# Check with 'out' being newer.
port.host.filesystem.mtime = lambda f: 5 if '/out/' in f else 4
self.assertEqual(port._build_path(), '/mock-checkout/out/Release')
# Check with 'xcodebuild' being newer.
port.host.filesystem.mtime = lambda f: 5 if '/xcodebuild/' in f else 4
self.assertEqual(port._build_path(), '/mock-checkout/xcodebuild/Release')
def test_driver_name_option(self):
self.assertTrue(self.make_port()._path_to_driver().endswith('Content Shell'))
self.assertTrue(self.make_port(options=optparse.Values(dict(driver_name='OtherDriver')))._path_to_driver().endswith('OtherDriver'))
def test_path_to_image_diff(self):
self.assertEqual(self.make_port()._path_to_image_diff(), '/mock-checkout/out/Release/image_diff')
def test_expectation_files(self):
# FIXME: crbug.com/589709 - Delete this test override once the 10.11 failures have been rebaselined or triaged.
pass
|
danakj/chromium
|
third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py
|
Python
|
bsd-3-clause
| 4,329 | 0.003234 |
from django.contrib import admin
# Register your models here.
from .models import DbQuery, StarsFilter
admin.site.register(DbQuery)
admin.site.register(StarsFilter)
|
mavrix93/LightCurvesClassifier
|
lcc_web/web/interface/admin.py
|
Python
|
mit
| 167 | 0.005988 |
from rest_framework import serializers
from drf_haystack.serializers import HaystackSerializerMixin
from .models import {{ cookiecutter.model_name }}
from .search_indexes import {{ cookiecutter.model_name }}Index
class {{ cookiecutter.model_name }}Serializer(serializers.ModelSerializer):
class Meta:
model = {{ cookiecutter.model_name }}
fields = '__all__'
class {{ cookiecutter.model_name }}SearchSerializer(HaystackSerializerMixin, {{ cookiecutter.model_name }}Serializer):
groupby_key = serializers.SerializerMethodField()
def get_groupby_key(self, obj):
return obj._meta.verbose_name_plural.title()
class Meta({{ cookiecutter.model_name }}Serializer.Meta):
index_classes = [{{ cookiecutter.model_name }}Index]
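# Usage sketch (illustrative, not part of the template): drf-haystack search
# serializers are typically paired with a HaystackViewSet, roughly:
#
#     from drf_haystack.viewsets import HaystackViewSet
#
#     class {{ cookiecutter.model_name }}SearchViewSet(HaystackViewSet):
#         index_models = [{{ cookiecutter.model_name }}]
#         serializer_class = {{ cookiecutter.model_name }}SearchSerializer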
|
rickydunlop/cookiecutter-django-app-template-drf-haystack
|
{{cookiecutter.app_name}}/serializers.py
|
Python
|
mit
| 771 | 0.022049 |
import socket
import asyncore
import pickle
from minissl.AbstractConnection import AbstractConnection
class PickleStreamWrapper(asyncore.dispatcher_with_send, AbstractConnection):
"""Buffers a stream until it contains valid data serialized by pickle.
    This is a bit of ugly glue code I had to come up with at the last minute.
    The SSL server and client were developed using a custom AbstractConnection
    to hide the actual communication channel.
    However, the AbstractConnection does not do fragmentation; it is expected
    to always send and receive all data at once. After trying to implement a
    TCP-based AbstractConnection type I noticed that the underlying
    fragmentation and buffering of TCP/IP breaks that pattern. Therefore this
    class has been written to glue the behavior of the AbstractConnection and
    the networking sockets together.
"""
def __init__(self, sock):
"""Creates a new PickleStream Wrapper for the underlying socket.
:param sock:
The underlying base socket
"""
asyncore.dispatcher_with_send.__init__(self, sock)
AbstractConnection.__init__(self)
self.rx_buffer = ''
self.tx_buffer = ''
def handle_read(self):
        new_data = self.recv(1024)
        self.rx_buffer += new_data
        try:
            # Try to load the buffer to see if it already contains something
            # that pickle understands. If it does not, keep buffering and
            # wait for the rest of the data to arrive.
            pickle.loads(self.rx_buffer)
        except Exception:
            return
        if self._receive_handler is not None:
            self._receive_handler(self, self.rx_buffer)
        # Clear the buffer
        self.rx_buffer = ''
def handle_close(self):
AbstractConnection.close(self)
asyncore.dispatcher_with_send.close(self)
def send(self, data):
"""Send all the data
:param data:
The data to send
To match the AbstractConnection API this has to redirect send to sendall
because send can not handle data that is larger than some 512 byte
buffer limit. sendall on the other hand can without a problem.
"""
self.socket.sendall(data)
class TcpDispatcher(asyncore.dispatcher):
"""A powerful TCP dispatcher based on asyncore to listen for incoming
connections.
See http://docs.python.org/2/library/asyncore.html for more information on
the library.
"""
def __init__(self, host, port, receive_callback):
"""Start a new dispatcher to listen on the given host socket
:param host:
The host interface to listen to
:param port:
The port to bind to
:param receive_callback:
This callback will be used to notify if an accepted TCP connection
sent any data
"""
asyncore.dispatcher.__init__(self)
self.receive_callback = receive_callback
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.listen(5)
def handle_accept(self):
"""Handle TCP accepts.
In this case if it is a valid accept a separate handler will be launched
that takes care of the rest of the messages being exchanged of the new
accepted connection.
"""
pair = self.accept()
if pair is not None:
sock, addr = pair
print 'Incoming connection from %s' % repr(addr)
wrapper = PickleStreamWrapper(sock)
wrapper.set_receive_handler(self.receive_callback)
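# Usage sketch (illustrative, not part of the original module): start a
# dispatcher and pump the asyncore event loop. The callback receives the
# connection and the raw pickled bytes, matching how PickleStreamWrapper
# invokes its receive handler.
#
#     def on_receive(connection, data):
#         print pickle.loads(data)
#
#     dispatcher = TcpDispatcher('localhost', 5000, on_receive)
#     asyncore.loop()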
|
vsaw/miniSSL
|
minissl/TcpDispatcher.py
|
Python
|
mit
| 3,729 | 0.001341 |
import hashlib
from vkAPI.mixins import *
from vkAPI.utils import *
class Session(object):
"""A Session object where you should put a default params which will always send"""
API_URL = 'https://api.vk.com/method/'
def __init__(self, access_token=None, secret=None):
"""
:except kwargs: such params as 'lang' or 'https'
:param access_token: access token
:param secret: a 'secret' parameter
:type access_token: str
:type secret: str
"""
self.access_token = access_token
self._session_request = VkRequest()
if secret:
self._secret = secret
def __str__(self):
return '<Session of vkAPI>'
@staticmethod
def get_md5_hash(string):
return hashlib.md5(string.encode()).hexdigest()
@staticmethod
def get_sig_data(method, secret, params=None):
if params is None:
params = {}
data = ''
for key, item in params.items():
data += str(key) + '=' + str(item) + '&'
data = data[:-1]
data += '&sig=' + Session.get_md5_hash('/method/' + method + '?' + data + secret)
return data
def _make_request(self, method_request):
req = self._send_request(method_request)
req.raise_for_status()
text = Parser(req.text).start()
for error_or_response in json_iter_parse(text):
if 'response' in error_or_response:
return error_or_response['response']
elif 'error' in error_or_response:
error = VkAPIError(error_or_response)
if error.is_access_token_incorrect():
self.access_token = None
return self._make_request(method_request)
raise error
def _send_request(self, request):
url = self.API_URL + request._method_name
method_args = request._api._method_default_args.copy()
method_args.update(request._method_args)
access_token = self.access_token
if access_token:
method_args['access_token'] = access_token
if hasattr(self, '_secret'):
if self._secret is not None:
method_args = self.get_sig_data(request._method_name, self._secret, method_args)
timeout = request._api._timeout
response = self._session_request.post(url, method_args, timeout=timeout)
return response
def __setattr__(self, key, value):
if key == 'API_URL':
raise AttributeError('"' + key + '" doesn\'t support assignment')
self.__dict__[key] = value
class API(object):
def __init__(self, session, timeout=10, v='5.68', **method_default_args):
"""
        :param session: a Session object
        :param timeout: request timeout in seconds, 10 by default
        :param v: API version
        :param method_default_args: default args that are always sent with every method call made through this API object
:type session: Session
:type timeout: int
:type v: str
"""
self._session = session
self._timeout = timeout
self._method_default_args = method_default_args
self._method_default_args.update({'v': v})
def __getattr__(self, method_name):
return Request(self, method_name)
def __call__(self, method_name, **method_kwargs):
return getattr(self, method_name)(**method_kwargs)
class Decorator(API):
def __getattr__(self, method_name):
return DecorRequest(self, method_name)
def __call__(self, method_name, **method_kwargs):
def decorator(func):
def wrapper(*args, **kwargs):
return func(*args, getattr(self, method_name)(**method_kwargs), **kwargs)
return wrapper
return decorator
class Request(object):
__slots__ = ('_api', '_method_name', '_method_args')
def __init__(self, api, method_name):
self._api = api
self._method_name = method_name
def __getattr__(self, method_name):
return Request(self._api, self._method_name + '.' + method_name)
def __call__(self, **method_args):
self._method_args = method_args
return self._api._session._make_request(self)
class DecorRequest(Request):
def __getattr__(self, method_name):
return DecorRequest(self._api, self._method_name + '.' + method_name)
def __call__(self, is_method=False, **method_args):
self._method_args = method_args
def decorator(func):
def wrapper(*args, **kwargs):
return func(*args, self._api._session._make_request(self), **kwargs)
return wrapper
return decorator
class AuthSession(AuthMixin, Session):
def __init__(self, user_login='', user_password='', app_id=2274003, scope='offline', client_secret='hHbZxrka2uZ6jB1inYsH',
lang='ru'):
AuthMixin.__init__(self, user_login, user_password, app_id, scope, client_secret, lang)
access_token = self.access_token
secret = self._secret
Session.__init__(self, access_token, secret)
self.access_token = access_token
def __setattr__(self, key, value):
if key == 'OAUTH_URL' or key == 'API_URL':
raise AttributeError('"' + key + '" doesn\'t support assignment')
self.__dict__[key] = value
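# Usage sketch (illustrative, not part of the original module): with an
# existing access token, API methods are reached through attribute chaining,
# so `api.users.get(...)` posts to https://api.vk.com/method/users.get.
#
#     session = Session(access_token='<token>')
#     api = API(session, v='5.68')
#     response = api.users.get(user_ids=1)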
|
sakost/vkAPI
|
vkAPI/API.py
|
Python
|
apache-2.0
| 5,360 | 0.002425 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Math Operations.
Note: Functions taking `Tensor` arguments can also take anything accepted by
`tf.convert_to_tensor`.
Note: Elementwise binary operations in TensorFlow follow [numpy-style
broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
TensorFlow provides a variety of math functions including:
* Basic arithmetic operators and trigonometric functions.
* Special math functions (like: `tf.math.igamma` and `tf.math.zeta`)
* Complex number functions (like: `tf.math.imag` and `tf.math.angle`)
* Reductions and scans (like: `tf.math.reduce_mean` and `tf.math.cumsum`)
* Segment functions (like: `tf.math.segment_sum`)
See: `tf.linalg` for matrix and tensor functions.
<a id=Segmentation></a>
## About Segmentation
TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here a segmentation is a partitioning of a tensor along
the first dimension, i.e. it defines a mapping from the first dimension onto
`segment_ids`. The `segment_ids` tensor should be the size of
the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
where `k<d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.segment_sum(c, tf.constant([0, 0, 1]))
# ==> [[0 0 0 0]
# [5 6 7 8]]
```
The standard `segment_*` functions assert that the segment indices are sorted.
If you have unsorted indices use the equivalent `unsorted_segment_` function.
These functions take an additional argument `num_segments` so that the output
tensor can be efficiently allocated.
``` python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[ 6, 8, 10, 12],
# [-1, -2, -3, -4]]
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import numpy as np
import six
from six.moves import builtins
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_bitwise_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
np_dtypes = LazyLoader(
"np_dtypes", globals(),
"tensorflow.python.ops.numpy_ops.np_dtypes")
# Aliases for some automatically-generated names.
nextafter = gen_math_ops.next_after
@tf_export("linspace", v1=["lin_space", "linspace"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("lin_space")
def linspace_nd(start, stop, num, name=None, axis=0):
r"""Generates evenly-spaced values in an interval along a given axis.
A sequence of `num` evenly-spaced values are generated beginning at `start`
along a given `axis`.
If `num > 1`, the values in the sequence increase by
`(stop - start) / (num - 1)`, so that the last one is exactly `stop`.
If `num <= 0`, `ValueError` is raised.
Matches
[np.linspace](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html)'s
behaviour
except when `num == 0`.
For example:
```
tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0]
```
`Start` and `stop` can be tensors of arbitrary size:
>>> tf.linspace([0., 5.], [10., 40.], 5, axis=0)
<tf.Tensor: shape=(5, 2), dtype=float32, numpy=
array([[ 0. , 5. ],
[ 2.5 , 13.75],
[ 5. , 22.5 ],
[ 7.5 , 31.25],
[10. , 40. ]], dtype=float32)>
`Axis` is where the values will be generated (the dimension in the
returned tensor which corresponds to the axis will be equal to `num`)
>>> tf.linspace([0., 5.], [10., 40.], 5, axis=-1)
<tf.Tensor: shape=(2, 5), dtype=float32, numpy=
array([[ 0. , 2.5 , 5. , 7.5 , 10. ],
[ 5. , 13.75, 22.5 , 31.25, 40. ]], dtype=float32)>
Args:
start: A `Tensor`. Must be one of the following types: `bfloat16`,
`float32`, `float64`. N-D tensor. First entry in the range.
stop: A `Tensor`. Must have the same type and shape as `start`. N-D tensor.
Last entry in the range.
num: A `Tensor`. Must be one of the following types: `int32`, `int64`. 0-D
tensor. Number of values to generate.
name: A name for the operation (optional).
axis: Axis along which the operation is performed (used only when N-D
tensors are provided).
Returns:
A `Tensor`. Has the same type as `start`.
"""
with ops.name_scope(name, "linspace", [start, stop]):
start = ops.convert_to_tensor(start, name="start")
# stop must be convertible to the same dtype as start
stop = ops.convert_to_tensor(stop, name="stop", dtype=start.dtype)
num_int = array_ops.convert_to_int_tensor(num, name="num")
num = cast(num_int, dtype=start.dtype)
broadcast_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(start), array_ops.shape(stop))
start = array_ops.broadcast_to(start, broadcast_shape)
stop = array_ops.broadcast_to(stop, broadcast_shape)
expanded_start = array_ops.expand_dims(start, axis=axis)
expanded_stop = array_ops.expand_dims(stop, axis=axis)
shape = array_ops.shape(expanded_start)
ndims = array_ops.shape(shape)[0]
axis = array_ops.where_v2(axis >= 0, axis, ndims + axis)
# The purpose is to avoid having negative values when repeating.
num_fill = gen_math_ops.maximum(num_int - 2, 0)
# To avoid having negative values in the range or zero division
# the result is sliced in the end so a correct result is returned for
# num == 1, and num == 0.
n_steps = gen_math_ops.maximum(num_int - 1, 1)
delta = (expanded_stop - expanded_start) / cast(n_steps,
expanded_stop.dtype)
# Re-cast tensors as delta.
expanded_start = cast(expanded_start, delta.dtype)
expanded_stop = cast(expanded_stop, delta.dtype)
# If num < 0, we will throw exception in the range
# otherwise use the same div for delta
range_end = array_ops.where_v2(num_int >= 0, n_steps, -1)
# Even though range supports an output dtype, its limited
# (e.g. doesn't support half at the moment).
desired_range = cast(range(1, range_end, dtype=dtypes.int64), delta.dtype)
mask = gen_math_ops.equal(axis, range(ndims))
# desired_range_shape is [1. 1. 1. ... 1. num_fill 1. 1. ... 1.], where the
# index of num_fill is equal to axis.
desired_range_shape = array_ops.where_v2(mask, num_fill, 1)
desired_range = array_ops.reshape(desired_range, desired_range_shape)
res = expanded_start + delta * desired_range
# Add the start and endpoints to the result, and slice out the desired
# portion.
all_tensors = (expanded_start, res, expanded_stop)
concatenated = array_ops.concat(all_tensors, axis=axis)
begin = array_ops.zeros_like(shape)
size = array_ops.where_v2(mask, num_int, shape)
return array_ops.slice(concatenated, begin, size)
linspace = linspace_nd
arg_max = deprecation.deprecated(None, "Use `tf.math.argmax` instead")(arg_max) # pylint: disable=used-before-assignment
arg_min = deprecation.deprecated(None, "Use `tf.math.argmin` instead")(arg_min) # pylint: disable=used-before-assignment
tf_export(v1=["arg_max"])(dispatch.add_dispatch_support(arg_max))
tf_export(v1=["arg_min"])(dispatch.add_dispatch_support(arg_min))
# This is set by resource_variable_ops.py. It is included in this way since
# there is a circular dependency between math_ops and resource_variable_ops
_resource_variable_type = None
def _set_doc(doc):
def _decorator(func):
func.__doc__ = doc
return func
return _decorator
# pylint: disable=redefined-builtin
@tf_export(v1=["math.argmax", "argmax"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
"dimension")
@_set_doc(
gen_math_ops.arg_max.__doc__.replace("dimensions",
"axes").replace("dimension", "axis"))
def argmax(input,
axis=None,
name=None,
dimension=None,
output_type=dtypes.int64):
axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension",
dimension)
return argmax_v2(input, axis, output_type, name)
@tf_export("math.argmax", "argmax", v1=[])
@dispatch.add_dispatch_support
def argmax_v2(input, axis=None, output_type=dtypes.int64, name=None):
"""Returns the index with the largest value across axes of a tensor.
In case of identity returns the smallest index.
For example:
>>> A = tf.constant([2, 20, 30, 3, 6])
>>> tf.math.argmax(A) # A[2] is maximum in tensor A
<tf.Tensor: shape=(), dtype=int64, numpy=2>
>>> B = tf.constant([[2, 20, 30, 3, 6], [3, 11, 16, 1, 8],
... [14, 45, 23, 5, 27]])
>>> tf.math.argmax(B, 0)
<tf.Tensor: shape=(5,), dtype=int64, numpy=array([2, 2, 0, 2, 2])>
>>> tf.math.argmax(B, 1)
<tf.Tensor: shape=(3,), dtype=int64, numpy=array([2, 2, 1])>
>>> C = tf.constant([0, 0, 0, 0])
>>> tf.math.argmax(C) # Returns smallest index in case of ties
<tf.Tensor: shape=(), dtype=int64, numpy=0>
Args:
input: A `Tensor`.
axis: An integer, the axis to reduce across. Default to 0.
output_type: An optional output dtype (`tf.int32` or `tf.int64`). Defaults
to `tf.int64`.
name: An optional name for the operation.
Returns:
A `Tensor` of type `output_type`.
"""
if axis is None:
axis = 0
return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type)
@tf_export(v1=["math.argmin", "argmin"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
"dimension")
@_set_doc(
gen_math_ops.arg_min.__doc__.replace("dimensions",
"axes").replace("dimension", "axis"))
def argmin(input,
axis=None,
name=None,
dimension=None,
output_type=dtypes.int64):
axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension",
dimension)
return argmin_v2(input, axis, output_type, name)
@tf_export("math.argmin", "argmin", v1=[])
@dispatch.add_dispatch_support
def argmin_v2(input, axis=None, output_type=dtypes.int64, name=None):
"""Returns the index with the smallest value across axes of a tensor.
Returns the smallest index in case of ties.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`,
`quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`,
`uint64`.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
int32 or int64, must be in the range `-rank(input), rank(input))`.
Describes which axis of the input Tensor to reduce across. For vectors,
use axis = 0.
output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to
`tf.int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `output_type`.
Usage:
```python
import tensorflow as tf
a = [1, 10, 26.9, 2.8, 166.32, 62.3]
b = tf.math.argmin(input = a)
c = tf.keras.backend.eval(b)
# c = 0
# here a[0] = 1 which is the smallest element of a across axis 0
```
"""
if axis is None:
axis = 0
return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type)
# pylint: enable=redefined-builtin
# pylint: disable=anomalous-backslash-in-string,protected-access
# pylint: disable=g-docstring-has-escape
@tf_export("math.abs", "abs")
@dispatch.add_dispatch_support
def abs(x, name=None): # pylint: disable=redefined-builtin
r"""Computes the absolute value of a tensor.
Given a tensor of integer or floating-point values, this operation returns a
tensor of the same type, where each element contains the absolute value of the
corresponding element in the input.
Given a tensor `x` of complex numbers, this operation returns a tensor of type
`float32` or `float64` that is the absolute value of each element in `x`. For
a complex number \\(a + bj\\), its absolute value is computed as
\\(\sqrt{a^2 + b^2}\\).
For example:
>>> # real number
>>> x = tf.constant([-2.25, 3.25])
>>> tf.abs(x)
<tf.Tensor: shape=(2,), dtype=float32,
numpy=array([2.25, 3.25], dtype=float32)>
>>> # complex number
>>> x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])
>>> tf.abs(x)
<tf.Tensor: shape=(2, 1), dtype=float64, numpy=
array([[5.25594901],
[6.60492241]])>
Args:
x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,
`int32`, `int64`, `complex64` or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` of the same size, type and sparsity as `x`,
with absolute values. Note, for `complex64` or `complex128` input, the
returned `Tensor` will be of type `float32` or `float64`, respectively.
"""
with ops.name_scope(name, "Abs", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_complex:
return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
return gen_math_ops._abs(x, name=name)
# pylint: enable=g-docstring-has-escape
# pylint: disable=redefined-builtin
def _bucketize(input, boundaries, name=None):
return gen_math_ops.bucketize(input=input, boundaries=boundaries, name=name)
# pylint: enable=redefined-builtin
class DivideDelegateWithName(object):
"""Use Python2/Python3 division delegation to implement divide for tensors."""
def __init__(self, x, name):
"""Construct DivideDelegateWithName.
Args:
x: Tensor to use as left operand in operator overloads
name: The name that is preferred for the op created.
"""
self.x = x
self.name = name
def __truediv__(self, y):
return _truediv_python3(self.x, y, self.name)
def __floordiv__(self, y):
return floordiv(self.x, y, self.name)
def __div__(self, y):
return _div_python2(self.x, y, self.name)
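# Sketch of how the delegate above is used: `divide(x, y, name="my_div")` below
# wraps `x` so the `/` overload can carry the requested op name, i.e.
#
#     DivideDelegateWithName(x, "my_div") / y   # calls _truediv_python3(x, y, "my_div")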
@tf_export("math.divide", "divide")
@dispatch.add_dispatch_support
def divide(x, y, name=None):
"""Computes Python style division of `x` by `y`.
For example:
>>> x = tf.constant([16, 12, 11])
>>> y = tf.constant([4, 6, 2])
>>> tf.divide(x,y)
<tf.Tensor: shape=(3,), dtype=float64,
numpy=array([4. , 2. , 5.5])>
Args:
x: A `Tensor`
y: A `Tensor`
name: A name for the operation (optional).
Returns:
A `Tensor` with same shape as input
"""
if name is not None:
# Cannot use tensors operator overload, because it has no way to track
# override names. Use a dummy class to track the runtime division behavior
return DivideDelegateWithName(x, name) / y
else:
# We do conversion here to make sure at least x is a tensor.
if not tensor_util.is_tf_type(x):
dtype = y.dtype.base_dtype if tensor_util.is_tf_type(y) else None
x = ops.convert_to_tensor(x, dtype=dtype)
return x / y
@tf_export("math.multiply", "multiply")
@dispatch.add_dispatch_support
def multiply(x, y, name=None):
"""Returns an element-wise x * y.
For example:
>>> x = tf.constant(([1, 2, 3, 4]))
>>> tf.math.multiply(x, x)
<tf.Tensor: shape=(4,), dtype=..., numpy=array([ 1, 4, 9, 16], dtype=int32)>
Since `tf.math.multiply` will convert its arguments to `Tensor`s, you can also
pass in non-`Tensor` arguments:
>>> tf.math.multiply(7,6)
<tf.Tensor: shape=(), dtype=int32, numpy=42>
If `x.shape` is not the same as `y.shape`, they will be broadcast to a
compatible shape. (More about broadcasting
[here](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).)
For example:
>>> x = tf.ones([1, 2]);
>>> y = tf.ones([2, 1]);
>>> x * y # Taking advantage of operator overriding
<tf.Tensor: shape=(2, 2), dtype=float32, numpy=
array([[1., 1.],
[1., 1.]], dtype=float32)>
The reduction version of this elementwise operation is `tf.math.reduce_prod`
Args:
x: A Tensor. Must be one of the following types: `bfloat16`,
`half`, `float32`, `float64`, `uint8`, `int8`, `uint16`,
`int16`, `int32`, `int64`, `complex64`, `complex128`.
y: A `Tensor`. Must have the same type as `x`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
Raises:
* InvalidArgumentError: When `x` and `y` have incompatible shapes or types.
"""
return gen_math_ops.mul(x, y, name)
# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
"2016-12-30",
"`tf.mul(x, y)` is deprecated; use `tf.math.multiply(x, y)` or `x * y`")
def _mul(x, y, name=None):
return gen_math_ops.mul(x, y, name)
_mul.__doc__ = (
gen_math_ops.mul.__doc__ + ("" if _mul.__doc__ is None else _mul.__doc__))
@tf_export("math.subtract", "subtract")
@dispatch.add_dispatch_support
def subtract(x, y, name=None):
return gen_math_ops.sub(x, y, name)
subtract.__doc__ = gen_math_ops.sub.__doc__
# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
"2016-12-30",
"`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`")
def _sub(x, y, name=None):
return gen_math_ops.sub(x, y, name)
_sub.__doc__ = (
gen_math_ops.sub.__doc__ + ("" if _sub.__doc__ is None else _sub.__doc__))
negative = gen_math_ops.neg
# pylint: disable=g-docstring-has-escape
@deprecation.deprecated(
"2016-12-30",
"`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`")
def _neg(x, name=None):
"""Computes numerical negative value element-wise.
I.e., \\(y = -x\\).
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
return negative(x, name)
# pylint: enable=g-docstring-has-escape
@tf_export(v1=["math.scalar_mul", "scalar_mul"])
@dispatch.add_dispatch_support
def scalar_mul(scalar, x, name=None):
"""Multiplies a scalar times a `Tensor` or `IndexedSlices` object.
Intended for use in gradient code which might deal with `IndexedSlices`
objects, which are easy to multiply by a scalar but more expensive to
multiply with arbitrary tensors.
Args:
scalar: A 0-D scalar `Tensor`. Must have known shape.
x: A `Tensor` or `IndexedSlices` to be scaled.
name: A name for the operation (optional).
Returns:
`scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.
Raises:
ValueError: if scalar is not a 0-D `scalar`.
"""
scalar = ops.convert_to_tensor(
scalar, dtype=x.dtype.base_dtype, name="scalar")
shape = scalar.get_shape()
if shape.ndims == 0:
if isinstance(x, ops.IndexedSlices):
return ops.IndexedSlices(
gen_math_ops.mul(scalar, x.values, name), x.indices, x.dense_shape)
else:
return gen_math_ops.mul(scalar, x, name)
else:
raise ValueError("Only scalar multiply works, got shape %s" % shape)
@tf_export("math.scalar_mul", "scalar_mul", v1=[])
@dispatch.add_dispatch_support
@_set_doc(scalar_mul.__doc__)
def scalar_mul_v2(scalar, x, name=None):
with ops.name_scope(name, "scalar_mul", [x]) as name:
return scalar_mul(scalar, x, name)
@tf_export("math.pow", "pow")
@dispatch.add_dispatch_support
def pow(x, y, name=None): # pylint: disable=redefined-builtin
r"""Computes the power of one value to another.
Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
corresponding elements in `x` and `y`. For example:
```python
x = tf.constant([[2, 2], [3, 3]])
y = tf.constant([[8, 16], [2, 3]])
tf.pow(x, y) # [[256, 65536], [9, 27]]
```
Args:
x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
`complex64`, or `complex128`.
y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
`complex64`, or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`.
"""
with ops.name_scope(name, "Pow", [x]) as name:
return gen_math_ops._pow(x, y, name=name)
# pylint: disable=redefined-builtin,redefined-outer-name
@tf_export("dtypes.complex", "complex")
@dispatch.add_dispatch_support
def complex(real, imag, name=None):
r"""Converts two real numbers to a complex number.
Given a tensor `real` representing the real part of a complex number, and a
tensor `imag` representing the imaginary part of a complex number, this
operation returns complex numbers elementwise of the form \\(a + bj\\), where
*a* represents the `real` part and *b* represents the `imag` part.
The input tensors `real` and `imag` must have the same shape.
For example:
```python
real = tf.constant([2.25, 3.25])
imag = tf.constant([4.75, 5.75])
tf.complex(real, imag) # [[2.25 + 4.75j], [3.25 + 5.75j]]
```
Args:
real: A `Tensor`. Must be one of the following types: `float32`, `float64`.
imag: A `Tensor`. Must have the same type as `real`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `complex64` or `complex128`.
Raises:
TypeError: Real and imag must be correct types
"""
real = ops.convert_to_tensor(real, name="real")
imag = ops.convert_to_tensor(imag, name="imag")
with ops.name_scope(name, "Complex", [real, imag]) as name:
input_types = (real.dtype, imag.dtype)
if input_types == (dtypes.float64, dtypes.float64):
Tout = dtypes.complex128
elif input_types == (dtypes.float32, dtypes.float32):
Tout = dtypes.complex64
else:
raise TypeError("real and imag have incorrect types: "
"{} {}".format(real.dtype.name, imag.dtype.name))
return gen_math_ops._complex(real, imag, Tout=Tout, name=name)
@tf_export("math.sign", "sign")
@dispatch.add_dispatch_support
def sign(x, name=None):
r"""Returns an element-wise indication of the sign of a number.
`y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0`.
For complex numbers, `y = sign(x) = x / |x| if x != 0, otherwise y = 0`.
Example usage:
>>> # real number
>>> tf.math.sign([0., 2., -3.])
<tf.Tensor: shape=(3,), dtype=float32,
numpy=array([ 0., 1., -1.], dtype=float32)>
>>> # complex number
>>> tf.math.sign([1 + 1j, 0 + 0j])
<tf.Tensor: shape=(2,), dtype=complex128,
numpy=array([0.70710678+0.70710678j, 0. +0.j ])>
Args:
x: A Tensor. Must be one of the following types: bfloat16, half, float32,
float64, int32, int64, complex64, complex128.
name: A name for the operation (optional).
Returns:
A Tensor. Has the same type as x.
If x is a SparseTensor, returns SparseTensor(x.indices,
tf.math.sign(x.values, ...), x.dense_shape).
"""
x = ops.convert_to_tensor(x)
if x.dtype.is_complex:
return gen_math_ops.div_no_nan(
x,
cast(
gen_math_ops.complex_abs(
x,
Tout=dtypes.float32
if x.dtype == dtypes.complex64 else dtypes.float64),
dtype=x.dtype),
name=name)
return gen_math_ops.sign(x, name=name)
@tf_export("math.real", v1=["math.real", "real"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("real")
@dispatch.add_dispatch_support
def real(input, name=None):
r"""Returns the real part of a complex (or real) tensor.
Given a tensor `input`, this operation returns a tensor of type `float` that
is the real part of each element in `input` considered as a complex number.
For example:
```python
x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
tf.math.real(x) # [-2.25, 3.25]
```
If `input` is already real, it is returned unchanged.
Args:
input: A `Tensor`. Must have numeric type.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Real", [input]) as name:
input = ops.convert_to_tensor(input, name="input")
if input.dtype.is_complex:
real_dtype = input.dtype.real_dtype
return gen_math_ops.real(input, Tout=real_dtype, name=name)
else:
return input
@tf_export("math.imag", v1=["math.imag", "imag"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("imag")
@dispatch.add_dispatch_support
def imag(input, name=None):
r"""Returns the imaginary part of a complex (or real) tensor.
Given a tensor `input`, this operation returns a tensor of type `float` that
is the imaginary part of each element in `input` considered as a complex
number. If `input` is real, a tensor of all zeros is returned.
For example:
```python
x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
tf.math.imag(x) # [4.75, 5.75]
```
Args:
input: A `Tensor`. Must be one of the following types: `float`, `double`,
`complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Imag", [input]) as name:
input = ops.convert_to_tensor(input, name="input")
if input.dtype.is_complex:
return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
else:
return array_ops.zeros_like(input)
@tf_export("math.angle", v1=["math.angle", "angle"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("angle")
@dispatch.add_dispatch_support
def angle(input, name=None):
r"""Returns the element-wise argument of a complex (or real) tensor.
Given a tensor `input`, this operation returns a tensor of type `float` that
is the argument of each element in `input` considered as a complex number.
The elements in `input` are considered to be complex numbers of the form
\\(a + bj\\), where *a* is the real part and *b* is the imaginary part.
If `input` is real then *b* is zero by definition.
The argument returned by this function is of the form \\(atan2(b, a)\\).
If `input` is real, a tensor of all zeros is returned.
For example:
```
input = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j], dtype=tf.complex64)
tf.math.angle(input).numpy()
# ==> array([2.0131705, 1.056345 ], dtype=float32)
```
Args:
input: A `Tensor`. Must be one of the following types: `float`, `double`,
`complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Angle", [input]) as name:
input = ops.convert_to_tensor(input, name="input")
if input.dtype.is_complex:
return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name)
else:
return array_ops.where(input < 0, np.pi * array_ops.ones_like(input),
array_ops.zeros_like(input))
# pylint: enable=redefined-outer-name,redefined-builtin
@tf_export("math.round", "round")
@dispatch.add_dispatch_support
def round(x, name=None): # pylint: disable=redefined-builtin
"""Rounds the values of a tensor to the nearest integer, element-wise.
  Rounds half to even. Also known as banker's rounding. If you want to round
according to the current system rounding mode use tf::cint.
For example:
```python
x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])
tf.round(x) # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
```
Args:
x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as `x`.
"""
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_integer:
return x
else:
return gen_math_ops.round(x, name=name)
@tf_export("cast", "dtypes.cast")
@dispatch.add_dispatch_support
def cast(x, dtype, name=None):
"""Casts a tensor to a new type.
The operation casts `x` (in case of `Tensor`) or `x.values`
(in case of `SparseTensor` or `IndexedSlices`) to `dtype`.
For example:
>>> x = tf.constant([1.8, 2.2], dtype=tf.float32)
>>> tf.cast(x, tf.int32)
<tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>
Notice `tf.cast` has an alias `tf.dtypes.cast`:
>>> x = tf.constant([1.8, 2.2], dtype=tf.float32)
>>> tf.dtypes.cast(x, tf.int32)
<tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>
The operation supports data types (for `x` and `dtype`) of
`uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,
`float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`.
In case of casting from complex types (`complex64`, `complex128`) to real
types, only the real part of `x` is returned. In case of casting from real
types to complex types (`complex64`, `complex128`), the imaginary part of the
returned value is set to `0`. The handling of complex types here matches the
behavior of numpy.
Note casting nan and inf values to integral types has undefined behavior.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It could
be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`,
`int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`,
`bfloat16`.
dtype: The destination type. The list of supported dtypes is the same as
`x`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and
same type as `dtype`.
Raises:
TypeError: If `x` cannot be cast to the `dtype`.
"""
base_type = dtypes.as_dtype(dtype).base_dtype
if isinstance(x,
(ops.Tensor, _resource_variable_type)) and base_type == x.dtype:
return x
with ops.name_scope(name, "Cast", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
values_cast = cast(x.values, base_type, name=name)
x = sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
elif isinstance(x, ops.IndexedSlices):
values_cast = cast(x.values, base_type, name=name)
x = ops.IndexedSlices(values_cast, x.indices, x.dense_shape)
else:
# TODO(josh11b): If x is not already a Tensor, we could return
# ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
# allows some conversions that cast() can't do, e.g. casting numbers to
# strings.
x = ops.convert_to_tensor(x, name="x")
if x.dtype.base_dtype != base_type:
x = gen_math_ops.cast(x, base_type, name=name)
if x.dtype.is_complex and base_type.is_floating:
logging.warn("Casting complex to real discards imaginary part.")
return x
@tf_export("dtypes.saturate_cast", "saturate_cast")
@dispatch.add_dispatch_support
def saturate_cast(value, dtype, name=None):
"""Performs a safe saturating cast of `value` to `dtype`.
This function casts the input to `dtype` without applying any scaling. If
there is a danger that values would over or underflow in the cast, this op
applies the appropriate clamping before the cast.
Args:
value: A `Tensor`.
dtype: The desired output `DType`.
name: A name for the operation (optional).
Returns:
`value` safely cast to `dtype`.
"""
# When casting to a type with smaller representable range, clamp.
# Note that this covers casting to unsigned types as well.
with ops.name_scope(name, "saturate_cast", [value]) as name:
value = ops.convert_to_tensor(value, name="value")
dtype = dtypes.as_dtype(dtype).base_dtype
if value.dtype.min < dtype.min:
value = gen_math_ops.maximum(
value,
ops.convert_to_tensor(dtype.min, dtype=value.dtype, name="min"))
if value.dtype.max > dtype.max:
value = gen_math_ops.minimum(
value,
ops.convert_to_tensor(dtype.max, dtype=value.dtype, name="max"))
return cast(value, dtype, name=name)
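# Example (sketch): values outside the target dtype's representable range are
# clamped rather than wrapped:
#
#     tf.dtypes.saturate_cast([-1.0, 300.0], tf.uint8)   # => [0, 255]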
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_float"])
@dispatch.add_dispatch_support
def to_float(x, name="ToFloat"):
"""Casts a tensor to type `float32`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `float32`.
Raises:
TypeError: If `x` cannot be cast to the `float32`.
"""
return cast(x, dtypes.float32, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_double"])
@dispatch.add_dispatch_support
def to_double(x, name="ToDouble"):
"""Casts a tensor to type `float64`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `float64`.
Raises:
TypeError: If `x` cannot be cast to the `float64`.
"""
return cast(x, dtypes.float64, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_int32"])
@dispatch.add_dispatch_support
def to_int32(x, name="ToInt32"):
"""Casts a tensor to type `int32`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `int32`.
Raises:
TypeError: If `x` cannot be cast to the `int32`.
"""
return cast(x, dtypes.int32, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_int64"])
@dispatch.add_dispatch_support
def to_int64(x, name="ToInt64"):
"""Casts a tensor to type `int64`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `int64`.
Raises:
TypeError: If `x` cannot be cast to the `int64`.
"""
return cast(x, dtypes.int64, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_bfloat16"])
@dispatch.add_dispatch_support
def to_bfloat16(x, name="ToBFloat16"):
"""Casts a tensor to type `bfloat16`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `bfloat16`.
Raises:
TypeError: If `x` cannot be cast to the `bfloat16`.
"""
return cast(x, dtypes.bfloat16, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_complex64"])
@dispatch.add_dispatch_support
def to_complex64(x, name="ToComplex64"):
"""Casts a tensor to type `complex64`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `complex64`.
Raises:
TypeError: If `x` cannot be cast to the `complex64`.
"""
return cast(x, dtypes.complex64, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_complex128"])
@dispatch.add_dispatch_support
def to_complex128(x, name="ToComplex128"):
"""Casts a tensor to type `complex128`.
Args:
x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
type `complex128`.
Raises:
TypeError: If `x` cannot be cast to the `complex128`.
"""
return cast(x, dtypes.complex128, name=name)
ops.Tensor._override_operator("__neg__", gen_math_ops.neg)
ops.Tensor._override_operator("__abs__", abs)
def _maybe_get_dtype(x):
"""Returns a numpy type if available from x. Skips if x is numpy.ndarray."""
# Don't put np.ndarray in this list, because np.result_type looks at the
# value (not just dtype) of np.ndarray to decide the result type.
if isinstance(x, numbers.Real):
return x
if isinstance(x, ops.Tensor):
return x.dtype.as_numpy_dtype
if isinstance(x, dtypes.DType):
return x.as_numpy_dtype
if isinstance(x, tensor_shape.TensorShape):
return np.int32
if isinstance(x, (list, tuple)):
raise ValueError("Got sequence {}".format(x))
return x
def maybe_promote_tensors(*tensors, force_same_dtype=True):
"""Promote tensors if numpy style promotion is enabled."""
if not tensors:
return tensors
if not ops._numpy_style_type_promotion:
if not force_same_dtype:
return tensors
promoted_tensors = []
promoted_tensors.append(tensors[0])
dtype = tensors[0].dtype.base_dtype
for tensor in tensors[1:]:
promoted_tensors.append(
ops.convert_to_tensor(tensor, dtype, name="x"))
return promoted_tensors
result_type = np_dtypes._result_type(
*[_maybe_get_dtype(x) for x in nest.flatten(tensors)])
def _promote_or_cast(x):
if isinstance(x, ops.Tensor):
x = cast(x, result_type)
else:
x = ops.convert_to_tensor(x, result_type)
return x
return [_promote_or_cast(x) for x in tensors]
def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
"""Register operators with different tensor and scalar versions.
If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.
Args:
func: the operator
op_name: name of the operator being overridden
clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
"""
def binary_op_wrapper(x, y):
with ops.name_scope(None, op_name, [x, y]) as name:
try:
# force_same_dtype=False to preserve existing TF behavior
# TODO(b/178860388): Figure out why binary_op_wrapper and
# r_binary_op_wrapper use different force_same_dtype values.
x, y = maybe_promote_tensors(x, y, force_same_dtype=False)
return func(x, y, name=name)
except (TypeError, ValueError) as e:
# Even if dispatching the op failed, the RHS may be a tensor aware
# object that can implement the operator with knowledge of itself
# and the tensor.
# If the RHS is not tensor aware we still want to raise the
# original error from the LHS, because it may be more
# informative.
if hasattr(type(y), "__r%s__" % op_name):
try:
r_op = getattr(y, "__r%s__" % op_name)
out = r_op(x)
if out is NotImplemented:
raise
return out
except (TypeError, ValueError):
raise e
else:
raise
def binary_op_wrapper_sparse(sp_x, y):
with ops.name_scope(None, op_name, [sp_x, y]) as name:
y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
return sparse_tensor.SparseTensor(
sp_x.indices,
func(sp_x.indices, sp_x.values, sp_x.dense_shape, y, name=name),
sp_x.dense_shape)
def r_binary_op_wrapper(y, x):
with ops.name_scope(None, op_name, [x, y]) as name:
# TODO(b/178860388): Figure out why binary_op_wrapper and
# r_binary_op_wrapper use different force_same_dtype values.
y, x = maybe_promote_tensors(y, x)
return func(x, y, name=name)
# Propagate func.__doc__ to the wrappers
try:
doc = func.__doc__
except AttributeError:
doc = None
binary_op_wrapper.__doc__ = doc
r_binary_op_wrapper.__doc__ = doc
binary_op_wrapper_sparse.__doc__ = doc
if clazz_object is ops.Tensor:
clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
del binary_op_wrapper
clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
del r_binary_op_wrapper
else:
clazz_object._override_operator("__%s__" % op_name,
binary_op_wrapper_sparse)
del binary_op_wrapper_sparse
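# Sketch: after the registrations near the bottom of this file, e.g.
# `_OverrideBinaryOperatorHelper(_add_dispatch, "add")`, an expression such as
# `tensor_a + tensor_b` resolves to binary_op_wrapper, while `2 + tensor_b`
# (non-Tensor on the left) goes through the reflected r_binary_op_wrapper.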
# Conversion table for __truediv__. None entries mean no conversion required.
_TRUEDIV_TABLE = {
dtypes.uint8: dtypes.float32,
dtypes.int8: dtypes.float32,
dtypes.uint16: dtypes.float32,
dtypes.int16: dtypes.float32,
dtypes.int32: dtypes.float64,
dtypes.int64: dtypes.float64,
dtypes.bfloat16: None,
dtypes.float16: None,
dtypes.float32: None,
dtypes.float64: None,
dtypes.complex64: None,
dtypes.complex128: None,
}
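# Example (sketch): per the table above, int32 true division is computed in
# float64:
#
#     tf.constant(1) / tf.constant(2)   # => 0.5 as a float64 tensor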
# NOTE: the support of "sparse (true)div dense" is currently not baked in into
# "tf.(true_)div()". Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
"""Internal helper function for 'sp_t / dense_t'."""
with ops.name_scope(name, "truediv",
[sp_indices, sp_values, sp_shape, y]) as name:
sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
y = ops.convert_to_tensor(y, name="y")
x_dtype = sp_values.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
sp_values = cast(sp_values, dtype)
y = cast(y, dtype)
return gen_sparse_ops.sparse_dense_cwise_div(
sp_indices, sp_values, sp_shape, y, name=name)
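# Example (sketch): the SparseTensor "/" override registered near the bottom of
# this file routes through the helper above:
#
#     sp = tf.sparse.SparseTensor(indices=[[0, 0]], values=[4], dense_shape=[2, 2])
#     sp / tf.constant(2)   # SparseTensor with values [2.0] (int32 -> float64)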
def _truediv_python3(x, y, name=None):
with ops.name_scope(name, "truediv", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
x = cast(x, dtype)
y = cast(y, dtype)
return gen_math_ops.real_div(x, y, name=name)
def _div_python2(x, y, name=None):
"""Divide two values using Python 2 semantics.
Used for Tensor.__div__.
Args:
x: `Tensor` numerator of real numeric type.
y: `Tensor` denominator of real numeric type.
name: A name for the operation (optional).
Returns:
`x / y` returns the quotient of x and y.
"""
with ops.name_scope(name, "div", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
if x_dtype.is_floating or x_dtype.is_complex:
return gen_math_ops.real_div(x, y, name=name)
else:
return gen_math_ops.floor_div(x, y, name=name)
@tf_export("math.truediv", "truediv")
@dispatch.add_dispatch_support
def truediv(x, y, name=None):
"""Divides x / y elementwise (using Python 3 division operator semantics).
NOTE: Prefer using the Tensor operator or tf.divide which obey Python
division operator semantics.
This function forces Python 3 division operator semantics where all integer
arguments are cast to floating types first. This op is generated by normal
`x / y` division in Python 3 and in Python 2.7 with
`from __future__ import division`. If you want integer division that rounds
down, use `x // y` or `tf.math.floordiv`.
`x` and `y` must have the same numeric type. If the inputs are floating
point, the output will have the same type. If the inputs are integral, the
inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
and `int64` (matching the behavior of Numpy).
Args:
x: `Tensor` numerator of numeric type.
y: `Tensor` denominator of numeric type.
name: A name for the operation (optional).
Returns:
`x / y` evaluated in floating point.
Raises:
TypeError: If `x` and `y` have different dtypes.
"""
return _truediv_python3(x, y, name)
@deprecation.deprecated(
date=None,
instructions="Deprecated in favor of operator or tf.math.divide.")
@tf_export(v1=["div"])
@dispatch.add_dispatch_support
def div(x, y, name=None):
"""Divides x / y elementwise (using Python 2 division operator semantics).
NOTE: Prefer using the Tensor division operator or tf.divide which obey Python
3 division operator semantics.
This function divides `x` and `y`, forcing Python 2 semantics. That is, if `x`
and `y` are both integers then the result will be an integer. This is in
contrast to Python 3, where division with `/` is always a float while division
with `//` is always an integer.
Args:
x: `Tensor` numerator of real numeric type.
y: `Tensor` denominator of real numeric type.
name: A name for the operation (optional).
Returns:
`x / y` returns the quotient of x and y.
"""
return _div_python2(x, y, name)
@tf_export("math.divide_no_nan", v1=["math.divide_no_nan", "div_no_nan"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("div_no_nan")
@dispatch.add_dispatch_support
def div_no_nan(x, y, name=None):
"""Computes a safe divide which returns 0 if the y is zero.
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
y: A `Tensor` whose dtype is compatible with `x`.
name: A name for the operation (optional).
Returns:
The element-wise value of the x divided by y.
"""
with ops.name_scope(name, "div_no_nan", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
return gen_math_ops.div_no_nan(x, y, name=name)
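# Example (sketch): a zero denominator yields 0 instead of inf or nan:
#
#     tf.math.divide_no_nan(3.0, 0.0)   # => 0.0
#     tf.math.divide_no_nan(3.0, 2.0)   # => 1.5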
@tf_export("math.multiply_no_nan")
@dispatch.add_dispatch_support
def multiply_no_nan(x, y, name=None):
"""Computes the product of x and y and returns 0 if the y is zero, even if x is NaN or infinite.
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
y: A `Tensor` whose dtype is compatible with `x`.
name: A name for the operation (optional).
Returns:
The element-wise value of the x times y.
"""
with ops.name_scope(name, "multiply_no_nan", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
return gen_math_ops.mul_no_nan(x, y, name=name)
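# Usage sketch (assuming `import tensorflow as tf`; expected values only):
#   x = tf.constant([float("nan"), float("inf"), 2.0])
#   y = tf.constant([0.0, 0.0, 3.0])
#   tf.math.multiply_no_nan(x, y)  # -> [0.0, 0.0, 6.0]; zeros in y win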
# TODO(aselle): This should be removed
mod = gen_math_ops.floor_mod
# TODO(aselle): Deprecate this once all internal functionality uses
# tf.truncatediv
@tf_export("math.floordiv", v1=["math.floordiv", "floordiv"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("floordiv")
def floordiv(x, y, name=None):
"""Divides `x / y` elementwise, rounding toward the most negative integer.
The same as `tf.compat.v1.div(x,y)` for integers, but uses
`tf.floor(tf.compat.v1.div(x,y))` for
floating point arguments so that the result is always an integer (though
possibly an integer represented as floating point). This op is generated by
`x // y` floor division in Python 3 and in Python 2.7 with
`from __future__ import division`.
`x` and `y` must have the same type, and the result will have the same type
as well.
Args:
x: `Tensor` numerator of real numeric type.
y: `Tensor` denominator of real numeric type.
name: A name for the operation (optional).
Returns:
`x / y` rounded down.
Raises:
TypeError: If the inputs are complex.
"""
with ops.name_scope(name, "floordiv", [x, y]) as name:
return gen_math_ops.floor_div(x, y, name=name)
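# Rounding-toward-negative-infinity sketch (assuming `import tensorflow as
# tf`; expected values only):
#   tf.math.floordiv(7, 2)      # -> 3
#   tf.math.floordiv(-7, 2)     # -> -4, not -3: rounds toward most negative
#   tf.math.floordiv(7.0, 2.0)  # -> 3.0 (integer value, floating dtype)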
realdiv = gen_math_ops.real_div
truncatediv = gen_math_ops.truncate_div
# TODO(aselle): Rename this to floordiv when we can.
floor_div = gen_math_ops.floor_div
truncatemod = gen_math_ops.truncate_mod
floormod = gen_math_ops.floor_mod
@tf_export("__operators__.add", v1=[])
@dispatch.add_dispatch_support
def _add_dispatch(x, y, name=None):
"""The operation invoked by the `Tensor.__add__` operator.
Purpose in the API:
This method is exposed in TensorFlow's API so that library developers
can register dispatching for `Tensor.__add__` to allow it to handle
custom composite tensors & other custom objects.
The API symbol is not intended to be called by users directly and does
appear in TensorFlow's generated documentation.
Args:
x: The left-hand side of the `+` operator.
y: The right-hand side of the `+` operator.
name: an optional name for the operation.
Returns:
The result of the elementwise `+` operation.
"""
if not isinstance(y, ops.Tensor) and not isinstance(
y, sparse_tensor.SparseTensor):
y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
if x.dtype == dtypes.string:
return gen_math_ops.add(x, y, name=name)
else:
return gen_math_ops.add_v2(x, y, name=name)
def _mul_dispatch(x, y, name=None):
"""Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
if isinstance(y, sparse_tensor.SparseTensor): # Case: Dense * Sparse.
new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
y.dense_shape, x, name)
return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)
else:
return multiply(x, y, name=name)
# NOTE(aselle): When integer division is added for sparse_dense_cwise,
# div, truediv, and floordiv should be delegated appropriately for
# Python semantics, analogous to dense cwise tensor operations.
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(_add_dispatch, "add")
_OverrideBinaryOperatorHelper(subtract, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(div, "div")
_OverrideBinaryOperatorHelper(truediv, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")
@tf_export("math.logical_xor", v1=["math.logical_xor", "logical_xor"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("logical_xor")
def logical_xor(x, y, name="LogicalXor"):
"""Logical XOR function.
x ^ y = (x | y) & ~(x & y)
Requires that `x` and `y` have the same shape or have
[broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
shapes. For example, `x` and `y` can be:
- Two single elements of type `bool`
- One `tf.Tensor` of type `bool` and one single `bool`, where the result will
be calculated by applying logical XOR with the single element to each
element in the larger Tensor.
- Two `tf.Tensor` objects of type `bool` of the same shape. In this case,
the result will be the element-wise logical XOR of the two input tensors.
Usage:
>>> a = tf.constant([True])
>>> b = tf.constant([False])
>>> tf.math.logical_xor(a, b)
<tf.Tensor: shape=(1,), dtype=bool, numpy=array([ True])>
>>> c = tf.constant([True])
>>> x = tf.constant([False, True, True, False])
>>> tf.math.logical_xor(c, x)
<tf.Tensor: shape=(4,), dtype=bool, numpy=array([ True, False, False, True])>
>>> y = tf.constant([False, False, True, True])
>>> z = tf.constant([False, True, False, True])
>>> tf.math.logical_xor(y, z)
<tf.Tensor: shape=(4,), dtype=bool, numpy=array([False, True, True, False])>
Args:
x: A `tf.Tensor` of type bool.
y: A `tf.Tensor` of type bool.
name: A name for the operation (optional).
Returns:
A `tf.Tensor` of type bool with the same size as that of x or y.
"""
# TODO(alemi) Make this a cwise op if people end up relying on it.
return gen_math_ops.logical_and(
gen_math_ops.logical_or(x, y),
gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)),
name=name)
def and_(x, y, name=None):
if x.dtype == dtypes.bool:
return gen_math_ops.logical_and(x, y, name)
return gen_bitwise_ops.bitwise_and(x, y)
def or_(x, y, name=None):
if x.dtype == dtypes.bool:
return gen_math_ops.logical_or(x, y, name)
return gen_bitwise_ops.bitwise_or(x, y)
def xor_(x, y, name=None):
if x.dtype == dtypes.bool:
return logical_xor(x, y, name)
return gen_bitwise_ops.bitwise_xor(x, y)
def invert_(x, name=None):
if x.dtype == dtypes.bool:
return gen_math_ops.logical_not(x, name=name)
return gen_bitwise_ops.invert(x, name=name)
_OverrideBinaryOperatorHelper(and_, "and")
_OverrideBinaryOperatorHelper(or_, "or")
_OverrideBinaryOperatorHelper(xor_, "xor")
ops.Tensor._override_operator("__invert__", invert_)
def _promote_dtypes_decorator(fn):
def wrapper(x, y, *args, **kwargs):
x, y = maybe_promote_tensors(x, y, force_same_dtype=False)
return fn(x, y, *args, **kwargs)
return tf_decorator.make_decorator(fn, wrapper)
ops.Tensor._override_operator("__lt__", _promote_dtypes_decorator(
gen_math_ops.less))
ops.Tensor._override_operator("__le__", _promote_dtypes_decorator(
gen_math_ops.less_equal))
ops.Tensor._override_operator("__gt__", _promote_dtypes_decorator(
gen_math_ops.greater))
ops.Tensor._override_operator("__ge__", _promote_dtypes_decorator(
gen_math_ops.greater_equal))
@tf_export("math.equal", "equal")
@dispatch.add_dispatch_support
def equal(x, y, name=None):
"""Returns the truth value of (x == y) element-wise.
Performs a [broadcast](
https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the
arguments and then an element-wise equality comparison, returning a Tensor of
boolean values.
For example:
>>> x = tf.constant([2, 4])
>>> y = tf.constant(2)
>>> tf.math.equal(x, y)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
>>> x = tf.constant([2, 4])
>>> y = tf.constant([2, 4])
>>> tf.math.equal(x, y)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, True])>
Args:
x: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.
y: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `tf.Tensor` of type bool with the same size as that of x or y.
Raises:
`tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible
"""
return gen_math_ops.equal(x, y, name=name)
@tf_export("math.not_equal", "not_equal")
@dispatch.add_dispatch_support
def not_equal(x, y, name=None):
"""Returns the truth value of (x != y) element-wise.
Performs a [broadcast](
https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the
arguments and then an element-wise inequality comparison, returning a Tensor
of boolean values.
For example:
>>> x = tf.constant([2, 4])
>>> y = tf.constant(2)
>>> tf.math.not_equal(x, y)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, True])>
>>> x = tf.constant([2, 4])
>>> y = tf.constant([2, 4])
>>> tf.math.not_equal(x, y)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
Args:
x: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.
y: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `tf.Tensor` of type bool with the same size as that of x or y.
Raises:
`tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible
"""
return gen_math_ops.not_equal(x, y, name=name)
@tf_export("__operators__.eq", v1=[])
@dispatch.add_dispatch_support
def tensor_equals(self, other):
"""The operation invoked by the `Tensor.__eq__` operator.
Compares two tensors element-wise for equality if they are
broadcast-compatible; or returns False if they are not broadcast-compatible.
(Note that this behavior differs from `tf.math.equal`, which raises an
exception if the two tensors are not broadcast-compatible.)
Purpose in the API:
This method is exposed in TensorFlow's API so that library developers
can register dispatching for `Tensor.__eq__` to allow it to handle
custom composite tensors & other custom objects.
The API symbol is not intended to be called by users directly and does
appear in TensorFlow's generated documentation.
Args:
self: The left-hand side of the `==` operator.
other: The right-hand side of the `==` operator.
Returns:
The result of the elementwise `==` operation, or `False` if the arguments
are not broadcast-compatible.
"""
if other is None:
return False
g = getattr(self, "graph", None)
if (ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions() and
(g is None or g.building_function)):
self, other = maybe_promote_tensors(self, other)
return gen_math_ops.equal(self, other, incompatible_shape_error=False)
else:
# In legacy graph mode, tensor equality is object equality
return self is other
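# Behavior sketch for the `==` override (assuming TF 2.x eager execution;
# expected values only):
#   tf.constant([1, 2]) == tf.constant([1, 3])     # -> [True, False]
#   tf.constant([1, 2]) == None                    # -> False (a plain bool)
#   tf.constant([1, 2]) == tf.constant([1, 2, 3])  # -> False: shapes (2,)
#   # and (3,) are not broadcast-compatible, so False is returned instead of
#   # raising, unlike tf.math.equal.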
@tf_export("__operators__.ne", v1=[])
@dispatch.add_dispatch_support
def tensor_not_equals(self, other):
"""The operation invoked by the `Tensor.__ne__` operator.
Compares two tensors element-wise for inequality if they are
broadcast-compatible; or returns True if they are not broadcast-compatible.
(Note that this behavior differs from `tf.math.not_equal`, which raises an
exception if the two tensors are not broadcast-compatible.)
Purpose in the API:
This method is exposed in TensorFlow's API so that library developers
can register dispatching for `Tensor.__ne__` to allow it to handle
custom composite tensors & other custom objects.
The API symbol is not intended to be called by users directly and does
appear in TensorFlow's generated documentation.
Args:
self: The left-hand side of the `!=` operator.
other: The right-hand side of the `!=` operator.
Returns:
The result of the elementwise `!=` operation, or `True` if the arguments
are not broadcast-compatible.
"""
if other is None:
return True
if ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions():
self, other = maybe_promote_tensors(self, other)
return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)
else:
# In legacy graph mode, tensor equality is object equality
return self is not other
ops.Tensor._override_operator("__eq__", tensor_equals)
ops.Tensor._override_operator("__ne__", tensor_not_equals)
@tf_export("range")
@dispatch.add_dispatch_support
def range(start, limit=None, delta=1, dtype=None, name="range"): # pylint: disable=redefined-builtin
"""Creates a sequence of numbers.
Creates a sequence of numbers that begins at `start` and extends by
increments of `delta` up to but not including `limit`.
The dtype of the resulting tensor is inferred from the inputs unless
it is provided explicitly.
Like the Python builtin `range`, `start` defaults to 0, so that
`range(n) = range(0, n)`.
For example:
>>> start = 3
>>> limit = 18
>>> delta = 3
>>> tf.range(start, limit, delta)
<tf.Tensor: shape=(5,), dtype=int32,
numpy=array([ 3, 6, 9, 12, 15], dtype=int32)>
>>> start = 3
>>> limit = 1
>>> delta = -0.5
>>> tf.range(start, limit, delta)
<tf.Tensor: shape=(4,), dtype=float32,
numpy=array([3. , 2.5, 2. , 1.5], dtype=float32)>
>>> limit = 5
>>> tf.range(limit)
<tf.Tensor: shape=(5,), dtype=int32,
numpy=array([0, 1, 2, 3, 4], dtype=int32)>
Args:
start: A 0-D `Tensor` (scalar). Acts as first entry in the range if `limit`
is not None; otherwise, acts as range limit and first entry defaults to 0.
limit: A 0-D `Tensor` (scalar). Upper limit of sequence, exclusive. If None,
defaults to the value of `start` while the first entry of the range
defaults to 0.
delta: A 0-D `Tensor` (scalar). Number that increments `start`. Defaults to
1.
dtype: The type of the elements of the resulting tensor.
name: A name for the operation. Defaults to "range".
Returns:
An 1-D `Tensor` of type `dtype`.
@compatibility(numpy)
Equivalent to np.arange
@end_compatibility
"""
if limit is None:
start, limit = 0, start
with ops.name_scope(name, "Range", [start, limit, delta]) as name:
if not isinstance(start, ops.Tensor):
start = ops.convert_to_tensor(start, dtype=dtype, name="start")
if not isinstance(limit, ops.Tensor):
limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
if not isinstance(delta, ops.Tensor):
delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")
# infer dtype if not explicitly provided
if dtype is None:
dtype_hierarchy = [
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
]
assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])
inferred_dtype = max([arg.dtype for arg in [start, limit, delta]],
key=dtype_hierarchy.index)
else:
inferred_dtype = dtype
# Always try to perform a cast even when start/limit/delta are already
# tensors. This will resolve the case where start/limit/delta's original's
# dtype is different from provided dtype.
start = cast(start, inferred_dtype)
limit = cast(limit, inferred_dtype)
delta = cast(delta, inferred_dtype)
return gen_math_ops._range(start, limit, delta, name=name)
def _range_tensor_conversion_function(value, dtype=None, name=None,
as_ref=False):
del as_ref
return range(value.start, value.stop, value.step, dtype=dtype, name=name)
if not six.PY2:
ops.register_tensor_conversion_function(builtins.range,
_range_tensor_conversion_function)
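# With this conversion registered, a Python 3 `range` object can be passed
# anywhere a tensor is expected, since `convert_to_tensor` will route it
# through the function above. Sketch (assuming `import tensorflow as tf`;
# expected value only):
#   tf.math.reduce_sum(range(5))  # -> 10, same as summing tf.range(5)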
# Reduction operations
def _ReductionDims(x, axis): # pylint: disable=invalid-name
"""Returns range(0, rank(x)) if axis is None."""
if axis is not None:
return axis
else:
x_rank = None
if isinstance(x, ops.Tensor):
x_rank = x.shape.rank
elif (isinstance(x, sparse_tensor.SparseTensor) and
x.dense_shape.shape.is_fully_defined()):
x_rank = x.dense_shape.shape.dims[0].value # sparse.dense_shape is 1-D.
# Fast path: avoid creating Rank and Range ops if ndims is known.
if x_rank:
return constant_op.constant(np.arange(x_rank, dtype=np.int32))
else:
# Otherwise, we rely on Range and Rank to do the right thing at run-time.
return range(0, array_ops.rank(x))
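# Sketch of what this helper resolves to (expected values only):
#   _ReductionDims(tf.ones([2, 3]), axis=1)     # -> 1 (passed through)
#   _ReductionDims(tf.ones([2, 3]), axis=None)  # -> constant [0, 1], i.e.
#   # every axis, computed without emitting Rank/Range ops because the
#   # static rank is known.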
def _has_fully_defined_shape(tensor):
"""Returns true if tensor has a fully defined shape."""
return isinstance(tensor, ops.EagerTensor) or tensor.shape.is_fully_defined()
def _may_reduce_to_scalar(keepdims, axis, output):
"""Set a reduction's output shape to be a scalar if we are certain."""
if not _has_fully_defined_shape(output) and (not keepdims) and (
axis is None):
output.set_shape(())
return output
@tf_export(v1=["math.reduce_sum", "reduce_sum"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_sum_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the sum of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.add` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> # x has a shape of (2, 3) (two rows and three columns):
>>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
>>> x.numpy()
array([[1, 1, 1],
[1, 1, 1]], dtype=int32)
>>> # sum all the elements
>>> # 1 + 1 + 1 + 1 + 1+ 1 = 6
>>> tf.reduce_sum(x).numpy()
6
>>> # reduce along the first dimension
>>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
>>> tf.reduce_sum(x, 0).numpy()
array([2, 2, 2], dtype=int32)
>>> # reduce along the second dimension
>>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3]
>>> tf.reduce_sum(x, 1).numpy()
array([3, 3], dtype=int32)
>>> # keep the original dimensions
>>> tf.reduce_sum(x, 1, keepdims=True).numpy()
array([[3],
[3]], dtype=int32)
>>> # reduce along both dimensions
>>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6
>>> # or, equivalently, reduce along rows, then reduce the resultant array
>>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
>>> # 2 + 2 + 2 = 6
>>> tf.reduce_sum(x, [0, 1]).numpy()
6
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor, of the same dtype as the input_tensor.
@compatibility(numpy)
Equivalent to np.sum, apart from the fact that numpy upcasts uint8 and int32
to int64 while TensorFlow returns the same dtype as the input.
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_sum(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_sum", "reduce_sum", v1=[])
@dispatch.add_dispatch_support
def reduce_sum(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the sum of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.add` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> # x has a shape of (2, 3) (two rows and three columns):
>>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
>>> x.numpy()
array([[1, 1, 1],
[1, 1, 1]], dtype=int32)
>>> # sum all the elements
>>> # 1 + 1 + 1 + 1 + 1+ 1 = 6
>>> tf.reduce_sum(x).numpy()
6
>>> # reduce along the first dimension
>>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
>>> tf.reduce_sum(x, 0).numpy()
array([2, 2, 2], dtype=int32)
>>> # reduce along the second dimension
>>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3]
>>> tf.reduce_sum(x, 1).numpy()
array([3, 3], dtype=int32)
>>> # keep the original dimensions
>>> tf.reduce_sum(x, 1, keepdims=True).numpy()
array([[3],
[3]], dtype=int32)
>>> # reduce along both dimensions
>>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6
>>> # or, equivalently, reduce along rows, then reduce the resultant array
>>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
>>> # 2 + 2 + 2 = 6
>>> tf.reduce_sum(x, [0, 1]).numpy()
6
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor, of the same dtype as the input_tensor.
@compatibility(numpy)
Equivalent to np.sum, apart from the fact that numpy upcasts uint8 and int32
to int64 while TensorFlow returns the same dtype as the input.
@end_compatibility
"""
return reduce_sum_with_dims(input_tensor, axis, keepdims, name,
_ReductionDims(input_tensor, axis))
def reduce_sum_with_dims(input_tensor,
axis=None,
keepdims=False,
name=None,
dims=None):
keepdims = False if keepdims is None else bool(keepdims)
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops._sum(input_tensor, dims, keepdims, name=name))
@tf_export("math.reduce_euclidean_norm")
@dispatch.add_dispatch_support
def reduce_euclidean_norm(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the Euclidean norm of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[1, 2, 3], [1, 1, 1]]) # x.dtype is tf.int32
tf.math.reduce_euclidean_norm(x) # returns 4 as dtype is tf.int32
y = tf.constant([[1, 2, 3], [1, 1, 1]], dtype = tf.float32)
tf.math.reduce_euclidean_norm(y) # returns 4.1231055 which is sqrt(17)
tf.math.reduce_euclidean_norm(y, 0) # [sqrt(2), sqrt(5), sqrt(10)]
tf.math.reduce_euclidean_norm(y, 1) # [sqrt(14), sqrt(3)]
tf.math.reduce_euclidean_norm(y, 1, keepdims=True) # [[sqrt(14)], [sqrt(3)]]
tf.math.reduce_euclidean_norm(y, [0, 1]) # sqrt(17)
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor, of the same dtype as the input_tensor.
"""
keepdims = bool(keepdims)
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops.euclidean_norm(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export(v1=["math.count_nonzero", "count_nonzero"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
@deprecation.deprecated_args(
None, "reduction_indices is deprecated, use axis instead",
"reduction_indices")
def count_nonzero(input_tensor=None,
axis=None,
keepdims=None,
dtype=dtypes.int64,
name=None,
reduction_indices=None,
keep_dims=None,
input=None): # pylint: disable=redefined-builtin
"""Computes number of nonzero elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
**NOTE** Floating point comparison to zero is done by exact floating point
equality check. Small values are **not** rounded to zero for purposes of
the nonzero check.
For example:
```python
x = tf.constant([[0, 1, 0], [1, 1, 0]])
tf.math.count_nonzero(x) # 3
tf.math.count_nonzero(x, 0) # [1, 2, 0]
tf.math.count_nonzero(x, 1) # [1, 2]
tf.math.count_nonzero(x, 1, keepdims=True) # [[1], [2]]
tf.math.count_nonzero(x, [0, 1]) # 3
```
**NOTE** Strings are compared against zero-length empty string `""`. Any
string with a size greater than zero is considered nonzero.
For example:
```python
x = tf.constant(["", "a", " ", "b", ""])
tf.math.count_nonzero(x) # 3, with "a", " ", and "b" as nonzero strings.
```
Args:
input_tensor: The tensor to reduce. Should be of numeric type, `bool`, or
`string`.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
dtype: The output dtype; defaults to `tf.int64`.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
input: Overrides input_tensor. For compatibility.
Returns:
The reduced tensor (number of nonzero values).
"""
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
input_tensor = deprecation.deprecated_argument_lookup("input", input,
"input_tensor",
input_tensor)
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
return count_nonzero_v2(input_tensor, axis, keepdims, dtype, name)
@tf_export("math.count_nonzero", v1=[])
@dispatch.add_dispatch_support
def count_nonzero_v2(
input, # pylint: disable=redefined-builtin
axis=None,
keepdims=None,
dtype=dtypes.int64,
name=None):
"""Computes number of nonzero elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
**NOTE** Floating point comparison to zero is done by exact floating point
equality check. Small values are **not** rounded to zero for purposes of
the nonzero check.
For example:
```python
x = tf.constant([[0, 1, 0], [1, 1, 0]])
tf.math.count_nonzero(x) # 3
tf.math.count_nonzero(x, 0) # [1, 2, 0]
tf.math.count_nonzero(x, 1) # [1, 2]
tf.math.count_nonzero(x, 1, keepdims=True) # [[1], [2]]
tf.math.count_nonzero(x, [0, 1]) # 3
```
**NOTE** Strings are compared against zero-length empty string `""`. Any
string with a size greater than zero is considered nonzero.
For example:
```python
x = tf.constant(["", "a", " ", "b", ""])
tf.math.count_nonzero(x) # 3, with "a", " ", and "b" as nonzero strings.
```
Args:
input: The tensor to reduce. Should be of numeric type, `bool`, or `string`.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input), rank(input))`.
keepdims: If true, retains reduced dimensions with length 1.
dtype: The output dtype; defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
The reduced tensor (number of nonzero values).
"""
if keepdims is None:
keepdims = False
with ops.name_scope(name, "count_nonzero", [input]):
input = ops.convert_to_tensor(input, name="input")
# A scalar of 'zero' is enough as `not_equal` will broadcast.
zero = array_ops.zeros([], dtype=input.dtype)
return cast(
reduce_sum(
# int64 reduction happens on GPU
cast(gen_math_ops.not_equal(input, zero), dtypes.int64),
axis=axis,
keepdims=keepdims),
dtype=dtype)
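# The reduction above is just "compare against zero, cast, sum". An
# equivalent hand-rolled sketch (assuming `import tensorflow as tf`;
# expected values only):
#   x = tf.constant([[0, 1, 0], [1, 1, 0]])
#   tf.reduce_sum(tf.cast(tf.not_equal(x, 0), tf.int64))  # -> 3, matching
#   tf.math.count_nonzero(x)                              # -> 3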
@tf_export(v1=["math.reduce_mean", "reduce_mean"])
@dispatch.add_dispatch_support
def reduce_mean_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the mean of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis` by computing the
mean of elements across the dimensions in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a tensor with a single
element is returned.
For example:
>>> x = tf.constant([[1., 1.], [2., 2.]])
>>> tf.reduce_mean(x)
<tf.Tensor: shape=(), dtype=float32, numpy=1.5>
>>> tf.reduce_mean(x, 0)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
>>> tf.reduce_mean(x, 1)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.mean
Please note that `np.mean` has a `dtype` parameter that could be used to
specify the output type. By default this is `dtype=float64`. On the other
hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
for example:
>>> x = tf.constant([1, 0, 1, 0])
>>> tf.reduce_mean(x)
<tf.Tensor: shape=(), dtype=int32, numpy=0>
>>> y = tf.constant([1., 0., 1., 0.])
>>> tf.reduce_mean(y)
<tf.Tensor: shape=(), dtype=float32, numpy=0.5>
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_mean(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_mean", "reduce_mean", v1=[])
@dispatch.add_dispatch_support
def reduce_mean(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the mean of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis` by computing the
mean of elements across the dimensions in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a tensor with a single
element is returned.
For example:
>>> x = tf.constant([[1., 1.], [2., 2.]])
>>> tf.reduce_mean(x)
<tf.Tensor: shape=(), dtype=float32, numpy=1.5>
>>> tf.reduce_mean(x, 0)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
>>> tf.reduce_mean(x, 1)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.mean
Please note that `np.mean` has a `dtype` parameter that could be used to
specify the output type. By default this is `dtype=float64`. On the other
hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
for example:
>>> x = tf.constant([1, 0, 1, 0])
>>> tf.reduce_mean(x)
<tf.Tensor: shape=(), dtype=int32, numpy=0>
>>> y = tf.constant([1., 0., 1., 0.])
>>> tf.reduce_mean(y)
<tf.Tensor: shape=(), dtype=float32, numpy=0.5>
@end_compatibility
"""
keepdims = False if keepdims is None else bool(keepdims)
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops.mean(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export("math.reduce_variance")
@dispatch.add_dispatch_support
def reduce_variance(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the variance of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> x = tf.constant([[1., 2.], [3., 4.]])
>>> tf.math.reduce_variance(x)
<tf.Tensor: shape=(), dtype=float32, numpy=1.25>
>>> tf.math.reduce_variance(x, 0)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], ...)>
>>> tf.math.reduce_variance(x, 1)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.25, 0.25], ...)>
Args:
input_tensor: The tensor to reduce. Should have real or complex type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name scope for the associated operations (optional).
Returns:
The reduced tensor, of the same dtype as the input_tensor. Note, for
`complex64` or `complex128` input, the returned `Tensor` will be of type
`float32` or `float64`, respectively.
@compatibility(numpy)
Equivalent to np.var
Please note `np.var` has a `dtype` parameter that could be used to specify the
output type. By default this is `dtype=float64`. On the other hand,
`tf.math.reduce_variance` has aggressive type inference from `input_tensor`.
@end_compatibility
"""
name = name if name else "reduce_variance"
with ops.name_scope(name):
means = reduce_mean(input_tensor, axis=axis, keepdims=True)
if means.dtype.is_integer:
raise TypeError("Input must be either real or complex")
diff = input_tensor - means
if diff.dtype.is_complex:
# For complex values we need to take the absolute value before squaring.
# This is achieved by multiplying with the conjugate.
real_dtype = diff.dtype.real_dtype
squared_deviations = gen_math_ops.real(
gen_math_ops.mul(gen_math_ops.conj(diff), diff), Tout=real_dtype)
else:
squared_deviations = gen_math_ops.square(diff)
return reduce_mean(squared_deviations, axis=axis, keepdims=keepdims)
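# Sketch of the complex-input path described above (assuming `import
# tensorflow as tf`; expected value only):
#   z = tf.constant([1 + 1j, 1 - 1j], dtype=tf.complex64)
#   tf.math.reduce_variance(z)  # -> 1.0 as float32: the squared deviations
#   # are computed as conj(diff) * diff, so the result dtype is real.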
@tf_export("math.reduce_std")
@dispatch.add_dispatch_support
def reduce_std(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the standard deviation of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> x = tf.constant([[1., 2.], [3., 4.]])
>>> tf.math.reduce_std(x)
<tf.Tensor: shape=(), dtype=float32, numpy=1.118034>
>>> tf.math.reduce_std(x, 0)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], dtype=float32)>
>>> tf.math.reduce_std(x, 1)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.5, 0.5], dtype=float32)>
Args:
input_tensor: The tensor to reduce. Should have real or complex type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name scope for the associated operations (optional).
Returns:
The reduced tensor, of the same dtype as the input_tensor. Note, for
`complex64` or `complex128` input, the returned `Tensor` will be of type
`float32` or `float64`, respectively.
@compatibility(numpy)
Equivalent to np.std
Please note `np.std` has a `dtype` parameter that could be used to specify the
output type. By default this is `dtype=float64`. On the other hand,
`tf.math.reduce_std` has aggressive type inference from `input_tensor`.
@end_compatibility
"""
name = name if name else "reduce_std"
with ops.name_scope(name):
variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims)
return gen_math_ops.sqrt(variance)
@tf_export("math.reduce_prod", "reduce_prod", v1=[])
@dispatch.add_dispatch_support
def reduce_prod(input_tensor, axis=None, keepdims=False, name=None):
"""Computes `tf.math.multiply` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.multiply` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> x = tf.constant([[1., 2.], [3., 4.]])
>>> tf.math.reduce_prod(x)
<tf.Tensor: shape=(), dtype=float32, numpy=24.>
>>> tf.math.reduce_prod(x, 0)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([3., 8.], dtype=float32)>
>>> tf.math.reduce_prod(x, 1)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 12.],
dtype=float32)>
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.prod
@end_compatibility
"""
keepdims = False if keepdims is None else bool(keepdims)
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops.prod(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export(v1=["math.reduce_prod", "reduce_prod"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_prod_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes `tf.math.multiply` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.multiply` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> x = tf.constant([[1., 2.], [3., 4.]])
>>> tf.math.reduce_prod(x)
<tf.Tensor: shape=(), dtype=float32, numpy=24.>
>>> tf.math.reduce_prod(x, 0)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([3., 8.], dtype=float32)>
>>> tf.math.reduce_prod(x, 1)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 12.],
dtype=float32)>
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.prod
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_prod(input_tensor, axis, keepdims, name)
@tf_export(v1=["math.reduce_min", "reduce_min"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_min_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the `tf.math.minimum` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.minimum` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
Usage example:
>>> x = tf.constant([5, 1, 2, 4])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=int32, numpy=1>
>>> x = tf.constant([-5, -1, -2, -4])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=int32, numpy=-5>
>>> x = tf.constant([4, float('nan')])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('nan'), float('nan')])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('-inf'), float('inf')])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=float32, numpy=-inf>
See the numpy docs for `np.amin` and `np.nanmin` behavior.
Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_min(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_min", "reduce_min", v1=[])
@dispatch.add_dispatch_support
def reduce_min(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the `tf.math.minimum` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.minimum` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> a = tf.constant([
... [[1, 2], [3, 4]],
... [[1, 2], [3, 4]]
... ])
>>> tf.reduce_min(a)
<tf.Tensor: shape=(), dtype=int32, numpy=1>
Choosing a specific axis returns the minimum element along that axis:
>>> b = tf.constant([[1, 2, 3], [4, 5, 6]])
>>> tf.reduce_min(b, axis=0)
<tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 2, 3], dtype=int32)>
>>> tf.reduce_min(b, axis=1)
<tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 4], dtype=int32)>
Setting `keepdims` to `True` retains the dimension of `input_tensor`:
>>> tf.reduce_min(a, keepdims=True)
<tf.Tensor: shape=(1, 1, 1), dtype=int32, numpy=array([[[1]]], dtype=int32)>
>>> tf.math.reduce_min(a, axis=0, keepdims=True)
<tf.Tensor: shape=(1, 2, 2), dtype=int32, numpy=
array([[[1, 2],
[3, 4]]], dtype=int32)>
Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.min
@end_compatibility
"""
keepdims = False if keepdims is None else bool(keepdims)
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops._min(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export(v1=["math.reduce_max", "reduce_max"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_max_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes `tf.math.maximum` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.maximum` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
Usage example:
>>> x = tf.constant([5, 1, 2, 4])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=int32, numpy=5>
>>> x = tf.constant([-5, -1, -2, -4])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=int32, numpy=-1>
>>> x = tf.constant([4, float('nan')])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('nan'), float('nan')])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('-inf'), float('inf')])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=float32, numpy=inf>
See the numpy docs for `np.amax` and `np.nanmax` behavior.
Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_max(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_max", "reduce_max", v1=[])
@dispatch.add_dispatch_support
def reduce_max(input_tensor, axis=None, keepdims=False, name=None):
"""Computes `tf.math.maximum` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.maximum` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
Usage example:
>>> x = tf.constant([5, 1, 2, 4])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=int32, numpy=5>
>>> x = tf.constant([-5, -1, -2, -4])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=int32, numpy=-1>
>>> x = tf.constant([4, float('nan')])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('nan'), float('nan')])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('-inf'), float('inf')])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=float32, numpy=inf>
See the numpy docs for `np.amax` and `np.nanmax` behavior.
Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return reduce_max_with_dims(input_tensor, axis, keepdims, name,
_ReductionDims(input_tensor, axis))
def reduce_max_with_dims(input_tensor,
axis=None,
keepdims=False,
name=None,
dims=None):
keepdims = False if keepdims is None else bool(keepdims)
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops._max(input_tensor, dims, keepdims, name=name))
@tf_export(v1=["math.reduce_all", "reduce_all"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_all_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes `tf.math.logical_and` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.logical_and` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> x = tf.constant([[True, True], [False, False]])
>>> tf.math.reduce_all(x)
<tf.Tensor: shape=(), dtype=bool, numpy=False>
>>> tf.math.reduce_all(x, 0)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
>>> tf.math.reduce_all(x, 1)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.all
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_all(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_all", "reduce_all", v1=[])
@dispatch.add_dispatch_support
def reduce_all(input_tensor, axis=None, keepdims=False, name=None):
"""Computes `tf.math.logical_and` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.logical_and` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> x = tf.constant([[True, True], [False, False]])
>>> tf.math.reduce_all(x)
<tf.Tensor: shape=(), dtype=bool, numpy=False>
>>> tf.math.reduce_all(x, 0)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
>>> tf.math.reduce_all(x, 1)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.all
@end_compatibility
"""
keepdims = False if keepdims is None else bool(keepdims)
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops._all(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export(v1=["math.reduce_any", "reduce_any"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_any_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes `tf.math.logical_or` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.logical_or` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> x = tf.constant([[True, True], [False, False]])
>>> tf.reduce_any(x)
<tf.Tensor: shape=(), dtype=bool, numpy=True>
>>> tf.reduce_any(x, 0)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, True])>
>>> tf.reduce_any(x, 1)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.any
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_any(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_any", "reduce_any", v1=[])
@dispatch.add_dispatch_support
def reduce_any(input_tensor, axis=None, keepdims=False, name=None):
"""Computes `tf.math.logical_or` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.logical_or` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> x = tf.constant([[True, True], [False, False]])
>>> tf.reduce_any(x)
<tf.Tensor: shape=(), dtype=bool, numpy=True>
>>> tf.reduce_any(x, 0)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, True])>
>>> tf.reduce_any(x, 1)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.any
@end_compatibility
"""
keepdims = False if keepdims is None else bool(keepdims)
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops._any(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export(v1=["math.reduce_logsumexp", "reduce_logsumexp"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_logsumexp_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(exp(input))). It avoids
overflows caused by taking the exp of large inputs and underflows caused by
taking the log of small inputs.
For example:
```python
x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
tf.reduce_logsumexp(x) # log(6)
tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)]
tf.reduce_logsumexp(x, 1) # [log(3), log(3)]
tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]]
tf.reduce_logsumexp(x, [0, 1]) # log(6)
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_logsumexp(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_logsumexp", "reduce_logsumexp", v1=[])
@dispatch.add_dispatch_support
def reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(exp(input))). It avoids
overflows caused by taking the exp of large inputs and underflows caused by
taking the log of small inputs.
For example:
```python
x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
tf.reduce_logsumexp(x) # log(6)
tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)]
tf.reduce_logsumexp(x, 1) # [log(3), log(3)]
tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]]
tf.reduce_logsumexp(x, [0, 1]) # log(6)
```
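  As a concrete illustration of the stability (values approximate), the naive
  formula overflows for large inputs while `tf.reduce_logsumexp` does not:
  ```python
  x = tf.constant([1000., 1000.])
  tf.math.log(tf.reduce_sum(tf.exp(x)))  # inf, since exp(1000.) overflows
  tf.reduce_logsumexp(x)  # ~1000.6931, i.e. 1000. + log(2.)
  ```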
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
keepdims = False if keepdims is None else keepdims
input_tensor = ops.convert_to_tensor(input_tensor)
with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
reduce_dim = _ReductionDims(input_tensor, axis)
raw_max = reduce_max_with_dims(
input_tensor, axis=axis, keepdims=True, dims=reduce_dim)
my_max = array_ops.stop_gradient(
gen_math_ops.select(
gen_math_ops.is_finite(raw_max), raw_max,
gen_array_ops.zeros_like(raw_max)))
result = gen_math_ops.log(
reduce_sum_with_dims(
gen_math_ops.exp(gen_math_ops.sub(input_tensor, my_max)),
axis=axis,
keepdims=keepdims,
dims=reduce_dim))
if not keepdims:
my_max = array_ops.reshape(my_max, gen_array_ops.shape(result))
result = _add_dispatch(result, my_max, name=name)
return _may_reduce_to_scalar(keepdims, axis, result)
@tf_export("linalg.trace", v1=["linalg.trace", "trace"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("trace")
def trace(x, name=None):
"""Compute the trace of a tensor `x`.
`trace(x)` returns the sum along the main diagonal of each inner-most matrix
in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output
is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where
`output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`
For example:
```python
x = tf.constant([[1, 2], [3, 4]])
tf.linalg.trace(x) # 5
x = tf.constant([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
tf.linalg.trace(x) # 15
x = tf.constant([[[1, 2, 3],
[4, 5, 6],
[7, 8, 9]],
[[-1, -2, -3],
[-4, -5, -6],
[-7, -8, -9]]])
tf.linalg.trace(x) # [15, -15]
```
Args:
x: tensor.
name: A name for the operation (optional).
Returns:
The trace of input tensor.
"""
with ops.name_scope(name, "Trace", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)
@tf_export("linalg.matmul", "matmul")
@dispatch.add_dispatch_support
def matmul(a,
b,
transpose_a=False,
transpose_b=False,
adjoint_a=False,
adjoint_b=False,
a_is_sparse=False,
b_is_sparse=False,
name=None):
"""Multiplies matrix `a` by matrix `b`, producing `a` * `b`.
The inputs must, following any transpositions, be tensors of rank >= 2
where the inner 2 dimensions specify valid matrix multiplication dimensions,
and any further outer dimensions specify matching batch size.
Both matrices must be of the same type. The supported types are:
`float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
  Either matrix can be transposed or adjointed (conjugated and transposed) on
  the fly by setting one of the corresponding flags to `True`. These are
  `False` by default.
If one or both of the matrices contain a lot of zeros, a more efficient
multiplication algorithm can be used by setting the corresponding
`a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
This optimization is only available for plain matrices (rank-2 tensors) with
datatypes `bfloat16` or `float32`.
A simple 2-D tensor matrix multiplication:
>>> a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
>>> a # 2-D tensor
<tf.Tensor: shape=(2, 3), dtype=int32, numpy=
array([[1, 2, 3],
[4, 5, 6]], dtype=int32)>
>>> b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])
>>> b # 2-D tensor
<tf.Tensor: shape=(3, 2), dtype=int32, numpy=
array([[ 7, 8],
[ 9, 10],
[11, 12]], dtype=int32)>
>>> c = tf.matmul(a, b)
>>> c # `a` * `b`
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[ 58, 64],
[139, 154]], dtype=int32)>
A batch matrix multiplication with batch shape [2]:
>>> a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3])
>>> a # 3-D tensor
<tf.Tensor: shape=(2, 2, 3), dtype=int32, numpy=
array([[[ 1, 2, 3],
[ 4, 5, 6]],
[[ 7, 8, 9],
[10, 11, 12]]], dtype=int32)>
>>> b = tf.constant(np.arange(13, 25, dtype=np.int32), shape=[2, 3, 2])
>>> b # 3-D tensor
<tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=
array([[[13, 14],
[15, 16],
[17, 18]],
[[19, 20],
[21, 22],
[23, 24]]], dtype=int32)>
>>> c = tf.matmul(a, b)
>>> c # `a` * `b`
<tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
array([[[ 94, 100],
[229, 244]],
[[508, 532],
[697, 730]]], dtype=int32)>
  Since Python >= 3.5 the `@` operator is supported
  (see [PEP 465](https://www.python.org/dev/peps/pep-0465/)). In TensorFlow,
  it simply calls the `tf.matmul()` function, so the following lines are
  equivalent:
>>> d = a @ b @ [[10], [11]]
>>> d = tf.matmul(tf.matmul(a, b), [[10], [11]])
Args:
a: `tf.Tensor` of type `float16`, `float32`, `float64`, `int32`,
`complex64`, `complex128` and rank > 1.
b: `tf.Tensor` with same type and rank as `a`.
transpose_a: If `True`, `a` is transposed before multiplication.
transpose_b: If `True`, `b` is transposed before multiplication.
adjoint_a: If `True`, `a` is conjugated and transposed before
multiplication.
adjoint_b: If `True`, `b` is conjugated and transposed before
multiplication.
a_is_sparse: If `True`, `a` is treated as a sparse matrix. Notice, this
**does not support `tf.sparse.SparseTensor`**, it just makes optimizations
that assume most values in `a` are zero.
See `tf.sparse.sparse_dense_matmul`
for some support for `tf.sparse.SparseTensor` multiplication.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix. Notice, this
      **does not support `tf.sparse.SparseTensor`**, it just makes optimizations
      that assume most values in `b` are zero.
      See `tf.sparse.sparse_dense_matmul`
      for some support for `tf.sparse.SparseTensor` multiplication.
name: Name for the operation (optional).
Returns:
A `tf.Tensor` of the same type as `a` and `b` where each inner-most matrix
is the product of the corresponding matrices in `a` and `b`, e.g. if all
transpose or adjoint attributes are `False`:
`output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j])`,
for all indices `i`, `j`.
Note: This is matrix product, not element-wise product.
Raises:
ValueError: If `transpose_a` and `adjoint_a`, or `transpose_b` and
`adjoint_b` are both set to `True`.
"""
with ops.name_scope(name, "MatMul", [a, b]) as name:
if transpose_a and adjoint_a:
raise ValueError("Only one of transpose_a and adjoint_a can be True.")
if transpose_b and adjoint_b:
raise ValueError("Only one of transpose_b and adjoint_b can be True.")
if context.executing_eagerly():
if not isinstance(a, (ops.EagerTensor, _resource_variable_type)):
a = ops.convert_to_tensor(a, name="a")
if not isinstance(b, (ops.EagerTensor, _resource_variable_type)):
b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b")
else:
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b")
# TODO(apassos) remove _shape_tuple here when it is not needed.
a_shape = a._shape_tuple() # pylint: disable=protected-access
b_shape = b._shape_tuple() # pylint: disable=protected-access
output_may_have_non_empty_batch_shape = (
(a_shape is None or len(a_shape) > 2) or
(b_shape is None or len(b_shape) > 2))
if (not a_is_sparse and
not b_is_sparse) and output_may_have_non_empty_batch_shape:
# BatchMatmul does not support transpose, so we conjugate the matrix and
# use adjoint instead. Conj() is a noop for real matrices.
if transpose_a:
a = conj(a)
adjoint_a = True
if transpose_b:
b = conj(b)
adjoint_b = True
return gen_math_ops.batch_mat_mul_v2(
a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name)
# Neither matmul nor sparse_matmul support adjoint, so we conjugate
# the matrix and use transpose instead. Conj() is a noop for real
# matrices.
if adjoint_a:
a = conj(a)
transpose_a = True
if adjoint_b:
b = conj(b)
transpose_b = True
use_sparse_matmul = False
if a_is_sparse or b_is_sparse:
sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
use_sparse_matmul = (
a.dtype in sparse_matmul_types and b.dtype in sparse_matmul_types)
if ((a.dtype == dtypes.bfloat16 or b.dtype == dtypes.bfloat16) and
a.dtype != b.dtype):
# matmul currently doesn't handle mixed-precision inputs.
use_sparse_matmul = True
if use_sparse_matmul:
ret = sparse_matmul(
a,
b,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse,
name=name)
      # sparse_matmul always returns float32, even with
      # bfloat16 inputs. This prevents us from configuring bfloat16 training.
      # Casting to bfloat16 also matches non-sparse matmul behavior better.
if a.dtype == dtypes.bfloat16 and b.dtype == dtypes.bfloat16:
ret = cast(ret, dtypes.bfloat16)
return ret
else:
return gen_math_ops.mat_mul(
a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
@tf_export("linalg.matvec")
@dispatch.add_dispatch_support
def matvec(a,
b,
transpose_a=False,
adjoint_a=False,
a_is_sparse=False,
b_is_sparse=False,
name=None):
"""Multiplies matrix `a` by vector `b`, producing `a` * `b`.
The matrix `a` must, following any transpositions, be a tensor of rank >= 2,
with `shape(a)[-1] == shape(b)[-1]`, and `shape(a)[:-2]` able to broadcast
with `shape(b)[:-1]`.
Both `a` and `b` must be of the same type. The supported types are:
`float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
  Matrix `a` can be transposed or adjointed (conjugated and transposed) on
  the fly by setting one of the corresponding flags to `True`. These are
  `False` by default.
If one or both of the inputs contain a lot of zeros, a more efficient
multiplication algorithm can be used by setting the corresponding
`a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
This optimization is only available for plain matrices/vectors (rank-2/1
tensors) with datatypes `bfloat16` or `float32`.
For example:
```python
# 2-D tensor `a`
# [[1, 2, 3],
# [4, 5, 6]]
a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
# 1-D tensor `b`
# [7, 9, 11]
b = tf.constant([7, 9, 11], shape=[3])
# `a` * `b`
  # [ 58, 139]
c = tf.linalg.matvec(a, b)
# 3-D tensor `a`
# [[[ 1, 2, 3],
# [ 4, 5, 6]],
# [[ 7, 8, 9],
# [10, 11, 12]]]
a = tf.constant(np.arange(1, 13, dtype=np.int32),
shape=[2, 2, 3])
# 2-D tensor `b`
# [[13, 14, 15],
# [16, 17, 18]]
b = tf.constant(np.arange(13, 19, dtype=np.int32),
shape=[2, 3])
# `a` * `b`
# [[ 86, 212],
# [410, 563]]
c = tf.linalg.matvec(a, b)
```
Args:
a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
`complex128` and rank > 1.
b: `Tensor` with same type as `a` and compatible dimensions.
transpose_a: If `True`, `a` is transposed before multiplication.
adjoint_a: If `True`, `a` is conjugated and transposed before
multiplication.
a_is_sparse: If `True`, `a` is treated as a sparse matrix.
b_is_sparse: If `True`, `b` is treated as a sparse matrix.
name: Name for the operation (optional).
Returns:
A `Tensor` of the same type as `a` and `b` where each inner-most vector is
the product of the corresponding matrices in `a` and vectors in `b`, e.g. if
all transpose or adjoint attributes are `False`:
`output`[..., i] = sum_k (`a`[..., i, k] * `b`[..., k]), for all indices i.
Note: This is matrix-vector product, not element-wise product.
Raises:
ValueError: If transpose_a and adjoint_a are both set to True.
"""
with ops.name_scope(name, "MatVec", [a, b]) as name:
output = matmul(
a,
array_ops.expand_dims(b, axis=-1),
transpose_a=transpose_a,
adjoint_a=adjoint_a,
a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse)
return array_ops.squeeze(output, axis=-1)
# TODO(b/178650720): Also support numpy-style type promotion in freestanding TF
# functions (e.g. tf.add).
def matmul_wrapper(a, b, name=None): # pylint: disable=missing-function-docstring
if ops._numpy_style_type_promotion:
return a._matmul(b)
return matmul(a, b, name=name)
matmul_wrapper.__doc__ = matmul.__doc__
_OverrideBinaryOperatorHelper(matmul_wrapper, "matmul")
sparse_matmul = deprecation.deprecated(None, "Use `tf.linalg.matmul` instead")(
gen_math_ops.sparse_mat_mul)
tf_export(v1=["sparse_matmul"])(sparse_matmul)
@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
"""Calculates the compute resources needed for MatMul."""
transpose_a = node.attr["transpose_a"].b
a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
a_shape.assert_is_fully_defined()
if transpose_a:
k = int(a_shape[0])
else:
k = int(a_shape[1])
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
output_count = np.prod(output_shape.as_list())
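  # Each output element is an inner product of length k: k multiplies and
  # k adds, i.e. 2 * k flops.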
return ops.OpStats("flops", (k * output_count * 2))
@ops.RegisterStatistics("BatchMatMul", "flops")
@ops.RegisterStatistics("BatchMatMulV2", "flops")
def _calc_batch_mat_mul_flops(graph, node):
"""Calculates the compute resources needed for BatchMatMul."""
transpose_a = node.attr["transpose_a"].b
a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
a_shape.assert_is_fully_defined()
if transpose_a:
k = int(a_shape[-2])
else:
k = int(a_shape[-1])
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
output_count = np.prod(output_shape.as_list())
return ops.OpStats("flops", (k * output_count * 2))
def _as_indexed_slices(x, optimize=True):
"""Convert 'x' to IndexedSlices.
Convert a dense Tensor to a block-sparse IndexedSlices.
Args:
x: Either a Tensor object, or an IndexedSlices object.
optimize: if true, attempt to optimize the conversion of 'x'.
Returns:
An IndexedSlices object.
Raises:
TypeError: If 'x' is not a Tensor or an IndexedSlices object.
"""
# TODO(touts): op_scope
if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):
raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
if isinstance(x, ops.IndexedSlices):
return x
x_shape = array_ops.shape_internal(x, optimize=optimize)
return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs, optimize=True):
"""Convert all elements of 'inputs' to IndexedSlices.
Additionally, homogenize the types of all the indices to
either int32 or int64.
Args:
inputs: List containing either Tensor or IndexedSlices objects.
optimize: if true, attempt to optimize the conversion of each input.
Returns:
A list of IndexedSlices objects.
Raises:
TypeError: If 'inputs' is not a list or a tuple.
"""
if not isinstance(inputs, (list, tuple)):
raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
with_int32_index = [
o.indices for o in outputs if o.indices.dtype == dtypes.int32
]
if not with_int32_index or len(with_int32_index) == len(outputs):
return outputs
casted_outputs = []
for o in outputs:
if o.indices.dtype == dtypes.int32:
casted_outputs.append(
ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64),
o.dense_shape))
else:
casted_outputs.append(o)
return casted_outputs
@tf_export("math.add_n", "add_n")
@dispatch.add_dispatch_support
def add_n(inputs, name=None):
"""Adds all input tensors element-wise.
`tf.math.add_n` performs the same operation as `tf.math.accumulate_n`, but it
waits for all of its inputs to be ready before beginning to sum.
This buffering can result in higher memory consumption when inputs are ready
at different times, since the minimum temporary storage required is
proportional to the input size rather than the output size.
This op does not [broadcast](
https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html)
its inputs. If you need broadcasting, use `tf.math.add` (or the `+` operator)
instead.
For example:
>>> a = tf.constant([[3, 5], [4, 8]])
>>> b = tf.constant([[1, 6], [2, 9]])
>>> tf.math.add_n([a, b, a])
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[ 7, 16],
[10, 25]], dtype=int32)>
Args:
inputs: A list of `tf.Tensor` or `tf.IndexedSlices` objects, each with the
same shape and type. `tf.IndexedSlices` objects will be converted into
dense tensors prior to adding.
name: A name for the operation (optional).
Returns:
A `tf.Tensor` of the same shape and type as the elements of `inputs`.
Raises:
ValueError: If `inputs` don't all have same shape and dtype or the shape
cannot be inferred.
"""
if not inputs or not isinstance(inputs, collections_abc.Iterable):
raise ValueError("inputs must be an iterable of at least one "
"Tensor/IndexedSlices with the same dtype and shape")
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, (ops.Tensor, ops.IndexedSlices)) for x in inputs):
raise ValueError("inputs must be an iterable of at least one "
"Tensor/IndexedSlices with the same dtype and shape")
if len(inputs) == 1:
if isinstance(inputs[0], ops.IndexedSlices):
values = ops.convert_to_tensor(inputs[0])
else:
values = inputs[0]
if name:
return array_ops.identity(values, name=name)
return values
return gen_math_ops.add_n(inputs, name=name)
@tf_export("math.accumulate_n", v1=["math.accumulate_n", "accumulate_n"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("accumulate_n")
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
"""Returns the element-wise sum of a list of tensors.
Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
otherwise, these are inferred.
`accumulate_n` performs the same operation as `tf.math.add_n`.
For example:
```python
a = tf.constant([[1, 2], [3, 4]])
b = tf.constant([[5, 0], [0, 6]])
tf.math.accumulate_n([a, b, a]) # [[7, 4], [6, 14]]
# Explicitly pass shape and type
tf.math.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
# [[7, 4],
# [6, 14]]
```
Args:
inputs: A list of `Tensor` objects, each with same shape and type.
shape: Expected shape of elements of `inputs` (optional). Also controls the
output shape of this op, which may affect type inference in other ops. A
value of `None` means "infer the input shape from the shapes in `inputs`".
tensor_dtype: Expected data type of `inputs` (optional). A value of `None`
means "infer the input dtype from `inputs[0]`".
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as the elements of `inputs`.
Raises:
ValueError: If `inputs` don't all have same shape and dtype or the shape
cannot be inferred.
"""
def _input_error():
return ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if not inputs or not isinstance(inputs, (list, tuple)):
raise _input_error()
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, ops.Tensor) for x in inputs):
raise _input_error()
if not all(x.dtype == inputs[0].dtype for x in inputs):
raise _input_error()
if shape is not None:
shape = tensor_shape.as_shape(shape)
else:
shape = tensor_shape.unknown_shape()
for input_tensor in inputs:
if isinstance(input_tensor, ops.Tensor):
shape = shape.merge_with(input_tensor.get_shape())
# tensor_dtype is for safety only; operator's output type computed in C++
if tensor_dtype is not None and tensor_dtype != inputs[0].dtype:
raise TypeError("tensor_dtype is {}, but input is of type {}".format(
tensor_dtype, inputs[0].dtype))
if len(inputs) == 1 and name is None:
return inputs[0]
elif len(inputs) == 1 and name is not None:
return array_ops.identity(inputs[0], name=name)
return add_n(inputs, name=name)
@ops.RegisterGradient("AccumulateNV2")
def _accumulate_n_grad(op, grad):
"""Same as gradient for AddN. Copies the gradient to all inputs."""
# Not broadcasting.
return [grad] * len(op.inputs)
@tf_export("math.sigmoid", "nn.sigmoid", "sigmoid")
@dispatch.add_dispatch_support
def sigmoid(x, name=None):
r"""Computes sigmoid of `x` element-wise.
Formula for calculating $\mathrm{sigmoid}(x) = y = 1 / (1 + \exp(-x))$.
For $x \in (-\infty, \infty)$, $\mathrm{sigmoid}(x) \in (0, 1)$.
Example Usage:
  If a positive number is large, then its sigmoid will approach 1, since the
  formula will be `y = <large_num> / (1 + <large_num>)`
>>> x = tf.constant([0.0, 1.0, 50.0, 100.0])
>>> tf.math.sigmoid(x)
<tf.Tensor: shape=(4,), dtype=float32,
numpy=array([0.5 , 0.7310586, 1. , 1. ], dtype=float32)>
  If a negative number is large, its sigmoid will approach 0, since the
  formula will be `y = 1 / (1 + <large_num>)`
>>> x = tf.constant([-100.0, -50.0, -1.0, 0.0])
>>> tf.math.sigmoid(x)
<tf.Tensor: shape=(4,), dtype=float32, numpy=
array([0.0000000e+00, 1.9287499e-22, 2.6894143e-01, 0.5],
dtype=float32)>
Args:
x: A Tensor with type `float16`, `float32`, `float64`, `complex64`, or
`complex128`.
name: A name for the operation (optional).
Returns:
A Tensor with the same type as `x`.
Usage Example:
>>> x = tf.constant([-128.0, 0.0, 128.0], dtype=tf.float32)
>>> tf.sigmoid(x)
<tf.Tensor: shape=(3,), dtype=float32,
numpy=array([0. , 0.5, 1. ], dtype=float32)>
@compatibility(scipy)
Equivalent to scipy.special.expit
@end_compatibility
"""
with ops.name_scope(name, "Sigmoid", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.sigmoid(x, name=name)
@tf_export("math.log_sigmoid", v1=["math.log_sigmoid", "log_sigmoid"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("log_sigmoid")
def log_sigmoid(x, name=None):
"""Computes log sigmoid of `x` element-wise.
Specifically, `y = log(1 / (1 + exp(-x)))`. For numerical stability,
we use `y = -tf.nn.softplus(-x)`.
Args:
x: A Tensor with type `float32` or `float64`.
name: A name for the operation (optional).
Returns:
A Tensor with the same type as `x`.
Usage Example:
  If a positive number is large, then its log_sigmoid will approach 0, since
  the formula will be `y = log( <large_num> / (1 + <large_num>) )`, which
  approximates `log(1)`, i.e. 0.
>>> x = tf.constant([0.0, 1.0, 50.0, 100.0])
>>> tf.math.log_sigmoid(x)
<tf.Tensor: shape=(4,), dtype=float32, numpy=
array([-6.9314718e-01, -3.1326169e-01, -1.9287499e-22, -0.0000000e+00],
dtype=float32)>
  If a negative number is large, its log_sigmoid approaches the number
  itself, since the formula will be `y = log( 1 / (1 + <large_num>) )`, which
  is `log(1) - log(1 + <large_num>)`, which approximates `-<large_num>`,
  i.e. the number itself.
>>> x = tf.constant([-100.0, -50.0, -1.0, 0.0])
>>> tf.math.log_sigmoid(x)
<tf.Tensor: shape=(4,), dtype=float32, numpy=
array([-100. , -50. , -1.3132616, -0.6931472],
dtype=float32)>
"""
with ops.name_scope(name, "LogSigmoid", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.neg(gen_nn_ops.softplus(-x), name=name)
@tf_export("math.cumsum", "cumsum")
@dispatch.add_dispatch_support
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
"""Compute the cumulative sum of the tensor `x` along `axis`.
By default, this op performs an inclusive cumsum, which means that the first
element of the input is identical to the first element of the output:
For example:
>>> # tf.cumsum([a, b, c]) # [a, a + b, a + b + c]
>>> x = tf.constant([2, 4, 6, 8])
>>> tf.cumsum(x)
<tf.Tensor: shape=(4,), dtype=int32,
numpy=array([ 2, 6, 12, 20], dtype=int32)>
>>> # using varying `axis` values
>>> y = tf.constant([[2, 4, 6, 8], [1,3,5,7]])
>>> tf.cumsum(y, axis=0)
<tf.Tensor: shape=(2, 4), dtype=int32, numpy=
array([[ 2, 4, 6, 8],
[ 3, 7, 11, 15]], dtype=int32)>
>>> tf.cumsum(y, axis=1)
<tf.Tensor: shape=(2, 4), dtype=int32, numpy=
array([[ 2, 6, 12, 20],
[ 1, 4, 9, 16]], dtype=int32)>
By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
instead:
>>> # tf.cumsum([a, b, c], exclusive=True) => [0, a, a + b]
>>> x = tf.constant([2, 4, 6, 8])
>>> tf.cumsum(x, exclusive=True)
<tf.Tensor: shape=(4,), dtype=int32,
numpy=array([ 0, 2, 6, 12], dtype=int32)>
By setting the `reverse` kwarg to `True`, the cumsum is performed in the
opposite direction:
>>> # tf.cumsum([a, b, c], reverse=True) # [a + b + c, b + c, c]
>>> x = tf.constant([2, 4, 6, 8])
>>> tf.cumsum(x, reverse=True)
<tf.Tensor: shape=(4,), dtype=int32,
numpy=array([20, 18, 14, 8], dtype=int32)>
This is more efficient than using separate `tf.reverse` ops.
The `reverse` and `exclusive` kwargs can also be combined:
>>> # tf.cumsum([a, b, c], exclusive=True, reverse=True) # [b + c, c, 0]
>>> x = tf.constant([2, 4, 6, 8])
>>> tf.cumsum(x, exclusive=True, reverse=True)
<tf.Tensor: shape=(4,), dtype=int32,
numpy=array([18, 14, 8, 0], dtype=int32)>
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
`complex128`, `qint8`, `quint8`, `qint32`, `half`.
axis: A `Tensor` of type `int32` (default: 0). Must be in the range
`[-rank(x), rank(x))`.
exclusive: If `True`, perform exclusive cumsum.
reverse: A `bool` (default: False).
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Cumsum", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.cumsum(
x, axis, exclusive=exclusive, reverse=reverse, name=name)
@tf_export("math.cumprod", v1=["math.cumprod", "cumprod"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("cumprod")
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
"""Compute the cumulative product of the tensor `x` along `axis`.
By default, this op performs an inclusive cumprod, which means that the
first element of the input is identical to the first element of the output:
```python
tf.math.cumprod([a, b, c]) # [a, a * b, a * b * c]
```
  By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
  performed instead:
```python
tf.math.cumprod([a, b, c], exclusive=True) # [1, a, a * b]
```
By setting the `reverse` kwarg to `True`, the cumprod is performed in the
opposite direction:
```python
tf.math.cumprod([a, b, c], reverse=True) # [a * b * c, b * c, c]
```
This is more efficient than using separate `tf.reverse` ops.
The `reverse` and `exclusive` kwargs can also be combined:
```python
tf.math.cumprod([a, b, c], exclusive=True, reverse=True) # [b * c, c, 1]
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
`complex128`, `qint8`, `quint8`, `qint32`, `half`.
axis: A `Tensor` of type `int32` (default: 0). Must be in the range
`[-rank(x), rank(x))`.
exclusive: If `True`, perform exclusive cumprod.
reverse: A `bool` (default: False).
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Cumprod", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.cumprod(
x, axis, exclusive=exclusive, reverse=reverse, name=name)
@tf_export("math.cumulative_logsumexp", v1=["math.cumulative_logsumexp"])
@dispatch.add_dispatch_support
def cumulative_logsumexp(x, axis=0, exclusive=False, reverse=False, name=None):
"""Compute the cumulative log-sum-exp of the tensor `x` along `axis`.
By default, this op performs an inclusive cumulative log-sum-exp, which means
that the first element of the input is identical to the first element of
the output.
  This operation is significantly more numerically stable than the equivalent
  TensorFlow expression `tf.math.log(tf.math.cumsum(tf.math.exp(x)))`, although
  it computes the same result given infinite numerical precision. However, note
that in some cases, it may be less stable than `tf.math.reduce_logsumexp`
for a given element, as it applies the "log-sum-exp trick" in a different
way.
More precisely, where `tf.math.reduce_logsumexp` uses the following trick:
```
log(sum(exp(x))) == log(sum(exp(x - max(x)))) + max(x)
```
it cannot be directly used here as there is no fast way of applying it
to each prefix `x[:i]`. Instead, this function implements a prefix
scan using pairwise log-add-exp, which is a commutative and associative
(up to floating point precision) operator:
```
log_add_exp(x, y) = log(exp(x) + exp(y))
= log(1 + exp(min(x, y) - max(x, y))) + max(x, y)
```
However, reducing using the above operator leads to a different computation
tree (logs are taken repeatedly instead of only at the end), and the maximum
is only computed pairwise instead of over the entire prefix. In general, this
leads to a different and slightly less precise computation.
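  For example, a small illustrative case (outputs approximate):
  ```python
  x = tf.constant([1., 2., 3.])
  tf.math.cumulative_logsumexp(x)
  # ~[1., 2.3133, 3.4076], i.e. log(cumsum(exp(x))), computed stably
  ```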
Args:
x: A `Tensor`. Must be one of the following types: `float16`, `float32`,
`float64`.
axis: A `Tensor` of type `int32` or `int64` (default: 0). Must be in the
range `[-rank(x), rank(x))`.
exclusive: If `True`, perform exclusive cumulative log-sum-exp.
reverse: If `True`, performs the cumulative log-sum-exp in the reverse
direction.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same shape and type as `x`.
"""
with ops.name_scope(name, "CumulativeLogsumexp", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.cumulative_logsumexp(
x, axis, exclusive=exclusive, reverse=reverse, name=name)
@tf_export("math.conj", v1=["math.conj", "conj"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("conj")
def conj(x, name=None):
r"""Returns the complex conjugate of a complex number.
Given a tensor `x` of complex numbers, this operation returns a tensor of
complex numbers that are the complex conjugate of each element in `x`. The
complex numbers in `x` must be of the form \\(a + bj\\), where `a` is the
real part and `b` is the imaginary part.
The complex conjugate returned by this operation is of the form \\(a - bj\\).
For example:
>>> x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
>>> tf.math.conj(x)
<tf.Tensor: shape=(2,), dtype=complex128,
numpy=array([-2.25-4.75j, 3.25-5.75j])>
If `x` is real, it is returned unchanged.
For example:
>>> x = tf.constant([-2.25, 3.25])
>>> tf.math.conj(x)
<tf.Tensor: shape=(2,), dtype=float32,
numpy=array([-2.25, 3.25], dtype=float32)>
Args:
x: `Tensor` to conjugate. Must have numeric or variant type.
name: A name for the operation (optional).
Returns:
A `Tensor` that is the conjugate of `x` (with the same type).
Raises:
TypeError: If `x` is not a numeric tensor.
@compatibility(numpy)
Equivalent to numpy.conj.
@end_compatibility
"""
if isinstance(x, ops.Tensor):
dt = x.dtype
if dt.is_floating or dt.is_integer:
return x
with ops.name_scope(name, "Conj", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_complex or x.dtype == dtypes.variant:
return gen_math_ops.conj(x, name=name)
elif x.dtype.is_floating or x.dtype.is_integer:
return x
else:
raise TypeError("Expected numeric or variant tensor, got dtype %r" %
x.dtype)
def reduced_shape(input_shape, axes):
"""Helper function for reduction ops.
Args:
input_shape: 1-D Tensor, the shape of the Tensor being reduced.
axes: 1-D Tensor, the reduction axes.
Returns:
A 1-D Tensor, the output shape as if keepdims were set to True.
"""
# TODO(allenl): Refactor `reduced_shape` to take the tensor corresponding to
# `input_shape` rather than `tf.shape` of it. Then we can check if the shape
# is fully defined here, which may be faster executing eagerly than running
# `tf.shape` and then fetching its constant value.
constant_input_shape = tensor_util.constant_value(input_shape)
if constant_input_shape is not None:
constant_axes = tensor_util.constant_value(axes)
if constant_axes is not None:
constant_axes = np.array(constant_axes, dtype=np.int32)
constant_input_shape = np.array(constant_input_shape, dtype=np.int32)
constant_input_shape[constant_axes] = 1
return constant_input_shape
# Example:
# cast needed for SparseTensor reductions
input_shape = cast(input_shape, dtypes.int32) # [2, 3, 5, 7]
axes = cast(axes, dtypes.int32) # [1, 2]
input_rank = array_ops.size(input_shape) # 4
axes = (axes + input_rank) % input_rank
axes_shape = array_ops.shape(axes) # [2]
return gen_data_flow_ops.dynamic_stitch( # [2, 1, 1, 7]
[
range(input_rank), # [0, 1, 2, 3]
axes
], # [1, 2]
[
input_shape, # [2, 3, 5, 7]
array_ops.fill(axes_shape, 1)
]) # [1, 1]
def _unsorted_segment_N(data, segment_ids, num_segments):
""" Helper function for unsorted_segment_mean/_sqrtN.
Computes the number
of segment entries with 0-entries set to 1 to allow division by N.
"""
num_segments = ops.convert_to_tensor(num_segments)
# bincount doesn't support negative indices so we use unsorted_segment_sum
segment_ids_shape = array_ops.shape_internal(segment_ids)
ones_tensor = array_ops.ones(segment_ids_shape, dtype=data.dtype)
n = gen_math_ops.unsorted_segment_sum(ones_tensor, segment_ids, num_segments)
# add dimensions for all non-reduced axes
broadcastable_shape = array_ops.concat(
[num_segments[array_ops.newaxis],
array_ops.ones([array_ops.rank(data)
- array_ops.rank(segment_ids)],
dtype=num_segments.dtype)],
axis=0)
n = array_ops.reshape(n, broadcastable_shape)
return gen_math_ops.maximum(n, 1)
@tf_export(
"math.unsorted_segment_mean",
v1=["math.unsorted_segment_mean", "unsorted_segment_mean"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("unsorted_segment_mean")
def unsorted_segment_mean(data, segment_ids, num_segments, name=None):
r"""Computes the mean along segments of a tensor.
Read [the section on
segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
for an explanation of segments.
This operator is similar to the `tf.math.unsorted_segment_sum` operator.
Instead of computing the sum over segments, it computes the mean of all
entries belonging to a segment such that:
\\(output_i = 1/N_i \sum_{j...} data[j...]\\) where the sum is over tuples
  `j...` such that `segment_ids[j...] == i`, with \\(N_i\\) being the number of
  occurrences of id \\(i\\).
If there is no entry for a given segment ID `i`, it outputs 0.
If the given segment ID `i` is negative, the value is dropped and will not
be added to the sum of the segment.
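  For example, a minimal illustration (values chosen arbitrarily):
  ```python
  data = tf.constant([1.0, 2.0, 3.0, 4.0])
  segment_ids = tf.constant([0, 0, 1, 1])
  tf.math.unsorted_segment_mean(data, segment_ids, num_segments=2)
  # => [1.5, 3.5]
  ```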
Args:
data: A `Tensor` with floating point or complex dtype.
segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
num_segments: An integer scalar `Tensor`. The number of distinct segment
IDs.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has same shape as data, except for the first `segment_ids.rank`
dimensions, which are replaced with a single dimension which has size
`num_segments`.
"""
with ops.name_scope(name, "UnsortedSegmentMean"):
data = ops.convert_to_tensor(data)
segment_ids = ops.convert_to_tensor(segment_ids)
N = _unsorted_segment_N(data, segment_ids, num_segments)
summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
return summed / N
@tf_export(
"math.unsorted_segment_sqrt_n",
v1=["math.unsorted_segment_sqrt_n", "unsorted_segment_sqrt_n"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("unsorted_segment_sqrt_n")
def unsorted_segment_sqrt_n(data, segment_ids, num_segments, name=None):
r"""Computes the sum along segments of a tensor divided by the sqrt(N).
Read [the section on
segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
for an explanation of segments.
This operator is similar to the `tf.math.unsorted_segment_sum` operator.
Additionally to computing the sum over segments, it divides the results by
sqrt(N).
  \\(output_i = 1/\sqrt{N_i} \sum_{j...} data[j...]\\) where the sum is over
  tuples `j...` such that `segment_ids[j...] == i`, with \\(N_i\\) being the
  number of occurrences of id \\(i\\).
If there is no entry for a given segment ID `i`, it outputs 0.
Note that this op only supports floating point and complex dtypes,
due to tf.sqrt only supporting these types.
If the given segment ID `i` is negative, the value is dropped and will not
be added to the sum of the segment.
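  For example, a minimal illustration (values chosen arbitrarily):
  ```python
  data = tf.constant([1.0, 2.0, 3.0, 4.0])
  segment_ids = tf.constant([0, 0, 1, 1])
  tf.math.unsorted_segment_sqrt_n(data, segment_ids, num_segments=2)
  # => [3/sqrt(2), 7/sqrt(2)] ~= [2.1213, 4.9497]
  ```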
Args:
data: A `Tensor` with floating point or complex dtype.
segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
num_segments: An integer scalar `Tensor`. The number of distinct segment
IDs.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has same shape as data, except for the first `segment_ids.rank`
dimensions, which are replaced with a single dimension which has size
`num_segments`.
"""
with ops.name_scope(name, "UnsortedSegmentSqrtN"):
data = ops.convert_to_tensor(data)
segment_ids = ops.convert_to_tensor(segment_ids)
N = _unsorted_segment_N(data, segment_ids, num_segments)
summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
return summed / gen_math_ops.sqrt(N)
@tf_export(v1=["sparse.segment_sum", "sparse_segment_sum"])
@deprecation.deprecated_endpoints("sparse_segment_sum")
def sparse_segment_sum(data,
indices,
segment_ids,
name=None,
num_segments=None):
r"""Computes the sum along sparse segments of a tensor.
Read [the section on
segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
for an explanation of segments.
Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s
first dimension, selecting a subset of dimension 0, specified by `indices`.
`segment_ids` is allowed to have missing ids, in which case the output will
be zeros at those indices. In those cases `num_segments` is used to determine
the size of the output.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
# Select two rows, one segment.
tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
# => [[0 0 0 0]]
  # Select two rows, two segments.
tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
# => [[ 1 2 3 4]
# [-1 -2 -3 -4]]
# With missing segment ids.
tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
num_segments=4)
# => [[ 1 2 3 4]
# [ 0 0 0 0]
# [-1 -2 -3 -4]
# [ 0 0 0 0]]
# Select all rows, two segments.
tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
# => [[0 0 0 0]
# [5 6 7 8]]
# Which is equivalent to:
tf.math.segment_sum(c, tf.constant([0, 0, 1]))
```
Args:
data: A `Tensor` with data that will be assembled in the output.
indices: A 1-D `Tensor` with indices into `data`. Has same rank as
`segment_ids`.
segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
should be sorted and can be repeated.
name: A name for the operation (optional).
num_segments: An optional int32 scalar. Indicates the size of the output
`Tensor`.
Returns:
    A `Tensor` of the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element in `segment_ids`.
"""
if num_segments is not None:
return gen_math_ops.sparse_segment_sum_with_num_segments(
data=data,
indices=indices,
segment_ids=segment_ids,
num_segments=num_segments,
name=name)
else:
return gen_math_ops.sparse_segment_sum(
data=data, indices=indices, segment_ids=segment_ids, name=name)
@tf_export("sparse.segment_sum", v1=[])
def sparse_segment_sum_v2(data,
indices,
segment_ids,
num_segments=None,
name=None):
r"""Computes the sum along sparse segments of a tensor.
Read [the section on
segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
for an explanation of segments.
Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s
first dimension, selecting a subset of dimension 0, specified by `indices`.
`segment_ids` is allowed to have missing ids, in which case the output will
be zeros at those indices. In those cases `num_segments` is used to determine
the size of the output.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
# Select two rows, one segment.
tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
# => [[0 0 0 0]]
  # Select two rows, two segments.
tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
# => [[ 1 2 3 4]
# [-1 -2 -3 -4]]
# With missing segment ids.
tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
num_segments=4)
# => [[ 1 2 3 4]
# [ 0 0 0 0]
# [-1 -2 -3 -4]
# [ 0 0 0 0]]
# Select all rows, two segments.
tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
# => [[0 0 0 0]
# [5 6 7 8]]
# Which is equivalent to:
tf.math.segment_sum(c, tf.constant([0, 0, 1]))
```
Args:
data: A `Tensor` with data that will be assembled in the output.
indices: A 1-D `Tensor` with indices into `data`. Has same rank as
`segment_ids`.
segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
should be sorted and can be repeated.
num_segments: An optional int32 scalar. Indicates the size of the output
`Tensor`.
name: A name for the operation (optional).
Returns:
    A `Tensor` of the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element in `segment_ids`.
"""
return sparse_segment_sum(
data, indices, segment_ids, name=name, num_segments=num_segments)
@tf_export(v1=["sparse.segment_mean", "sparse_segment_mean"])
@deprecation.deprecated_endpoints("sparse_segment_mean")
def sparse_segment_mean(data,
indices,
segment_ids,
name=None,
num_segments=None):
r"""Computes the mean along sparse segments of a tensor.
Read [the section on
segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
for an explanation of segments.
Like `tf.math.segment_mean`, but `segment_ids` can have rank less than
`data`'s first dimension, selecting a subset of dimension 0, specified by
`indices`.
`segment_ids` is allowed to have missing ids, in which case the output will
be zeros at those indices. In those cases `num_segments` is used to determine
the size of the output.
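  For example, a minimal illustration (values chosen arbitrarily):
  ```python
  c = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
  # Mean of rows 0 and 1, assembled into a single segment.
  tf.sparse.segment_mean(c, tf.constant([0, 1]), tf.constant([0, 0]))
  # => [[2.0, 3.0]]
  ```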
Args:
data: A `Tensor` with data that will be assembled in the output.
indices: A 1-D `Tensor` with indices into `data`. Has same rank as
`segment_ids`.
segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
should be sorted and can be repeated.
name: A name for the operation (optional).
num_segments: An optional int32 scalar. Indicates the size of the output
`Tensor`.
Returns:
    A `Tensor` of the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element in `segment_ids`.
"""
if num_segments is not None:
return gen_math_ops.sparse_segment_mean_with_num_segments(
data=data,
indices=indices,
segment_ids=segment_ids,
num_segments=num_segments,
name=name)
else:
return gen_math_ops.sparse_segment_mean(
data=data, indices=indices, segment_ids=segment_ids, name=name)
@tf_export("sparse.segment_mean", v1=[])
def sparse_segment_mean_v2(data,
indices,
segment_ids,
num_segments=None,
name=None):
r"""Computes the mean along sparse segments of a tensor.
Read [the section on
segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
for an explanation of segments.
Like `tf.math.segment_mean`, but `segment_ids` can have rank less than
`data`'s first dimension, selecting a subset of dimension 0, specified by
`indices`.
`segment_ids` is allowed to have missing ids, in which case the output will
be zeros at those indices. In those cases `num_segments` is used to determine
the size of the output.
Args:
data: A `Tensor` with data that will be assembled in the output.
indices: A 1-D `Tensor` with indices into `data`. Has same rank as
`segment_ids`.
segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
should be sorted and can be repeated.
num_segments: An optional int32 scalar. Indicates the size of the output
`Tensor`.
name: A name for the operation (optional).
Returns:
    A `Tensor` of the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element in `segment_ids`.
"""
return sparse_segment_mean(
data, indices, segment_ids, name=name, num_segments=num_segments)
@tf_export(v1=["sparse.segment_sqrt_n", "sparse_segment_sqrt_n"])
@deprecation.deprecated_endpoints("sparse_segment_sqrt_n")
def sparse_segment_sqrt_n(data,
indices,
segment_ids,
name=None,
num_segments=None):
r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).
`N` is the size of the segment being reduced.
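  For example, a minimal illustration (values chosen arbitrarily):
  ```python
  c = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
  # Sum of rows 0 and 1, divided by sqrt(2).
  tf.sparse.segment_sqrt_n(c, tf.constant([0, 1]), tf.constant([0, 0]))
  # => [[4/sqrt(2), 6/sqrt(2)]] ~= [[2.8284, 4.2426]]
  ```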
Args:
data: A `Tensor` with data that will be assembled in the output.
indices: A 1-D `Tensor` with indices into `data`. Has same rank as
`segment_ids`.
segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
should be sorted and can be repeated.
name: A name for the operation (optional).
num_segments: An optional int32 scalar. Indicates the size of the output
`Tensor`.
Returns:
    A `Tensor` of the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element in `segment_ids`.
"""
if num_segments is not None:
return gen_math_ops.sparse_segment_sqrt_n_with_num_segments(
data=data,
indices=indices,
segment_ids=segment_ids,
num_segments=num_segments,
name=name)
else:
return gen_math_ops.sparse_segment_sqrt_n(
data=data, indices=indices, segment_ids=segment_ids, name=name)
@tf_export("sparse.segment_sqrt_n", v1=[])
def sparse_segment_sqrt_n_v2(data,
indices,
segment_ids,
num_segments=None,
name=None):
r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).
Read [the section on
segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
for an explanation of segments.
  Like `tf.sparse.segment_mean`, but instead of dividing by the size of the
  segment, `N`, divides by `sqrt(N)`.
Args:
data: A `Tensor` with data that will be assembled in the output.
indices: A 1-D `Tensor` with indices into `data`. Has same rank as
`segment_ids`.
segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
should be sorted and can be repeated.
num_segments: An optional int32 scalar. Indicates the size of the output
`Tensor`.
name: A name for the operation (optional).
Returns:
    A `Tensor` of the same shape as `data`, except for dimension 0, which
    has size `k`, the number of segments specified via `num_segments` or
    inferred from the last element in `segment_ids`.
"""
return sparse_segment_sqrt_n(
data, indices, segment_ids, name=name, num_segments=num_segments)
@tf_export("tensordot", "linalg.tensordot")
@dispatch.add_dispatch_support
def tensordot(a, b, axes, name=None):
r"""Tensor contraction of a and b along specified axes and outer product.
Tensordot (also known as tensor contraction) sums the product of elements
from `a` and `b` over the indices specified by `a_axes` and `b_axes`.
The lists `a_axes` and `b_axes` specify those pairs of axes along which to
contract the tensors. The axis `a_axes[i]` of `a` must have the same dimension
as axis `b_axes[i]` of `b` for all `i` in `range(0, len(a_axes))`. The lists
`a_axes` and `b_axes` must have identical length and consist of unique
  integers that specify valid axes for each of the tensors. Additionally, the
  outer product is supported by passing `axes=0`.
This operation corresponds to `numpy.tensordot(a, b, axes)`.
Example 1: When `a` and `b` are matrices (order 2), the case `axes = 1`
is equivalent to matrix multiplication.
Example 2: When `a` and `b` are matrices (order 2), the case
`axes = [[1], [0]]` is equivalent to matrix multiplication.
Example 3: When `a` and `b` are matrices (order 2), the case `axes=0` gives
the outer product, a tensor of order 4.
Example 4: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two
tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor
\\(c_{jklm}\\) whose entry
corresponding to the indices \\((j,k,l,m)\\) is given by:
\\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\).
In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.
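  A minimal sketch of the `axes` forms above (shapes only; values arbitrary):
  ```python
  a = tf.ones([3, 4])
  b = tf.ones([4, 5])
  tf.tensordot(a, b, axes=1).shape           # [3, 5], matrix multiplication
  tf.tensordot(a, b, axes=[[1], [0]]).shape  # [3, 5], same contraction
  tf.tensordot(a, b, axes=0).shape           # [3, 4, 4, 5], outer product
  ```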
Args:
a: `Tensor` of type `float32` or `float64`.
b: `Tensor` with the same type as `a`.
axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].
If axes is a scalar, sum over the last N axes of a and the first N axes of
b in order. If axes is a list or `Tensor` the first and second row contain
the set of unique integers specifying axes along which the contraction is
computed, for `a` and `b`, respectively. The number of axes for `a` and
`b` must be equal. If `axes=0`, computes the outer product between `a` and
`b`.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `a`.
Raises:
ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.
IndexError: If the values in axes exceed the rank of the corresponding
tensor.
"""
def _tensordot_reshape(a, axes, flipped=False):
"""Helper method to perform transpose and reshape for contraction op.
This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`
using `array_ops.transpose` and `array_ops.reshape`. The method takes a
tensor and performs the correct transpose and reshape operation for a given
set of indices. It returns the reshaped tensor as well as a list of indices
necessary to reshape the tensor again after matrix multiplication.
Args:
a: `Tensor`.
axes: List or `int32` `Tensor` of unique indices specifying valid axes of
`a`.
flipped: An optional `bool`. Defaults to `False`. If `True`, the method
assumes that `a` is the second argument in the contraction operation.
Returns:
A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is
the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is
either a list of integers or an `int32` `Tensor`, depending on whether
the shape of a is fully specified, and free_dims_static is either a list
of integers and None values, or None, representing the inferred
      static shape of the free dimensions.
"""
if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):
shape_a = a.get_shape().as_list()
axes = [i if i >= 0 else i + len(shape_a) for i in axes]
free = [i for i in xrange(len(shape_a)) if i not in axes]
free_dims = [shape_a[i] for i in free]
prod_free = int(np.prod([shape_a[i] for i in free]))
prod_axes = int(np.prod([shape_a[i] for i in axes]))
perm = list(axes) + free if flipped else free + list(axes)
new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]
if (perm != np.arange(len(shape_a))).any():
a_trans = array_ops.transpose(a, perm)
else:
a_trans = a
if a_trans.get_shape().as_list() != new_shape:
reshaped_a = array_ops.reshape(a_trans, new_shape)
else:
reshaped_a = a_trans
return reshaped_a, free_dims, free_dims
else:
if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):
shape_a = a.get_shape().as_list()
axes = [i if i >= 0 else i + len(shape_a) for i in axes]
free = [i for i in xrange(len(shape_a)) if i not in axes]
axes_dims = [shape_a[i] for i in axes]
free_dims = [shape_a[i] for i in free]
free_dims_static = free_dims
axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
free = ops.convert_to_tensor(free, dtype=dtypes.int32, name="free")
shape_a = array_ops.shape(a)
else:
free_dims_static = None
shape_a = array_ops.shape(a)
rank_a = array_ops.rank(a)
axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
axes = array_ops.where(axes >= 0, axes, axes + rank_a)
free, _ = gen_array_ops.list_diff(range(rank_a), axes, dtypes.int32)
free_dims = array_ops.gather(shape_a, free)
axes_dims = array_ops.gather(shape_a, axes)
prod_free_dims = reduce_prod(free_dims)
prod_axes_dims = reduce_prod(axes_dims)
if flipped:
perm = array_ops.concat([axes, free], 0)
new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])
else:
perm = array_ops.concat([free, axes], 0)
new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])
reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
return reshaped_a, free_dims, free_dims_static
def _tensordot_axes(a, axes):
"""Generates two sets of contraction axes for the two tensor arguments."""
a_shape = a.get_shape()
if isinstance(axes, compat.integral_types):
if axes < 0:
raise ValueError("'axes' must be at least 0.")
if a_shape.ndims is not None:
if axes > a_shape.ndims:
raise ValueError("'axes' must not be larger than the number of "
"dimensions of tensor %s." % a)
return (list(xrange(a_shape.ndims - axes,
a_shape.ndims)), list(xrange(axes)))
else:
rank = array_ops.rank(a)
return (range(rank - axes, rank,
dtype=dtypes.int32), range(axes, dtype=dtypes.int32))
elif isinstance(axes, (list, tuple)):
if len(axes) != 2:
raise ValueError("'axes' must be an integer or have length 2.")
a_axes = axes[0]
b_axes = axes[1]
if isinstance(a_axes, compat.integral_types) and \
isinstance(b_axes, compat.integral_types):
a_axes = [a_axes]
b_axes = [b_axes]
if len(a_axes) != len(b_axes):
raise ValueError(
"Different number of contraction axes 'a' and 'b', %s != %s." %
(len(a_axes), len(b_axes)))
return a_axes, b_axes
else:
axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32)
return axes[0], axes[1]
with ops.name_scope(name, "Tensordot", [a, b, axes]) as name:
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
a_axes, b_axes = _tensordot_axes(a, axes)
a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)
b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(
b, b_axes, True)
ab_matmul = matmul(a_reshape, b_reshape)
if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):
if (ab_matmul.get_shape().is_fully_defined() and
ab_matmul.get_shape().as_list() == a_free_dims + b_free_dims):
return ab_matmul
else:
return array_ops.reshape(
ab_matmul, a_free_dims + b_free_dims, name=name)
else:
a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)
b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)
product = array_ops.reshape(
ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)
if a_free_dims_static is not None and b_free_dims_static is not None:
product.set_shape(a_free_dims_static + b_free_dims_static)
return product
@tf_export("math.polyval")
@dispatch.add_dispatch_support
def polyval(coeffs, x, name=None):
r"""Computes the elementwise value of a polynomial.
  If `x` is a tensor and `coeffs` is a list of n + 1 tensors,
  this function returns the value of the n-th order polynomial
  `p(x) = coeffs[n] + coeffs[n-1] * x + ... + coeffs[0] * x**n`
  evaluated using Horner's method, i.e.
  ```python
  p(x) = coeffs[n] + x * (coeffs[n-1] + ... + x * (coeffs[1] + x * coeffs[0]))
  ```
Usage Example:
>>> coefficients = [1.0, 2.5, -4.2]
>>> x = 5.0
>>> y = tf.math.polyval(coefficients, x)
>>> y
<tf.Tensor: shape=(), dtype=float32, numpy=33.3>
Usage Example:
>>> tf.math.polyval([2, 1, 0], 3) # evaluates 2 * (3**2) + 1 * (3**1) + 0 * (3**0)
<tf.Tensor: shape=(), dtype=int32, numpy=21>
`tf.math.polyval` can also be used in polynomial regression. Taking
advantage of this function can facilitate writing a polynomial equation
as compared to explicitly writing it out, especially for higher degree
polynomials.
>>> x = tf.constant(3)
>>> theta1 = tf.Variable(2)
>>> theta2 = tf.Variable(1)
>>> theta3 = tf.Variable(0)
>>> tf.math.polyval([theta1, theta2, theta3], x)
<tf.Tensor: shape=(), dtype=int32, numpy=21>
Args:
coeffs: A list of `Tensor` representing the coefficients of the polynomial.
x: A `Tensor` representing the variable of the polynomial.
name: A name for the operation (optional).
Returns:
    A `Tensor` of the same shape as the expression p(x), with the usual
    broadcasting rules for element-wise addition and multiplication applied.
@compatibility(numpy)
Equivalent to numpy.polyval.
@end_compatibility
"""
if not isinstance(coeffs, list):
raise ValueError("Argument coeffs must be list type "
"found {}.".format(type(coeffs)))
with ops.name_scope(name, "polyval", nest.flatten(coeffs) + [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if len(coeffs) < 1:
return array_ops.zeros_like(x, name=name)
coeffs = [
ops.convert_to_tensor(coeff, name=("coeff_%d" % index))
for index, coeff in enumerate(coeffs)
]
p = coeffs[0]
for c in coeffs[1:]:
p = c + p * x
return p
@tf_export("math.reciprocal_no_nan")
@dispatch.add_dispatch_support
def reciprocal_no_nan(x, name=None):
"""Performs a safe reciprocal operation, element wise.
If a particular element is zero, the reciprocal for that element is
also set to zero.
For example:
```python
x = tf.constant([2.0, 0.5, 0, 1], dtype=tf.float32)
tf.math.reciprocal_no_nan(x) # [ 0.5, 2, 0.0, 1.0 ]
```
Args:
x: A `Tensor` of type `float16`, `float32`, `float64` `complex64` or
`complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as `x`.
Raises:
TypeError: x must be of a valid dtype.
"""
with ops.name_scope(name, "reciprocal_no_nan", [x]) as scope:
x = ops.convert_to_tensor(x, name="x")
one = constant_op.constant(1, dtype=x.dtype.base_dtype, name="one")
return gen_math_ops.div_no_nan(one, x, name=scope)
@tf_export("math.xlog1py")
@dispatch.add_dispatch_support
def xlog1py(x, y, name=None):
r"""Compute x * log1p(y).
Given `x` and `y`, compute `x * log1p(y)`. This function safely returns
zero when `x = 0`, no matter what the value of `y` is.
Example:
>>> tf.math.xlog1py(0., 1.)
<tf.Tensor: shape=(), dtype=float32, numpy=0.>
>>> tf.math.xlog1py(1., 1.)
<tf.Tensor: shape=(), dtype=float32, numpy=0.6931472>
>>> tf.math.xlog1py(2., 2.)
<tf.Tensor: shape=(), dtype=float32, numpy=2.1972246>
>>> tf.math.xlog1py(0., -1.)
<tf.Tensor: shape=(), dtype=float32, numpy=0.>
Args:
x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
`complex64`, `complex128`
y: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
`complex64`, `complex128`
name: A name for the operation (optional).
Returns:
`x * log1p(y)`.
@compatibility(scipy)
Equivalent to scipy.special.xlog1py
@end_compatibility
"""
with ops.name_scope(name, "xlog1py", [x]):
return gen_math_ops.xlog1py(x, y)
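# Why not simply x * tf.math.log1p(y)? At x = 0., y = -1. the naive product
# is 0 * (-inf) == nan, whereas xlog1py is defined to return 0 (sketch, not
# part of the original module):
#
#   >>> (0. * tf.math.log1p(tf.constant(-1.))).numpy()   # nan
#   >>> tf.math.xlog1py(0., -1.).numpy()                 # 0.0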
@tf_export("math.erfinv")
@dispatch.add_dispatch_support
def erfinv(x, name=None):
"""Compute inverse error function.
Given `x`, compute the inverse error function of `x`. This function
is the inverse of `tf.math.erf`.
Args:
x: `Tensor` with type `float` or `double`.
name: A name for the operation (optional).
Returns:
Inverse error function of `x`.
"""
with ops.name_scope(name, "erfinv", [x]):
return gen_math_ops.erfinv(x)
@tf_export("math.ndtri")
@dispatch.add_dispatch_support
def ndtri(x, name=None):
"""Compute quantile of Standard Normal.
Args:
x: `Tensor` with type `float` or `double`.
name: A name for the operation (optional).
Returns:
    The quantile of the standard normal distribution corresponding to `x`.
"""
with ops.name_scope(name, "ndtri", [x]):
return gen_math_ops.ndtri(x)
@tf_export("math.erfcinv")
@dispatch.add_dispatch_support
def erfcinv(x, name=None):
"""Computes the inverse of complementary error function.
Given `x`, compute the inverse complementary error function of `x`.
This function is the inverse of `tf.math.erfc`, and is defined on
`[0, 2]`.
>>> tf.math.erfcinv([0., 0.2, 1., 1.5, 2.])
<tf.Tensor: shape=(5,), dtype=float32, numpy=
array([ inf, 0.9061935, -0. , -0.4769363, -inf],
dtype=float32)>
Args:
x: `Tensor` with type `float` or `double`.
name: A name for the operation (optional).
Returns:
Inverse complementary error function of `x`.
  @compatibility(scipy)
Equivalent to scipy.special.erfcinv
@end_compatibility
"""
with ops.name_scope(name, "erfcinv", [x]):
    x = ops.convert_to_tensor(x, name="x")
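    # Derivation of the identity below: erfc(z) = 2 * Phi(-z * sqrt(2)),
    # where Phi is the standard normal CDF and ndtri is its inverse. Solving
    # x = 2 * Phi(-z * sqrt(2)) for z gives z = -ndtri(x / 2) / sqrt(2),
    # i.e. -ndtri(0.5 * x) * sqrt(0.5).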
return -ndtri(0.5 * x) * np.sqrt(0.5)
@tf_export("math.ceil", v1=["math.ceil", "ceil"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("ceil")
def ceil(x, name=None):
"""Return the ceiling of the input, element-wise.
For example:
>>> tf.math.ceil([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
<tf.Tensor: shape=(7,), dtype=float32,
numpy=array([-1., -1., -0., 1., 2., 2., 2.], dtype=float32)>
Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`, `int32`.
name: A name for the operation (optional).
Returns:
A `tf.Tensor`. Has the same type as `x`.
@compatibility(numpy)
Equivalent to np.ceil
@end_compatibility
"""
return gen_math_ops.ceil(x, name)
@tf_export("math.sqrt", "sqrt")
@dispatch.add_dispatch_support
def sqrt(x, name=None): # pylint: disable=redefined-builtin
r"""Computes element-wise square root of the input tensor.
Note: This operation does not support integer types.
>>> x = tf.constant([[4.0], [16.0]])
>>> tf.sqrt(x)
<tf.Tensor: shape=(2, 1), dtype=float32, numpy=
array([[2.],
[4.]], dtype=float32)>
>>> y = tf.constant([[-4.0], [16.0]])
>>> tf.sqrt(y)
<tf.Tensor: shape=(2, 1), dtype=float32, numpy=
array([[nan],
[ 4.]], dtype=float32)>
>>> z = tf.constant([[-1.0], [16.0]], dtype=tf.complex128)
>>> tf.sqrt(z)
<tf.Tensor: shape=(2, 1), dtype=complex128, numpy=
array([[0.0+1.j],
[4.0+0.j]])>
Note: In order to support complex type, please provide an input tensor
of `complex64` or `complex128`.
Args:
x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
`complex64`, `complex128`
name: A name for the operation (optional).
Returns:
A `tf.Tensor` of same size, type and sparsity as `x`.
"""
return gen_math_ops.sqrt(x, name)
# pylint: disable=g-docstring-has-escape
@tf_export("math.exp", "exp")
@dispatch.add_dispatch_support
def exp(x, name=None):
r"""Computes exponential of x element-wise. \\(y = e^x\\).
  This function computes the exponential of the input tensor element-wise,
  i.e. `math.exp(x)` or \\(e^x\\), where `x` is the input tensor.
\\(e\\) denotes Euler's number and is approximately equal to 2.718281.
Output is positive for any real input.
>>> x = tf.constant(2.0)
>>> tf.math.exp(x)
<tf.Tensor: shape=(), dtype=float32, numpy=7.389056>
>>> x = tf.constant([2.0, 8.0])
>>> tf.math.exp(x)
<tf.Tensor: shape=(2,), dtype=float32,
numpy=array([ 7.389056, 2980.958 ], dtype=float32)>
For complex numbers, the exponential value is calculated as
$$
e^{x+iy} = {e^x} {e^{iy}} = {e^x} ({\cos (y) + i \sin (y)})
$$
For `1+1j` the value would be computed as:
$$
e^1 (\cos (1) + i \sin (1)) = 2.7182817 \times (0.5403023+0.84147096j)
$$
>>> x = tf.constant(1 + 1j)
>>> tf.math.exp(x)
<tf.Tensor: shape=(), dtype=complex128,
numpy=(1.4686939399158851+2.2873552871788423j)>
Args:
x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
`float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `tf.Tensor`. Has the same type as `x`.
@compatibility(numpy)
Equivalent to np.exp
@end_compatibility
"""
return gen_math_ops.exp(x, name)
# pylint: enable=g-docstring-has-escape
@tf_export("math.sobol_sample")
@dispatch.add_dispatch_support
def sobol_sample(dim, num_results, skip=0, dtype=dtypes.float32, name=None):
"""Generates points from the Sobol sequence.
Creates a Sobol sequence with `num_results` samples. Each sample has dimension
`dim`. Skips the first `skip` samples.
Args:
dim: Positive scalar `Tensor` representing each sample's dimension.
num_results: Positive scalar `Tensor` of dtype int32. The number of Sobol
points to return in the output.
skip: (Optional) Positive scalar `Tensor` of dtype int32. The number of
initial points of the Sobol sequence to skip. Default value is 0.
    dtype: (Optional) The `tf.DType` of the sample. One of: `tf.float32` or
      `tf.float64`. Defaults to `tf.float32`.
name: (Optional) Python `str` name prefixed to ops created by this function.
Returns:
    `Tensor` of samples from Sobol sequence with shape `[num_results, dim]`.
"""
with ops.name_scope(name, "sobol", [dim, num_results, skip]):
return gen_math_ops.sobol_sample(dim, num_results, skip, dtype=dtype)
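# Usage sketch (not part of the original module): draw five 2-D Sobol
# points; per the docstring the result has shape [num_results, dim]:
#
#   >>> pts = tf.math.sobol_sample(dim=2, num_results=5)
#   >>> pts.shape
#   TensorShape([5, 2])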
@tf_export("math.rsqrt", v1=["math.rsqrt", "rsqrt"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("rsqrt")
def rsqrt(x, name=None):
"""Computes reciprocal of square root of x element-wise.
For example:
>>> x = tf.constant([2., 0., -2.])
>>> tf.math.rsqrt(x)
<tf.Tensor: shape=(3,), dtype=float32,
numpy=array([0.707, inf, nan], dtype=float32)>
Args:
x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
`float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `tf.Tensor`. Has the same type as `x`.
"""
return gen_math_ops.rsqrt(x, name)
@tf_export("math.acos", "acos")
@dispatch.add_dispatch_support
def acos(x, name=None):
"""Computes acos of x element-wise.
Provided an input tensor, the `tf.math.acos` operation
returns the inverse cosine of each element of the tensor.
  If `y = tf.math.cos(x)`, then `x = tf.math.acos(y)`.
Input range is `[-1, 1]` and the output has a range of `[0, pi]`.
For example:
>>> x = tf.constant([1.0, -0.5, 3.4, 0.2, 0.0, -2], dtype = tf.float32)
>>> tf.math.acos(x)
<tf.Tensor: shape=(6,), dtype=float32,
numpy= array([0. , 2.0943952, nan, 1.3694383, 1.5707964, nan],
dtype=float32)>
Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`,
      `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as x.
"""
return gen_math_ops.acos(x, name)
@tf_export("math.floor", "floor")
@dispatch.add_dispatch_support
def floor(x, name=None):
"""Returns element-wise largest integer not greater than x.
  The input range is `(-inf, inf)` and the
  output range consists of all integer values.
For example:
>>> x = tf.constant([1.3324, -1.5, 5.555, -2.532, 0.99, float("inf")])
>>> tf.floor(x).numpy()
array([ 1., -2., 5., -3., 0., inf], dtype=float32)
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`,
`float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as x.
"""
return gen_math_ops.floor(x, name)
|
annarev/tensorflow
|
tensorflow/python/ops/math_ops.py
|
Python
|
apache-2.0
| 186,089 | 0.003353 |
# -*- coding: utf-8 -*-
from south.v2 import DataMigration
class Migration(DataMigration):
def forwards(self, orm):
orm.Boundary.objects.filter(category='Zip Code').update(sort_order=5)
def backwards(self, orm):
pass
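        # Presumably irreversible by design: the previous sort_order values
        # are not recorded anywhere, so there is nothing to restore here.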
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'treemap.audit': {
'Meta': {'object_name': 'Audit'},
'action': ('django.db.models.fields.IntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'field': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']", 'null': 'True', 'blank': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'model_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'previous_value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'ref': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Audit']", 'null': 'True'}),
'requires_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.User']"})
},
u'treemap.benefitcurrencyconversion': {
'Meta': {'object_name': 'BenefitCurrencyConversion'},
'co2_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'currency_symbol': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'electricity_kwh_to_currency': ('django.db.models.fields.FloatField', [], {}),
'h20_gal_to_currency': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'natural_gas_kbtu_to_currency': ('django.db.models.fields.FloatField', [], {}),
'nox_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'o3_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'pm10_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'sox_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'voc_lb_to_currency': ('django.db.models.fields.FloatField', [], {})
},
u'treemap.boundary': {
'Meta': {'object_name': 'Boundary'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857', 'db_column': "u'the_geom_webmercator'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {})
},
u'treemap.fieldpermission': {
'Meta': {'unique_together': "((u'model_name', u'field_name', u'role', u'instance'),)", 'object_name': 'FieldPermission'},
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'permission_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Role']"})
},
u'treemap.instance': {
'Meta': {'object_name': 'Instance'},
'basemap_data': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'basemap_type': ('django.db.models.fields.CharField', [], {'default': "u'google'", 'max_length': '255'}),
'boundaries': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['treemap.Boundary']", 'null': 'True', 'blank': 'True'}),
'bounds': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857'}),
'config': ('treemap.json_field.JSONField', [], {'blank': 'True'}),
'default_role': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'default_role'", 'to': u"orm['treemap.Role']"}),
'eco_benefits_conversion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.BenefitCurrencyConversion']", 'null': 'True', 'blank': 'True'}),
'geo_rev': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'itree_region_default': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'url_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['treemap.User']", 'null': 'True', 'through': u"orm['treemap.InstanceUser']", 'blank': 'True'})
},
u'treemap.instanceuser': {
'Meta': {'object_name': 'InstanceUser'},
'admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Role']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.User']"})
},
u'treemap.itreecodeoverride': {
'Meta': {'unique_together': "((u'instance_species', u'region'),)", 'object_name': 'ITreeCodeOverride'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_species': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Species']"}),
'itree_code': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.ITreeRegion']"})
},
u'treemap.itreeregion': {
'Meta': {'object_name': 'ITreeRegion'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'treemap.mapfeature': {
'Meta': {'object_name': 'MapFeature'},
'address_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_street': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_zip': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'feature_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geom': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '3857', 'db_column': "u'the_geom_webmercator'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'readonly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'udfs': ('treemap.udf.UDFField', [], {'db_index': 'True', 'blank': 'True'})
},
u'treemap.plot': {
'Meta': {'object_name': 'Plot', '_ormbases': [u'treemap.MapFeature']},
'length': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'mapfeature_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['treemap.MapFeature']", 'unique': 'True', 'primary_key': 'True'}),
'owner_orig_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'treemap.reputationmetric': {
'Meta': {'object_name': 'ReputationMetric'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'approval_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'denial_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'direct_write_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'treemap.role': {
'Meta': {'object_name': 'Role'},
'default_permission': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'rep_thresh': ('django.db.models.fields.IntegerField', [], {})
},
u'treemap.species': {
'Meta': {'object_name': 'Species'},
'bloom_period': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cultivar': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fact_sheet': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fall_conspicuous': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'flower_conspicuous': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'fruit_period': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'genus': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'max_dbh': ('django.db.models.fields.IntegerField', [], {'default': '200'}),
'max_height': ('django.db.models.fields.IntegerField', [], {'default': '800'}),
'native_status': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'otm_code': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'palatable_human': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'plant_guide': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'species': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'udfs': ('treemap.udf.UDFField', [], {'db_index': 'True', 'blank': 'True'}),
'wildlife_value': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
u'treemap.staticpage': {
'Meta': {'object_name': 'StaticPage'},
'content': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'treemap.tree': {
'Meta': {'object_name': 'Tree'},
'canopy_height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'date_planted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_removed': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'diameter': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'plot': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Plot']"}),
'readonly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'species': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Species']", 'null': 'True', 'blank': 'True'}),
'udfs': ('treemap.udf.UDFField', [], {'db_index': 'True', 'blank': 'True'})
},
u'treemap.treephoto': {
'Meta': {'object_name': 'TreePhoto'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'tree': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Tree']"})
},
u'treemap.user': {
'Meta': {'object_name': 'User'},
'allow_email_contact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'firstname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'lastname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'treemap.userdefinedcollectionvalue': {
'Meta': {'object_name': 'UserDefinedCollectionValue'},
'data': ('django_hstore.fields.DictionaryField', [], {}),
'field_definition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.UserDefinedFieldDefinition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model_id': ('django.db.models.fields.IntegerField', [], {})
},
u'treemap.userdefinedfielddefinition': {
'Meta': {'object_name': 'UserDefinedFieldDefinition'},
'datatype': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'iscollection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'model_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['treemap']
symmetrical = True
|
johnsonc/OTM2
|
opentreemap/treemap/migrations/0061_change_zip_code_sort_order.py
|
Python
|
gpl-3.0
| 20,859 | 0.008102 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.views import flavors as flavors_view
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.compute import flavors
from nova import exception
from nova.i18n import _
authorize = extensions.extension_authorizer('compute', 'flavormanage')
class FlavorManageController(wsgi.Controller):
"""The Flavor Lifecycle API controller for the OpenStack API."""
_view_builder_class = flavors_view.ViewBuilder
def __init__(self):
super(FlavorManageController, self).__init__()
@wsgi.action("delete")
def _delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
flavor = flavors.get_flavor_by_flavor_id(
id, ctxt=context, read_deleted="no")
except exception.FlavorNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
flavors.destroy(flavor['name'])
return webob.Response(status_int=202)
@wsgi.action("create")
def _create(self, req, body):
context = req.environ['nova.context']
authorize(context)
if not self.is_valid_body(body, 'flavor'):
msg = _("Invalid request body")
raise webob.exc.HTTPBadRequest(explanation=msg)
vals = body['flavor']
name = vals.get('name')
if name is None:
msg = _("A valid name parameter is required")
raise webob.exc.HTTPBadRequest(explanation=msg)
flavorid = vals.get('id')
memory = vals.get('ram')
if memory is None:
msg = _("A valid ram parameter is required")
raise webob.exc.HTTPBadRequest(explanation=msg)
vcpus = vals.get('vcpus')
if vcpus is None:
msg = _("A valid vcpus parameter is required")
raise webob.exc.HTTPBadRequest(explanation=msg)
root_gb = vals.get('disk')
if root_gb is None:
msg = _("A valid disk parameter is required")
raise webob.exc.HTTPBadRequest(explanation=msg)
ephemeral_gb = vals.get('OS-FLV-EXT-DATA:ephemeral', 0)
swap = vals.get('swap', 0)
rxtx_factor = vals.get('rxtx_factor', 1.0)
is_public = vals.get('os-flavor-access:is_public', True)
try:
flavor = flavors.create(name, memory, vcpus, root_gb,
ephemeral_gb=ephemeral_gb,
flavorid=flavorid, swap=swap,
rxtx_factor=rxtx_factor,
is_public=is_public)
req.cache_db_flavor(flavor)
except (exception.FlavorExists,
exception.FlavorIdExists) as err:
raise webob.exc.HTTPConflict(explanation=err.format_message())
except exception.InvalidInput as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
except exception.FlavorCreateFailed as exc:
raise webob.exc.HTTPInternalServerError(explanation=
exc.format_message())
return self._view_builder.show(req, flavor)
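    # Example request body accepted by _create (sketch; the values are made
    # up, the key names are the ones read from `vals` above):
    #
    #   {"flavor": {"name": "m1.tiny", "id": "10", "ram": 512, "vcpus": 1,
    #               "disk": 1, "OS-FLV-EXT-DATA:ephemeral": 0, "swap": 0,
    #               "rxtx_factor": 1.0, "os-flavor-access:is_public": true}}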
class Flavormanage(extensions.ExtensionDescriptor):
"""Flavor create/delete API support."""
name = "FlavorManage"
alias = "os-flavor-manage"
namespace = ("http://docs.openstack.org/compute/ext/"
"flavor_manage/api/v1.1")
updated = "2012-01-19T00:00:00Z"
def get_controller_extensions(self):
controller = FlavorManageController()
extension = extensions.ControllerExtension(self, 'flavors', controller)
return [extension]
|
scripnichenko/nova
|
nova/api/openstack/compute/legacy_v2/contrib/flavormanage.py
|
Python
|
apache-2.0
| 4,241 | 0.000472 |
import importlib
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from ckeditor_link import conf
from django import template
from django.template.defaultfilters import stringfilter
try:
module_name, class_name = conf.CKEDITOR_LINK_MODEL.rsplit(".", 1)
my_module = importlib.import_module(module_name)
ckeditor_link_class = getattr(my_module, class_name, None)
except ImportError:
ckeditor_link_class = None
register = template.Library()
@register.filter
@stringfilter
def ckeditor_link_add_links(html):
# lxml is not a dependency, but needed for this tag.
from lxml.html import fragment_fromstring, tostring
if not ckeditor_link_class:
        # TODO: use some log thing, or raise ImproperlyConfigured!
if settings.DEBUG:
msg = "Warning: CKEDITOR_LINK_MODEL (%s) could not be imported!?" % (conf.CKEDITOR_LINK_MODEL, )
raise ImproperlyConfigured(msg)
return html
fragment = fragment_fromstring("<div>" + html + "</div>")
links = fragment.cssselect('a')
for link in links:
if link.get('data-ckeditor-link', None):
link.attrib.pop('data-ckeditor-link')
kwargs = {}
dummy_link = ckeditor_link_class()
for key, value in link.items():
if key.startswith('data-'):
new_key = key.replace('data-', '', 1)
# DEPRECATED: use CKEDITOR_LINK_ATTR_MODIFIERS setting!
if new_key == 'page_2':
new_key = 'cms_page' # backward compat, for 0.2.0
if new_key == 'cms_page_2':
new_key = 'cms_page'
# until here
if hasattr(dummy_link, new_key):
if hasattr(dummy_link, new_key + "_id"):
# set fk directly
new_key = new_key + "_id"
if not value:
value = None
kwargs[new_key] = value
link.attrib.pop(key)
for key, formatted_string in conf.CKEDITOR_LINK_ATTR_MODIFIERS.items():
try:
kwargs[key] = formatted_string.format(**kwargs)
except KeyError:
# this is an option, we dont know at all how our link is/was built (ages ago)
pass
try:
# this can go wrong with fk and the like
real_link = ckeditor_link_class(**kwargs)
link.set('href', real_link.get_link())
if getattr(real_link, 'get_link_target', None):
link.set('target', real_link.get_link_target())
if getattr(real_link, 'get_link_style', None):
link.set('class', real_link.get_link_style())
if getattr(real_link, 'get_link_attrs', None):
for attr, value in real_link.get_link_attrs().items():
link.set(attr, value)
except (ValueError, ObjectDoesNotExist):
continue
    # lxml's tostring returns bytes by default, see
    # http://makble.com/python-why-lxml-etree-tostring-method-returns-bytes
    # passing encoding='unicode' makes it return a str instead.
return tostring(fragment, encoding='unicode')
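# Usage sketch in a Django template (assuming this file is loaded as the
# `ckeditor_link_tags` library, per its filename):
#
#   {% load ckeditor_link_tags %}
#   {{ object.richtext|ckeditor_link_add_links|safe }}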
|
benzkji/django-ckeditor-link
|
ckeditor_link/templatetags/ckeditor_link_tags.py
|
Python
|
gpl-2.0
| 3,359 | 0.001191 |
#!/usr/bin/python
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('PersistenceEngine')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import time
import threading
import traceback
class PersistenceEngine(threading.Thread):
DEFAULT_SAVEDDATALOCATION = None
DEFAULT_SAVE_PERIOD = 60 # in seconds
PERIOD_WAKEUP = 1 # in seconds
def __init__(self,getDataCb):
# store params
self.getDataCb = getDataCb
# local variables
self.varLock = threading.RLock()
self.savePeriod = self.DEFAULT_SAVE_PERIOD
self.savedDataLocation = self.DEFAULT_SAVEDDATALOCATION
self.runningPeriod = 0
self.goOn = True
self.closingSem = threading.Lock()
self.closingSem.acquire()
# intialize parent class
threading.Thread.__init__(self)
self.name = "PersistenceEngine"
def run(self):
# log
log.info('thread started')
try:
# run in loop until time to stop
while self._getGoOn():
time.sleep(self.PERIOD_WAKEUP)
with self.varLock:
self.runningPeriod += self.PERIOD_WAKEUP
if self.runningPeriod >= self.getSavePeriod():
self._performSaveRoutine()
self.runningPeriod = 0
# time to stop, save one last time
self._performSaveRoutine()
# release closingSem
self.closingSem.release()
except Exception as err:
output = []
output += ['===== crash in thread {0} ====='.format(self.name)]
output += ['\nerror:\n']
output += [str(err)]
output += ['\ncall stack:\n']
output += [traceback.format_exc()]
output = '\n'.join(output)
print output # critical error
log.critical(output)
# release closingSem
self.closingSem.release()
raise
finally:
# log
log.info('thread ended')
#======================== public ==========================================
def setSavePeriod(self,newSavePeriod):
assert type(newSavePeriod)==int
self.varLock.acquire()
self.savePeriod = newSavePeriod
self.varLock.release()
def getSavePeriod(self):
self.varLock.acquire()
returnVal = self.savePeriod
self.varLock.release()
return returnVal
def setSavedDataLocation(self,newSavedDataLocation):
self.varLock.acquire()
self.savedDataLocation = newSavedDataLocation
self.varLock.release()
def getSavedDataLocation(self):
self.varLock.acquire()
returnVal = self.savedDataLocation
self.varLock.release()
return returnVal
def stop(self):
log.info("stop called")
self._setGoOn(False)
self.closingSem.acquire()
log.info("stopped")
def indicateChange(self):
'''
Some important data has been changed and data should be saved soon.
'''
with self.varLock:
self.runningPeriod = self.getSavePeriod()
#======================== virtual methods =================================
def retrieveData(self):
raise NotImplementedError() # to be implemented by child class
def saveData(self,dataToSave):
raise NotImplementedError() # to be implemented by child class
def quarantineData(self):
raise NotImplementedError() # to be implemented by child class
#======================== private =========================================
def _performSaveRoutine(self):
# get a copy of the data to save
dataToSave = self.getDataCb()
# save the data
self.saveData(dataToSave)
def _getGoOn(self):
self.varLock.acquire()
returnVal = self.goOn
self.varLock.release()
return returnVal
def _setGoOn(self,newGoOn):
assert newGoOn in [True,False]
self.varLock.acquire()
self.goOn = newGoOn
self.varLock.release()
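# Minimal subclass sketch (hypothetical, not part of this module): persist
# the data as JSON at the configured location. The file format and the lack
# of error handling are assumptions.
#
# import json
#
# class JsonPersistenceEngine(PersistenceEngine):
#
#     def saveData(self, dataToSave):
#         with open(self.getSavedDataLocation(), 'w') as f:
#             json.dump(dataToSave, f)
#
#     def retrieveData(self):
#         with open(self.getSavedDataLocation()) as f:
#             return json.load(f)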
|
dustcloud/dustlink
|
DustLinkData/PersistenceEngine.py
|
Python
|
bsd-3-clause
| 4,768 | 0.014681 |
#!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import testsuite
# Bootstrap the testsuite
testsuite.setup()
import testbase
class PluginTest(testbase.BasePluginTest):
pluginName = 'disablesshpasswdauth'
PluginData = ""
SshdConfigContents = """\
Dummy line 1
Dummy line 2
PasswordAuthentication yes
# PasswordAuthentication yes
Dummy line 3
"""
def setUpExtra(self):
sshdConfigDir = self.mkdirs("etc/ssh")
sshdConfig = self.sshdConfigFile = os.path.join(
sshdConfigDir, "sshd_config")
file(sshdConfig, "w").write(self.SshdConfigContents)
def testFiles(self):
self.assertEquals(file(self.sshdConfigFile).read(),
self.SshdConfigContents.replace(
'\nPasswordAuthentication yes',
'\nPasswordAuthentication no').replace(
'# PasswordAuthentication yes',
'# # PasswordAuthentication yes')
)
|
sassoftware/amiconfig
|
amiconfig_test/plugin_disablesshpasswdauth_test.py
|
Python
|
apache-2.0
| 1,526 | 0.001966 |
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
def printLegend(rowLabels, colLabels, params):
    fig = plt.figure()
    the_table = plt.table(cellText=params,
                          colWidths=[0.2] * 4,
                          rowLabels=rowLabels,
                          colLabels=colLabels,
                          loc='center')
    plt.text(12, 3.4, 'Table Title', size=8)
    plt.title('Legend for experiments')
    plt.show()
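# Usage sketch (hypothetical data): a legend with two rows and four columns.
#
#   printLegend(['exp 1', 'exp 2'],
#               ['alpha', 'beta', 'gamma', 'delta'],
#               [[1, 2, 3, 4], [5, 6, 7, 8]])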
|
ehudmagal/robotqcapp
|
Utils/RobotQAUtils/graphics/drawTable.py
|
Python
|
bsd-3-clause
| 435 | 0.036782 |
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron_lib.api.definitions import security_groups_remote_address_group \
as sgag_def
from neutron_lib import constants as n_const
from neutron_lib import context
from neutron_lib.db import api as db_api
from oslo_db import exception as db_exc
from neutron.api import extensions
from neutron.common import config
from neutron.db.models import ovn as ovn_models
from neutron.db import ovn_revision_numbers_db as ovn_rn_db
import neutron.extensions
from neutron.services.revisions import revision_plugin
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit.extensions import test_securitygroup
EXTENSIONS_PATH = ':'.join(neutron.extensions.__path__)
PLUGIN_CLASS = (
'neutron.tests.unit.db.test_ovn_revision_numbers_db.TestMaintenancePlugin')
class TestRevisionNumber(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def setUp(self):
super(TestRevisionNumber, self).setUp()
self.ctx = context.get_admin_context()
self.addCleanup(self._delete_objs)
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
self.net = self.deserialize(self.fmt, res)['network']
def _delete_objs(self):
with db_api.CONTEXT_WRITER.using(self.ctx):
self.ctx.session.query(
ovn_models.OVNRevisionNumbers).delete()
def _create_initial_revision(self, resource_uuid, resource_type,
revision_number=ovn_rn_db.INITIAL_REV_NUM,
may_exist=False):
ovn_rn_db.create_initial_revision(
self.ctx, resource_uuid, resource_type,
revision_number=revision_number, may_exist=may_exist)
def test_bump_revision(self):
with db_api.CONTEXT_WRITER.using(self.ctx):
self._create_initial_revision(self.net['id'],
ovn_rn_db.TYPE_NETWORKS)
self.net['revision_number'] = 123
ovn_rn_db.bump_revision(self.ctx, self.net,
ovn_rn_db.TYPE_NETWORKS)
row = ovn_rn_db.get_revision_row(self.ctx, self.net['id'])
self.assertEqual(123, row.revision_number)
def test_bump_older_revision(self):
with db_api.CONTEXT_WRITER.using(self.ctx):
self._create_initial_revision(
self.net['id'], ovn_rn_db.TYPE_NETWORKS,
revision_number=124)
self.net['revision_number'] = 1
ovn_rn_db.bump_revision(self.ctx, self.net,
ovn_rn_db.TYPE_NETWORKS)
row = ovn_rn_db.get_revision_row(self.ctx, self.net['id'])
self.assertEqual(124, row.revision_number)
@mock.patch.object(ovn_rn_db.LOG, 'warning')
def test_bump_revision_row_not_found(self, mock_log):
with db_api.CONTEXT_WRITER.using(self.ctx):
self.net['revision_number'] = 123
ovn_rn_db.bump_revision(self.ctx, self.net,
ovn_rn_db.TYPE_NETWORKS)
# Assert the revision number wasn't bumped
row = ovn_rn_db.get_revision_row(self.ctx, self.net['id'])
self.assertEqual(123, row.revision_number)
self.assertIn('No revision row found for',
mock_log.call_args[0][0])
def test_delete_revision(self):
with db_api.CONTEXT_WRITER.using(self.ctx):
self._create_initial_revision(self.net['id'],
ovn_rn_db.TYPE_NETWORKS)
ovn_rn_db.delete_revision(self.ctx, self.net['id'],
ovn_rn_db.TYPE_NETWORKS)
row = ovn_rn_db.get_revision_row(self.ctx, self.net['id'])
self.assertIsNone(row)
def test_create_initial_revision_may_exist_duplicated_entry(self):
try:
with db_api.CONTEXT_WRITER.using(self.ctx):
args = (self.net['id'], ovn_rn_db.TYPE_NETWORKS)
self._create_initial_revision(*args)
# DBDuplicateEntry is raised when may_exist is False (default)
self._create_initial_revision(*args)
except Exception as exc:
if type(exc) is not db_exc.DBDuplicateEntry:
self.fail("create_initial_revision with the same parameters "
"should have raisen a DBDuplicateEntry exception")
with db_api.CONTEXT_WRITER.using(self.ctx):
args = (self.net['id'], ovn_rn_db.TYPE_NETWORKS)
self._create_initial_revision(*args)
try:
self._create_initial_revision(*args, may_exist=True)
except db_exc.DBDuplicateEntry:
self.fail("create_initial_revision shouldn't raise "
"DBDuplicateEntry when may_exist is True")
class TestMaintenancePlugin(test_securitygroup.SecurityGroupTestPlugin,
test_l3.TestL3NatBasePlugin):
__native_pagination_support = True
__native_sorting_support = True
supported_extension_aliases = ['external-net', 'security-group',
sgag_def.ALIAS]
class TestRevisionNumberMaintenance(test_securitygroup.SecurityGroupsTestCase,
test_l3.L3NatTestCaseMixin):
def setUp(self):
service_plugins = {
'router':
'neutron.tests.unit.extensions.test_l3.TestL3NatServicePlugin'}
l3_plugin = test_l3.TestL3NatServicePlugin()
sec_plugin = test_securitygroup.SecurityGroupTestPlugin()
ext_mgr = extensions.PluginAwareExtensionManager(
EXTENSIONS_PATH, {'router': l3_plugin, 'sec': sec_plugin}
)
super(TestRevisionNumberMaintenance, self).setUp(
plugin=PLUGIN_CLASS, service_plugins=service_plugins)
app = config.load_paste_app('extensions_test_app')
self.ext_api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
self.session = db_api.get_writer_session()
revision_plugin.RevisionPlugin()
self.net = self._make_network(self.fmt, 'net1', True)['network']
# Mock the default value for INCONSISTENCIES_OLDER_THAN so
# tests won't need to wait for the timeout in order to validate
# the database inconsistencies
self.older_than_mock = mock.patch(
'neutron.db.ovn_revision_numbers_db.INCONSISTENCIES_OLDER_THAN',
-1)
self.older_than_mock.start()
self.addCleanup(self.older_than_mock.stop)
self.ctx = context.get_admin_context()
def _create_initial_revision(self, resource_uuid, resource_type,
revision_number=ovn_rn_db.INITIAL_REV_NUM,
may_exist=False):
with db_api.CONTEXT_WRITER.using(self.ctx):
ovn_rn_db.create_initial_revision(
self.ctx, resource_uuid, resource_type,
revision_number=revision_number, may_exist=may_exist)
def test_get_inconsistent_resources(self):
        # Set the initial revision to -1 to force it to be inconsistent
self._create_initial_revision(
self.net['id'], ovn_rn_db.TYPE_NETWORKS, revision_number=-1)
res = ovn_rn_db.get_inconsistent_resources(self.ctx)
self.assertEqual(1, len(res))
self.assertEqual(self.net['id'], res[0].resource_uuid)
def test_get_inconsistent_resources_older_than(self):
# Stop the mock so the INCONSISTENCIES_OLDER_THAN will have
        # its default value
self.older_than_mock.stop()
self._create_initial_revision(
self.net['id'], ovn_rn_db.TYPE_NETWORKS, revision_number=-1)
res = ovn_rn_db.get_inconsistent_resources(self.ctx)
# Assert that nothing is returned because the entry is not old
# enough to be picked as an inconsistency
self.assertEqual(0, len(res))
        # Start the mock again and make sure it now shows up as an
# inconsistency
self.older_than_mock.start()
res = ovn_rn_db.get_inconsistent_resources(self.ctx)
self.assertEqual(1, len(res))
self.assertEqual(self.net['id'], res[0].resource_uuid)
def test_get_inconsistent_resources_consistent(self):
# Set the initial revision to 0 which is the initial revision_number
# for recently created resources
self._create_initial_revision(
self.net['id'], ovn_rn_db.TYPE_NETWORKS, revision_number=0)
res = ovn_rn_db.get_inconsistent_resources(self.ctx)
# Assert nothing is inconsistent
self.assertEqual([], res)
def test_get_deleted_resources(self):
self._create_initial_revision(
self.net['id'], ovn_rn_db.TYPE_NETWORKS, revision_number=0)
self._delete('networks', self.net['id'])
res = ovn_rn_db.get_deleted_resources(self.ctx)
self.assertEqual(1, len(res))
self.assertEqual(self.net['id'], res[0].resource_uuid)
self.assertIsNone(res[0].standard_attr_id)
def _prepare_resources_for_ordering_test(self, delete=False):
subnet = self._make_subnet(self.fmt, {'network': self.net}, '10.0.0.1',
'10.0.0.0/24')['subnet']
self._set_net_external(self.net['id'])
info = {'network_id': self.net['id']}
router = self._make_router(self.fmt, None,
external_gateway_info=info)['router']
fip = self._make_floatingip(self.fmt, self.net['id'])['floatingip']
port = self._make_port(self.fmt, self.net['id'])['port']
sg = self._make_security_group(self.fmt, 'sg1', '')['security_group']
rule = self._build_security_group_rule(
sg['id'], 'ingress', n_const.PROTO_NUM_TCP)
sg_rule = self._make_security_group_rule(
self.fmt, rule)['security_group_rule']
self._create_initial_revision(router['id'], ovn_rn_db.TYPE_ROUTERS)
self._create_initial_revision(subnet['id'], ovn_rn_db.TYPE_SUBNETS)
self._create_initial_revision(fip['id'], ovn_rn_db.TYPE_FLOATINGIPS)
self._create_initial_revision(port['id'], ovn_rn_db.TYPE_PORTS)
self._create_initial_revision(port['id'], ovn_rn_db.TYPE_ROUTER_PORTS)
self._create_initial_revision(sg['id'], ovn_rn_db.TYPE_SECURITY_GROUPS)
self._create_initial_revision(sg_rule['id'],
ovn_rn_db.TYPE_SECURITY_GROUP_RULES)
self._create_initial_revision(self.net['id'], ovn_rn_db.TYPE_NETWORKS)
if delete:
self._delete('security-group-rules', sg_rule['id'])
self._delete('floatingips', fip['id'])
self._delete('ports', port['id'])
self._delete('security-groups', sg['id'])
self._delete('routers', router['id'])
self._delete('subnets', subnet['id'])
self._delete('networks', self.net['id'])
def test_get_inconsistent_resources_order(self):
self._prepare_resources_for_ordering_test()
res = ovn_rn_db.get_inconsistent_resources(self.ctx)
actual_order = tuple(r.resource_type for r in res)
self.assertEqual(ovn_rn_db._TYPES_PRIORITY_ORDER, actual_order)
def test_get_deleted_resources_order(self):
self._prepare_resources_for_ordering_test(delete=True)
res = ovn_rn_db.get_deleted_resources(self.ctx)
actual_order = tuple(r.resource_type for r in res)
self.assertEqual(tuple(reversed(ovn_rn_db._TYPES_PRIORITY_ORDER)),
actual_order)
|
mahak/neutron
|
neutron/tests/unit/db/test_ovn_revision_numbers_db.py
|
Python
|
apache-2.0
| 12,345 | 0 |
# -*- Mode: Python -*- vi:si:et:sw=4:sts=4:ts=4:syntax=python
import os
import shutil
from collections import defaultdict
from cerbero.build import recipe
from cerbero.build.source import SourceType
from cerbero.build.cookbook import CookBook
from cerbero.config import Platform
from cerbero.enums import License
from cerbero.utils import shell, to_unixpath
class GStreamer(recipe.Recipe):
licenses = [License.LGPLv2Plus]
version = '1.13.0.1'
commit = 'origin/master'
def list_gstreamer_1_0_plugins_by_category(config):
cookbook = CookBook(config)
plugins = defaultdict(list)
for r in ['gstreamer-1.0', 'gst-plugins-base-1.0', 'gst-plugins-good-1.0',
'gst-plugins-bad-1.0', 'gst-plugins-ugly-1.0',
'gst-libav-1.0', 'gst-editing-services-1.0', 'gst-rtsp-server-1.0']:
r = cookbook.get_recipe(r)
for attr_name in dir(r):
if attr_name.startswith('files_plugins_'):
cat_name = attr_name[len('files_plugins_'):]
plugins_list = getattr(r, attr_name)
elif attr_name.startswith('platform_files_plugins_'):
cat_name = attr_name[len('platform_files_plugins_'):]
plugins_dict = getattr(r, attr_name)
plugins_list = plugins_dict.get(config.target_platform, [])
else:
continue
for e in plugins_list:
if not e.startswith('lib/gstreamer-'):
continue
c = e.split('/')
if len(c) != 3:
continue
e = c[2]
# we only care about files with the replaceable %(mext)s extension
                if not e.endswith('%(mext)s'):
continue
if e.startswith('libgst'):
e = e[6:-8]
else:
e = e[3:-8]
plugins[cat_name].append(e)
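    # The result maps each category suffix of a files_plugins_* attribute to
    # the plugin names in it, e.g. (sketch): {'core': ['coreelements', ...]}.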
return plugins
|
flexVDI/cerbero
|
recipes/custom.py
|
Python
|
lgpl-2.1
| 2,085 | 0.003357 |
from __future__ import absolute_import
# System modules
import argparse
import sys
import os
import textwrap
# LLDB modules
from . import configuration
def create_parser():
parser = argparse.ArgumentParser(
description='description',
prefix_chars='+-',
add_help=False)
group = None
# Helper function for boolean options (group will point to the current
# group when executing X)
X = lambda optstr, helpstr, **kwargs: group.add_argument(
optstr, help=helpstr, action='store_true', **kwargs)
group = parser.add_argument_group('Help')
group.add_argument(
'-h',
'--help',
dest='h',
action='store_true',
help="Print this help message and exit. Add '-v' for more detailed help.")
# C and Python toolchain options
group = parser.add_argument_group('Toolchain options')
group.add_argument(
'-A',
'--arch',
metavar='arch',
dest='arch',
help=textwrap.dedent('''Specify the architecture(s) to test. This option can be specified more than once'''))
group.add_argument('-C', '--compiler', metavar='compiler', dest='compiler', help=textwrap.dedent(
'''Specify the compiler(s) used to build the inferior executables. The compiler path can be an executable basename or a full path to a compiler executable. This option can be specified multiple times.'''))
if sys.platform == 'darwin':
group.add_argument('--apple-sdk', metavar='apple_sdk', dest='apple_sdk', default="", help=textwrap.dedent(
'''Specify the name of the Apple SDK (macosx, macosx.internal, iphoneos, iphoneos.internal, or path to SDK) and use the appropriate tools from that SDK's toolchain.'''))
# FIXME? This won't work for different extra flags according to each arch.
group.add_argument(
'-E',
metavar='extra-flags',
help=textwrap.dedent('''Specify the extra flags to be passed to the toolchain when building the inferior programs to be debugged
suggestions: do not lump the "-A arch1 -A arch2" together such that the -E option applies to only one of the architectures'''))
group.add_argument('--dsymutil', metavar='dsymutil', dest='dsymutil', help=textwrap.dedent('Specify which dsymutil to use.'))
group.add_argument('--yaml2obj', metavar='yaml2obj', dest='yaml2obj', help=textwrap.dedent('Specify which yaml2obj binary to use.'))
group.add_argument('--filecheck', metavar='filecheck', dest='filecheck', help=textwrap.dedent('Specify which FileCheck binary to use.'))
# Test filtering options
group = parser.add_argument_group('Test filtering options')
group.add_argument(
'-f',
metavar='filterspec',
action='append',
help=('Specify a filter, which looks like "TestModule.TestClass.test_name". '+
'You may also use shortened filters, such as '+
'"TestModule.TestClass", "TestClass.test_name", or just "test_name".'))
group.add_argument(
'-p',
metavar='pattern',
help='Specify a regexp filename pattern for inclusion in the test suite')
group.add_argument('--excluded', metavar='exclusion-file', action='append', help=textwrap.dedent(
'''Specify a file for tests to exclude. File should contain lists of regular expressions for test files or methods,
with each list under a matching header (xfail files, xfail methods, skip files, skip methods)'''))
group.add_argument(
'-G',
'--category',
metavar='category',
action='append',
dest='categories_list',
help=textwrap.dedent('''Specify categories of test cases of interest. Can be specified more than once.'''))
group.add_argument(
'--skip-category',
metavar='category',
action='append',
dest='skip_categories',
help=textwrap.dedent('''Specify categories of test cases to skip. Takes precedence over -G. Can be specified more than once.'''))
group.add_argument(
'--xfail-category',
metavar='category',
action='append',
dest='xfail_categories',
help=textwrap.dedent('''Specify categories of test cases that are expected to fail. Can be specified more than once.'''))
# Configuration options
group = parser.add_argument_group('Configuration options')
group.add_argument(
'--framework',
metavar='framework-path',
help='The path to LLDB.framework')
group.add_argument(
'--executable',
metavar='executable-path',
help='The path to the lldb executable')
group.add_argument(
'--server',
metavar='server-path',
help='The path to the debug server executable to use')
group.add_argument(
'--out-of-tree-debugserver',
dest='out_of_tree_debugserver',
action='store_true',
help='A flag to indicate an out-of-tree debug server is being used')
group.add_argument(
'--dwarf-version',
metavar='dwarf_version',
dest='dwarf_version',
type=int,
help='Override the DWARF version.')
group.add_argument(
'--setting',
metavar='SETTING=VALUE',
dest='settings',
type=str,
nargs=1,
action='append',
help='Run "setting set SETTING VALUE" before executing any test.')
group.add_argument(
'-s',
metavar='name',
help='Specify the name of the dir created to store the session files of tests with errored or failed status. If not specified, the test driver uses the timestamp as the session dir name')
group.add_argument(
'-S',
'--session-file-format',
default=configuration.session_file_format,
metavar='format',
help='Specify session file name format. See configuration.py for a description.')
group.add_argument(
'-y',
type=int,
metavar='count',
help="Specify the iteration count used to collect our benchmarks. An example is the number of times to do 'thread step-over' to measure stepping speed.")
group.add_argument(
'-#',
type=int,
metavar='sharp',
dest='sharp',
help='Repeat the test suite for a specified number of times')
group.add_argument('--channel', metavar='channel', dest='channels', action='append', help=textwrap.dedent(
"Specify the log channels (and optional categories) e.g. 'lldb all' or 'gdb-remote packets' if no categories are specified, 'default' is used"))
group.add_argument(
'--log-success',
dest='log_success',
action='store_true',
help="Leave logs/traces even for successful test runs (useful for creating reference log files during debugging.)")
group.add_argument(
'--codesign-identity',
metavar='Codesigning identity',
default='lldb_codesign',
help='The codesigning identity to use')
group.add_argument(
'--build-dir',
dest='test_build_dir',
metavar='Test build directory',
default='lldb-test-build.noindex',
help='The root build directory for the tests. It will be removed before running.')
group.add_argument(
'--lldb-module-cache-dir',
dest='lldb_module_cache_dir',
metavar='The clang module cache directory used by LLDB',
help='The clang module cache directory used by LLDB. Defaults to <test build directory>/module-cache-lldb.')
group.add_argument(
'--clang-module-cache-dir',
dest='clang_module_cache_dir',
metavar='The clang module cache directory used by Clang',
help='The clang module cache directory used in the Make files by Clang while building tests. Defaults to <test build directory>/module-cache-clang.')
group.add_argument(
'--lldb-libs-dir',
dest='lldb_libs_dir',
metavar='path',
help='The path to LLDB library directory (containing liblldb)')
group.add_argument(
'--enable-plugin',
dest='enabled_plugins',
action='append',
type=str,
metavar='A plugin whose tests will be enabled',
help='A plugin whose tests will be enabled. The only currently supported plugin is intel-pt.')
# Remote platform options
group = parser.add_argument_group('Remote platform options')
group.add_argument(
'--platform-name',
dest='lldb_platform_name',
metavar='platform-name',
help='The name of a remote platform to use')
group.add_argument(
'--platform-url',
dest='lldb_platform_url',
metavar='platform-url',
help='A LLDB platform URL to use when connecting to a remote platform to run the test suite')
group.add_argument(
'--platform-working-dir',
dest='lldb_platform_working_dir',
metavar='platform-working-dir',
help='The directory to use on the remote platform.')
# Reproducer options
group = parser.add_argument_group('Reproducer options')
group.add_argument(
'--capture-path',
metavar='reproducer path',
help='The reproducer capture path')
group.add_argument(
'--replay-path',
metavar='reproducer path',
help='The reproducer replay path')
# Test-suite behaviour
group = parser.add_argument_group('Runtime behaviour options')
X('-d', 'Suspend the process after launch to wait indefinitely for a debugger to attach')
X('-t', 'Turn on tracing of lldb command and other detailed test executions')
group.add_argument(
'-u',
dest='unset_env_varnames',
metavar='variable',
action='append',
help='Specify an environment variable to unset before running the test cases. e.g., -u DYLD_INSERT_LIBRARIES -u MallocScribble')
group.add_argument(
'--env',
dest='set_env_vars',
metavar='variable',
action='append',
help='Specify an environment variable to set to the given value before running the test cases e.g.: --env CXXFLAGS=-O3 --env DYLD_INSERT_LIBRARIES')
group.add_argument(
'--inferior-env',
dest='set_inferior_env_vars',
metavar='variable',
action='append',
help='Specify an environment variable to set to the given value for the inferior.')
X('-v', 'Do verbose mode of unittest framework (print out each test case invocation)')
group.add_argument(
'--enable-crash-dialog',
dest='disable_crash_dialog',
action='store_false',
help='(Windows only) When LLDB crashes, display the Windows crash dialog.')
group.set_defaults(disable_crash_dialog=True)
# Remove the reference to our helper function
del X
group = parser.add_argument_group('Test directories')
group.add_argument(
'args',
metavar='test-dir',
nargs='*',
help='Specify a list of directory names to search for test modules named after Test*.py (test discovery). If empty, search from the current working directory instead.')
return parser
|
google/llvm-propeller
|
lldb/packages/Python/lldbsuite/test/dotest_args.py
|
Python
|
apache-2.0
| 11,181 | 0.003935 |
from collections import Counter
from imblearn.datasets import make_imbalance
from imblearn.metrics import classification_report_imbalanced
from imblearn.pipeline import make_pipeline
from imblearn.under_sampling import ClusterCentroids
from imblearn.under_sampling import NearMiss
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sns
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
def scatter_plot_2d(x_ls, y_ls):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y_ls))])
# plot class samples
for idx, c1 in enumerate(np.unique(y_ls)):
plt.scatter(x = x_ls[y_ls == c1, 0], y = x_ls[y_ls == c1, 1],
alpha = .8, c = cmap(idx),
marker = markers[idx], label = c1)
# plt.show()
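# Usage sketch (illustrative): given a 2-column feature matrix X2 and integer
# class labels y:
#   scatter_plot_2d(X2, y); plt.legend(loc='upper left'); plt.show()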
def deci_bdry_plot_2d(x_ls, y_ls, classifier, resolution = .02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y_ls))])
# plot the decision surface
x1_min, x1_max = x_ls[:, 0].min() - 1, x_ls[:, 0].max() + 1
x2_min, x2_max = x_ls[:, 1].min() - 1, x_ls[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha = .4, cmap = cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class samples
for idx, c1 in enumerate(np.unique(y_ls)):
plt.scatter(x = x_ls[y_ls == c1, 0], y = x_ls[y_ls == c1, 1],
alpha = .8, c = cmap(idx),
marker = markers[idx], label = c1)
# plt.show()
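# Usage sketch (illustrative): any fitted estimator exposing .predict works,
# e.g.
#   clf = LinearSVC(random_state = 42).fit(X2, y)
#   deci_bdry_plot_2d(X2, y, clf)
#   plt.show()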
def multi_class_under_sampling():
'''
EXAMPLE: Multiclass classification with under-sampling
'''
RANDOM_STATE = 42
iris = load_iris()
X, y = make_imbalance(iris.data, iris.target, ratio = {0:25, 1:50, 2:50}, random_state = 0)
# print (X[:, [1, 2]])
# print (type(y))
X_train, X_test, y_train, y_test = train_test_split(X[:, [1, 2]], y, random_state = RANDOM_STATE)
# print ('Training target statistics: {}'.format(Counter(y_train)))
# print ('Testing target statistics: {}'.format(Counter(y_test)))
nm = NearMiss(version = 1, random_state = RANDOM_STATE)
X_resample_nm, y_resample_nm = nm.fit_sample(X_train, y_train)
cc = ClusterCentroids(random_state = 0)
X_resample_cc, y_resample_cc = cc.fit_sample(X_train, y_train)
'''plot two in one frame'''
fig, (ax0, ax1) = plt.subplots(ncols = 2)
# plt.scatter draws on the current axes, so select each subplot explicitly;
# the second panel shows the ClusterCentroids result rather than NearMiss twice
plt.sca(ax0)
scatter_plot_2d(X_resample_nm, y_resample_nm)
plt.sca(ax1)
scatter_plot_2d(X_resample_cc, y_resample_cc)
# fig.tight_layout()
plt.show()
# pipeline_nm = make_pipeline(NearMiss(version = 1, random_state = RANDOM_STATE), LinearSVC(random_state = RANDOM_STATE))
# pipeline_nm.fit(X_train, y_train)
# pipeline_cc = make_pipeline(ClusterCentroids(random_state = 0), LinearSVC(random_state = RANDOM_STATE))
# pipeline_cc.fit(X_train, y_train)
# print (classification_report_imbalanced(y_test, pipeline_nm.predict(X_test)))
# deci_bdry_plot_2d(X[:, [1, 2]], y, pipeline_nm)
# 'pipeline' was undefined and matplotlib Axes have no scatter_plot method;
# build both pipelines explicitly and draw their decision boundaries with
# deci_bdry_plot_2d defined above
pipeline_cc = make_pipeline(ClusterCentroids(random_state = 0), LinearSVC(random_state = RANDOM_STATE))
pipeline_cc.fit(X_train, y_train)
pipeline_nm = make_pipeline(NearMiss(version = 1, random_state = RANDOM_STATE), LinearSVC(random_state = RANDOM_STATE))
pipeline_nm.fit(X_train, y_train)
fig = plt.figure()
plt.subplot(211)
deci_bdry_plot_2d(X[:, [1, 2]], y, pipeline_cc)
plt.subplot(212)
deci_bdry_plot_2d(X[:, [1, 2]], y, pipeline_nm)
plt.show()
def wendy_try_iris():
'''
EXAMPLE: Multiclass classification with under-sampling
'''
RANDOM_STATE = 42
iris = load_iris()
# X, y = make_imbalance(iris.data, iris.target, ratio = {0:25, 1:50, 2:50}, random_state = 0)
X = pd.DataFrame(iris.data, columns = ['Sepal_length', 'Sepal_width', 'Petal_length', 'Petal_width'])
y = pd.DataFrame(iris.target, columns = ['Species'])
df = X
df['Species'] = y
'''pair plot for the features'''
# sns.set(style='whitegrid', context='notebook')
# cols = ['Sepal_length', 'Sepal_width', 'Petal_length', 'Petal_width']
# sns.pairplot(df, vars = cols, size=2.5, hue = 'Species')
# plt.show()
'''dimension reduction'''
# print (classification_report_imbalanced(y_test, pipeline_cc.predict(X_test)))
# deci_bdry_plot_2d(X[:, [1, 2]], y, pipeline_cc)
if __name__ == '__main__':
wendy_try_iris()
|
shunw/pythonML_code
|
eg2_real_world_data.py
|
Python
|
mit
| 4,961 | 0.01794 |
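# Codeforces "Fox and Snake": prints an a-by-b grid in which every even row is
# solid '#' and every odd row holds a single '#' that alternates between the
# right and left edge, tracing a snake.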
a, b = map(int,raw_input().split())
i=0
while(i<a):
j=0
c=[]
if(i%2==0):
while(j<b):
c.append('#')
j=j+1
print (''.join(c))
else:
k = int(i/2)
if (k%2==0):
while(j<(b-1)):
c.append(".")
j=j+1
c.append("#")
print (''.join(c))
else:
c.append('#')
while(j<(b-1)):
c.append(".")
j=j+1
print (''.join(c))
i=i+1
|
Sarthak30/Codeforces
|
fox_and_snake.py
|
Python
|
gpl-2.0
| 361 | 0.113573 |
# table_plugin.py - sublime plugins for pretty print text table
# Copyright (C) 2012 Free Software Foundation, Inc.
# Author: Valery Kocubinsky
# Package: SublimeTableEditor
# Homepage: https://github.com/vkocubinsky/SublimeTableEditor
# This file is part of SublimeTableEditor.
# SublimeTableEditor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# SublimeTableEditor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SublimeTableEditor. If not, see <http://www.gnu.org/licenses/>.
import sublime
import sublime_plugin
import re
try:
from . import table_lib as tlib
from . import table_base as tbase
except ValueError:
import table_lib as tlib
import table_base as tbase
class TableContext:
def __init__(self, view, sel, syntax):
self.view = view
(sel_row, sel_col) = self.view.rowcol(sel.begin())
self.syntax = syntax
self.first_table_row = self._get_first_table_row(sel_row, sel_col)
self.last_table_row = self._get_last_table_row(sel_row, sel_col)
self.table_text = self._get_table_text(self.first_table_row, self.last_table_row)
self.visual_field_num = self._visual_field_num(sel_row, sel_col)
self.row_num = sel_row - self.first_table_row
self.table_pos = tbase.TablePos(self.row_num, self.visual_field_num)
self.table = self.syntax.table_parser.parse_text(self.table_text)
self.table_driver = self.syntax.table_driver
self.field_num = self.table_driver.visual_to_internal_index(self.table, self.table_pos).field_num
def _get_table_text(self, first_table_row, last_table_row):
begin_point = self.view.line(self.view.text_point(first_table_row, 0)
).begin()
end_point = self.view.line(self.view.text_point(last_table_row, 0)
).end()
return self.view.substr(sublime.Region(begin_point, end_point))
def _get_last_table_row(self, sel_row, sel_col):
row = sel_row
last_table_row = sel_row
last_line = self.view.rowcol(self.view.size())[0]
while (row <= last_line and self._is_table_row(row)):
last_table_row = row
row = row + 1
return last_table_row
def _get_first_table_row(self, sel_row, sel_col):
row = sel_row
first_table_row = sel_row
while (row >= 0 and self._is_table_row(row)):
first_table_row = row
row = row - 1
return first_table_row
def _is_table_row(self, row):
text = self._get_text(row)
return self.syntax.table_parser.is_table_row(text)
def _visual_field_num(self, sel_row, sel_col):
line_text = self._get_text(sel_row)
line = self.syntax.line_parser.parse(line_text)
return line.field_num(sel_col)
def _get_text(self, row):
point = self.view.text_point(row, 0)
region = self.view.line(point)
text = self.view.substr(region)
return text
class AbstractTableCommand(sublime_plugin.TextCommand):
def detect_syntax(self):
if self.view.settings().has("table_editor_syntax"):
syntax_name = self.view.settings().get("table_editor_syntax")
else:
syntax_name = self.auto_detect_syntax_name()
table_configuration = tbase.TableConfiguration()
border_style = (self.view.settings().get("table_editor_border_style", None)
or self.view.settings().get("table_editor_style", None))
if border_style == "emacs":
table_configuration.hline_out_border = '|'
table_configuration.hline_in_border = '+'
elif border_style == "grid":
table_configuration.hline_out_border = '+'
table_configuration.hline_in_border = '+'
elif border_style == "simple":
table_configuration.hline_out_border = '|'
table_configuration.hline_in_border = '|'
if self.view.settings().has("table_editor_custom_column_alignment"):
table_configuration.custom_column_alignment = self.view.settings().get("table_editor_custom_column_alignment")
if self.view.settings().has("table_editor_keep_space_left"):
table_configuration.keep_space_left = self.view.settings().get("table_editor_keep_space_left")
if self.view.settings().has("table_editor_align_number_right"):
table_configuration.align_number_right = self.view.settings().get("table_editor_align_number_right")
if self.view.settings().has("table_editor_detect_header"):
table_configuration.detect_header = self.view.settings().get("table_editor_detect_header")
if self.view.settings().has("table_editor_intelligent_formatting"):
table_configuration.intelligent_formatting = self.view.settings().get("table_editor_intelligent_formatting")
syntax = tlib.create_syntax(syntax_name, table_configuration)
return syntax
def auto_detect_syntax_name(self):
view_syntax = self.view.settings().get('syntax')
if (view_syntax == 'Packages/Markdown/MultiMarkdown.tmLanguage' or
view_syntax == 'Packages/Markdown/Markdown.tmLanguage'):
return "MultiMarkdown"
elif view_syntax == 'Packages/Textile/Textile.tmLanguage':
return "Textile"
elif (view_syntax == 'Packages/RestructuredText/reStructuredText.tmLanguage'):
return "reStructuredText"
else:
return "Simple"
def merge(self, edit, ctx):
table = ctx.table
new_lines = table.render_lines()
first_table_row = ctx.first_table_row
last_table_row = ctx.last_table_row
rows = range(first_table_row, last_table_row + 1)
for row, new_text in zip(rows, new_lines):
region = self.view.line(self.view.text_point(row, 0))
old_text = self.view.substr(region)
if old_text != new_text:
self.view.replace(edit, region, new_text)
#case 1: some lines inserted
if len(rows) < len(new_lines):
row = last_table_row
for new_text in new_lines[len(rows):]:
end_point = self.view.line(self.view.text_point(row, 0)).end()
self.view.insert(edit, end_point, "\n" + new_text)
row = row + 1
#case 2: some lines deleted
elif len(rows) > len(new_lines):
for row in rows[len(new_lines):]:
region = self.view.line(self.view.text_point(row, 0))
self.view.erase(edit, region)
def create_context(self, sel):
return TableContext(self.view, sel, self.detect_syntax())
def run(self, edit):
new_sels = []
for sel in self.view.sel():
new_sel = self.run_one_sel(edit, sel)
new_sels.append(new_sel)
self.view.sel().clear()
for sel in new_sels:
self.view.sel().add(sel)
self.view.show(sel, False)
def run_one_sel(self, edit, sel):
ctx = self.create_context(sel)
try:
msg, table_pos = self.run_operation(ctx)
self.merge(edit, ctx)
sublime.status_message("Table Editor: {0}".format(msg))
return self.table_pos_sel(ctx, table_pos)
except tbase.TableException as err:
sublime.status_message("Table Editor: {0}".format(err))
return self.table_pos_sel(ctx, ctx.table_pos)
def visual_field_sel(self, ctx, row_num, visual_field_num):
if ctx.table.empty():
pt = self.view.text_point(ctx.first_table_row, 0)
else:
pos = tbase.TablePos(row_num, visual_field_num)
col = ctx.table_driver.get_cursor(ctx.table, pos)
pt = self.view.text_point(ctx.first_table_row + row_num, col)
return sublime.Region(pt, pt)
def table_pos_sel(self, ctx, table_pos):
return self.visual_field_sel(ctx, table_pos.row_num,
table_pos.field_num)
def field_sel(self, ctx, row_num, field_num):
if ctx.table.empty():
visual_field_num = 0
else:
pos = tbase.TablePos(row_num, field_num)
visual_field_num = ctx.table_driver.internal_to_visual_index(ctx.table, pos).field_num
return self.visual_field_sel(ctx, row_num, visual_field_num)
class TableEditorAlignCommand(AbstractTableCommand):
"""
Key: ctrl+shift+a
Re-align the table without change the current table field.
Move cursor to begin of the current table field.
"""
def run_operation(self, ctx):
return ctx.table_driver.editor_align(ctx.table, ctx.table_pos)
class TableEditorNextField(AbstractTableCommand):
"""
Key: tab
Re-align the table, move to the next field.
Creates a new row if necessary.
"""
def run_operation(self, ctx):
return ctx.table_driver.editor_next_field(ctx.table, ctx.table_pos)
class TableEditorPreviousField(AbstractTableCommand):
"""
Key: shift+tab
Re-align, move to previous field.
"""
def run_operation(self, ctx):
return ctx.table_driver.editor_previous_field(ctx.table, ctx.table_pos)
class TableEditorNextRow(AbstractTableCommand):
"""
Key: enter
Re-align the table and move down to next row.
Creates a new row if necessary.
At the beginning or end of a line, enter still does new line.
"""
def run_operation(self, ctx):
return ctx.table_driver.editor_next_row(ctx.table, ctx.table_pos)
class TableEditorMoveColumnLeft(AbstractTableCommand):
"""
Key: alt+left
Move the current column left.
"""
def run_operation(self, ctx):
return ctx.table_driver.editor_move_column_left(ctx.table,
ctx.table_pos)
class TableEditorMoveColumnRight(AbstractTableCommand):
"""
Key: alt+right
Move the current column right.
"""
def run_operation(self, ctx):
return ctx.table_driver.editor_move_column_right(ctx.table,
ctx.table_pos)
class TableEditorDeleteColumn(AbstractTableCommand):
"""
Key: alt+shift+left
Kill the current column.
"""
def run_operation(self, ctx):
return ctx.table_driver.editor_delete_column(ctx.table,
ctx.table_pos)
class TableEditorInsertColumn(AbstractTableCommand):
"""
Keys: alt+shift+right
Insert a new column to the left of the cursor position.
"""
def run_operation(self, ctx):
return ctx.table_driver.editor_insert_column(ctx.table,
ctx.table_pos)
class TableEditorKillRow(AbstractTableCommand):
"""
Key : alt+shift+up
Kill the current row.
"""
def run_operation(self, ctx):
return ctx.table_driver.editor_kill_row(ctx.table, ctx.table_pos)
class TableEditorInsertRow(AbstractTableCommand):
"""
Key: alt+shift+down
Insert a new row above the current row.
"""
def run_operation(self, ctx):
return ctx.table_driver.editor_insert_row(ctx.table, ctx.table_pos)
class TableEditorMoveRowUp(AbstractTableCommand):
"""
Key: alt+up
Move the current row up.
"""
def run_operation(self, ctx):
return ctx.table_driver.editor_move_row_up(ctx.table, ctx.table_pos)
class TableEditorMoveRowDown(AbstractTableCommand):
"""
Key: alt+down
Move the current row down.
"""
def run_operation(self, ctx):
return ctx.table_driver.editor_move_row_down(ctx.table,
ctx.table_pos)
class TableEditorInsertSingleHline(AbstractTableCommand):
"""
Key: ctrl+k,-
Insert single horizontal line below current row.
"""
def run_operation(self, ctx):
return ctx.table_driver.editor_insert_single_hline(ctx.table,
ctx.table_pos)
class TableEditorInsertDoubleHline(AbstractTableCommand):
"""
Key: ctrl+k,=
Insert double horizontal line below current row.
"""
def run_operation(self, ctx):
return ctx.table_driver.editor_insert_double_hline(ctx.table,
ctx.table_pos)
class TableEditorHlineAndMove(AbstractTableCommand):
"""
Key: ctrl+k, enter
Insert a horizontal line below current row,
and move the cursor into the row below that line.
"""
def run_operation(self, ctx):
return ctx.table_driver.editor_insert_hline_and_move(ctx.table,
ctx.table_pos)
class TableEditorSplitColumnDown(AbstractTableCommand):
"""
Key: alt+enter
Split the rest of the cell down from the current cursor position;
insert a new line below if the current row is the last row in the table
or if the next line is an hline
"""
def remove_rest_line(self, edit, sel):
end_region = self.view.find("\|",
sel.begin())
rest_region = sublime.Region(sel.begin(), end_region.begin())
rest_data = self.view.substr(rest_region)
self.view.replace(edit, rest_region, "")
return rest_data.strip()
def run_one_sel(self, edit, sel):
ctx = self.create_context(sel)
field_num = ctx.field_num
row_num = ctx.row_num
if (ctx.table[row_num].is_separator() or
ctx.table[row_num].is_header_separator()):
sublime.status_message("Table Editor: Split column is not "
"permitted for separator or header "
"separator line")
return self.table_pos_sel(ctx, ctx.table_pos)
if row_num + 1 < len(ctx.table):
if len(ctx.table[row_num + 1]) - 1 < field_num:
sublime.status_message("Table Editor: Split column is not "
"permitted for short line")
return self.table_pos_sel(ctx, ctx.table_pos)
elif ctx.table[row_num + 1][field_num].pseudo():
sublime.status_message("Table Editor: Split column is not "
"permitted to colspan column")
return self.table_pos_sel(ctx, ctx.table_pos)
(sel_row, sel_col) = self.view.rowcol(sel.begin())
rest_data = self.remove_rest_line(edit, sel)
ctx = self.create_context(sel)
field_num = ctx.field_num
row_num = ctx.row_num
if row_num + 1 == len(ctx.table) or ctx.table[row_num + 1].is_separator():
ctx.table.insert_empty_row(row_num + 1)
row_num = row_num + 1
ctx.table[row_num][field_num].data = rest_data + " " + ctx.table[row_num][field_num].data.strip()
ctx.table.pack()
self.merge(edit, ctx)
sublime.status_message("Table Editor: Column splitted down")
return self.field_sel(ctx, row_num, field_num)
class TableEditorJoinLines(AbstractTableCommand):
"""
Key: ctrl+j
Join current row and next row into one if next row is not hline
"""
def run_operation(self, ctx):
return ctx.table_driver.editor_join_lines(ctx.table, ctx.table_pos)
class TableEditorCsvToTable(AbstractTableCommand):
"""
Command: table_csv_to_table
Key: ctrl+k, |
Convert selected CSV region into table
"""
def run_one_sel(self, edit, sel):
if sel.empty():
return sel
else:
syntax = self.detect_syntax()
text = self.view.substr(sel)
table = syntax.table_driver.parse_csv(text)
self.view.replace(edit, sel, table.render())
first_row = self.view.rowcol(sel.begin())[0]
pt = self.view.text_point(first_row, syntax.table_driver.get_cursor(table, tbase.TablePos(0, 0)))
sublime.status_message("Table Editor: Table created from CSV")
return sublime.Region(pt, pt)
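# Illustration: selecting the two CSV lines 'a,b' and '1,2' and running this
# command replaces the selection with the rendered table and leaves the
# cursor in the first cell.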
class TableEditorDisableForCurrentView(sublime_plugin.TextCommand):
def run(self, args, prop):
self.view.settings().set(prop, False)
class TableEditorEnableForCurrentView(sublime_plugin.TextCommand):
def run(self, args, prop):
self.view.settings().set(prop, True)
class TableEditorDisableForCurrentSyntax(sublime_plugin.TextCommand):
def run(self, edit):
syntax = self.view.settings().get('syntax')
if syntax is not None:
m = re.search("([^/]+)[.]tmLanguage$", syntax)
if m:
base_name = m.group(1) + ".sublime-settings"
settings = sublime.load_settings(base_name)
settings.erase("enable_table_editor")
sublime.save_settings(base_name)
class TableEditorEnableForCurrentSyntax(sublime_plugin.TextCommand):
def run(self, edit):
syntax = self.view.settings().get('syntax')
if syntax is not None:
m = re.search("([^/]+)[.]tmLanguage$", syntax)
if m:
base_name = m.group(1) + ".sublime-settings"
settings = sublime.load_settings(base_name)
settings.set("enable_table_editor", True)
sublime.save_settings(base_name)
class TableEditorSetSyntax(sublime_plugin.TextCommand):
def run(self, edit, syntax):
self.view.settings().set("enable_table_editor", True)
self.view.settings().set("table_editor_syntax", syntax)
sublime.status_message("Table Editor: set syntax to '{0}'"
.format(syntax))
|
SublimeText-Markdown/TableEditor
|
table_plugin.py
|
Python
|
gpl-3.0
| 18,180 | 0.000935 |
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
### #FIXME admin.autodiscover()
import views as htcondor_views
#from ..api.htcondorapi import views as htcondorapi_views
urlpatterns = patterns('',
### HTCondor Jobs
url(r'^/$', htcondor_views.list3HTCondorJobs, name='listHTCondorJobs'),
url(r'^/(?P<globaljobid>[-A-Za-z0-9_.#]+)/$', htcondor_views.htcondorJobDetails, name='HTCondorJobDetails'),
)
|
kiae-grid/panda-bigmon-core
|
core/htcondor/urls.py
|
Python
|
apache-2.0
| 526 | 0.013308 |
from __future__ import unicode_literals
from django.apps import AppConfig
class RegisterConfig(AppConfig):
name = 'register'
|
nanomolina/JP
|
src/odontology/register/apps.py
|
Python
|
apache-2.0
| 132 | 0 |
"""API Handler Class"""
# standard library
import gzip
import os
import shutil
from logging.handlers import RotatingFileHandler
from typing import Optional
class RotatingFileHandlerCustom(RotatingFileHandler):
"""Logger handler for ThreatConnect Exchange File logging."""
def __init__(
self,
filename: str,
mode: Optional[str] = 'a',
maxBytes: Optional[int] = 0,
backupCount: Optional[int] = 0,
encoding: Optional[str] = None,
delay: Optional[bool] = False,
):
"""Customize RotatingFileHandler to create full log path.
Args:
filename: The name of the logfile.
mode: The write mode for the file.
maxBytes: The max file size before rotating.
backupCount: The maximum # of backup files.
encoding: The log file encoding.
delay: If True, then file opening is deferred until the first call to emit().
"""
if encoding is None and os.getenv('LANG') is None:
encoding = 'UTF-8'
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename), exist_ok=True)
RotatingFileHandler.__init__(self, filename, mode, maxBytes, backupCount, encoding, delay)
# set namer
self.namer = self.custom_gzip_namer
self.rotator = self.custom_gzip_rotator
@staticmethod
def custom_gzip_namer(name):
"""Namer for rotating log handler with gz extension.
Args:
name: The current name of the logfile.
"""
return name + '.gz'
@staticmethod
def custom_gzip_rotator(source: str, dest: str) -> None:
"""Rotate and compress log file.
Args:
source: The source filename.
dest: The destination filename.
"""
with open(source, 'rb') as f_in:
with gzip.open(dest, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(source)
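# Usage sketch (hypothetical path and sizes): attach the handler so logs are
# rotated into gzip archives:
#   handler = RotatingFileHandlerCustom('log/app.log', maxBytes=10485760, backupCount=5)
#   logging.getLogger('app').addHandler(handler)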
# class RotatingFileHandlerFormatter(logging.Formatter):
# """Custom logging formatter that allows a different format depending on the logging level."""
#
# def __init__(self):
# """Initialize formatter parent."""
# super().__init__(fmt='%(levelno)d: %(msg)s', datefmt=None, style='%')
#
# def format(self, record):
# """Format file handle log event according to logging level.
#
# Args:
# record (obj): The record to be logged.
# """
# # Replace the original format with one customized by logging level
# self._style._fmt = self.standard_format
# if record.levelno < 10: # <= logging.DEBUG
# self._style._fmt = self.trace_format
#
# # Call the original formatter class to do the grunt work
# result = logging.Formatter.format(self, record)
#
# return result
#
# @property
# def standard_format(self):
# """Return the standard log format"""
# return (
# '%(asctime)s - %(name)s - %(levelname)s - %(message)s '
# '(%(filename)s:%(funcName)s:%(lineno)d:%(threadName)s)'
# )
#
# @property
# def trace_format(self):
# """Return the standard log format"""
# return (
# '%(asctime)s - %(name)s - %(levelname)s - [%(funcName)s:%(lineno)d] %(message)s '
# '(%(filename)s:%(threadName)s)'
# )
|
kstilwell/tcex
|
tcex/logger/rotating_file_handler_custom.py
|
Python
|
apache-2.0
| 3,432 | 0.001166 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Remove openerp.com bindings",
"version": "1.1",
"author": "Therp BV",
"complexity": "normal",
"description": """
This module deactivates all bindings to openerp.com that
come with the standard code:
* update notifier code is deactivated and the function is overwritten
* apps and updates menu items in settings are removed
* help and account menu items in user menu are removed
* prevent lookup of OPW for current database uuid and resulting"""
""" 'unsupported' warning
""",
"category": "",
"depends": [
'base',
'mail',
],
"data": [
'data/ir_ui_menu.xml',
'data/ir_cron.xml',
],
"js": [
'static/src/js/disable_openerp_online.js',
],
"css": [
],
"qweb": [
'static/src/xml/base.xml',
],
"auto_install": False,
"installable": True,
"external_dependencies": {
'python': [],
},
}
|
petrus-v/server-tools
|
disable_openerp_online/__openerp__.py
|
Python
|
agpl-3.0
| 1,917 | 0 |
# -*- coding: utf-8 -*-
# wasp_general/network/web.py
#
# Copyright (C) 2016 the wasp-general authors and contributors
# <see AUTHORS file>
#
# This file is part of wasp-general.
#
# Wasp-general is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Wasp-general is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with wasp-general. If not, see <http://www.gnu.org/licenses/>.
# noinspection PyUnresolvedReferences
from wasp_general.version import __author__, __version__, __credits__, __license__, __copyright__, __email__
# noinspection PyUnresolvedReferences
from wasp_general.version import __status__
import re
from wasp_general.verify import verify_type, verify_value
from wasp_general.network.web.proto import WWebSessionProto, WWebRequestProto
from wasp_general.network.web.headers import WHTTPHeaders
from wasp_general.network.web.re_statements import http_method_name, http_path, http_version
class WWebRequest(WWebRequestProto):
""" :class:`.WWebRequestProto` implementation. Class represent HTTP-request descriptor.
Call :meth:`.WWebRequest.ro` method to create unchangeable copy
"""
request_line_re = re.compile(
'^(' + http_method_name + ') +(' + http_path + ')( +HTTP/(' + http_version + '))?$'
)
"""
Check for HTTP request line. See RFC 2616, Section 5.1
"""
@verify_type(session=WWebSessionProto, method=str, path=str, headers=(WHTTPHeaders, None))
@verify_type(request_data=(bytes, None))
@verify_value(method=lambda x: len(x) > 0)
@verify_value(path=lambda x: len(x) > 0)
def __init__(self, session, method, path, headers=None, request_data=None):
"""
Create new request descriptor
:param session: request origin
:param method: called HTTP-method
:param path: called HTTP-path
:param headers: optional request headers (WHTTPHeaders)
:param request_data: optional request payload (bytes)
"""
WWebRequestProto.__init__(self)
self.__session = session
self.__method = method.upper()
self.__path = path
self.__headers = headers
self.__request_data = request_data
self.__ro_flag = False
def session(self):
""" Return origin session
:return: WWebSessionProto
"""
return self.__session
def method(self):
""" Return requested method
:return: str
"""
return self.__method
def path(self):
""" Return requested path
:return: str
"""
return self.__path
def headers(self):
""" Return request headers
:return: WHTTPHeaders
"""
return self.__headers
@verify_type(headers=WHTTPHeaders)
def set_headers(self, headers):
""" Set headers for request
:param headers: headers to set
:return: None
"""
if self.__ro_flag:
raise RuntimeError('Read-only object changing attempt')
self.__headers = headers
def request_data(self):
""" Return request data
:return: bytes
"""
return self.__request_data
@verify_type(request_data=bytes)
def set_request_data(self, request_data):
""" Set payload data for request
:param request_data: data to set
:return: None
"""
if self.__ro_flag:
raise RuntimeError('Read-only object changing attempt')
self.__request_data = request_data
@classmethod
@verify_type('paranoid', session=WWebSessionProto)
@verify_type(request_line=str)
def parse_request_line(cls, session, request_line):
""" Parse given request line like 'GET /foo' or 'POST /zzz HTTP/1.0'
:param session: origin session
:param request_line: line to parse
:return: WWebRequest
"""
r = cls.request_line_re.search(request_line)
if r is not None:
method, path, protocol_sentence, protocol_version = r.groups()
return WWebRequest(session, method, path)
raise ValueError('Invalid request line')
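# Illustration: 'GET /index.html HTTP/1.1' parses to method 'GET' and path
# '/index.html'; the ' HTTP/<version>' suffix is optional in the regex, so a
# bare 'GET /index.html' is accepted as well.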
@verify_type('paranoid', http_code=str)
def parse_headers(self, http_code):
""" Parse http-code (like 'Header-X: foo\r\nHeader-Y: bar\r\n') and retrieve (save) HTTP-headers
:param http_code: code to parse
:return: None
"""
if self.__ro_flag:
raise RuntimeError('Read-only object changing attempt')
self.__headers = WHTTPHeaders.import_headers(http_code)
def ro(self):
""" Create read-only copy
:return: WWebRequest
"""
request = WWebRequest(
self.session(), self.method(), self.path(),
headers=self.headers().ro(), request_data=self.request_data()
)
request.__ro_flag = True
return request
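# Typical flow (sketch): parse_request_line() builds a mutable WWebRequest,
# parse_headers() and set_request_data() fill in the rest, and ro() freezes
# the descriptor before it is handed to application code.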
|
a1ezzz/wasp-general
|
wasp_general/network/web/request.py
|
Python
|
lgpl-3.0
| 4,630 | 0.023542 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-11-12 18:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_auto_20161113_0317'),
]
operations = [
migrations.RenameField(
model_name='backuptarget',
old_name='path',
new_name='path_template',
),
migrations.AddField(
model_name='backuplog',
name='path',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
]
|
sparcs-kaist/heartbeat-server
|
apps/core/migrations/0006_auto_20161113_0341.py
|
Python
|
mit
| 645 | 0 |
# -*- coding: utf-8 -*-
# <markment - markdown-based documentation generator for python>
# Copyright (C) <2013> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from markment import Markment
from markment.engine import MarkmentRenderer
from lxml import html as lhtml
from .base import MARKDOWN
def test_prefix_link_when_needed():
"MarkmentRenderer#prefix_link_if_needed should prefix if link is relative"
renderer = MarkmentRenderer()
renderer.url_prefix = 'http://awesome.com'
result = renderer.prefix_link_if_needed('bar.png')
result.should.equal('http://awesome.com/bar.png')
def test_prefix_link_when_not_needed():
"MarkmentRenderer#prefix_link_if_needed should NOT prefix if link is absolute"
renderer = MarkmentRenderer()
renderer.url_prefix = 'http://awesome.com'
result = renderer.prefix_link_if_needed('http://ok.com/bar.png')
result.should.equal('')
def test_prefix_link_when_not_needed_provided():
"MarkmentRenderer#prefix_link_if_needed should NOT prefix if link is absolute"
renderer = MarkmentRenderer()
result = renderer.prefix_link_if_needed('bar.png')
result.should.equal('')
def test_anchors_in_1st_level_headers():
"Markment should put anchors in 1st level headers"
MD = MARKDOWN("""
# API Reference
some content
""")
mm = Markment(MD)
dom = lhtml.fromstring(mm.rendered)
headers = dom.cssselect("h1")
headers.should.have.length_of(1)
h1 = headers[0]
h1.attrib.should.have.key("name").being.equal("api-reference")
h1.attrib.should.have.key("id").being.equal("api-reference")
links = h1.getchildren()
links.should.have.length_of(1)
a = links[0]
a.text.should.equal("API Reference")
a.attrib.should.have.key("href").equal("#api-reference")
def test_anchors_in_2nd_level_headers():
"Markment should put anchors in 2nd level headers"
MD = MARKDOWN("""
# API Reference
## Rendering content
""")
mm = Markment(MD)
dom = lhtml.fromstring(mm.rendered)
headers = dom.cssselect("h2")
headers.should.have.length_of(1)
h2 = headers[0]
h2.attrib.should.have.key("name").being.equal("rendering-content")
h2.attrib.should.have.key("id").being.equal("rendering-content")
links = h2.getchildren()
links.should.have.length_of(1)
a = links[0]
a.text.should.equal("Rendering content")
a.attrib.should.have.key("href").equal("#rendering-content")
def test_code_block():
"Markment should render code blocks"
MD = MARKDOWN("""
# API Reference
This is good
```python
import os
os.system('ls /')
```
This is not good
```python
import os
os.system('sudo rm -rf /')
```
""")
mm = Markment(MD)
dom = lhtml.fromstring(mm.rendered)
code_blocks = dom.cssselect("div.highlight pre")
code_blocks.should.have.length_of(2)
code1, code2 = code_blocks
code1.attrib.should.have.key("name").equal("api-reference-example-1")
code2.attrib.should.have.key("name").equal("api-reference-example-2")
def test_code_block_guesses_lexer():
"Markment should render code blocks even without a language specified"
MD = MARKDOWN("""
# API Reference
This is good
```
import os
os.system('ls /')
```
This is not good
```python
import os
os.system('sudo rm -rf /')
```
""")
mm = Markment(MD)
dom = lhtml.fromstring(mm.rendered)
code_blocks = dom.cssselect("div.highlight pre")
code_blocks.should.have.length_of(2)
code1, code2 = code_blocks
code1.attrib.should.have.key("name").equal("api-reference-example-1")
code2.attrib.should.have.key("name").equal("api-reference-example-2")
def test_image_relative():
"Markment should render images with relative path"
MD = MARKDOWN("""
# Awesome project

""")
mm = Markment(MD, url_prefix='http://falcao.it')
dom = lhtml.fromstring(mm.rendered)
images = dom.cssselect("img")
images.should.have.length_of(1)
img = images[0]
img.attrib.should.have.key("src").equal("http://falcao.it/logo.png")
img.attrib.should.have.key("alt").equal("LOGO")
def test_image_relative_with_callback():
"Markment should render images with relative path"
MD = MARKDOWN("""
# Awesome project

[Documentation](docs.md)
""")
def process_url(path):
if path.lower().endswith("md"):
return "http://markdown.com/{0}".format(path)
else:
return "http://images.com/{0}".format(path)
mm = Markment(MD, url_prefix=process_url)
dom = lhtml.fromstring(mm.rendered)
images = dom.cssselect("img")
images.should.have.length_of(1)
img = images[0]
img.attrib.should.have.key("src").equal("http://images.com/logo.png")
img.attrib.should.have.key("alt").equal("LOGO")
links = dom.cssselect("a")
links.should.have.length_of(2)
a = links[-1]
a.attrib.should.have.key("href").equal("http://markdown.com/docs.md")
def test_image_absolute():
"Markment should render images with absolute path"
MD = MARKDOWN("""
# Awesome project

""")
mm = Markment(MD, url_prefix='http://falcao.it')
dom = lhtml.fromstring(mm.rendered)
images = dom.cssselect("img")
images.should.have.length_of(1)
img = images[0]
img.attrib.should.have.key("src").equal("http://octomarks.io/logo.png")
img.attrib.should.have.key("alt").equal("LOGO")
def test_link_relative():
"Markment should render links with relative path"
MD = MARKDOWN("""
[LOGO](file.md)
""")
mm = Markment(MD, url_prefix='http://falcao.it')
dom = lhtml.fromstring(mm.rendered)
links = dom.cssselect("a")
links.should.have.length_of(1)
a = links[0]
a.attrib.should.have.key("href").equal("http://falcao.it/file.md")
a.text.should.equal('LOGO')
def test_link_absolute():
"Markment should render links with absolute path"
MD = MARKDOWN("""
[LOGO](http://octomarks.io/file.md)
""")
mm = Markment(MD, url_prefix='http://falcao.it')
dom = lhtml.fromstring(mm.rendered)
links = dom.cssselect("a")
links.should.have.length_of(1)
a = links[0]
a.attrib.should.have.key("href").equal("http://octomarks.io/file.md")
a.text.should.equal('LOGO')
def test_markment_doesnt_fail_if_has_no_headers():
"Markment should find and index 3rd level headers"
MD = MARKDOWN("""
```python
poor code, doesn't have a title
```
""")
mm = Markment(MD)
mm.index().should.equal([])
def test_markment_header_accepts_unicode_characters():
"Markment supports unicode (at least in the headers :)"
MD = MARKDOWN('''
# Curdling
## Curdle /ˈkərdl/
''')
mm = Markment(MD)
dom = lhtml.fromstring(mm.rendered)
headers = dom.cssselect("h2 a")
headers.should_not.be.empty
h2 = headers[0]
h2.text.should.equal('Curdle /ˈkərdl/')
|
grahamc/markment
|
tests/unit/test_html_rendering.py
|
Python
|
mit
| 7,784 | 0.000257 |
# Copyright (C) 2013-2015 Computer Sciences Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import array
import math
import logging
import logging.handlers
from pyaccumulo import Accumulo, Mutation, Range
class EzRPStaticStore(object):
'''
Class to save and retrieve static content from Accumulo.
cf = "static" For all rows
cq = "hash" Stores the hash_value of Static File
cq = "nofchunks" Stores the number of Chunks needed to store Static File
cq = "chunk_000" .. "chunk_nnn" Stores the Chunks of Static File
'''
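# Illustrative row layout for key 'app/logo.png' (values are hypothetical):
#   cf='static', cq='hash'       -> 'd41d8cd98f00b204...'
#   cf='static', cq='nofchunks'  -> '3'
#   cf='static', cq='chunk_0000000000'..'chunk_0000000002' -> the raw chunks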
def __init__(self, host="localhost", port=42424, user='root', password='secret', chunk_size=int(5*1048576), logger=None):
self.__host = host
self.__port = port
self.__user = user
self.__password = password
self.__table = 'ezfrontend'
self.__cf = 'static'
self.__connection = None
if logger is not None:
self.__log = logger
else:
self.__log = logging.getLogger(self.__module__ + '.' + self.__class__.__name__)
self.__log.addHandler(logging.NullHandler())
self.__chunk_size = int(chunk_size)
self._connect(self.__host, self.__port, self.__user, self.__password)
def _connect(self, host, port, user, password):
try:
self.__connection = Accumulo(host, port, user, password)
self.__log.debug('Connected to StaticFile Store')
except Exception as e:
self.__log.exception('Error while connecting to StaticFile Store: %s' % str(e))
raise Exception('Error while connecting to StaticFile Store: %s' % str(e))
def _ensureTableExists(self):
'''
Make sure that the table exists before any other operation.
(Note: this does not reconnect; call reConnection() if the connection was reset.)
'''
if not self.__connection.table_exists(self.__table):
self.__log.info('table "{table}" does not exist in StaticFile Store. Creating the table'.format(table=self.__table))
self.__connection.create_table(self.__table)
if not self.__connection.table_exists(self.__table):
self.__log.error('Unable to ensure StaticFile Store table "{table}" exists'.format(table=self.__table))
raise Exception('StaticFile Store: Unable to ensure table "{table}" exists'.format(table=self.__table))
def _ensureNoDuplicates(self, usrFacingUrlPrefix):
'''
Ensure a single copy of file for a given usrFacingUrlPrefix
'''
if self._getHash(usrFacingUrlPrefix) is not None:
self.deleteFile(usrFacingUrlPrefix)
def _putNofChunks(self, usrFacingUrlPrefix, length):
'''
Put the number of chunks the static contents is stored
'''
chunks = int(math.ceil(length / float(self.__chunk_size)))
writer = self.__connection.create_batch_writer(self.__table)
m = Mutation(usrFacingUrlPrefix)
m.put(cf=self.__cf, cq="nofchunks", val=str(chunks))
writer.add_mutation(m)
writer.close()
def _getNofChunks(self, usrFacingUrlPrefix):
'''
Get the number of chunks the static contents is stored
'''
scan_range = Range(srow=usrFacingUrlPrefix, scf=self.__cf, scq="nofchunks",
erow=usrFacingUrlPrefix, ecf=self.__cf, ecq="nofchunks")
for entry in self.__connection.scan(self.__table, scanrange=scan_range):
return int(entry.val)
return 0
def _getChunks(self, data):
'''
Break the blob into chunks of at most self.__chunk_size bytes,
kept below maxFrameSize in the Accumulo proxy.properties.
'''
data_length = len(data)
# stop at data_length (not data_length + 1): otherwise inputs whose length
# is an exact multiple of the chunk size yield a trailing empty chunk that
# _getNofChunks() never accounts for and deleteFile() never removes
for i in range(0, data_length, self.__chunk_size):
yield data[i:i + self.__chunk_size]
def _putHash(self, usrFacingUrlPrefix, hash_str):
'''
Puts the Hash for usrFacingUrlPrefix
'''
writer = self.__connection.create_batch_writer(self.__table)
m = Mutation(usrFacingUrlPrefix)
m.put(cf=self.__cf, cq="hash", val=hash_str)
writer.add_mutation(m)
writer.close()
def _getHash(self, usrFacingUrlPrefix):
scan_range = Range(srow=usrFacingUrlPrefix, scf=self.__cf, scq="hash",
erow=usrFacingUrlPrefix, ecf=self.__cf, ecq="hash")
for entry in self.__connection.scan(self.__table, scanrange=scan_range):
return str(entry.val)
else:
return None
def reConnection(self):
self._connect(self.__host, self.__port, self.__user, self.__password)
def putFile(self, usrFacingUrlPrefix, hash_str, data):
self._ensureTableExists()
self._ensureNoDuplicates(usrFacingUrlPrefix)
self._putHash(usrFacingUrlPrefix, hash_str)
data_length = len(data)
self._putNofChunks(usrFacingUrlPrefix, data_length)
writer = self.__connection.create_batch_writer(self.__table)
for i, chunk in enumerate(self._getChunks(data)):
m = Mutation(usrFacingUrlPrefix)
m.put(cf=self.__cf, cq="chunk_{number:010d}".format(number=i), val=chunk)
writer.add_mutation(m)
self.__log.debug('added static file for "{url}" with hash "{hash}" of length "{length}"'.format(url=usrFacingUrlPrefix, hash=hash_str, length=data_length))
writer.close()
def getFile(self, usrFacingUrlPrefix):
'''
Assembles all the chunks for this row
'''
self._ensureTableExists()
data = array.array('c') # Create a byte array
chunks = self._getNofChunks(usrFacingUrlPrefix)
chunks_read = 0
for i in range(chunks):
cq = 'chunk_{number:010d}'.format(number=i)
for entry in self.__connection.scan(self.__table, None, cols=[[self.__cf, cq]]):
if entry.row == usrFacingUrlPrefix and entry.cq.startswith("chunk_"):
chunks_read += 1
data.extend(entry.val)
# This code gets following error while retrieving over 96MB. Data stops at first chunk_000
# # java.lang.OutOfMemoryError: Java heap space
# -XX:OnOutOfMemoryError="kill -9 %p"
# Executing /bin/sh -c "kill -9 32597"...
# [1]+ Exit 137 sudo -u accumulo /opt/accumulo/current/bin/accumulo proxy -p /opt/accumulo/current/conf/proxy.properties
# startChunk = "chunk_{number:010d}".format(number=0)
# endChunk = "chunk_{number:010d}".format(number=chunks)
# scan_range = Range(srow=usrFacingUrlPrefix, scf=self.__cf, scq=startChunk,
# erow=usrFacingUrlPrefix, ecf=self.__cf, ecq=endChunk)
# for entry in self.__connection.scan(self.__table, scanrange=scan_range):
# #self.__log.info("getFile: row = {0} cq= {1}".format(entry.row, entry.cq))
# if entry.cq.startswith("chunk_"):
# self.__log.info("getFile: row = {0} cq= {1}".format(entry.row, entry.cq))
# chunks_read += 1
# data.extend(entry.val)
self.__log.debug('retrieved static file for {url}'.format(url=usrFacingUrlPrefix))
if chunks_read != chunks:
self.__log.error("did not read all the chunks from StaticFile Store")
return data.tostring() if data.buffer_info()[1] > 0 else None
def deleteFile(self, usrFacingUrlPrefix):
self._ensureTableExists()
writer = self.__connection.create_batch_writer(self.__table)
chunks = self._getNofChunks(usrFacingUrlPrefix)
m = Mutation(usrFacingUrlPrefix)
m.put(cf=self.__cf, cq="hash", is_delete=True)
m.put(cf=self.__cf, cq="nofchunks", is_delete=True)
for i in range(chunks):
cq = 'chunk_{number:010d}'.format(number=i)
m.put(cf=self.__cf, cq=cq, is_delete=True)
writer.add_mutation(m)
self.__log.debug('removed static file for {url}'.format(url=usrFacingUrlPrefix))
writer.close()
def getAttributes(self):
'''
Yields the urlprefix and the hash of every entry in the table as a tuple,
or a single (None, None) tuple when the table is empty
'''
self._ensureTableExists()
found = False
for entry in self.__connection.scan(self.__table, None, cols=[[self.__cf, "hash"]]):
found = True
yield (entry.row, str(entry.val))
# the original for/else always yielded (None, None) after the loop finished,
# even when entries existed; only yield it when the table is empty
if not found:
yield (None, None)
|
b-long/ezbake-platform-services
|
efe/frontend_app/modules/ezRPStaticFileStore.py
|
Python
|
apache-2.0
| 9,011 | 0.003884 |
# __future__ imports must be the first statement in a module
from __future__ import print_function
import re
HEADER_TEMPLATE=r"""/*
Copyright 2016, Austen Satterlee
This file is part of VOSIMProject.
VOSIMProject is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
VOSIMProject is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with VOSIMProject. If not, see <http://www.gnu.org/licenses/>.
*/
{guard}
"""
IFNDEF_TEMPLATE = r"""
#ifndef __{guardname}__
#define __{guardname}__
#endif
"""
PRAGMA_ONCE_TEMPLATE = r"""
#pragma once
"""
SOURCE_TEMPLATE="#include \"{filename:}.h\""
def find_valid_directories(source_dir, include_dir):
sofar = []
def _walker(ret_list, dirname, fnames):
if (source_dir in fnames) and (include_dir in fnames):
ret_list.append(dirname)
# m_dirname = re.sub(r'\binclude\b|\bsrc\b', '', dirname)
# if m_dirname in sofar:
# ret_list.append(dirname)
# else:
# sofar.append(m_dirname)
valid_directories = []
os.path.walk(".", _walker, valid_directories)
return valid_directories
if __name__=="__main__":
import argparse as ap
import os,sys,datetime
parser = ap.ArgumentParser(formatter_class=ap.ArgumentDefaultsHelpFormatter)
parser.add_argument("-n", "--dry-run", action="store_true",
help="Don't perform any actions, just print would be done")
parser.add_argument("-f", "--force", action="store_true",
help="Overwrite existing files")
parser.add_argument("--guard",choices=["pragma","ifndef"], default="pragma",
help="Choose the type of header guard to use")
subparsers = parser.add_subparsers(title="commands")
parser_auto_add = subparsers.add_parser("auto-add",
help="Add source and include files to their respective directories. Automatically detect source and\
include directories given a base directory")
parser_auto_add.add_argument("directory", nargs='?', type=str, help="Directory that contains 'src' and 'include' dirs")
parser_auto_add.add_argument("filenames", nargs='*', type=str, help="Name of the new files to add (without extension)")
parser_auto_add.add_argument("--list", "-l", action="store_true", help="List valid directories and exit")
parser_auto_add.set_defaults(command="auto-add")
parser_add = subparsers.add_parser("add", help="Add source and include files to the specified directories")
parser_add.add_argument("source_dir", type=str)
parser_add.add_argument("include_dir", type=str)
parser_add.add_argument("filenames", nargs='+', type=str)
parser_add.set_defaults(command="add")
parsed = parser.parse_args()
if parsed.command=="auto-add":
if parsed.list:
print('\n'.join(find_valid_directories('src', 'include')))
sys.exit(1)
if not parsed.directory:
sys.stderr.write("ERROR: Please provide a directory\n")
sys.exit(1)
if not parsed.filenames:
sys.stderr.write("ERROR: Please provide at least one filename\n")
sys.exit(1)
directory = os.path.normpath(parsed.directory)
dir_contents = os.listdir(directory)
if ('src' not in dir_contents) or ('include' not in dir_contents):
raise RuntimeError("'src' and 'include' directories not found in '%s'" % directory)
include_dir = os.path.join(directory,"include")
src_dir = os.path.join(directory,"src")
if parsed.command=="add":
include_dir = os.path.normpath(parsed.include_dir)
src_dir = os.path.normpath(parsed.source_dir)
# Check that directories exist
if not os.path.exists(src_dir):
sys.stderr.write("ERROR: Source directory '%s' does not exist\n" % source_dir)
sys.exit(1)
if not os.path.exists(include_dir):
sys.stderr.write("ERROR: Include directory '%s' does not exist\n" % include_dir)
sys.exit(1)
for filename in parsed.filenames:
include_fname = os.path.join(include_dir, filename+".h")
src_fname = os.path.join(src_dir, filename+".cpp")
if not parsed.force and (os.path.exists(include_fname) or os.path.exists(src_fname)):
sys.stderr.write("ERROR: '%s' or '%s' already exists!\n" % (include_fname,src_fname))
sys.exit(1)
guard_str = PRAGMA_ONCE_TEMPLATE if parsed.guard=="pragma" else IFNDEF_TEMPLATE.format(guardname=filename.upper())
include_contents = HEADER_TEMPLATE.format(
filename=filename,
guard=guard_str,
date=datetime.date.today().strftime("%m/%Y")
)
src_contents = SOURCE_TEMPLATE.format(filename=filename)
if not parsed.dry_run:
with open(include_fname,"w") as fp:
fp.write(include_contents)
sys.stdout.write("Added header file to {}\n".format(include_fname))
if not parsed.dry_run:
with open(src_fname,"w") as fp:
fp.write(src_contents)
sys.stdout.write("Added source file to {}\n".format(src_fname))
sys.exit()
|
austensatterlee/VOSIMSynth
|
scripts/add_file.py
|
Python
|
gpl-3.0
| 5,486 | 0.006744 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
CERTitude: the seeker of IOC
Copyright (c) 2016 CERT-W
Contact: cert@wavestone.com
Contributors: @iansus, @nervous, @fschwebel
CERTitude is under licence GPL-2.0:
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
if __name__ == "__main__" and __package__ is None:
raise Exception('Error: launch this script from main.py, not directly')
# Imports
# Lots of them...
#
import atexit
import base64
import datetime
import json
import logging
import os
import ssl
import subprocess
import sys
try:
import win32event
import win32security
except:
pass
from flask import Flask, render_template, request, session, redirect, url_for, Response, abort, escape
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import netaddr
from config import LISTEN_ADDRESS, LISTEN_PORT, BOKEH_LISTEN_ADDRESS, BOKEH_LISTEN_PORT
from config import IOC_MODE, DEBUG, USE_SSL, SSL_KEY_FILE, SSL_CERT_FILE, CERTITUDE_DATABASE, MIN_SUBMIT_INTERVAL
from helpers.queue_models import Task
from helpers.results_models import Result, IOCDetection
from helpers.misc_models import User, ConfigurationProfile, WindowsCredential, XMLIOC, Batch, GlobalConfig
from helpers.yara_models import YaraRule
from helpers.helpers import hashPassword, checksum, verifyPassword
import helpers.crypto as crypto
import components.scanner.openioc.openiocparser as openiocparser
import helpers.iocscan_modules as ioc_modules
import helpers.hashscan_modules as hash_modules
import xml.etree.ElementTree as ET
from functools import wraps
# Bokeh future
# from bokeh.embed import autoload_server
# from bokeh.client import pull_session
from plyara.plyara import PlyaraParser
# Set up logger
loggingserver = logging.getLogger('api')
# Create database
engine = create_engine(CERTITUDE_DATABASE, echo=False)
dbsession = sessionmaker(bind=engine)()
def genCSRFToken():
return base64.b64encode(crypto.randomBytes(20)).replace('=', '').replace('+', '').replace('/', '')
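# up to 27 base64 characters with '=', '+' and '/' stripped, yielding a token
# that can be embedded in URLs and HTML forms without escaping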
CSRF_TOKEN_INDEX = '_csrft'
STATIC_ENDPOINT = 'static'
def getCSRFToken():
if not CSRF_TOKEN_INDEX in session:
session[CSRF_TOKEN_INDEX] = genCSRFToken()
return session[CSRF_TOKEN_INDEX]
''' APPLICATION CONFIGURATION '''
app = Flask(__name__, static_folder=STATIC_ENDPOINT)
app.secret_key = os.urandom(24)
app.jinja_env.globals['csrf_token'] = getCSRFToken
app.jinja_env.globals['csrf_token_name'] = CSRF_TOKEN_INDEX
app.config['UPLOAD_FOLDER'] = 'upload'
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 5 * 60
ALLOWED_EXTENSIONS = ['txt']
app.config['IOCS_FOLDER'] = os.path.join('components', 'iocscan', '.', 'ioc')
app.config['RESULT_FILE'] = os.path.join('components', 'interface', 'static', 'data', 'results.csv')
app.config['CERTITUDE_OUTPUT_FOLDER'] = 'results'
app.config['PROCESSED_FOLDER'] = 'processed'
RESULT_FILE_HEADER = 'Title:HostId,Title:Hostname,Lookup:Success,Lookup:IOCScanned,Lookup:HashScanned,Lookup:IP,Lookup:Subnet,Malware,Compromise'
IP_REGEX = '(([0-9]|[1-9][0-9]|1[0-9]{2}|2([0-4][0-9]|5[0-5]))\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2([0-4][0-9]|5[0-5]))'
# ''' Decorator for auth '''
def requires_auth(f):
"""
Wrapper to check on each page the user authentication
"""
@wraps(f)
def decorated(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
return redirect(app.jinja_env.globals['url_for']('login'))
return decorated
# ''' Bokeh configuration '''
bokeh_process = None
# Preventing Flask from running Bokeh twice
# source : https://stackoverflow.com/questions/9449101/how-to-stop-flask-from-initialising-twice-in-debug-mode
if not DEBUG or os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
bokeh_process = subprocess.Popen([
'bokeh',
'serve', 'crossbokeh.py',
'--address', BOKEH_LISTEN_ADDRESS,
'--port', str(BOKEH_LISTEN_PORT),
'--allow-websocket-origin', '%s:%d' % (BOKEH_LISTEN_ADDRESS, BOKEH_LISTEN_PORT),
], stdout=subprocess.PIPE)
@atexit.register
def kill_server():
if bokeh_process is not None:
bokeh_process.kill()
# ''' CSRF Protection '''
@app.before_request
def csrf_protect():
if request.method == 'POST':
token = None if CSRF_TOKEN_INDEX not in session else session[CSRF_TOKEN_INDEX]
arg = request.form.get(CSRF_TOKEN_INDEX)
if not token or token != arg:
print('Received %s, expected %s' % (arg, token))
abort(400)
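# Hedged template-side sketch (illustrative, not taken from the actual templates): every POST
# form rendered by this app is expected to embed the token exposed via the Jinja globals above:
#   <form method="POST">
#     <input type="hidden" name="{{ csrf_token_name }}" value="{{ csrf_token() }}">
#     ...
#   </form>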
# -############################-#
# Pages routing and controlers #
# -############################-#
# INDEX
@app.route('/')
@requires_auth
def index():
return redirect(app.jinja_env.globals['url_for']('scan'))
# SESSION MANAGEMENT
@app.route('/login', methods=['GET', 'POST'])
def login():
error = ''
if request.method == 'POST':
# Get user from username
userList = dbsession.query(User).filter_by(username=request.form['username']).limit(1)
matchingUser = userList.first()
# Check password
if (matchingUser is not None) and (matchingUser.password == hashPassword(request.form['password'])):
# Since there is an "active" status...
if matchingUser.active:
session['logged_in'] = True
session['user_id'] = matchingUser.id
return redirect(app.jinja_env.globals['url_for']('index'))
else:
return render_template('session-login.html', errors='User account is disabled')
error = 'User might not exist or password is incorrect'
return render_template('session-login.html', errors=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
return redirect(app.jinja_env.globals['url_for']('index'))
# USER MANAGEMENT
# Lists users
@app.route('/users')
@requires_auth
def users():
allUsers = dbsession.query(User).order_by(User.id.asc())
return render_template('user-list.html', users=allUsers)
# {En,Dis}ables an account
@app.route('/users/<int:userid>/switchactive')
@requires_auth
def userSwitchActive(userid):
u = dbsession.query(User).filter_by(id=userid).first()
if u is None:
return redirect(app.jinja_env.globals['url_for']('users'))
u.active = not u.active
dbsession.commit()
return redirect(app.jinja_env.globals['url_for']('users'))
# Add a new user
# MASTER_KEY is encrypted for the new user
# Clear text MASTER_KEY is retrieved using the current user's credentials
#
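# Sketch of the key-wrapping flow used below, assuming the helpers.crypto API seen in this file:
#   kek     = crypto.keyFromText(current_password, kdf_salt)    # KDF -> key-encryption key
#   mk      = crypto.decrypt(user.encrypted_master_key, kek)    # recover clear MASTER_KEY
#   new_emk = crypto.encrypt(mk, crypto.keyFromText(new_password, new_salt))
# so the clear-text MASTER_KEY only ever lives in memory, never in the database.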
@app.route('/user/add', methods=['GET', 'POST'])
@requires_auth
def userAdd():
if request.method == 'GET':
return render_template('user-add.html')
else:
success = True
errors = []
user_password = request.form['user_password']
user = dbsession.query(User).filter_by(id=session['user_id']).first()
# Checks current user password
if user is None or hashPassword(user_password) != user.password:
success = False
errors.append('Your password is incorrect')
# Someone has messed with the database
if success:
mk_cksum = dbsession.query(GlobalConfig).filter_by(key='master_key_checksum').first()
if not mk_cksum:
success = False
errors.append('Database is broken, please create a new one !')
if success:
keyFromPassword = crypto.keyFromText(user_password, base64.b64decode(user.b64_kdf_salt))
MASTER_KEY = crypto.decrypt(user.encrypted_master_key, keyFromPassword)
# Someone changed the master key...
if checksum(MASTER_KEY) != mk_cksum.value:
errors.append('MASTER_KEY may have been altered')
del MASTER_KEY
success = False
# Now check the new user password...
if success:
password1, password2 = request.form['password'], request.form['password2']
if password1 != password2:
success = False
errors.append('New user passwords do not match')
# ... including complexity
if success:
if not verifyPassword(password1):
success = False
errors.append(
'Password is not complex enough (l > 12 and at least three character classes between lowercase, uppercase, numeric and special char)')
# Encrypt the MASTER_KEY for the user
if success:
new_kdf_salt = crypto.randomBytes(crypto.SALT_LENGTH)
keyFromPassword = crypto.keyFromText(password1, new_kdf_salt)
emk = crypto.encrypt(MASTER_KEY, keyFromPassword)
del MASTER_KEY # safer ?
u = User(
username=request.form['username'],
password=hashPassword(password1),
email=request.form['email'],
active=True,
encrypted_master_key=emk,
b64_kdf_salt=base64.b64encode(new_kdf_salt))
if len(request.form['username']) <= 0 or len(request.form['email']) <= 0:
success = False
errors.append('No empty fields allowed.')
if success:
dbsession.add(u)
dbsession.commit()
return redirect(app.jinja_env.globals['url_for']('users'))
else:
return render_template('user-add.html', username=request.form['username'], email=request.form['email'],
errors='\n'.join(errors))
# Delete a user
@app.route('/user/<int:userid>/delete', methods=['POST'])
@requires_auth
def userDelete(userid):
u = dbsession.query(User).filter_by(id=userid).first()
if u is None:
return redirect(app.jinja_env.globals['url_for']('users'))
dbsession.delete(u)
dbsession.commit()
return redirect(app.jinja_env.globals['url_for']('users'))
# CONFIGURATION, IOCs & PROFILES MANAGEMENT
# Configuration homepage
@app.route('/config', )
@requires_auth
def config():
configuration_profiles = dbsession.query(ConfigurationProfile).order_by(ConfigurationProfile.name.asc())
windows_credentials = dbsession.query(WindowsCredential).order_by(WindowsCredential.domain.asc(),
WindowsCredential.login.asc())
xmliocs = dbsession.query(XMLIOC).order_by(XMLIOC.date_added.desc())
yararules = dbsession.query(YaraRule).order_by(YaraRule.date_added.desc())
iocref = {}
for xmlioc in xmliocs:
iocref[str(xmlioc.id)] = xmlioc.name + ' - ' + str(xmlioc.date_added)
yararef = {}
for yararule in yararules:
yararef[str(yararule.id)] = yararule.name + ' - ' + str(yararule.date_added)
iocdesclist = {}
for cp in configuration_profiles:
if len(cp.ioc_list) == 0:
iocdesclist[cp.id] = ''
continue
iocdesclist[cp.id] = '||'.join([iocref[str(id)] for id in cp.ioc_list.split(',')])
yaradesclist = {}
for cp in configuration_profiles:
if len(cp.yara_list) == 0:
yaradesclist[cp.id] = ''
continue
yaradesclist[cp.id] = '||'.join([yararef[str(id)] for id in cp.yara_list.split(',')])
return render_template('config-main.html',
yararules=yararules,
xmliocs=xmliocs,
windows_credentials=windows_credentials,
configuration_profiles=configuration_profiles,
iocdesclist=iocdesclist,
yaradesclist=yaradesclist)
# DELETIONS
@app.route('/config/wincredz/<int:wincredid>/delete', methods=['POST'])
@requires_auth
def wincredDelete(wincredid):
wc = dbsession.query(WindowsCredential).filter_by(id=wincredid).first()
if wc is None:
return redirect(app.jinja_env.globals['url_for']('config'))
dbsession.delete(wc)
dbsession.commit()
return redirect(app.jinja_env.globals['url_for']('config'))
@app.route('/config/xmlioc/<int:xmliocid>/delete', methods=['POST'])
@requires_auth
def xmliocDelete(xmliocid):
xi = dbsession.query(XMLIOC).filter_by(id=xmliocid).first()
if xi is None:
return redirect(app.jinja_env.globals['url_for']('config'))
cps = dbsession.query(ConfigurationProfile).all()
for cp in cps:
ioclist = map(int, cp.ioc_list.split(','))
if xi.id in ioclist:
ioclist.remove(xi.id)
cp.ioc_list = ','.join(map(str, ioclist))
dbsession.add(cp)
dbsession.delete(xi)
dbsession.commit()
return redirect(app.jinja_env.globals['url_for']('config'))
@app.route('/config/profile/<int:profileid>/delete', methods=['POST'])
@requires_auth
def profileDelete(profileid):
p = dbsession.query(ConfigurationProfile).filter_by(id=profileid).first()
if p is None:
return redirect(app.jinja_env.globals['url_for']('config'))
dbsession.delete(p)
dbsession.commit()
return redirect(app.jinja_env.globals['url_for']('config'))
# ADDITIONS
# Adds a new credential
# uses current user's password to decipher MASTER_KEY
#
@app.route('/config/wincredz/add', methods=['GET', 'POST'])
@requires_auth
def wincredAdd():
if request.method == 'GET':
return render_template('config-wincred-add.html')
else:
success = True
errors = []
user_password = request.form['user_password'].encode(sys.stdout.encoding)
user = dbsession.query(User).filter_by(id=session['user_id']).first()
# Password incorrect
if user is None or hashPassword(user_password) != user.password:
success = False
errors.append('Your password is incorrect')
# Database altered
if success:
mk_cksum = dbsession.query(GlobalConfig).filter_by(key='master_key_checksum').first()
if not mk_cksum:
success = False
errors.append('Database is broken, please create a new one !')
# MASTER_KEY altered
if success:
keyFromPassword = crypto.keyFromText(user_password, base64.b64decode(user.b64_kdf_salt))
MASTER_KEY = crypto.decrypt(user.encrypted_master_key, keyFromPassword)
if checksum(MASTER_KEY) != mk_cksum.value:
errors.append('MASTER_KEY may have been altered')
del MASTER_KEY
success = False
if success:
account_password = request.form['password'].encode(sys.stdout.encoding)
encrypted_account_password = crypto.encrypt(account_password, MASTER_KEY)
del MASTER_KEY
# Encrypt Windows Credential's password
wc = WindowsCredential(
domain=request.form['domain'].encode(sys.stdout.encoding),
login=request.form['login'].encode(sys.stdout.encoding),
encrypted_password=encrypted_account_password)
dbsession.add(wc)
dbsession.commit()
if success:
return redirect(app.jinja_env.globals['url_for']('config'))
else:
return render_template('config-wincred-add.html',
errors='\n'.join(errors),
domain=request.form['domain'],
login=request.form['login'],
password=request.form['password'])
@app.route('/config/xmlioc/add', methods=['GET', 'POST'])
@requires_auth
def xmliocAdd():
if request.method == 'GET':
return render_template('config-xmlioc-add.html')
else:
success = True
errors = []
xml_content = request.files['xml_content'].stream.read()
ioc_name = request.form['name']
xi = XMLIOC(
name=ioc_name,
xml_content=base64.b64encode(xml_content))
if len(ioc_name) <= 0:
success = False
errors.append("IOC name cannot be empty.")
else:
existing_ioc = dbsession.query(XMLIOC).filter_by(name=ioc_name).first()
if existing_ioc is not None:
success = False
errors.append("IOC name already exists.")
if len(xml_content) <= 0:
success = False
errors.append("You must specify a file.")
if success:
dbsession.add(xi)
dbsession.commit()
return redirect(app.jinja_env.globals['url_for']('config'))
else:
return render_template('config-xmlioc-add.html', errors='\n'.join(errors), name=ioc_name)
@app.route('/config/profile/add', methods=['GET', 'POST'])
@requires_auth
def profileAdd():
xi = dbsession.query(XMLIOC).order_by(XMLIOC.name.asc())
yr = dbsession.query(YaraRule).order_by(YaraRule.name.asc())
if request.method == 'GET':
return render_template('config-profile-add.html', xmliocs=xi, yararules=yr)
else:
success = True
errors = []
hc = True if 'host_confidential' in request.form else False
profile_name = request.form['name']
ioc_selected_list = ','.join(request.form.getlist('ioc_list'))
yara_selected_list = ','.join(request.form.getlist('yara_list'))
cp = ConfigurationProfile(
name=profile_name,
host_confidential=hc,
ioc_list=ioc_selected_list,
yara_list=yara_selected_list)
if len(profile_name) <= 0:
success = False
errors.append("Profile name cannot be empty.")
else:
existing_profile_name = dbsession.query(ConfigurationProfile).filter_by(name=profile_name).first()
if existing_profile_name is not None:
success = False
errors.append("Profile name already exists.")
if success:
dbsession.add(cp)
dbsession.commit()
return redirect(app.jinja_env.globals['url_for']('config'))
else:
return render_template('config-profile-add.html', errors='\n'.join(errors), host_confidential=hc,
name=request.form['name'], xmliocs=xi, yararules=yr)
# MISC. VIEWS
@app.route('/config/profile/<int:profileid>/popupview')
@requires_auth
def profilePopupView(profileid):
return render_template('config-profile-popupview.html')
# CAMPAIGN RESULTS
@app.route('/campaignvisualizations')
@requires_auth
def campaignvisualizations():
batches = dbsession.query(Batch).order_by(Batch.name.asc())
return render_template('campaign-visualizations.html', batches=batches)
@app.route('/campaignvisualizations/<int:batchid>')
@requires_auth
def campaignvisualizationbatch(batchid):
batch = dbsession.query(Batch).filter_by(id=batchid).first()
if batch is None:
return redirect(app.jinja_env.globals['url_for']('campaignvisualizations'))
else:
return render_template('campaign-visualizations-batch.html', batch=batch)
@app.route('/massvisualizations/<int:batchid>')
@requires_auth
def massvisualizationbatch(batchid):
batch = dbsession.query(Batch).filter_by(id=batchid).first()
if batch is None:
return redirect(app.jinja_env.globals['url_for']('campaignvisualizations'))
else:
csv_url = url_for('static', filename='data/results.csv/' + str(batch.id))
iframe = '<iframe src="http://%s:%d/crossbokeh?batchid=%s" style="width:900px; height: 1000px; margin: 0 auto; display:block;" frameborder="0" scrolling="no" marginheight="0" marginwidth="0"></iframe>' % (BOKEH_LISTEN_ADDRESS, BOKEH_LISTEN_PORT, batchid)
return render_template('mass-visualizations-batch.html', bokeh_script=iframe, batch=batch)
@app.route('/ioc/<int:iocid>')
@requires_auth
def iocvizu(iocid):
return render_template('ioc-vizualisation.html', iocid=iocid)
# IOC.json
# File describing an IOC for previsualization in config
#
@app.route('/static/data/ioc.json/<int:iocid>')
@requires_auth
def iocjson(iocid):
# if 'logged_in' in session:
response = ''
# get the IOC
ioc = dbsession.query(XMLIOC).filter_by(id=iocid).first()
if ioc is None:
return Response(status=404, response='This IOC does not exist', content_type='text/plain')
FLAT_MODE = (IOC_MODE == 'flat')
allowedElements = {}
IOCevaluatorList = ioc_modules.flatEvaluatorList if FLAT_MODE else ioc_modules.logicEvaluatorList
HASHevaluatorList = hash_modules.flatEvaluatorList if FLAT_MODE else hash_modules.logicEvaluatorList
evaluatorList = dict(IOCevaluatorList.items() + HASHevaluatorList.items())
for name, classname in evaluatorList.items():
allowedElements[name] = classname.evalList
content = base64.b64decode(ioc.xml_content)
# Parse it, filtering on allowed elements
oip = openiocparser.OpenIOCParser(content, allowedElements, FLAT_MODE, fromString=True)
oip.parse()
# Get the tree
tree = oip.getTree()
return Response(status=200, response=json.dumps(tree.json2(), indent=4), content_type='application/json')
# else:
# return redirect(app.jinja_env.globals['url_for']('login'))
@app.route('/host-result/<int:hostid>')
@requires_auth
def hostresult(hostid):
return render_template('host-result-vizualisation.html', hostid=hostid)
# HOST.json
# Result of the scan on a specific host
@app.route('/static/data/host.json/<int:hostid>')
@requires_auth
def hostjson(hostid):
# if 'logged_in' in session:
response = ''
# Get the result
task, result = dbsession.query(Task, Result).filter(Result.id == hostid).join(Result,
Task.id == Result.tache_id).first()
if task is None or result is None:
return Response(status=404, response='This host does not exist', content_type='text/plain')
# if not reachable, display error on the graph
if not result.smbreachable:
tab = {'name': task.ip, 'infected': True,
'children': [{'name': 'This host could not be joined', 'infected': True}]}
return Response(status=200, response=json.dumps(tab), content_type='application/json')
# Get batch
batch = dbsession.query(Batch).filter_by(id=task.batch_id).first()
# Then profile
cp = dbsession.query(ConfigurationProfile).filter_by(id=batch.configuration_profile_id).first()
# The IOC list
if cp.ioc_list == '':
ioc_list = []
else:
ioc_list = [int(e) for e in cp.ioc_list.split(',')]
# And IOC detections
ioc_detections = dbsession.query(IOCDetection).filter_by(result_id=result.id).all()
# list of GUID per IOC
guids = {i: {} for i in ioc_list}
for iocd in ioc_detections:
if iocd.indicator_data is None:
ioc_data = []
else:
jdata = json.loads(iocd.indicator_data)
if jdata is not None:
ioc_data = [str(escape(d)) for d in jdata]
else:
ioc_data = []
guids[iocd.xmlioc_id][iocd.indicator_id] = ioc_data
# guids[iocd.xmlioc_id][iocd.indicator_id] = str(escape(iocd.indicator_data))
tree = {'name': task.ip, 'children': [], 'infected': False}
for iocid in ioc_list:
ioc = dbsession.query(XMLIOC).filter_by(id=iocid).first()
FLAT_MODE = (IOC_MODE == 'flat')
allowedElements = {}
IOCevaluatorList = ioc_modules.flatEvaluatorList if FLAT_MODE else ioc_modules.logicEvaluatorList
HASHevaluatorList = hash_modules.flatEvaluatorList if FLAT_MODE else hash_modules.logicEvaluatorList
evaluatorList = dict(IOCevaluatorList.items() + HASHevaluatorList.items())
for name, classname in evaluatorList.items():
allowedElements[name] = classname.evalList
content = base64.b64decode(ioc.xml_content)
# Parse IOC
oip = openiocparser.OpenIOCParser(content, allowedElements, FLAT_MODE, fromString=True)
oip.parse()
# Build tree and infect it with the IOC detections
tmp = oip.getTree()
tmp.infect(guids[iocid])
tmp = tmp.json2()
tmptree = {'name': ioc.name, 'children': [tmp], 'infected': tmp['infected']}
tree['children'].append(tmptree)
# Get the infection up
tree['infected'] |= tmp['infected']
return Response(status=200, response=json.dumps(tree, indent=4), content_type='application/json')
def getInfosFromXML(content):
c = base64.b64decode(content)
r = {'guids': {}, 'totalguids': 0}
xml = ET.fromstring(c)
openiocparser.removeNS(xml)
for indic in xml.iter('IndicatorItem'):
guid = indic.attrib['id']
context = indic.findall('Context')[0]
search = context.attrib['search']
content = indic.findall('Content')[0]
value = content.text
r['guids'][guid] = {'search': search, 'value': value}
r['totalguids'] += 1
return r
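# Illustrative OpenIOC fragment this parser expects (structure inferred from the code above;
# the id, search term and value are made up for the example):
#   <IndicatorItem id="guid-1234">
#     <Context search="FileItem/Md5sum" />
#     <Content>d41d8cd98f00b204e9800998ecf8427e</Content>
#   </IndicatorItem>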
@app.route('/static/data/results.csv/<int:batchid>')
@requires_auth
def resultscsv(batchid):
response = 'Title:HostId,Title:Hostname-IP,Lookup:Success,Lookup:IOCScanned,Lookup:HashScanned,Lookup:Subnet,Malware,Compromise'
# Get Batch
batch = dbsession.query(Batch).filter_by(id=batchid).first()
if batch is None:
return Response(status=404)
# Get all IOCs
cp = dbsession.query(ConfigurationProfile).filter_by(id=batch.configuration_profile_id).first()
if cp.ioc_list == '':
ioc_list = []
else:
ioc_list = [int(e) for e in cp.ioc_list.split(',')]
iocs = dbsession.query(XMLIOC).filter(XMLIOC.id.in_(ioc_list)).all()
# Complete first line & assoc ioc.id => ioc
all_iocs = {}
for ioc in iocs:
all_iocs[ioc.id] = ioc
response += ',%s' % ioc.name
response += '\n'
all_tasks_results = dbsession.query(Task, Result).filter(Task.batch_id == batchid).join(Result,
Task.id == Result.tache_id).all()
# Get total indicator items / IOC
total_by_ioc = {}
for ioc in iocs:
infos = getInfosFromXML(ioc.xml_content)
total_by_ioc[ioc.id] = infos['totalguids']
for task, result in all_tasks_results:
ioc_detections = dbsession.query(IOCDetection).filter_by(result_id=result.id).all()
response += '%d,%s,%s,%s,%s,%s' % (
result.id, task.ip, result.smbreachable, task.iocscanned, task.hashscanned, task.commentaire)
result_for_host = {e: 0 for e in ioc_list}
# Sum IOC detections
for ioc_detection in ioc_detections:
result_for_host[ioc_detection.xmlioc_id] += 1
# Compute n in [0,1] = % of detection
result_for_host = {id: round(val * 100. / total_by_ioc[id]) / 100 for id, val in result_for_host.items()}
# Get max
mval, mid = 0, -1
for id, val in result_for_host.items():
if val > mval:
mval, mid = val, id
# Complete max compromise
mname = "None" if mid == -1 else all_iocs[mid].name
response += ',%s,%.2f' % (mname, mval)
# Complete detection / IOC
for id in all_iocs:
response += ',%.2f' % result_for_host[id]
response += '\n'
return Response(status=200, response=response, content_type='text/plain')
# CAMPAIGN PLANIFICATION
@app.route('/scan/', methods=['GET', ])
@requires_auth
def scan():
batches = dbsession.query(Batch).order_by(Batch.name.asc())
return render_template('scan-planification.html', batches=batches)
@app.route('/scan/batch/add', methods=['GET', 'POST'])
@requires_auth
def scanbatchAdd():
cp = dbsession.query(ConfigurationProfile).order_by(ConfigurationProfile.name.asc())
wc = dbsession.query(WindowsCredential).order_by(WindowsCredential.domain.asc(), WindowsCredential.login.asc())
if request.method == 'GET':
return render_template('scan-planification-batch-add.html', configuration_profiles=cp, windows_credentials=wc)
else:
success = True
errors = []
batch_name = request.form['name']
batch = Batch(
name=batch_name,
configuration_profile_id=request.form['profile'],
windows_credential_id=request.form['credential'])
if len(batch.name) <= 0:
success = False
errors.append("Batch name cannot be empty.")
else:
existing_batch = dbsession.query(Batch).filter_by(name=batch_name).first()
if existing_batch is not None:
success = False
errors.append("Batch name already exists.")
if success:
dbsession.add(batch)
dbsession.commit()
return redirect(app.jinja_env.globals['url_for']('scan'))
else:
return render_template('scan-planification-batch-add.html', errors='\n'.join(errors),
configuration_profiles=cp, windows_credentials=wc)
@app.route('/scan/task/<int:taskid>/delete')
@requires_auth
def scantaskDelete(taskid):
task = dbsession.query(Task).filter_by(id=taskid).first()
dbsession.delete(task)
dbsession.commit()
return redirect(url_for('progress'))
@app.route('/scan/batch/<int:batchid>/delete')
@requires_auth
def scanbatchDelete(batchid):
batch = dbsession.query(Batch).filter_by(id=batchid).first()
dbsession.delete(batch)
dbsession.commit()
return redirect(url_for('scan'))
@app.route('/scan/batch/<int:batchid>', methods=['GET', ])
@requires_auth
def scanbatch(batchid):
batch = dbsession.query(Batch).filter_by(id=batchid).first()
cp = dbsession.query(ConfigurationProfile).filter_by(id=batch.configuration_profile_id).first()
wc = dbsession.query(WindowsCredential).filter_by(id=batch.windows_credential_id).first()
return render_template('scan-planification-batch.html', batch=batch, configuration_profile=cp,
windows_credential=wc)
@app.route('/api/scan/', methods=['POST'])
@requires_auth
def api_json():
def getCible(param):
param_list = param.get('ip_list', None)
param_ip = param.get('ip', None)
param_hostname = param.get('hostname', None)
list = []
if param_list is not None and param_list != '':
liste = param_list.replace('\r\n', '\n')
ips = liste.split('\n')
list += [(e, 'ipl') for e in ips]
if param_ip is not None and param_ip != '':
list.append((param_ip, 'ipn'))
if param_hostname is not None and param_hostname != '':
list.append((param_hostname, 'host'))
return list
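# Hedged example of getCible's behaviour (values illustrative):
#   {'ip_list': '10.0.0.0/30\r\n10.0.0.8', 'hostname': 'srv01'}
#   -> [('10.0.0.0/30', 'ipl'), ('10.0.0.8', 'ipl'), ('srv01', 'host')]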
loggingserver.debug('Scan request incoming ')
param = request.form
# param = urlparse.parse_qs(args[1])
# Target IP(s)
ip_list = getCible(param)
if len(ip_list) > 0:
# Priority IOC
try:
priority_ioc = int(param.get('priority_ioc'))
except (TypeError, ValueError):
priority_ioc = 10
if not priority_ioc > 0:
priority_ioc = 10
# Priority HASH
try:
priority_hash = int(param.get('priority_hash'))
except (TypeError, ValueError):
priority_hash = 10
if not priority_hash > 0:
priority_hash = 10
# Priority Yara
try:
priority_yara = int(param.get('priority_yara'))
except (TypeError, ValueError):
priority_yara = 10
if not priority_yara > 0:
priority_yara = 10
# Retries count (IOC)
essais_ioc = param.get('retries_ioc')
if essais_ioc is not None:
try:
assert 0 < int(essais_ioc) <= 10
retries_left_ioc = int(essais_ioc)
except (AssertionError, TypeError, ValueError):
retries_left_ioc = 10
else:
retries_left_ioc = 10
# Retries count (hash)
essais_hash = param.get('retries_hash')
if essais_hash is not None:
try:
assert 0 < int(essais_hash) <= 10
retries_left_hash = int(essais_hash)
except (AssertionError, TypeError, ValueError):
retries_left_hash = 10
else:
retries_left_hash = 10
# Retries count (yara)
essais_yara = param.get('retries_yara')
if essais_yara is not None:
try:
assert 0 < int(essais_yara) <= 10
retries_left_yara = int(essais_yara)
except (AssertionError, TypeError, ValueError):
retries_left_yara = 10
else:
retries_left_yara = 10
subnet = param.get('subnet', 'n/a')
# Default to None so the queries below cannot hit a NameError when no batch is given
batch = param.get('batch', None)
reponse = {}
reponse['code'] = 200
reponse['ips'] = {}
# Add the targets to the queue...
nb_ip, nb_ip_ok = 0, 0
for ip, iptype in ip_list:
actualise = False
# try:
# ip_int = int(ip)
# except ValueError,e:
# try:
# ipn = IPNetwork(ip)
# ip_int = int(ipn[0])
# except Exception, e:
# if iptype=='ip':
# reponse['ips'][str(ip)] = 'invalid IP address'
# continue
# ip_int=0
try:
if iptype[:2] == 'ip':
ipn = netaddr.IPNetwork(ip)
else:
ipn = [ip]
ipSubnet = str(ip) if iptype == 'ipl' else subnet
for ipa in ipn:
nb_ip += 1
if param.get('force', None) is None:
limite_avant_nouvel_essai = datetime.datetime.now() - datetime.timedelta(0,
MIN_SUBMIT_INTERVAL)
if dbsession.query(Result).filter(Result.ip == str(ipa),
Result.finished >= limite_avant_nouvel_essai).count() > 0:
reponse['ips'][str(ipa)] = 'already scanned a few moments ago...'
continue
elif dbsession.query(Task).filter(Task.ip == str(ipa), Task.batch_id == batch,
Task.date_soumis >= limite_avant_nouvel_essai).count() > 0:
reponse['ips'][str(ipa)] = 'already requested a few moments ago'
continue
nb_ip_ok += 1
tache = Task(
ip=str(ipa),
priority_ioc=priority_ioc,
priority_hash=priority_hash,
priority_yara=priority_yara,
reserved_ioc=False,
reserved_hash=False,
reserved_yara=False,
iocscanned=False,
hashscanned=False,
yarascanned=False,
ip_demandeur=request.remote_addr,
retries_left_ioc=retries_left_ioc,
retries_left_hash=retries_left_hash,
retries_left_yara=retries_left_yara,
commentaire=ipSubnet,
batch_id=batch
)
dbsession.add(tache)
if batch and len(batch) > 0 and not actualise:
reponse['ips'][str(ip)] = 'added to batch ' + batch
elif batch and len(batch) > 0 and actualise:
reponse['ips'][str(ip)] = 'added to batch ' + batch + ' for retry'
else:
reponse['ips'][str(ip)] = 'added to queue'
reponse['ips'][str(ip)] += ' (%d tries for iocscan, %d tries for hashscan)' % (
retries_left_ioc, retries_left_hash)
except netaddr.core.AddrFormatError:
reponse['ips'][str(ip)] = 'not added to batch %s: bad formatting' % batch
reponse['message'] = 'Requested scan of %d IP addresses, %d were OK' % (nb_ip, nb_ip_ok)
dbsession.commit()
return Response(
status=200,
response=json.dumps(
reponse,
indent=4
),
content_type='application/json'
)
else:
return APIscan()
# YARA
@app.route('/config/yara/add', methods=['GET', 'POST'])
@requires_auth
def yaraAdd():
if request.method == 'GET':
return render_template('config-yara-add.html')
else:
success = True
errors = []
content = request.files['content'].stream.read()
yp = PlyaraParser()
rules = yp.parseString(content)
rules = [e['rule_name'] for e in rules]
name = request.form['name']
yr = YaraRule(
name=name,
content=base64.b64encode(content),
rules=','.join(rules))
if len(name) <= 0:
success = False
errors.append("Rule name cannot be empty.")
else:
existing_rule = dbsession.query(YaraRule).filter_by(name=name).first()
if existing_rule is not None:
success = False
errors.append("Rule name already exists.")
if len(content) <= 0:
success = False
errors.append("You must specify a file.")
if success:
dbsession.add(yr)
dbsession.commit()
return redirect(app.jinja_env.globals['url_for']('config'))
else:
return render_template('config-yara-add.html', errors='\n'.join(errors), name=name)
@app.route('/config/yara/<int:yaraid>/delete', methods=['POST'])
@requires_auth
def yaraDelete(yaraid):
yi = dbsession.query(YaraRule).filter_by(id=yaraid).first()
if yi is None:
return redirect(app.jinja_env.globals['url_for']('config'))
cps = dbsession.query(ConfigurationProfile).all()
for cp in cps:
yaralist = map(int, cp.yara_list.split(',')) if cp.yara_list else []
if yi.id in yaralist:
yaralist.remove(yi.id)
cp.yara_list = ','.join(map(str, yaralist))
dbsession.add(cp)
dbsession.delete(yi)
dbsession.commit()
return redirect(app.jinja_env.globals['url_for']('config'))
# SCAN PROGRESS
@app.route('/progress', methods=['GET', ])
@requires_auth
def progress():
headers = (
'id',
'ip',
'commentaire',
'batch_id',
'date_soumis',
'date_debut',
'iocscanned',
'hashscanned',
'yarascanned',
'reserved_ioc',
'reserved_hash',
'reserved_yara',
'retries_left_ioc',
'retries_left_hash',
'retries_left_yara',
)
tasks = dbsession.query(Task).order_by(Task.id.desc()).limit(50)
tasks_data = [[getattr(t, h) for h in headers] for t in tasks]
return render_template('scan-progress.html', headers=headers, tasks_data=tasks_data)
# SERVER LAUNCH
def run_server():
context = None
if USE_SSL and os.path.isfile(SSL_KEY_FILE) and os.path.isfile(SSL_CERT_FILE):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain(SSL_CERT_FILE, SSL_KEY_FILE)
loggingserver.info('Using SSL, open interface in HTTPS')
loggingserver.info('Web interface starting')
app.run(
host=LISTEN_ADDRESS,
port=LISTEN_PORT,
debug=DEBUG,
ssl_context=context
)
if __name__ == '__main__':
run_server()
|
CERT-W/certitude
|
components/interface/web.py
|
Python
|
gpl-2.0
| 40,631 | 0.003323 |
# -*- coding: utf-8 -*-
from django.core import urlresolvers
def custom_processor(request):
proc_data = dict()
resolver = urlresolvers.get_resolver(None)
patterns = sorted(
(key, val[0][0][0]) for key, val in resolver.reverse_dict.iteritems() if isinstance(key, basestring))
proc_data['pat'] = patterns
proc_data['app'] = 'Common app'
proc_data['ip_address'] = request.META['REMOTE_ADDR']
proc_data['user'] = request.user
return proc_data
|
codeboy/coddy-sitetools
|
sitetools/coddy_site/procesors.py
|
Python
|
bsd-3-clause
| 486 | 0.004115 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2016-12-06 09:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cms', '0016_auto_20160608_1535'),
('maps', '0002_auto_20160926_1157'),
]
operations = [
migrations.AddField(
model_name='googlemap',
name='cms_page',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to='cms.Page'),
),
]
|
rouxcode/django-cms-plugins
|
cmsplugins/maps/migrations/0003_googlemap_cms_page.py
|
Python
|
mit
| 601 | 0.001664 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import os
import json
from subprocess import call
import cairis.core.BorgFactory
from cairis.core.Borg import Borg
from cairis.core.ReferenceSynopsis import ReferenceSynopsis
from cairis.core.ReferenceContribution import ReferenceContribution
from cairis.core.ARM import DatabaseProxyException
from cairis.mio.ModelImport import importModelFile
__author__ = 'Shamal Faily'
class UseCaseContributionTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cairis.core.BorgFactory.initialise()
importModelFile(os.environ['CAIRIS_SRC'] + '/test/webinos.xml',1)
def setUp(self):
f = open(os.environ['CAIRIS_SRC'] + '/test/usecase_contributions.json')
d = json.load(f)
f.close()
self.csData = d['characteristic_synopses'][0]
self.rcData = d['usecase_contributions'][0]
def tearDown(self):
pass
def testAddContribution(self):
ics = ReferenceSynopsis(-1,self.csData['theReference'],self.csData['theSynopsis'],self.csData['theDimension'],self.csData['theActorType'],self.csData['theActor'])
b = Borg()
b.dbProxy.addCharacteristicSynopsis(ics)
irc = ReferenceContribution(self.rcData['theSource'],self.rcData['theDestination'],self.rcData['theMeansEnd'],self.rcData['theContribution'])
b.dbProxy.addUseCaseContribution(irc)
orcs = b.dbProxy.getUseCaseContributions(self.rcData['theSource'])
orc,rType = orcs[self.rcData['theDestination']]
self.assertEqual(orc.source(), irc.source())
self.assertEqual(orc.destination(), irc.destination())
self.assertEqual(orc.meansEnd(), irc.meansEnd())
self.assertEqual(orc.contribution(), irc.contribution())
def testUpdateContribution(self):
b = Borg()
orcs = b.dbProxy.getUseCaseContributions(self.rcData['theSource'])
orc,rType = orcs[self.rcData['theDestination']]
orc.theContribution = 'Break'
b.dbProxy.updateUseCaseContribution(orc)
urcs = b.dbProxy.getUseCaseContributions(self.rcData['theSource'])
urc,rType = urcs[self.rcData['theDestination']]
self.assertEqual(orc.source(), urc.source())
self.assertEqual(orc.destination(), urc.destination())
self.assertEqual(orc.meansEnd(), urc.meansEnd())
self.assertEqual(orc.contribution(), urc.contribution())
if __name__ == '__main__':
unittest.main()
|
nathanbjenx/cairis
|
cairis/test/test_UseCaseContribution.py
|
Python
|
apache-2.0
| 3,094 | 0.007434 |
# Copyright (C) 2008 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allow Google Apps domain administrators to audit user data.
AuditService: Set auditing."""
__author__ = 'jlee@pbu.edu'
from base64 import b64encode
import gdata.apps
import gdata.apps.service
import gdata.service
from gdata.apps.service import AppsForYourDomainException
class AuditService(gdata.apps.service.PropertyService):
"""Client for the Google Apps Audit service."""
def _serviceUrl(self, setting_id, domain=None, user=None):
if domain is None:
domain = self.domain
if user is None:
return '/a/feeds/compliance/audit/%s/%s' % (setting_id, domain)
else:
return '/a/feeds/compliance/audit/%s/%s/%s' % (setting_id, domain, user)
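# e.g. (domain illustrative):
#   _serviceUrl('publickey')                  -> '/a/feeds/compliance/audit/publickey/example.com'
#   _serviceUrl('mail/monitor', user='jdoe')  -> '/a/feeds/compliance/audit/mail/monitor/example.com/jdoe'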
def updatePGPKey(self, pgpkey):
"""Updates Public PGP Key Google uses to encrypt audit data
Args:
pgpkey: string, ASCII text of PGP Public Key to be used
Returns:
A dict containing the result of the POST operation."""
uri = self._serviceUrl('publickey')
b64pgpkey = b64encode(pgpkey)
properties = {}
properties['publicKey'] = b64pgpkey
return self._PostProperties(uri, properties)
def createEmailMonitor(self, source_user, destination_user, end_date,
begin_date=None, incoming_headers_only=False,
outgoing_headers_only=False, drafts=False,
drafts_headers_only=False, chats=False,
chats_headers_only=False):
"""Creates a email monitor, forwarding the source_users emails/chats
Args:
source_user: string, the user whose email will be audited
destination_user: string, the user to receive the audited email
end_date: string, the date the audit will end in
"yyyy-MM-dd HH:mm" format, required
begin_date: string, the date the audit will start in
"yyyy-MM-dd HH:mm" format, leave blank to use current time
incoming_headers_only: boolean, whether to audit only the headers of
mail delivered to source user
outgoing_headers_only: boolean, whether to audit only the headers of
mail sent from the source user
drafts: boolean, whether to audit draft messages of the source user
drafts_headers_only: boolean, whether to audit only the headers of
mail drafts saved by the user
chats: boolean, whether to audit archived chats of the source user
chats_headers_only: boolean, whether to audit only the headers of
archived chats of the source user
Returns:
A dict containing the result of the POST operation."""
uri = self._serviceUrl('mail/monitor', user=source_user)
properties = {}
properties['destUserName'] = destination_user
if begin_date is not None:
properties['beginDate'] = begin_date
properties['endDate'] = end_date
if incoming_headers_only:
properties['incomingEmailMonitorLevel'] = 'HEADER_ONLY'
else:
properties['incomingEmailMonitorLevel'] = 'FULL_MESSAGE'
if outgoing_headers_only:
properties['outgoingEmailMonitorLevel'] = 'HEADER_ONLY'
else:
properties['outgoingEmailMonitorLevel'] = 'FULL_MESSAGE'
if drafts:
if drafts_headers_only:
properties['draftMonitorLevel'] = 'HEADER_ONLY'
else:
properties['draftMonitorLevel'] = 'FULL_MESSAGE'
if chats:
if chats_headers_only:
properties['chatMonitorLevel'] = 'HEADER_ONLY'
else:
properties['chatMonitorLevel'] = 'FULL_MESSAGE'
return self._PostProperties(uri, properties)
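# Hedged usage sketch (credentials and domain are illustrative; authentication follows the
# usual gdata.service flow, which is assumed here rather than shown in this file):
#   service = AuditService(email='admin@example.com', domain='example.com', password='...')
#   service.ProgrammaticLogin()
#   service.createEmailMonitor('alice', 'auditor', end_date='2009-06-30 23:59',
#                              incoming_headers_only=True)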
def getEmailMonitors(self, user):
""""Gets the email monitors for the given user
Args:
user: string, the user to retrieve email monitors for
Returns:
list results of the POST operation
"""
uri = self._serviceUrl('mail/monitor', user=user)
return self._GetPropertiesList(uri)
def deleteEmailMonitor(self, source_user, destination_user):
"""Deletes the email monitor for the given user
Args:
source_user: string, the user who is being monitored
destination_user: string, the user who receives the monitored emails
Returns:
Nothing
"""
uri = self._serviceUrl('mail/monitor', user=source_user+'/'+destination_user)
try:
return self._DeleteProperties(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def createAccountInformationRequest(self, user):
"""Creates a request for account auditing details
Args:
user: string, the user to request account information for
Returns:
A dict containing the result of the post operation."""
uri = self._serviceUrl('account', user=user)
properties = {}
#XML Body is left empty
try:
return self._PostProperties(uri, properties)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def getAccountInformationRequestStatus(self, user, request_id):
"""Gets the status of an account auditing request
Args:
user: string, the user whose account auditing details were requested
request_id: string, the request_id
Returns:
A dict containing the result of the get operation."""
uri = self._serviceUrl('account', user=user+'/'+request_id)
try:
return self._GetProperties(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def getAllAccountInformationRequestsStatus(self):
"""Gets the status of all account auditing requests for the domain
Args:
None
Returns:
list results of the POST operation
"""
uri = self._serviceUrl('account')
return self._GetPropertiesList(uri)
def deleteAccountInformationRequest(self, user, request_id):
"""Deletes the request for account auditing information
Args:
user: string, the user whose account auditing details were requested
request_id: string, the request_id
Returns:
Nothing
"""
uri = self._serviceUrl('account', user=user+'/'+request_id)
try:
return self._DeleteProperties(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def createMailboxExportRequest(self, user, begin_date=None, end_date=None, include_deleted=False, search_query=None, headers_only=False):
"""Creates a mailbox export request
Args:
user: string, the user whose mailbox export is being requested
begin_date: string, date of earliest emails to export, optional, defaults to date of account creation
format is 'yyyy-MM-dd HH:mm'
end_date: string, date of latest emails to export, optional, defaults to current date
format is 'yyyy-MM-dd HH:mm'
include_deleted: boolean, whether to include deleted emails in export, mutually exclusive with search_query
search_query: string, gmail style search query, matched emails will be exported, mutually exclusive with include_deleted
Returns:
A dict containing the result of the post operation."""
uri = self._serviceUrl('mail/export', user=user)
properties = {}
if begin_date is not None:
properties['beginDate'] = begin_date
if end_date is not None:
properties['endDate'] = end_date
if include_deleted is not None:
properties['includeDeleted'] = gdata.apps.service._bool2str(include_deleted)
if search_query is not None:
properties['searchQuery'] = search_query
if headers_only is True:
properties['packageContent'] = 'HEADER_ONLY'
else:
properties['packageContent'] = 'FULL_MESSAGE'
return self._PostProperties(uri, properties)
def getMailboxExportRequestStatus(self, user, request_id):
"""Gets the status of an mailbox export request
Args:
user: string, the user whose mailbox were requested
request_id: string, the request_id
Returns:
A dict containing the result of the get operation."""
uri = self._serviceUrl('mail/export', user=user+'/'+request_id)
try:
return self._GetProperties(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
def getAllMailboxExportRequestsStatus(self):
"""Gets the status of all mailbox export requests for the domain
Args:
None
Returns:
list results of the POST operation
"""
uri = self._serviceUrl('mail/export')
return self._GetPropertiesList(uri)
def deleteMailboxExportRequest(self, user, request_id):
"""Deletes the request for mailbox export
Args:
user: string, the user whose mailbox were requested
request_id: string, the request_id
Returns:
Nothing
"""
uri = self._serviceUrl('mail/export', user=user+'/'+request_id)
try:
return self._DeleteProperties(uri)
except gdata.service.RequestError, e:
raise AppsForYourDomainException(e.args[0])
|
Eforcers/inbox-cleaner
|
src/lib/gdata/apps/audit/service.py
|
Python
|
mit
| 9,512 | 0.006308 |
# proxy module
from apptools.logger.util import *
|
enthought/etsproxy
|
enthought/logger/util.py
|
Python
|
bsd-3-clause
| 50 | 0 |
import sys
sys.path.insert(0, '..')
import game as game
import pytest
def trim_board(ascii_board):
return '\n'.join([i.strip() for i in ascii_board.splitlines()])
t = trim_board
def test_new_board():
assert game.Board(3,3).ascii() == t("""
...
...
...
""")
assert game.Board(4,3).ascii() == t("""
....
....
....
""")
assert game.Board(3,4).ascii() == t("""
...
...
...
...
""")
def test_game():
board = game.Board(3,3,win=3)
assert board.count_tokens == 0
assert board.game_status == 'active'
assert board.turn_color == None
# drop first token
token = board.drop('x',0)
assert board.game_status == 'active'
assert token.position == (0,0)
assert token.color == 'x'
assert board.ascii() == t("""
...
...
x..
""")
assert board.count_tokens == 1
assert board.turn_color == 'o'
# drop second token
token = board.drop('o',0)
assert board.game_status == 'active'
assert token.position == (0,1)
assert token.color == 'o'
assert board.ascii() == t("""
...
o..
x..
""")
assert board.count_tokens == 2
assert board.turn_color == 'x'
# dropping the wrong color should raise an error
with pytest.raises(Exception):
token = board.drop('o',1)
# drop third token
token = board.drop('x',1)
assert board.game_status == 'active'
assert token.position == (1,0)
assert token.color == 'x'
assert board.ascii() == t("""
...
o..
xx.
""")
assert board.count_tokens == 3
assert board.turn_color == 'o'
# drop fourth token
token = board.drop('o',0)
assert board.game_status == 'active'
assert token.position == (0,2)
assert token.color == 'o'
assert board.ascii() == t("""
o..
o..
xx.
""")
assert board.count_tokens == 4
# drop fifth token
token = board.drop('x',2)
assert board.game_status == 'over'
assert board.won_by == 'x'
assert token.position == (2,0)
assert token.color == 'x'
assert board.ascii() == t("""
o..
o..
xxx
""")
assert board.count_tokens == 5
def test_load_board():
"""
The Board class should provide a load method to load a predefined board.
The load method should be implemented as a static method like this:
>>> class Test:
...     @staticmethod
...     def a_static_factory():
...         t = Test()
...         # do something with t and return it
...         return t
The load function accepts a board layout. It retrieves the dimensions of the board
and loads the provided data into the board.
"""
board = game.Board.load(t("""
o..
o..
xxx
"""))
def test_axis_strings():
board = game.Board.load(t("""
o..
o..
xxx
"""))
# get the axis strings in this order: | \ / -
axis_strings = board.axis_strings(0,0)
assert axis_strings[0] == 'xoo'
assert axis_strings[1] == 'x'
assert axis_strings[2] == 'x..'
assert axis_strings[3] == 'xxx' # the winner :-)
assert board.won_by == 'x'
|
fweidemann14/x-gewinnt
|
game/test_game.py
|
Python
|
gpl-3.0
| 3,095 | 0.008401 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-11 11:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('equinox_api', '0004_operation_description'),
]
operations = [
migrations.AddField(
model_name='application',
name='new_att',
field=models.BooleanField(default=True),
),
]
|
ivanprjcts/equinox-spring16-API
|
equinox_spring16_api/equinox_api/migrations/0005_application_new_att.py
|
Python
|
lgpl-3.0
| 463 | 0 |
#!/usr/bin/env python
"""Mixin tests for storing Foreman rules in the relational db."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_core.lib import rdfvalue
from grr_response_server import foreman_rules
from grr.test_lib import test_lib
class DatabaseTestForemanRulesMixin(object):
"""An abstract class for testing db.Database implementations.
This mixin adds methods to test the handling of foreman rules.
"""
def _GetTestRule(self, hunt_id="H:123456", expires=None):
now = rdfvalue.RDFDatetime.Now()
expiration_time = expires or now + rdfvalue.Duration.From(2, rdfvalue.WEEKS)
rule = foreman_rules.ForemanCondition(
creation_time=now,
expiration_time=expiration_time,
description="Test rule",
hunt_id=hunt_id)
rule.client_rule_set = foreman_rules.ForemanClientRuleSet(rules=[
foreman_rules.ForemanClientRule(
rule_type=foreman_rules.ForemanClientRule.Type.INTEGER,
integer=foreman_rules.ForemanIntegerClientRule(
field="INSTALL_TIME",
operator=foreman_rules.ForemanIntegerClientRule.Operator
.LESS_THAN,
value=now))
])
return rule
def testForemanRuleWrite(self):
rule = self._GetTestRule()
self.db.WriteForemanRule(rule)
read = self.db.ReadAllForemanRules()
self.assertLen(read, 1)
self.assertEqual(read[0], rule)
def testForemanRuleRemove(self):
rule1 = self._GetTestRule("H:123456")
self.db.WriteForemanRule(rule1)
rule2 = self._GetTestRule("H:654321")
self.db.WriteForemanRule(rule2)
rule3 = self._GetTestRule("H:ABCDEF")
self.db.WriteForemanRule(rule3)
read = self.db.ReadAllForemanRules()
self.assertLen(read, 3)
self.db.RemoveForemanRule("H:654321")
read = self.db.ReadAllForemanRules()
self.assertLen(read, 2)
self.assertEqual(
sorted(read, key=lambda rule: rule.hunt_id), [rule1, rule3])
self.db.RemoveForemanRule("H:123456")
read = self.db.ReadAllForemanRules()
self.assertLen(read, 1)
self.assertEqual(read[0], rule3)
def testForemanRuleExpire(self):
for i, ex in enumerate([100, 200, 300, 400, 500, 600]):
expires = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(ex)
rule = self._GetTestRule("H:00000%d" % i, expires=expires)
self.db.WriteForemanRule(rule)
self.assertLen(self.db.ReadAllForemanRules(), 6)
with test_lib.FakeTime(110):
self.db.RemoveExpiredForemanRules()
self.assertLen(self.db.ReadAllForemanRules(), 5)
with test_lib.FakeTime(350):
self.db.RemoveExpiredForemanRules()
self.assertLen(self.db.ReadAllForemanRules(), 3)
with test_lib.FakeTime(590):
self.db.RemoveExpiredForemanRules()
self.assertLen(self.db.ReadAllForemanRules(), 1)
# This file is a test library and thus does not require a __main__ block.
|
dunkhong/grr
|
grr/server/grr_response_server/databases/db_foreman_rules_test.py
|
Python
|
apache-2.0
| 2,967 | 0.005056 |
#0y0r1+syp0ry1+0267*y0r-7i+Psr+1ge
|
fixbugs/py-fixbugs-tools
|
test/75f6.py
|
Python
|
gpl-3.0
| 37 | 0.027027 |
import json
import pprint
from a2qt import QtWidgets
from a2widget.key_value_table import KeyValueTable
from a2widget.a2text_field import A2CodeField
_DEMO_DATA = {
'Name': 'Some Body',
'Surname': 'Body',
'Street. Nr': 'Thingstreet 8',
'Street': 'Thingstreet',
'Nr': '8',
'PLZ': '12354',
'City': 'Frankfurt am Main',
'Phone+': '+1232222222',
'Phone': '2222222',
'Country': 'Germany',
}
class Demo(QtWidgets.QMainWindow):
def __init__(self):
super(Demo, self).__init__()
w = QtWidgets.QWidget(self)
self.setCentralWidget(w)
lyt = QtWidgets.QVBoxLayout(w)
self.key_value_table = KeyValueTable(self)
self.key_value_table.changed.connect(self.table_to_code)
lyt.addWidget(self.key_value_table)
btn = QtWidgets.QPushButton('GET DATA')
btn.clicked.connect(self.get_data)
lyt.addWidget(btn)
self.text_field = A2CodeField(self)
self.text_field.text_changed.connect(self.code_to_table)
lyt.addWidget(self.text_field)
btn = QtWidgets.QPushButton('SET DATA')
btn.clicked.connect(self.set_data)
lyt.addWidget(btn)
self.text_field.setText(json.dumps(_DEMO_DATA, indent=2))
self.set_data()
def table_to_code(self):
data = self.key_value_table.get_data()
self.text_field.setText(json.dumps(data, indent=2))
def code_to_table(self):
data = json.loads(self.text_field.text())
self.key_value_table.set_silent(data)
def get_data(self):
data = self.key_value_table.get_data()
print(data)
pprint.pprint(data, sort_dicts=False)
def set_data(self):
data = json.loads(self.text_field.text())
self.key_value_table.set_data(data)
def show():
app = QtWidgets.QApplication([])
win = Demo()
win.show()
app.exec()
if __name__ == '__main__':
show()
|
ewerybody/a2
|
ui/a2widget/demo/key_value_table_demo.py
|
Python
|
gpl-3.0
| 1,931 | 0 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^', include('authenticate.urls')),
(r'^', include('analytics.urls')),
(r'^admin/', include(admin.site.urls)),
(r'^api/', include('rest_framework.urls', namespace='rest_framework')),
)
|
whitews/BAMA_Analytics
|
BAMA_Analytics/urls.py
|
Python
|
bsd-2-clause
| 338 | 0.002959 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ignorance documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import ignorance
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Ignorance'
copyright = u'2015, Steve Cook'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = ignorance.__version__
# The full version, including alpha/beta/rc tags.
release = ignorance.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ignorancedoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'ignorance.tex',
u'Ignorance Documentation',
u'Steve Cook', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ignorance',
u'Ignorance Documentation',
[u'Steve Cook'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ignorance',
u'Ignorance Documentation',
u'Steve Cook',
'ignorance',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
snark/ignorance
|
docs/conf.py
|
Python
|
isc
| 8,405 | 0.005354 |
import csv
import json
import numpy as np
import pandas as pd
def read_delim(filepath):
"""
Reads delimited file (auto-detects delimiter + header). Returns list.
:param filepath: (str) location of delimited file
:return: (list) list of records w/o header
"""
    with open(filepath, 'r') as f:
        dialect = csv.Sniffer().sniff(f.read(1024))
        f.seek(0)
        has_header = csv.Sniffer().has_header(f.read(1024))
        f.seek(0)
        reader = csv.reader(f, dialect)
        if has_header:
            next(reader)  # skip the header row; csv readers have no .next() in Python 3
        ret = [line for line in reader]
    return ret
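# A minimal usage sketch (the file name is hypothetical, not part of this module):
#   rows = read_delim('plate_map.csv')  # -> e.g. [['A01', '1.2'], ['A02', '0.9']]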
def read_delim_pd(filepath):
"""
Reads delimited file (auto-detects delimiter + header). Returns pandas DataFrame.
:param filepath: (str) location of delimited file
:return: (DataFrame)
"""
    with open(filepath) as f:
        has_header = None
        if csv.Sniffer().has_header(f.read(1024)):
            has_header = 0
        f.seek(0)
        return pd.read_csv(f, header=has_header, sep=None, engine='python')
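# A minimal usage sketch (hypothetical file; delimiter and header are auto-detected):
#   df = read_delim_pd('plate_map.csv')  # -> DataFrame; header row used if detected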
def lookup(table, lookup_cols, lookup_vals, output_cols=None, output_recs=None):
"""
Looks up records where lookup_cols == lookup_vals.
Optionally returns only specified output_cols and/or specified output_recs.
:param table: (DataFrame) the pandas DataFrame to use as a lookup table
    :param lookup_cols: (str | list) column name(s) to match against
    :param lookup_vals: (val | list) value(s) the lookup column(s) must equal
    :param output_cols: (str | list) column(s) to keep in the output; defaults to all
    :param output_recs: (int | list) positional indices of records to return; defaults to all
    :return: (DataFrame) the matching records
"""
if type(lookup_cols) == str:
lookup_cols = [lookup_cols]
lookup_vals = [lookup_vals]
temp_df = pd.DataFrame(data=lookup_vals, columns=lookup_cols, copy=False)
output = table.merge(temp_df, copy=False)
if output_cols is not None:
if type(output_cols) == str:
output_cols = [output_cols]
output = output[output_cols]
if output_recs is not None:
output = output.iloc[output_recs]
return output
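# A minimal usage sketch, assuming a position table like the one produced by
# generate_position_table() below:
#   pos = generate_position_table((8, 12), (9.0, 9.0))
#   lookup(pos, 'name', 'A01', output_cols=['x', 'y'])  # -> x/y of well A01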
def generate_position_table(num_rc, space_rc, offset=(0.0,0.0,0.0), to_clipboard=False):
"""
Generates a position table for a plate. Assumes that 'x' and 'c' are aligned and that
'y' and 'r' are aligned. These axes can be reflected by negating the corresponding 'space_rc';
translations can be applied via 'offset'. All entries are indexed by 'n' (newspaper order)
and 's' (serpentine order). Other columns may be added as needed, but Autosampler.goto()
requires 'x', 'y', and 'z' to function properly.
:param num_rc: (tup) number of rows and columns (num_rows, num_cols)
:param space_rc: (tup) spacing for rows and columns [mm] (spacing_rows, spacing_cols)
:param offset: (tup) 3-tuple of floats to be added to x,y,z [mm]
:param to_clipboard: (bool) whether to copy the position_table to the OS clipboard
:return: (DataFrame)
"""
# TODO: instead of offset, full affine option? can use negative space rc to reflect,
# but can't remap x -> y
temp = list()
headers = ['n', 's', 'r', 'c', 'name', 'x', 'y', 'z']
for r in range(num_rc[0]):
for c in range(num_rc[1]):
n = c + r * num_rc[1]
s = ((r + 1) % 2) * (c + r * num_rc[1]) + (r % 2) * ((r + 1) * num_rc[1] - (c + 1))
name = chr(64 + r + 1) + '{:02d}'.format(c + 1)
x = float(c * space_rc[1] + offset[0])
y = float(r * space_rc[0] + offset[1])
z = float(offset[2])
temp.append([n, s, r, c, name, x, y, z])
position_table = pd.DataFrame(temp, columns=headers)
if to_clipboard:
position_table.to_clipboard(index=False)
return position_table
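# A minimal usage sketch: a 96-well plate (8 rows x 12 columns) at 9 mm pitch:
#   pos = generate_position_table((8, 12), (9.0, 9.0))
#   pos.loc[0, ['name', 'x', 'y']]  # -> 'A01', 0.0, 0.0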
def spacing(num_rc, p1, p2):
r, c = map(float, num_rc)
return tuple(abs(np.nan_to_num(np.subtract(p2, p1) / (c - 1, r - 1))))
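# spacing() recovers the per-axis pitch from two opposite corner positions, e.g.:
#   spacing((8, 12), (0.0, 0.0), (99.0, 63.0))  # -> (9.0, 9.0)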
def load_mm_positionlist(filepath):
"""
Takes a MicroManager position list and converts it to a pandas DataFrame. Will load z-coordinates if
available.
:param filepath: (str)
:return: (DataFrame) position list with headers = "r, c, name, x, y, [z]"
"""
with open(filepath) as f:
data = json.load(f)
    df_rcn = pd.json_normalize(data, ['POSITIONS'])[['GRID_ROW', 'GRID_COL', 'LABEL']]
    df_pos = pd.json_normalize(data, ['POSITIONS', 'DEVICES'])[['DEVICE', 'X', 'Y']]
df_xy = df_pos.query("DEVICE=='XYStage'")[['X','Y']].reset_index(drop=True)
df = pd.concat([df_rcn,df_xy], axis=1)
# check for z-axis
ds_z = df_pos.query("DEVICE=='ZStage'")['X'].reset_index(drop=True)
if len(ds_z)>0:
df['z'] = ds_z
rename = {'GRID_ROW': 'r',
'GRID_COL': 'c',
'LABEL': 'name',
'X': 'x',
'Y': 'y'}
df.rename(columns=rename, inplace=True)
return df
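# A minimal sketch of the expected input, assuming the standard MicroManager
# position-list JSON layout (keys as referenced above):
#   {"POSITIONS": [{"GRID_ROW": 0, "GRID_COL": 0, "LABEL": "1-Pos_000_000",
#                   "DEVICES": [{"DEVICE": "XYStage", "X": 0.0, "Y": 0.0}]}]}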
def generate_grid(c0, c1, l_img, p):
"""
Based on two points, creates a 2D-acquisition grid similar to what MicroManager would produce.
:param c0: (arr) first point; numpy 1d array of len 2
:param c1: (arr) second point; numpy 1d array of len 2
    :param l_img: (float) edge length of a single image/field of view (same units as c0, c1)
:param p: (float) desired percent overlap
:return: (DataFrame) position_list in the same format as load_mm_positionlist
"""
# TODO: does generate_grid subsume generate_position_table?
# n -> number of stage positions on an axis
n = 1 + np.ceil(np.abs(c1 - c0) / ((1 - p) * l_img)) # ct,ct
n = n.astype('int')
# l_acq = total_movement + l_img
# l_acq = l_img * (n - n*p + p) # um,um
sign = np.sign(c1 - c0)
# could also use cartesian product (itertools.product OR np.mgrid, stack)
# https://stackoverflow.com/questions/1208118/using-numpy-to-build-an-array-of-all-combinations-of-two-arrays
position_list = pd.DataFrame(columns=['r', 'c', 'name', 'x', 'y'], )
    for j in range(n[1]):  # iter y
y = sign[1] * j * l_img * (1 - p) + c0[1]
        for i in range(n[0]) if not (j % 2) else reversed(range(n[0])):  # iter x (serp)
x = sign[0] * i * l_img * (1 - p) + c0[0]
r = j
c = i
name = '1-Pos_{:03}_{:03}'.format(c, r)
position_list.loc[len(position_list)] = [r, c, name, x, y]
position_list[['r', 'c']] = position_list[['r', 'c']].astype(int)
return position_list
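# A minimal usage sketch (units follow the stage coordinates, assumed um here):
#   c0, c1 = np.array([0.0, 0.0]), np.array([1000.0, 500.0])
#   grid = generate_grid(c0, c1, l_img=300.0, p=0.1)  # -> 3 rows x 5 cols, serpentine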
|
FordyceLab/AcqPack
|
acqpack/utils.py
|
Python
|
mit
| 6,206 | 0.004351 |
import math
import random
import numpy as np
import IMP
import IMP.core
import IMP.test
def _get_beta(N, b):
return 3. / (2. * N * b**2)
def _get_score(z, N, b):
beta = _get_beta(N, b)
return beta * z**2 - math.log(2 * beta * z)
def _get_derv(z, N, b):
beta = _get_beta(N, b)
return 2 * beta * z - 1. / float(z)
def _get_linear_score(z, N, b):
slope = _get_linear_derv(N, b)
intercept = 5.258546595708 - .5 * math.log(_get_beta(N, b))
return slope * z + intercept
def _get_linear_derv(N, b):
return -141.407214101686 * _get_beta(N, b)**.5
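# The helpers above encode score(z) = -log(2*beta*z * exp(-beta*z**2)) with
# beta = 3/(2*N*b**2), i.e. the negative log of the end-to-end height
# distribution of an ideal chain of N segments of length b tethered to a
# surface; hence d(score)/dz = 2*beta*z - 1/z. The _linear_ helpers continue
# the score below z = 0.01/sqrt(2*beta) with a straight line whose hard-coded
# slope and intercept match the score's value and slope at that crossover.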
class Tests(IMP.test.TestCase):
"""Tests for SurfaceTetheredChain."""
def test_init(self):
"""Test correct initialization."""
func = IMP.core.SurfaceTetheredChain(10, 8)
func.set_was_used(True)
def test_evaluate(self):
"""Test evaluates to correct scores and derivatives."""
for i in range(100):
N = random.randint(1, 10)
b = random.uniform(.1, 5.)
func = IMP.core.SurfaceTetheredChain(N, b)
func.set_was_used(True)
max_z = 2 * N * b
beta = _get_beta(N, b)
min_z = .01 / (2 * beta)**.5
z_range = np.linspace(min_z, max_z, 100)
for z in z_range:
corr_score = _get_score(z, N, b)
corr_derv = _get_derv(z, N, b)
score, deriv = func.evaluate_with_derivative(z)
scoreonly = func.evaluate(z)
self.assertAlmostEqual(scoreonly, corr_score, delta=1e-4)
self.assertAlmostEqual(score, corr_score, delta=1e-4)
self.assertAlmostEqual(deriv, corr_derv, delta=1e-4)
def test_evaluate_linear(self):
"""Test linear region evaluates to correct scores and derivatives."""
for i in range(100):
N = random.randint(3, 10)
b = random.uniform(.1, 5.)
func = IMP.core.SurfaceTetheredChain(N, b)
func.set_was_used(True)
beta = _get_beta(N, b)
min_z = .01 / (2 * beta)**.5
z_range = np.linspace(-1, min_z, 100)
corr_derv = _get_linear_derv(N, b)
for z in z_range:
corr_score = _get_linear_score(z, N, b)
score, deriv = func.evaluate_with_derivative(z)
scoreonly = func.evaluate(z)
self.assertAlmostEqual(scoreonly / corr_score, 1, delta=1e-6)
self.assertAlmostEqual(score / corr_score, 1, delta=1e-6)
self.assertAlmostEqual(deriv / corr_derv, 1, delta=1e-6)
def test_special_values(self):
"""Test special distance values are correctly calculated."""
for i in range(10):
N = random.randint(3, 10)
b = random.uniform(.1, 5.)
func = IMP.core.SurfaceTetheredChain(N, b)
func.set_was_used(True)
beta = _get_beta(N, b)
zmin = 1. / (2 * beta)**.5
zmean = .5 * (math.pi / beta)**.5
self.assertAlmostEqual(func.get_distance_at_minimum(), zmin,
delta=1e-6)
self.assertAlmostEqual(func.evaluate_with_derivative(zmin)[1], 0.,
delta=1e-6)
self.assertAlmostEqual(func.get_average_distance(), zmean,
delta=1e-6)
if __name__ == '__main__':
IMP.test.main()
|
shanot/imp
|
modules/core/test/test_surface_tethered_chain.py
|
Python
|
gpl-3.0
| 3,454 | 0.00029 |
# coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: support@saltedge.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SimplifiedAttempt(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_mode': 'str',
'api_version': 'str',
'automatic_fetch': 'bool',
'daily_refresh': 'bool',
'categorization': 'str',
'created_at': 'datetime',
'custom_fields': 'object',
'device_type': 'str',
'remote_ip': 'str',
'exclude_accounts': 'list[str]',
'user_present': 'bool',
'customer_last_logged_at': 'datetime',
'fail_at': 'datetime',
'fail_error_class': 'str',
'fail_message': 'str',
'fetch_scopes': 'list[str]',
'finished': 'bool',
'finished_recent': 'bool',
'from_date': 'date',
'id': 'str',
'interactive': 'bool',
'locale': 'str',
'partial': 'bool',
'store_credentials': 'bool',
'success_at': 'datetime',
'to_date': 'datetime',
'updated_at': 'datetime',
'show_consent_confirmation': 'bool',
'include_natures': 'list[str]',
'last_stage': 'Stage'
}
attribute_map = {
'api_mode': 'api_mode',
'api_version': 'api_version',
'automatic_fetch': 'automatic_fetch',
'daily_refresh': 'daily_refresh',
'categorization': 'categorization',
'created_at': 'created_at',
'custom_fields': 'custom_fields',
'device_type': 'device_type',
'remote_ip': 'remote_ip',
'exclude_accounts': 'exclude_accounts',
'user_present': 'user_present',
'customer_last_logged_at': 'customer_last_logged_at',
'fail_at': 'fail_at',
'fail_error_class': 'fail_error_class',
'fail_message': 'fail_message',
'fetch_scopes': 'fetch_scopes',
'finished': 'finished',
'finished_recent': 'finished_recent',
'from_date': 'from_date',
'id': 'id',
'interactive': 'interactive',
'locale': 'locale',
'partial': 'partial',
'store_credentials': 'store_credentials',
'success_at': 'success_at',
'to_date': 'to_date',
'updated_at': 'updated_at',
'show_consent_confirmation': 'show_consent_confirmation',
'include_natures': 'include_natures',
'last_stage': 'last_stage'
}
def __init__(self, api_mode=None, api_version=None, automatic_fetch=None, daily_refresh=None, categorization='personal', created_at=None, custom_fields=None, device_type=None, remote_ip=None, exclude_accounts=None, user_present=None, customer_last_logged_at=None, fail_at=None, fail_error_class=None, fail_message=None, fetch_scopes=None, finished=None, finished_recent=None, from_date=None, id=None, interactive=None, locale=None, partial=None, store_credentials=None, success_at=None, to_date=None, updated_at=None, show_consent_confirmation=None, include_natures=None, last_stage=None): # noqa: E501
"""SimplifiedAttempt - a model defined in Swagger""" # noqa: E501
self._api_mode = None
self._api_version = None
self._automatic_fetch = None
self._daily_refresh = None
self._categorization = None
self._created_at = None
self._custom_fields = None
self._device_type = None
self._remote_ip = None
self._exclude_accounts = None
self._user_present = None
self._customer_last_logged_at = None
self._fail_at = None
self._fail_error_class = None
self._fail_message = None
self._fetch_scopes = None
self._finished = None
self._finished_recent = None
self._from_date = None
self._id = None
self._interactive = None
self._locale = None
self._partial = None
self._store_credentials = None
self._success_at = None
self._to_date = None
self._updated_at = None
self._show_consent_confirmation = None
self._include_natures = None
self._last_stage = None
self.discriminator = None
if api_mode is not None:
self.api_mode = api_mode
if api_version is not None:
self.api_version = api_version
if automatic_fetch is not None:
self.automatic_fetch = automatic_fetch
if daily_refresh is not None:
self.daily_refresh = daily_refresh
if categorization is not None:
self.categorization = categorization
if created_at is not None:
self.created_at = created_at
if custom_fields is not None:
self.custom_fields = custom_fields
if device_type is not None:
self.device_type = device_type
if remote_ip is not None:
self.remote_ip = remote_ip
if exclude_accounts is not None:
self.exclude_accounts = exclude_accounts
if user_present is not None:
self.user_present = user_present
if customer_last_logged_at is not None:
self.customer_last_logged_at = customer_last_logged_at
if fail_at is not None:
self.fail_at = fail_at
if fail_error_class is not None:
self.fail_error_class = fail_error_class
if fail_message is not None:
self.fail_message = fail_message
if fetch_scopes is not None:
self.fetch_scopes = fetch_scopes
if finished is not None:
self.finished = finished
if finished_recent is not None:
self.finished_recent = finished_recent
if from_date is not None:
self.from_date = from_date
if id is not None:
self.id = id
if interactive is not None:
self.interactive = interactive
if locale is not None:
self.locale = locale
if partial is not None:
self.partial = partial
if store_credentials is not None:
self.store_credentials = store_credentials
if success_at is not None:
self.success_at = success_at
if to_date is not None:
self.to_date = to_date
if updated_at is not None:
self.updated_at = updated_at
if show_consent_confirmation is not None:
self.show_consent_confirmation = show_consent_confirmation
if include_natures is not None:
self.include_natures = include_natures
if last_stage is not None:
self.last_stage = last_stage
@property
def api_mode(self):
"""Gets the api_mode of this SimplifiedAttempt. # noqa: E501
the API mode of the customer that queried the API. # noqa: E501
:return: The api_mode of this SimplifiedAttempt. # noqa: E501
:rtype: str
"""
return self._api_mode
@api_mode.setter
def api_mode(self, api_mode):
"""Sets the api_mode of this SimplifiedAttempt.
the API mode of the customer that queried the API. # noqa: E501
:param api_mode: The api_mode of this SimplifiedAttempt. # noqa: E501
:type: str
"""
allowed_values = ["app", "service"] # noqa: E501
if api_mode not in allowed_values:
raise ValueError(
"Invalid value for `api_mode` ({0}), must be one of {1}" # noqa: E501
.format(api_mode, allowed_values)
)
self._api_mode = api_mode
@property
def api_version(self):
"""Gets the api_version of this SimplifiedAttempt. # noqa: E501
the API version in which the attempt was created # noqa: E501
:return: The api_version of this SimplifiedAttempt. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this SimplifiedAttempt.
the API version in which the attempt was created # noqa: E501
:param api_version: The api_version of this SimplifiedAttempt. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def automatic_fetch(self):
"""Gets the automatic_fetch of this SimplifiedAttempt. # noqa: E501
whether the connection related to the attempt can be automatically fetched # noqa: E501
:return: The automatic_fetch of this SimplifiedAttempt. # noqa: E501
:rtype: bool
"""
return self._automatic_fetch
@automatic_fetch.setter
def automatic_fetch(self, automatic_fetch):
"""Sets the automatic_fetch of this SimplifiedAttempt.
whether the connection related to the attempt can be automatically fetched # noqa: E501
:param automatic_fetch: The automatic_fetch of this SimplifiedAttempt. # noqa: E501
:type: bool
"""
self._automatic_fetch = automatic_fetch
@property
def daily_refresh(self):
"""Gets the daily_refresh of this SimplifiedAttempt. # noqa: E501
latest assigned value for `daily_refresh` in connection # noqa: E501
:return: The daily_refresh of this SimplifiedAttempt. # noqa: E501
:rtype: bool
"""
return self._daily_refresh
@daily_refresh.setter
def daily_refresh(self, daily_refresh):
"""Sets the daily_refresh of this SimplifiedAttempt.
latest assigned value for `daily_refresh` in connection # noqa: E501
:param daily_refresh: The daily_refresh of this SimplifiedAttempt. # noqa: E501
:type: bool
"""
self._daily_refresh = daily_refresh
@property
def categorization(self):
"""Gets the categorization of this SimplifiedAttempt. # noqa: E501
the type of categorization applied. # noqa: E501
:return: The categorization of this SimplifiedAttempt. # noqa: E501
:rtype: str
"""
return self._categorization
@categorization.setter
def categorization(self, categorization):
"""Sets the categorization of this SimplifiedAttempt.
the type of categorization applied. # noqa: E501
:param categorization: The categorization of this SimplifiedAttempt. # noqa: E501
:type: str
"""
allowed_values = ["none", "personal", "business"] # noqa: E501
if categorization not in allowed_values:
raise ValueError(
"Invalid value for `categorization` ({0}), must be one of {1}" # noqa: E501
.format(categorization, allowed_values)
)
self._categorization = categorization
@property
def created_at(self):
"""Gets the created_at of this SimplifiedAttempt. # noqa: E501
when the attempt was made # noqa: E501
:return: The created_at of this SimplifiedAttempt. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this SimplifiedAttempt.
when the attempt was made # noqa: E501
:param created_at: The created_at of this SimplifiedAttempt. # noqa: E501
:type: datetime
"""
self._created_at = created_at
@property
def custom_fields(self):
"""Gets the custom_fields of this SimplifiedAttempt. # noqa: E501
the custom fields that had been sent when creating connection/connect\\_session/oauth\\_provider # noqa: E501
:return: The custom_fields of this SimplifiedAttempt. # noqa: E501
:rtype: object
"""
return self._custom_fields
@custom_fields.setter
def custom_fields(self, custom_fields):
"""Sets the custom_fields of this SimplifiedAttempt.
the custom fields that had been sent when creating connection/connect\\_session/oauth\\_provider # noqa: E501
:param custom_fields: The custom_fields of this SimplifiedAttempt. # noqa: E501
:type: object
"""
self._custom_fields = custom_fields
@property
def device_type(self):
"""Gets the device_type of this SimplifiedAttempt. # noqa: E501
the type of the device that created the attempt. # noqa: E501
:return: The device_type of this SimplifiedAttempt. # noqa: E501
:rtype: str
"""
return self._device_type
@device_type.setter
def device_type(self, device_type):
"""Sets the device_type of this SimplifiedAttempt.
the type of the device that created the attempt. # noqa: E501
:param device_type: The device_type of this SimplifiedAttempt. # noqa: E501
:type: str
"""
allowed_values = ["desktop", "tablet", "mobile"] # noqa: E501
if device_type not in allowed_values:
raise ValueError(
"Invalid value for `device_type` ({0}), must be one of {1}" # noqa: E501
.format(device_type, allowed_values)
)
self._device_type = device_type
@property
def remote_ip(self):
"""Gets the remote_ip of this SimplifiedAttempt. # noqa: E501
the IP of the device that created the attempt # noqa: E501
:return: The remote_ip of this SimplifiedAttempt. # noqa: E501
:rtype: str
"""
return self._remote_ip
@remote_ip.setter
def remote_ip(self, remote_ip):
"""Sets the remote_ip of this SimplifiedAttempt.
the IP of the device that created the attempt # noqa: E501
:param remote_ip: The remote_ip of this SimplifiedAttempt. # noqa: E501
:type: str
"""
self._remote_ip = remote_ip
@property
def exclude_accounts(self):
"""Gets the exclude_accounts of this SimplifiedAttempt. # noqa: E501
the `ids` of accounts that do not need to be refreshed # noqa: E501
:return: The exclude_accounts of this SimplifiedAttempt. # noqa: E501
:rtype: list[str]
"""
return self._exclude_accounts
@exclude_accounts.setter
def exclude_accounts(self, exclude_accounts):
"""Sets the exclude_accounts of this SimplifiedAttempt.
the `ids` of accounts that do not need to be refreshed # noqa: E501
:param exclude_accounts: The exclude_accounts of this SimplifiedAttempt. # noqa: E501
:type: list[str]
"""
self._exclude_accounts = exclude_accounts
@property
def user_present(self):
"""Gets the user_present of this SimplifiedAttempt. # noqa: E501
whether the request was initiated by the end-user of your application # noqa: E501
:return: The user_present of this SimplifiedAttempt. # noqa: E501
:rtype: bool
"""
return self._user_present
@user_present.setter
def user_present(self, user_present):
"""Sets the user_present of this SimplifiedAttempt.
whether the request was initiated by the end-user of your application # noqa: E501
:param user_present: The user_present of this SimplifiedAttempt. # noqa: E501
:type: bool
"""
self._user_present = user_present
@property
def customer_last_logged_at(self):
"""Gets the customer_last_logged_at of this SimplifiedAttempt. # noqa: E501
the datetime when user was last active in your application # noqa: E501
:return: The customer_last_logged_at of this SimplifiedAttempt. # noqa: E501
:rtype: datetime
"""
return self._customer_last_logged_at
@customer_last_logged_at.setter
def customer_last_logged_at(self, customer_last_logged_at):
"""Sets the customer_last_logged_at of this SimplifiedAttempt.
the datetime when user was last active in your application # noqa: E501
:param customer_last_logged_at: The customer_last_logged_at of this SimplifiedAttempt. # noqa: E501
:type: datetime
"""
self._customer_last_logged_at = customer_last_logged_at
@property
def fail_at(self):
"""Gets the fail_at of this SimplifiedAttempt. # noqa: E501
when the attempt failed to finish # noqa: E501
:return: The fail_at of this SimplifiedAttempt. # noqa: E501
:rtype: datetime
"""
return self._fail_at
@fail_at.setter
def fail_at(self, fail_at):
"""Sets the fail_at of this SimplifiedAttempt.
when the attempt failed to finish # noqa: E501
:param fail_at: The fail_at of this SimplifiedAttempt. # noqa: E501
:type: datetime
"""
self._fail_at = fail_at
@property
def fail_error_class(self):
"""Gets the fail_error_class of this SimplifiedAttempt. # noqa: E501
class of error that triggered the fail for attempt # noqa: E501
:return: The fail_error_class of this SimplifiedAttempt. # noqa: E501
:rtype: str
"""
return self._fail_error_class
@fail_error_class.setter
def fail_error_class(self, fail_error_class):
"""Sets the fail_error_class of this SimplifiedAttempt.
class of error that triggered the fail for attempt # noqa: E501
:param fail_error_class: The fail_error_class of this SimplifiedAttempt. # noqa: E501
:type: str
"""
self._fail_error_class = fail_error_class
@property
def fail_message(self):
"""Gets the fail_message of this SimplifiedAttempt. # noqa: E501
message that describes the error class # noqa: E501
:return: The fail_message of this SimplifiedAttempt. # noqa: E501
:rtype: str
"""
return self._fail_message
@fail_message.setter
def fail_message(self, fail_message):
"""Sets the fail_message of this SimplifiedAttempt.
message that describes the error class # noqa: E501
:param fail_message: The fail_message of this SimplifiedAttempt. # noqa: E501
:type: str
"""
self._fail_message = fail_message
@property
def fetch_scopes(self):
"""Gets the fetch_scopes of this SimplifiedAttempt. # noqa: E501
fetching mode. # noqa: E501
:return: The fetch_scopes of this SimplifiedAttempt. # noqa: E501
:rtype: list[str]
"""
return self._fetch_scopes
@fetch_scopes.setter
def fetch_scopes(self, fetch_scopes):
"""Sets the fetch_scopes of this SimplifiedAttempt.
fetching mode. # noqa: E501
:param fetch_scopes: The fetch_scopes of this SimplifiedAttempt. # noqa: E501
:type: list[str]
"""
allowed_values = ["accounts", "holder_info", "transactions"] # noqa: E501
if not set(fetch_scopes).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `fetch_scopes` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(fetch_scopes) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._fetch_scopes = fetch_scopes
@property
def finished(self):
"""Gets the finished of this SimplifiedAttempt. # noqa: E501
whether the connection had finished fetching # noqa: E501
:return: The finished of this SimplifiedAttempt. # noqa: E501
:rtype: bool
"""
return self._finished
@finished.setter
def finished(self, finished):
"""Sets the finished of this SimplifiedAttempt.
whether the connection had finished fetching # noqa: E501
:param finished: The finished of this SimplifiedAttempt. # noqa: E501
:type: bool
"""
self._finished = finished
@property
def finished_recent(self):
"""Gets the finished_recent of this SimplifiedAttempt. # noqa: E501
whether the connection had finished data for recent range # noqa: E501
:return: The finished_recent of this SimplifiedAttempt. # noqa: E501
:rtype: bool
"""
return self._finished_recent
@finished_recent.setter
def finished_recent(self, finished_recent):
"""Sets the finished_recent of this SimplifiedAttempt.
whether the connection had finished data for recent range # noqa: E501
:param finished_recent: The finished_recent of this SimplifiedAttempt. # noqa: E501
:type: bool
"""
self._finished_recent = finished_recent
@property
def from_date(self):
"""Gets the from_date of this SimplifiedAttempt. # noqa: E501
date from which the data had been fetched # noqa: E501
:return: The from_date of this SimplifiedAttempt. # noqa: E501
:rtype: date
"""
return self._from_date
@from_date.setter
def from_date(self, from_date):
"""Sets the from_date of this SimplifiedAttempt.
date from which the data had been fetched # noqa: E501
:param from_date: The from_date of this SimplifiedAttempt. # noqa: E501
:type: date
"""
self._from_date = from_date
@property
def id(self):
"""Gets the id of this SimplifiedAttempt. # noqa: E501
`id` of the attempt # noqa: E501
:return: The id of this SimplifiedAttempt. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this SimplifiedAttempt.
`id` of the attempt # noqa: E501
:param id: The id of this SimplifiedAttempt. # noqa: E501
:type: str
"""
self._id = id
@property
def interactive(self):
"""Gets the interactive of this SimplifiedAttempt. # noqa: E501
whether the connection related to the attempt is interactive # noqa: E501
:return: The interactive of this SimplifiedAttempt. # noqa: E501
:rtype: bool
"""
return self._interactive
@interactive.setter
def interactive(self, interactive):
"""Sets the interactive of this SimplifiedAttempt.
whether the connection related to the attempt is interactive # noqa: E501
:param interactive: The interactive of this SimplifiedAttempt. # noqa: E501
:type: bool
"""
self._interactive = interactive
@property
def locale(self):
"""Gets the locale of this SimplifiedAttempt. # noqa: E501
the language of the Connect widget or/and provider error message in the <a href='http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes' target=\"_blank\">ISO 639-1</a> format. Possible values are: `bg`, `cz`, `de`, `en`, `es-MX`, `es`, `fr`, `he`, `hu`, `it`, `nl`, `pl`, `pt-BR`, `pt`, `ro`, `ru`, `sk`, `tr`, `uk`, `zh-HK`(Traditional), `zh`(Simplified). Defaults to `en` # noqa: E501
:return: The locale of this SimplifiedAttempt. # noqa: E501
:rtype: str
"""
return self._locale
@locale.setter
def locale(self, locale):
"""Sets the locale of this SimplifiedAttempt.
the language of the Connect widget or/and provider error message in the <a href='http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes' target=\"_blank\">ISO 639-1</a> format. Possible values are: `bg`, `cz`, `de`, `en`, `es-MX`, `es`, `fr`, `he`, `hu`, `it`, `nl`, `pl`, `pt-BR`, `pt`, `ro`, `ru`, `sk`, `tr`, `uk`, `zh-HK`(Traditional), `zh`(Simplified). Defaults to `en` # noqa: E501
:param locale: The locale of this SimplifiedAttempt. # noqa: E501
:type: str
"""
self._locale = locale
@property
def partial(self):
"""Gets the partial of this SimplifiedAttempt. # noqa: E501
whether the connection was partially fetched # noqa: E501
:return: The partial of this SimplifiedAttempt. # noqa: E501
:rtype: bool
"""
return self._partial
@partial.setter
def partial(self, partial):
"""Sets the partial of this SimplifiedAttempt.
whether the connection was partially fetched # noqa: E501
:param partial: The partial of this SimplifiedAttempt. # noqa: E501
:type: bool
"""
self._partial = partial
@property
def store_credentials(self):
"""Gets the store_credentials of this SimplifiedAttempt. # noqa: E501
whether the credentials were stored on our side # noqa: E501
:return: The store_credentials of this SimplifiedAttempt. # noqa: E501
:rtype: bool
"""
return self._store_credentials
@store_credentials.setter
def store_credentials(self, store_credentials):
"""Sets the store_credentials of this SimplifiedAttempt.
whether the credentials were stored on our side # noqa: E501
:param store_credentials: The store_credentials of this SimplifiedAttempt. # noqa: E501
:type: bool
"""
self._store_credentials = store_credentials
@property
def success_at(self):
"""Gets the success_at of this SimplifiedAttempt. # noqa: E501
when the attempt succeeded and finished # noqa: E501
:return: The success_at of this SimplifiedAttempt. # noqa: E501
:rtype: datetime
"""
return self._success_at
@success_at.setter
def success_at(self, success_at):
"""Sets the success_at of this SimplifiedAttempt.
when the attempt succeeded and finished # noqa: E501
:param success_at: The success_at of this SimplifiedAttempt. # noqa: E501
:type: datetime
"""
self._success_at = success_at
@property
def to_date(self):
"""Gets the to_date of this SimplifiedAttempt. # noqa: E501
date until which the data has been fetched # noqa: E501
:return: The to_date of this SimplifiedAttempt. # noqa: E501
:rtype: datetime
"""
return self._to_date
@to_date.setter
def to_date(self, to_date):
"""Sets the to_date of this SimplifiedAttempt.
date until which the data has been fetched # noqa: E501
:param to_date: The to_date of this SimplifiedAttempt. # noqa: E501
:type: datetime
"""
self._to_date = to_date
@property
def updated_at(self):
"""Gets the updated_at of this SimplifiedAttempt. # noqa: E501
when last attempt update occurred # noqa: E501
:return: The updated_at of this SimplifiedAttempt. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this SimplifiedAttempt.
when last attempt update occurred # noqa: E501
:param updated_at: The updated_at of this SimplifiedAttempt. # noqa: E501
:type: datetime
"""
self._updated_at = updated_at
@property
def show_consent_confirmation(self):
"""Gets the show_consent_confirmation of this SimplifiedAttempt. # noqa: E501
whether any consent was given for this connection # noqa: E501
:return: The show_consent_confirmation of this SimplifiedAttempt. # noqa: E501
:rtype: bool
"""
return self._show_consent_confirmation
@show_consent_confirmation.setter
def show_consent_confirmation(self, show_consent_confirmation):
"""Sets the show_consent_confirmation of this SimplifiedAttempt.
whether any consent was given for this connection # noqa: E501
:param show_consent_confirmation: The show_consent_confirmation of this SimplifiedAttempt. # noqa: E501
:type: bool
"""
self._show_consent_confirmation = show_consent_confirmation
@property
def include_natures(self):
"""Gets the include_natures of this SimplifiedAttempt. # noqa: E501
the natures of the accounts that need to be fetched # noqa: E501
:return: The include_natures of this SimplifiedAttempt. # noqa: E501
:rtype: list[str]
"""
return self._include_natures
@include_natures.setter
def include_natures(self, include_natures):
"""Sets the include_natures of this SimplifiedAttempt.
the natures of the accounts that need to be fetched # noqa: E501
:param include_natures: The include_natures of this SimplifiedAttempt. # noqa: E501
:type: list[str]
"""
self._include_natures = include_natures
@property
def last_stage(self):
"""Gets the last_stage of this SimplifiedAttempt. # noqa: E501
:return: The last_stage of this SimplifiedAttempt. # noqa: E501
:rtype: Stage
"""
return self._last_stage
@last_stage.setter
def last_stage(self, last_stage):
"""Sets the last_stage of this SimplifiedAttempt.
:param last_stage: The last_stage of this SimplifiedAttempt. # noqa: E501
:type: Stage
"""
self._last_stage = last_stage
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SimplifiedAttempt, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SimplifiedAttempt):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
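# A minimal usage sketch (values are illustrative, not taken from a real API response):
#   attempt = SimplifiedAttempt(api_mode='service', device_type='desktop',
#                               fetch_scopes=['accounts', 'transactions'])
#   attempt.to_dict()  # -> {'api_mode': 'service', 'device_type': 'desktop', ...}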
|
ltowarek/budget-supervisor
|
third_party/saltedge/swagger_client/models/simplified_attempt.py
|
Python
|
mit
| 31,285 | 0.000064 |
"""make banner url longger
Revision ID: e567bd5c0b
Revises: 23ab90c01600
Create Date: 2015-02-01 13:15:59.075956
"""
# revision identifiers, used by Alembic.
revision = 'e567bd5c0b'
down_revision = '23ab90c01600'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.alter_column('banners', 'link_url',
type_=sa.String(200),
existing_type=sa.String(100),
nullable=True)
def downgrade():
pass
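# The downgrade is intentionally a no-op; a sketch of the reverse migration,
# assuming no existing rows exceed the old 100-character limit, would be:
#   op.alter_column('banners', 'link_url',
#                   type_=sa.String(100),
#                   existing_type=sa.String(200),
#                   nullable=True)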
|
toway/towaymeetups
|
mba/alembic/versions/20150201_e567bd5c0b_make_banner_url_long.py
|
Python
|
gpl-3.0
| 479 | 0.004175 |