| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 |
import torch.nn as nn
from utils import *
class pspnet(nn.Module):
def __init__(self, feature_scale=4, n_classes=21, is_deconv=True, in_channels=3, is_batchnorm=True):
super(pspnet, self).__init__()
self.is_deconv = is_deconv
self.in_channels = in_channels
self.is_batchnorm = is_batchnorm
self.feature_scale = feature_scale
self.layers = [2, 2, 2, 2] # Currently hardcoded for ResNet-18
filters = [64, 128, 256, 512]
# filters = [x / self.feature_scale for x in filters]
self.inplanes = filters[0]
# Encoder
self.convbnrelu1 = conv2DBatchNormRelu(in_channels=3, k_size=7, n_filters=64,
padding=3, stride=2, bias=False)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
block = residualBlock
self.encoder1 = self._make_layer(block, filters[0], self.layers[0])
self.encoder2 = self._make_layer(block, filters[1], self.layers[1], stride=2)
self.encoder3 = self._make_layer(block, filters[2], self.layers[2], stride=2)
self.encoder4 = self._make_layer(block, filters[3], self.layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7)
# Decoder
self.decoder4 = linknetUp(filters[3], filters[2])
self.decoder3 = linknetUp(filters[2], filters[1])
self.decoder2 = linknetUp(filters[1], filters[0])
self.decoder1 = linknetUp(filters[0], filters[0])
# Final Classifier
        self.finaldeconvbnrelu1 = deconv2DBatchNormRelu(filters[0], 32 // feature_scale, 2, 2, 0)
        self.finalconvbnrelu2 = conv2DBatchNormRelu(in_channels=32 // feature_scale, k_size=3, n_filters=32 // feature_scale, padding=1, stride=1)
        self.finalconv3 = nn.Conv2d(32 // feature_scale, n_classes, 3, 1, 1)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = conv2DBatchNorm(self.inplanes, planes*block.expansion, k_size=1, stride=stride, padding=0, bias=False)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
# Encoder
x = self.convbnrelu1(x)
x = self.maxpool(x)
e1 = self.encoder1(x)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
# Decoder with Skip Connections
d4 = self.decoder4(e4)
d4 = d4 + e3
d3 = self.decoder3(d4)
d3 = d3 + e2
d2 = self.decoder2(d3)
d2 = d2 + e1
d1 = self.decoder1(d2)
# Final Classification
f1 = self.finaldeconvbnrelu1(d1)
f2 = self.finalconvbnrelu2(f1)
f3 = self.finalconv3(f2)
return f3
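if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original module): assumes the
    # helper blocks imported from `utils` above are available and that each
    # linknetUp/deconv stage doubles spatial size, so a 224x224 input returns
    # at 224x224.
    import torch
    model = pspnet(n_classes=21)
    out = model(torch.randn(1, 3, 224, 224))
    print(out.shape)  # expected: torch.Size([1, 21, 224, 224])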
| ibadami/pytorch-semseg | ptsemseg/models/pspnet.py | Python | mit | 3,018 | 0.004639 |
#!/usr/bin/env python
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from __future__ import absolute_import
from __future__ import unicode_literals
from gnuradio import gr, blocks
from . import fec_swig as fec
from .threaded_encoder import threaded_encoder
from .capillary_threaded_encoder import capillary_threaded_encoder
from .bitflip import read_bitlist
class extended_encoder(gr.hier_block2):
def __init__(self, encoder_obj_list, threading, puncpat=None):
gr.hier_block2.__init__(self, "extended_encoder",
gr.io_signature(1, 1, gr.sizeof_char),
gr.io_signature(1, 1, gr.sizeof_char))
self.blocks=[]
self.puncpat=puncpat
if(type(encoder_obj_list) == list):
if(type(encoder_obj_list[0]) == list):
gr.log.info("fec.extended_encoder: Parallelism must be 1.")
raise AttributeError
else:
# If it has parallelism of 0, force it into a list of 1
encoder_obj_list = [encoder_obj_list,]
if fec.get_encoder_input_conversion(encoder_obj_list[0]) == "pack":
self.blocks.append(blocks.pack_k_bits_bb(8))
if threading == 'capillary':
self.blocks.append(capillary_threaded_encoder(encoder_obj_list,
gr.sizeof_char,
gr.sizeof_char))
elif threading == 'ordinary':
self.blocks.append(threaded_encoder(encoder_obj_list,
gr.sizeof_char,
gr.sizeof_char))
else:
self.blocks.append(fec.encoder(encoder_obj_list[0],
gr.sizeof_char,
gr.sizeof_char))
if fec.get_encoder_output_conversion(encoder_obj_list[0]) == "packed_bits":
self.blocks.append(blocks.packed_to_unpacked_bb(1, gr.GR_MSB_FIRST))
        if self.puncpat is not None and self.puncpat != '11':
            self.blocks.append(fec.puncture_bb(len(puncpat), read_bitlist(puncpat), 0))
# Connect the input to the encoder and the output to the
# puncture if used or the encoder if not.
        self.connect((self, 0), (self.blocks[0], 0))
        self.connect((self.blocks[-1], 0), (self, 0))
# If using the puncture block, add it into the flowgraph after
# the encoder.
        for i in range(len(self.blocks) - 1):
            self.connect((self.blocks[i], 0), (self.blocks[i + 1], 0))
| TheWylieStCoyote/gnuradio | gr-fec/python/fec/extended_encoder.py | Python | gpl-3.0 | 2,704 | 0.003328 |
"""Custom utils."""
from datetime import date
from itertools import groupby as groupby_
def to_isoformat(date_str):
"""Convert an ISO 8601 like date string to standard ISO 8601 format.
Args:
date_str (str): An ISO 8601 like date string.
Returns:
str: A standard ISO 8601 date string.
Examples:
>>> to_isoformat('2017-1-1')
        '2017-01-01'
"""
return from_isoformat(date_str).isoformat()
def from_isoformat(date_str):
"""Create date from iso string."""
message = 'Date should be in ISO 8601 format: "YYYY-MM-DD"'
if not isinstance(date_str, str):
raise Exception(message)
try:
parts = [int(part) for part in date_str.split('-')]
return date(parts[0], parts[1], parts[2])
    except (ValueError, IndexError):
raise Exception(message)
def month_range(start, stop):
"""Return a year month range.
Args:
start (str): Start year month in format '2016-01'
stop (str): Stop year month in format '2017-01'
Returns:
A list of year month string.
Examples:
>>> month_range('2016-11', '2017-01')
['2016-11', '2016-12', '2017-01']
>>> month_range('2017-01', '2016-11')
['2017-01', '2016-12', '2016-11']
"""
start_date = from_isoformat('{0}-01'.format(start))
stop_date = from_isoformat('{0}-01'.format(stop))
if start_date > stop_date:
start_date, stop_date = stop_date, start_date
reverse = True
else:
reverse = False
result = []
while start_date <= stop_date:
result.append(start_date.isoformat()[0:7])
year = start_date.year
month = start_date.month
if month == 12:
year += 1
month = 1
else:
month += 1
start_date = date(year, month, 1)
    return sorted(result, reverse=True) if reverse else result
def groupby(iterable, key=None, reverse=False):
"""Wrapper of itertools.groupby function.
It make use of built-in itertools.groupby function.
In addition to sort the iterable with the same key as groupby.
Ref: <https://docs.python.org/3/library/itertools.html#itertools.groupby>
"""
if key is None:
key = lambda x: x
return groupby_(sorted(iterable, key=key, reverse=reverse), key)
def lower(func):
    """Decorator that strips and lower-cases a function's string result."""
def _lower(*args, **kwargs):
return str.lower(str.strip(func(*args, **kwargs)))
return _lower
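if __name__ == '__main__':
    # Illustrative demo (not part of the original module); the inputs below
    # are made-up examples of the helpers defined above.
    print(to_isoformat('2017-1-1'))           # 2017-01-01
    print(month_range('2016-11', '2017-01'))  # ['2016-11', '2016-12', '2017-01']
    for key, group in groupby('aabcb'):
        print(key, list(group))               # a ['a', 'a'] / b ['b', 'b'] / c ['c']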
| CVBDL/ccollab2eeplatform-python | ccollab2eeplatform/utils.py | Python | mit | 2,434 | 0.000822 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from selenium.webdriver.common.by import By
from pages.base import BasePage
from pages.regions.download_button import DownloadButton
class DeveloperPage(BasePage):
_URL_TEMPLATE = "/{locale}/firefox/developer/"
_primary_download_locator = (By.ID, "intro-download")
_secondary_download_locator = (By.ID, "footer-download")
@property
def primary_download_button(self):
el = self.find_element(*self._primary_download_locator)
return DownloadButton(self, root=el)
@property
def secondary_download_button(self):
el = self.find_element(*self._secondary_download_locator)
return DownloadButton(self, root=el)
| flodolo/bedrock | tests/pages/firefox/developer.py | Python | mpl-2.0 | 870 | 0 |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from rally.plugins.openstack.scenarios.ceilometer import utils
from tests.unit import test
CEILOMETER_UTILS = "rally.plugins.openstack.scenarios.ceilometer.utils"
class CeilometerScenarioTestCase(test.ScenarioTestCase):
def setUp(self):
super(CeilometerScenarioTestCase, self).setUp()
self.scenario = utils.CeilometerScenario(self.context)
def test__list_alarms_by_id(self):
self.assertEqual(self.clients("ceilometer").alarms.get.return_value,
self.scenario._list_alarms("alarm-id"))
self.clients("ceilometer").alarms.get.assert_called_once_with(
"alarm-id")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_alarms")
def test__list_alarms(self):
self.assertEqual(self.clients("ceilometer").alarms.list.return_value,
self.scenario._list_alarms())
self.clients("ceilometer").alarms.list.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_alarms")
def test__create_alarm(self):
alarm_dict = {"alarm_id": "fake-alarm-id"}
orig_alarm_dict = copy.copy(alarm_dict)
self.scenario._generate_random_name = mock.Mock()
self.assertEqual(self.scenario._create_alarm("fake-meter-name", 100,
alarm_dict),
self.clients("ceilometer").alarms.create.return_value)
self.clients("ceilometer").alarms.create.assert_called_once_with(
meter_name="fake-meter-name",
threshold=100,
description="Test Alarm",
alarm_id="fake-alarm-id",
name=self.scenario._generate_random_name.return_value)
# ensure that _create_alarm() doesn't modify the alarm dict as
# a side-effect
self.assertDictEqual(alarm_dict, orig_alarm_dict)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.create_alarm")
def test__delete_alarms(self):
self.scenario._delete_alarm("alarm-id")
self.clients("ceilometer").alarms.delete.assert_called_once_with(
"alarm-id")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.delete_alarm")
def test__update_alarm(self):
alarm_diff = {"description": "Changed Test Description"}
orig_alarm_diff = copy.copy(alarm_diff)
self.scenario._update_alarm("alarm-id", alarm_diff)
self.clients("ceilometer").alarms.update.assert_called_once_with(
"alarm-id", **alarm_diff)
        # ensure that _update_alarm() doesn't modify the alarm dict as
        # a side-effect
self.assertDictEqual(alarm_diff, orig_alarm_diff)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.update_alarm")
def test__get_alarm_history(self):
self.assertEqual(
self.scenario._get_alarm_history("alarm-id"),
self.clients("ceilometer").alarms.get_history.return_value)
self.clients("ceilometer").alarms.get_history.assert_called_once_with(
"alarm-id")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.get_alarm_history")
def test__get_alarm_state(self):
self.assertEqual(
self.scenario._get_alarm_state("alarm-id"),
self.clients("ceilometer").alarms.get_state.return_value)
self.clients("ceilometer").alarms.get_state.assert_called_once_with(
"alarm-id")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.get_alarm_state")
def test__set_alarm_state(self):
alarm = mock.Mock()
self.clients("ceilometer").alarms.create.return_value = alarm
return_alarm = self.scenario._set_alarm_state(alarm, "ok", 100)
self.mock_wait_for.mock.assert_called_once_with(
alarm,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=100, check_interval=1)
self.mock_resource_is.mock.assert_called_once_with("ok")
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_alarm)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.set_alarm_state")
def test__list_events(self):
self.assertEqual(
self.scenario._list_events(),
self.admin_clients("ceilometer").events.list.return_value
)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_events")
def test__get_events(self):
self.assertEqual(
self.scenario._get_event(event_id="fake_id"),
self.admin_clients("ceilometer").events.get.return_value
)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.get_event")
def test__list_event_types(self):
self.assertEqual(
self.scenario._list_event_types(),
self.admin_clients("ceilometer").event_types.list.return_value
)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_event_types")
def test__list_event_traits(self):
self.assertEqual(
self.scenario._list_event_traits(
event_type="fake_event_type", trait_name="fake_trait_name"),
self.admin_clients("ceilometer").traits.list.return_value
)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_event_traits")
def test__list_event_trait_descriptions(self):
self.assertEqual(
self.scenario._list_event_trait_descriptions(
event_type="fake_event_type"
),
self.admin_clients("ceilometer").trait_descriptions.list.
return_value
)
self._test_atomic_action_timer(
self.scenario.atomic_actions(),
"ceilometer.list_event_trait_descriptions")
def test__list_meters(self):
self.assertEqual(self.scenario._list_meters(),
self.clients("ceilometer").meters.list.return_value)
self.clients("ceilometer").meters.list.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_meters")
def test__list_resources(self):
self.assertEqual(
self.scenario._list_resources(),
self.clients("ceilometer").resources.list.return_value)
self.clients("ceilometer").resources.list.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_resources")
def test__list_samples(self):
self.assertEqual(
self.scenario._list_samples(),
self.clients("ceilometer").samples.list.return_value)
self.clients("ceilometer").samples.list.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_samples")
def test__get_resource(self):
self.assertEqual(self.scenario._get_resource("fake-resource-id"),
self.clients("ceilometer").resources.get.return_value)
self.clients("ceilometer").resources.get.assert_called_once_with(
"fake-resource-id")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.get_resource")
def test__get_stats(self):
self.assertEqual(
self.scenario._get_stats("fake-meter"),
self.clients("ceilometer").statistics.list.return_value)
self.clients("ceilometer").statistics.list.assert_called_once_with(
"fake-meter")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.get_stats")
def test__create_meter(self):
self.scenario._generate_random_name = mock.Mock()
self.assertEqual(
self.scenario._create_meter(fakearg="fakearg"),
self.clients("ceilometer").samples.create.return_value[0])
self.clients("ceilometer").samples.create.assert_called_once_with(
counter_name=self.scenario._generate_random_name.return_value,
fakearg="fakearg")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.create_meter")
def test__query_alarms(self):
self.assertEqual(
self.scenario._query_alarms("fake-filter", "fake-orderby", 10),
self.clients("ceilometer").query_alarms.query.return_value)
self.clients("ceilometer").query_alarms.query.assert_called_once_with(
"fake-filter", "fake-orderby", 10)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.query_alarms")
def test__query_alarm_history(self):
self.assertEqual(
self.scenario._query_alarm_history(
"fake-filter", "fake-orderby", 10),
self.clients("ceilometer").query_alarm_history.query.return_value)
self.clients(
"ceilometer").query_alarm_history.query.assert_called_once_with(
"fake-filter", "fake-orderby", 10)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.query_alarm_history")
def test__query_samples(self):
self.assertEqual(
self.scenario._query_samples("fake-filter", "fake-orderby", 10),
self.clients("ceilometer").query_samples.query.return_value)
self.clients("ceilometer").query_samples.query.assert_called_once_with(
"fake-filter", "fake-orderby", 10)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.query_samples")
def test__create_sample_no_resource_id(self):
self.scenario._generate_random_name = mock.Mock()
created_sample = self.scenario._create_sample("test-counter-name",
"test-counter-type",
"test-counter-unit",
"test-counter-volume")
self.assertEqual(
created_sample,
self.clients("ceilometer").samples.create.return_value)
self.clients("ceilometer").samples.create.assert_called_once_with(
counter_name="test-counter-name",
counter_type="test-counter-type",
counter_unit="test-counter-unit",
counter_volume="test-counter-volume",
resource_id=self.scenario._generate_random_name.return_value)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.create_sample")
def test__create_sample(self):
created_sample = self.scenario._create_sample("test-counter-name",
"test-counter-type",
"test-counter-unit",
"test-counter-volume",
"test-resource-id")
self.assertEqual(
created_sample,
self.clients("ceilometer").samples.create.return_value)
self.clients("ceilometer").samples.create.assert_called_once_with(
counter_name="test-counter-name",
counter_type="test-counter-type",
counter_unit="test-counter-unit",
counter_volume="test-counter-volume",
resource_id="test-resource-id")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.create_sample")
| aplanas/rally | tests/unit/plugins/openstack/scenarios/ceilometer/test_utils.py | Python | apache-2.0 | 13,257 | 0 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2015 Steffen Deusch
# Licensed under the MIT license
# Companion script to MonitorNjus, 14.09.2015 (version 0.9.3)
import os
workingdir = os.path.dirname(os.path.realpath(__file__))
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import imp
modulesdir = workingdir+"/../modules"
common = imp.load_source("common", modulesdir+"/common.py")
def updateurl_refresh(Name, GETNAME, Seite, Nummer, widgname):
if "index" in referer:
gval = form.getfirst(Name, None)
if gval is not None:
val = gval
else:
val = None
if val is not None:
if val == common.getinfo(GETNAME, Seite, Nummer):
pass
else:
common.writeinfo(Seite, Nummer, GETNAME, unicode(val))
elif "widgets" in referer:
gval = form.getfirst(Name, None)
if gval is not None:
val = gval
else:
val = None
if val is not None:
if val == common.getwidgetinfo(widgname, Nummer, GETNAME):
pass
else:
common.writewidgetinfo(widgname, Nummer, GETNAME, unicode(val))
else:
raise Warning("Function updateurl_refresh: This referer does not exist.")
def updateaktiv(Name, GETNAME, Seite, Nummer, widgname, hidden):
if hidden is None:
val_flag = 1
else:
val_flag = 0
if "index" in referer:
if val_flag == common.getinfo(GETNAME, Seite, Nummer):
pass
else:
common.writeinfo(Seite, Nummer, GETNAME, unicode(val_flag))
elif "widgets" in referer:
        if val_flag == common.getwidgetinfo(widgname, Nummer, GETNAME):
pass
else:
common.writewidgetinfo(widgname, Nummer, GETNAME, unicode(val_flag))
else:
raise Warning("Function updateaktiv: This referer does not exist.")
def update_align(Name, GETNAME, widgname, ID):
if "widgets" in referer:
if form.getfirst(Name, None):
val = form.getfirst(Name, None)
else:
val = None
if val is not None:
if unicode(val) == common.getwidgetinfo(widgname, ID, GETNAME):
pass
else:
common.writewidgetinfo(widgname, ID, GETNAME, unicode(val))
else:
raise Warning("Function update_align: This referer is not allowed.")
def updatetime(Seite, Nummer):
if "index" in referer:
uhrzeit = form.getfirst("uhrzeit-"+Seite+"-"+unicode(Nummer), None)
wochentag = form.getfirst("wochentag-"+Seite+"-"+unicode(Nummer), None)
tag = form.getfirst("tag-"+Seite+"-"+unicode(Nummer), None)
monat = form.getfirst("monat-"+Seite+"-"+unicode(Nummer), None)
if uhrzeit is None and wochentag is None and tag is None and monat is None:
pass
else:
if uhrzeit is None:
uhrzeit = "*"
if wochentag is None:
wochentag = "*"
if tag is None:
tag = "*"
if monat is None:
monat = "*"
common.writeinfo(Seite, Nummer, "VONBIS", uhrzeit+"|"+wochentag+"|"+tag+"|"+monat)
else:
raise Warning("Function updatetime: This referer is not allowed.")
def updateteilung():
if "index" in referer:
teilung = form.getfirst("teilung", None)
if teilung is not None:
if teilung == common.readsettings("TEILUNG"):
pass
else:
common.updatesettings("TEILUNG", teilung)
else:
raise Warning("Function updateteilung: This referer is not allowed.")
try:
import cgi, cgitb
#import cgitb; cgitb.enable()
if common.authentication:
auth = imp.load_source("auth", modulesdir+"/auth.py")
auth.me()
form = cgi.FieldStorage()
    referer = form.getfirst('referer', '')
if "index" in referer:
refresh = "<meta http-equiv=\"refresh\" content=\"0; URL=../admin/index.py\">"
for item in form:
if not "teilung" in item and not "referer" in item:
splitteditem = item.split("-")
name = splitteditem[0]
seite = splitteditem[1]
nummer = splitteditem[2]
if not "uhrzeit" in item and not "wochentag" in item and not "tag" in item and not "monat" in item:
if not "aktiv" in name.lower():
updateurl_refresh(item, name, seite, nummer, "")
else:
if "hidden." in item.lower() and not item[7:] in form:
hidden = 0
updateaktiv(item[7:], name[7:], seite, nummer, "", hidden)
elif "hidden." in item.lower() and item[7:] in form:
pass
else:
hidden = None
updateaktiv(item, name, seite, nummer, "", hidden)
else:
updatetime(seite, nummer)
else:
updateteilung()
elif "widgets" in referer:
refresh = "<meta http-equiv=\"refresh\" content=\"0; URL=../admin/widgets.py\">"
for item in form:
if not "referer" in item:
splitteditem = item.split("-")
art = splitteditem[0]
typ = splitteditem[1]
ID = splitteditem[2]
if not "aktiv" in art.lower():
if not "url" in art.lower():
update_align(item, art, typ, ID)
else:
updateurl_refresh(item, art, "", ID, typ)
else:
if "hidden." in item.lower() and not item[7:] in form:
hidden = 0
updateaktiv(item[7:], art[7:], "", ID, typ, hidden)
elif "hidden." in item.lower() and item[7:] in form:
pass
else:
hidden = None
updateaktiv(item, art, "", ID, typ, hidden)
elif "row" in referer:
refresh = "<meta http-equiv=\"refresh\" content=\"0; URL=../admin/index.py\">"
cnum = form.getfirst("createnum", None)
dnum = form.getfirst("delnum", None)
if cnum is not None and cnum.isdigit():
num = int(cnum)
if num == int(common.getrows())+1:
common.createrow(num)
else:
raise Warning("Neues Displayset - falsche Zahl: "+str(num))
elif dnum is not None and dnum.isdigit():
num = int(dnum)
if num <= int(common.getrows()):
common.delrow(num)
else:
raise Warning("Displayset löschen - falsche Zahl: "+str(num))
elif "newwidget" in referer:
refresh = "<meta http-equiv=\"refresh\" content=\"0; URL=../admin/widgets.py\">"
if form.getfirst("art", None):
val = form.getfirst("art", None)
else:
val = None
if val is not None:
if val == "Logo" or val == "Freies_Widget":
count = list(common.getwidgets())
ID = int(common.maxid())+1
common.newwidget(ID, val, val, 0, "placeholder", "bottom", "0px", "center", "0px", "100%", "100%")
else:
raise Warning("Falsches Widget: "+val)
elif "delwidget" in referer:
refresh = "<meta http-equiv=\"refresh\" content=\"0; URL=../admin/widgets.py\">"
num = form.getfirst("delnum", None)
if num is not None:
common.removewidget(unicode(num))
elif "triggerrefresh" in referer:
refresh = "<meta http-equiv=\"refresh\" content=\"0; URL=../admin/index.py\">"
else:
refresh = ""
out = "Content-Type: text/html;charset=utf-8\n"
out += u"""
<!DOCTYPE html>
<html lang="de">
<head>
<meta charset="UTF-8">"""
#for item in form:
#out += item+": "+form[item].value
out += unicode(refresh)
out += u"""\
</head>
</html>"""
print(unicode(out))
if common.triggerrefresh:
datei = open(workingdir+"/../bin/refresh", "w")
datei.write("1")
datei.close()
except Exception as e:
common.debug(e) | SteffenDE/monitornjus-classic | admin/setn.py | Python | mit | 6,805 | 0.031599 |
import unittest
from shapely.geometry import LineString
from shapely.geometry.collection import GeometryCollection
class CollectionTestCase(unittest.TestCase):
def test_array_interface(self):
m = GeometryCollection()
self.failUnlessEqual(len(m), 0)
self.failUnlessEqual(m.geoms, [])
def test_child_with_deleted_parent(self):
# test that we can remove a collection while having
# childs around
a = LineString([(0, 0), (1, 1), (1,2), (2,2)])
b = LineString([(0, 0), (1, 1), (2,1), (2,2)])
collection = a.intersection(b)
child = collection.geoms[0]
# delete parent of child
del collection
# access geometry, this should not seg fault as 1.2.15 did
child.to_wkt()
def test_suite():
return unittest.TestLoader().loadTestsFromTestCase(CollectionTestCase)
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/shapely/tests/test_collection.py | Python | agpl-3.0 | 870 | 0.006897 |
import unittest
from contented.app import Application
class AppTests(unittest.TestCase):
def test_load_app(self):
app = Application({})
self.assertTrue(hasattr(app, "settings"))
self.assertTrue(hasattr(app, "content_map"))
self.assertTrue(hasattr(app, "request_processors")) | elbeanio/contented | test/app.py | Python | mit | 312 | 0.003205 |
from django.db import models
from django_extensions.db.models import TimeStampedModel
from django.contrib.auth.models import User, UserManager
import datetime
####################################################################################################
####################################### Catalogs ################################################
####################################################################################################
class Region(TimeStampedModel):
"""Regions where we have operations"""
name = models.CharField(max_length=80)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
def __unicode__(self):
return u"%s, %s" % (self.name , self.description)
####################################################################################################
class Currency(TimeStampedModel):
"""Currencies used in contracts"""
name = models.CharField(max_length=80)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
class Meta:
verbose_name_plural = "Currencies"
def __unicode__(self):
return u"%s, %s" % (self.name , self.description)
####################################################################################################
class Sector(TimeStampedModel):
"""Business sectors"""
name = models.CharField(max_length=80)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
def __unicode__(self):
return u"%s, %s" % (self.name , self.description)
####################################################################################################
class Activity(TimeStampedModel):
"""Activities that are predefined by the customer. As in Banorte."""
name = models.CharField(max_length=80)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
billable = models.BooleanField(default=True)
customer = models.ForeignKey('Company')
class Meta:
verbose_name_plural = "Activities"
def __unicode__(self):
return u"%s, %s" % (self.name , self.description)
####################################################################################################
class Category(TimeStampedModel):
"""(Category description)"""
name = models.CharField(max_length=80)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
customer = models.ForeignKey('Company')
class Meta:
verbose_name_plural = "Categories"
def __unicode__(self):
return u"%s, %s" % (self.name , self.description)
####################################################################################################
class WorkItem(TimeStampedModel):
"""This could represent a project an artefact or whatever is produced as a result of a worksession"""
name = models.CharField(max_length=80)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
is_deliverable = models.BooleanField(default=True)
customer = models.ForeignKey('Company')
def __unicode__(self):
return u"%s, %s" % (self.name , self.description)
####################################################################################################
class ProjectType(TimeStampedModel):
"""ProjectType """
name = models.CharField(max_length=80)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
customer = models.ForeignKey('Company')
def __unicode__(self):
return u"%s, %s" % (self.name , self.description)
####################################################################################################
class ProjectStatus(TimeStampedModel):
"""The current project status. It doesn't have an historic record."""
name = models.CharField(max_length=80)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
customer = models.ForeignKey('Company')
class Meta:
verbose_name_plural = "Project Statuses"
def __unicode__(self):
return u"%s, %s" % (self.name , self.description)
####################################################################################################
class Application(TimeStampedModel):
"""Customer's applications for a project."""
name = models.CharField(max_length=80)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
customer = models.ForeignKey('Company')
class Meta:
verbose_name_plural = "Applications"
def __unicode__(self):
return u"%s, %s" % (self.name , self.description)
####################################################################################################
####################################### Domain ##################################################
####################################################################################################
class Employee(User):
"""
We use the django authorization model to represent our employess.
We only define the extra fields required for our timetracking system.
"""
MARITAL_STATUSES = (
(u'M', u'Married'),
(u'S', u'Single'),
)
ENGLISH_LEVELS = (
(u'iBT TOEFL 107-120', u'iBT TOEFL 107-120'),
(u'iBT TOEFL 90-106', u'iBT TOEFL 90-106'),
(u'iBT TOEFL 61-89', u'iBT TOEFL 61-89'),
(u'iBT TOEFL 57-60', u'iBT TOEFL 57-60'),
(u'CPE', u'Cambridge-Certificate of Proficiency in English'),
(u'CAE', u'Cambridge-Certificate in Advance English'),
(u'FCE', u'Cambridge-First Certificate in English'),
(u'PET', u'Cambridge-Preliminary English Test'),
(u'KET', u'Cambridge-Key English Test'),
(u'IELTS 7.5-9.0', u'International English Language Testing System 7.5-9.0'),
(u'IELTS 6.5-7.0', u'International English Language Testing System 6.5-7.0'),
(u'IELTS 5.0-6.0', u'International English Language Testing System 5.0-6.0'),
(u'IELTS 3.5-4.5', u'International English Language Testing System 3.5-4.5'),
(u'IELTS 3.0', u'International English Language Testing System 3.0'),
)
salary = models.DecimalField(max_digits=15, decimal_places=4, help_text="Salary before taxes (Raw)")
is_Manager = models.BooleanField(default=False, help_text="Designates whether this user has a leadership or managerial rol")
telephone = models.CharField(blank=True, null=True, max_length=15)
birth_date = models.DateField(blank=True, null=True)
contract_date = models.DateField(default=datetime.datetime.now)
comments = models.TextField(blank=True, null=True)
has_passport = models.BooleanField(default=True)
is_technical = models.BooleanField(default=False, help_text="Designates whether this user has a technical leadership rol")
can_travel = models.BooleanField(default=False)
english_level = models.CharField(blank=True, null=True, max_length=50, choices=ENGLISH_LEVELS)
marital_status = models.CharField(blank=True, null=True, max_length=15, choices=MARITAL_STATUSES)
# Relationships
region = models.ForeignKey(Region)
def __unicode__(self):
return u"%s, %s" % (self.first_name , self.last_name)
####################################################################################################
class WorkSession(TimeStampedModel):
"""
This class represent a chunk of working time associated to one activity.
We get more flexibility and by the way is easier to register than forcing to use the activity as the unit of work.
In order to support diferent contexts the activity field is optional. In such case we will use the description field instead.
They are mutual exclusive (free or fixed style).
"""
work_date = models.DateField(default=datetime.datetime.today)
time = models.PositiveIntegerField(null=False)
description = models.CharField(blank=True, default='', max_length=100)
comments = models.TextField(blank=True, default='')
billable = models.BooleanField(default=True)
# Relationships
activity = models.ForeignKey(Activity, blank=True, null=True)
work_item = models.ForeignKey(WorkItem, blank=True, null=True, help_text="The time will be charged to this product.")
category = models.ForeignKey(Category, blank=True, null=True, help_text="To which group we will charge this time.")
project = models.ForeignKey('Project')
employee = models.ForeignKey(Employee)
def __unicode__(self):
return u"%s" % (self.description)
####################################################################################################
class Company(TimeStampedModel):
"""
This class models any kind of company: Customer, Partners, Associates, including ourself.
"""
RELATIONSHIP_TYPES = (
(u'C', u'Customer'),
(u'P', u'Partner'),
(u'A', u'Associate'),
)
trade_name = models.CharField(max_length=80, help_text="Common o comercial name. Used normally by marketing purposes")
legal_name = models.CharField(max_length=80, help_text="Name used for contracts")
description = models.TextField(blank=True, null=True)
service_rate = models.DecimalField(max_digits=15, decimal_places=4, help_text="General service rate. It is used as default when the project is created.")
address = models.CharField(blank=True, null=True, max_length=150)
relationship_since = models.DateField(default=datetime.datetime.today, help_text="when the relationship began")
relationship_type = models.CharField(max_length=2, choices=RELATIONSHIP_TYPES)
project_alias = models.CharField(blank=True, null=True, max_length=80, help_text="Name of the project given by the client: folio, service order, execution request.")
# Model Relationships
sector = models.ManyToManyField(Sector)
# contact = models.ForeignKey(User) # Let us include it in another application (Contact manager?)
# admin_team = models.ManyToManyField(Employee, related_name='company_admin_set') #TODO: Maybe create another application in order to assign managers (Staff manager?)
class Meta:
verbose_name_plural = "Companies"
def __unicode__(self):
return u"%s" % (self.trade_name)
####################################################################################################
class BusinessUnit(TimeStampedModel):
"""(BusinessUnit description)"""
name = models.CharField(max_length=80)
description = models.CharField(max_length=100)
enabled = models.BooleanField(default=True)
# Relationships
company = models.ForeignKey(Company, help_text="Company it belongs to")
parent = models.ForeignKey('self', null=True, blank=True, help_text="Another business unit it belongs to")
customer_team = models.ManyToManyField(User, help_text="Customer's people in charge")
#admin_team = models.ManyToManyField(Employee, related_name='businessunit_admin_set' ) #TODO: Maybe let us create another application in order to assign managers (Staff manager?)
def __unicode__(self):
return u"%s, %s" % (self.name , self.description)
####################################################################################################
class Project(TimeStampedModel):
"""(Project description)"""
name = models.CharField(max_length=80)
external_id = models.CharField(blank=True, null=True, max_length=80, help_text="Identifier given by the client")
description = models.CharField(max_length=100)
creation_date = models.DateField(blank=True, null=True, default=datetime.datetime.today, help_text="When the project was internally approved by the client.")
request_date = models.DateField(blank=True, null=True, default=datetime.datetime.today, help_text="When we received the formal request by the client.")
enabled = models.BooleanField(default=True)
planned_start_date = models.DateField(blank=True, null=True, default=datetime.datetime.today)
real_start_date = models.DateField(blank=True, null=True, default=datetime.datetime.today)
planned_end_date = models.DateField(blank=True, null=True, default=datetime.datetime.today)
real_end_date = models.DateField(blank=True, null=True, default=datetime.datetime.today)
planned_effort = models.PositiveIntegerField(blank=True, null=True)
real_effort = models.PositiveIntegerField(blank=True, null=True)
budget = models.DecimalField(max_digits=15, decimal_places=4, help_text="Project cost")
references = models.TextField(blank=True, null=True, help_text="links or other supplemental info")
rate = models.PositiveIntegerField(blank=True, null=True, help_text="Rate of the service. The default is given by the Customer.")
exchange_rate = models.PositiveIntegerField(blank=True, null=True, help_text="Exchange rate used to evaluate the cost of the project")
# Relationships
currency = models.ForeignKey(Currency)
status = models.ForeignKey(ProjectStatus) # canceled, active, suspended
customer = models.ForeignKey(Company)
region = models.ForeignKey(Region) #Mexico, Monterrey, Guadalajara, EU
team = models.ManyToManyField(Employee, related_name='client_projects') # team assigned to the project
client_project_type = models.ForeignKey(ProjectType, related_name='projects', help_text="Clasification given by the client: N1, N2 N3, N4, N5")
internal_project_type = models.ForeignKey(ProjectType, help_text="Clasification given by us: Pruebas, Desarrollo, Analisis")
client_business_unit = models.ForeignKey(BusinessUnit, blank=True, null=True, related_name='projects') # Dominio, etc.
internal_business_unit= models.ForeignKey(BusinessUnit, related_name='internal_projects', help_text="Testing, Development, etc.")
customer_contact = models.ForeignKey(User, related_name='client_business_projects')
work_items = models.ManyToManyField(WorkItem, blank=True, null=True, help_text="It could be applications, artefacts, etc.")
# admin_team = models.ManyToManyField(Employee, related_name='project_admin_set' ) #TODO: Maybe let us create another application in order to assign managers (Staff manager?)
# milestones #(Requerimiento funcional, entregable, fechas, etc.)
# work_items #(Applications, etc.)
def __unicode__(self):
return u"%s, %s" % (self.name , self.alias)
| hiphoox/experior | timetracking/models.py | Python | bsd-3-clause | 14,572 | 0.023813 |
# StationPlaylist Track Tool
# An app module for NVDA
# Copyright 2014-2021 Joseph Lee, released under GPL.
# Functionality is based on JFW scripts for SPL Track Tool by Brian Hartgen.
# Track Tool allows a broadcaster to manage track intros, cues and so forth.
# Each track is a list item with descriptions such as title, file name, intro time and so forth.
# One can press TAB to move along the controls for Track Tool.
# #155 (21.03): remove __future__ import when NVDA runs under Python 3.10.
from __future__ import annotations
from typing import Optional
import appModuleHandler
import addonHandler
import tones
from NVDAObjects.IAccessible import sysListView32
from .splstudio import splconfig, SPLTrackItem
addonHandler.initTranslation()
# Return a tuple of column headers.
# This is just a thinly disguised indexOf function from Studio's track item class.
def indexOf(ttVersion: str) -> tuple[str, ...]:
# Nine columns per line for each tuple.
if ttVersion < "5.31":
return (
"Artist", "Title", "Duration", "Cue", "Overlap", "Intro", "Outro", "Segue", "Hook Start",
"Hook Len", "Year", "Album", "CD Code", "URL 1", "URL 2", "Genre", "Mood", "Energy",
"Tempo", "BPM", "Gender", "Rating", "Filename", "Client", "Other", "Intro Link", "Outro Link",
"ReplayGain", "Record Label", "ISRC"
)
elif "5.31" <= ttVersion < "6.0":
return (
"Artist", "Title", "Duration", "Cue", "Overlap", "Intro", "Outro", "Segue", "Hook Start",
"Hook Len", "Year", "Album", "CD Code", "URL 1", "URL 2", "Genre", "Mood", "Energy",
"Tempo", "BPM", "Gender", "Rating", "Filename", "Client", "Other", "Intro Link", "Outro Link",
"ReplayGain", "Record Label", "ISRC", "Language"
)
else:
return (
"Artist", "Title", "Duration", "Cue", "Overlap", "Intro", "Outro", "Segue", "Hook Start",
"Hook Len", "Year", "Album", "CD Code", "URL 1", "URL 2", "Genre", "Mood", "Energy",
"Tempo", "BPM", "Gender", "Rating", "Filename", "Client", "Other", "Intro Link", "Outro Link",
"ReplayGain", "Record Label", "ISRC", "Language", "Restrictions", "Exclude from Requests"
)
class TrackToolItem(SPLTrackItem):
"""An entry in Track Tool, used to implement some exciting features.
"""
def reportFocus(self):
# Play a beep when intro exists.
if self._getColumnContentRaw(self.indexOf("Intro")) is not None:
tones.beep(550, 100)
super(TrackToolItem, self).reportFocus()
def indexOf(self, header: str) -> Optional[int]:
try:
return indexOf(self.appModule.productVersion).index(header)
except ValueError:
return None
@property
def exploreColumns(self) -> list[str]:
return splconfig.SPLConfig["General"]["ExploreColumnsTT"]
class AppModule(appModuleHandler.AppModule):
def __init__(self, *args, **kwargs):
super(AppModule, self).__init__(*args, **kwargs)
# #64 (18.07): load config database if not done already.
splconfig.openConfig("tracktool")
def terminate(self):
super(AppModule, self).terminate()
splconfig.closeConfig("tracktool")
def chooseNVDAObjectOverlayClasses(self, obj, clsList):
import controlTypes
if obj.windowClassName == "TTntListView.UnicodeClass":
if obj.role == controlTypes.Role.LISTITEM:
clsList.insert(0, TrackToolItem)
elif obj.role == controlTypes.Role.LIST:
clsList.insert(0, sysListView32.List)
| josephsl/stationPlaylist | addon/appModules/tracktool.py | Python | gpl-2.0 | 3,301 | 0.019388 |
import os
import imp
import sys
import yaml
class YamlImportHook:
def find_module(self, fullname, path=None):
name = fullname.split('.')[-1]
for folder in path or sys.path:
if os.path.exists(os.path.join(folder, '%s.yml' % name)):
return self
return None
def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
sys.modules[fullname] = mod = imp.new_module(fullname)
if '.' in fullname:
pkg, name = fullname.rsplit('.', 1)
path = sys.modules[pkg].__path__
else:
pkg, name = '', fullname
path = sys.path
for folder in path:
if os.path.exists(os.path.join(folder, '%s.yml' % name)):
mod.__file__ = os.path.join(folder, '%s.yml' % name)
mod.__package__ = pkg
mod.__loader__ = self
                with open(mod.__file__) as f:
                    mod.__dict__.update(yaml.safe_load(f) or {})
return mod
        # somehow not found: clean up and signal failure per the loader contract
        del sys.modules[fullname]
        raise ImportError(fullname)
# support reload()ing this module
try:
hook
except NameError:
pass
else:
try:
sys.meta_path.remove(hook)
except ValueError:
# not found, skip removing
pass
# automatically install hook
hook = YamlImportHook()
sys.meta_path.insert(0, hook)
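# Usage sketch (illustrative; the file name and keys below are made-up):
# with a sibling file `settings.yml` containing
#
#     debug: true
#     database:
#       host: localhost
#
# importing this module installs the hook, after which the YAML file imports
# like a normal module:
#
#     import yamlmod
#     import settings
#     settings.debug             # True
#     settings.database['host']  # 'localhost'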
| sciyoshi/yamlmod | yamlmod.py | Python | mit | 1,179 | 0.028838 |
"""
Module Resty
Date: November 25, 2013
Company: SwissTech Consulting.
Author: Patrick Glass <patrickglass@gmail.com>
Copyright: Copyright 2013 SwissTech Consulting.
This module implements a simple REST API framework for interfacing with the
Server via its REST API.
"""
__title__ = 'Resty'
__version__ = '0.1'
__author__ = 'Patrick Glass'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 Patrick Glass'
from resty.api import RestyAPI
from resty.exceptions import (
RestApiException,
RestApiUrlException,
RestApiAuthError,
RestApiBadRequest,
RestApiServersDown
)
from resty.auth import RestAuthToken
from resty.request import request
| patrickglass/Resty | resty/__init__.py | Python | apache-2.0 | 667 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pretend
import requests
from twine import __main__ as dunder_main
from twine.commands import upload
def test_exception_handling(monkeypatch, capsys):
monkeypatch.setattr(sys, "argv", ["twine", "upload", "missing.whl"])
error = dunder_main.main()
assert error
captured = capsys.readouterr()
    # Hard-coding control characters for red text; couldn't find a succinct alternative.
# Removing trailing whitespace on wrapped lines; trying to test it was ugly.
level = "\x1b[31mERROR \x1b[0m"
assert [line.rstrip() for line in captured.out.splitlines()] == [
f"{level} InvalidDistribution: Cannot find file (or expand pattern):",
" 'missing.whl'",
]
def test_http_exception_handling(monkeypatch, capsys):
monkeypatch.setattr(sys, "argv", ["twine", "upload", "test.whl"])
monkeypatch.setattr(
upload,
"upload",
pretend.raiser(
requests.HTTPError(
response=pretend.stub(
url="https://example.org",
status_code=400,
reason="Error reason",
)
)
),
)
error = dunder_main.main()
assert error
captured = capsys.readouterr()
    # Hard-coding control characters for red text; couldn't find a succinct alternative.
# Removing trailing whitespace on wrapped lines; trying to test it was ugly.
level = "\x1b[31mERROR \x1b[0m"
assert [line.rstrip() for line in captured.out.splitlines()] == [
f"{level} HTTPError: 400 Bad Request from https://example.org",
" Error reason",
]
def test_no_color_exception(monkeypatch, capsys):
monkeypatch.setattr(sys, "argv", ["twine", "--no-color", "upload", "missing.whl"])
error = dunder_main.main()
assert error
captured = capsys.readouterr()
# Removing trailing whitespace on wrapped lines; trying to test it was ugly.
assert [line.rstrip() for line in captured.out.splitlines()] == [
"ERROR InvalidDistribution: Cannot find file (or expand pattern):",
" 'missing.whl'",
]
# TODO: Test verbose output formatting
| pypa/twine | tests/test_main.py | Python | apache-2.0 | 2,736 | 0.002193 |
from __future__ import division  # required for Python 2; must be the first statement
from gpiozero import LEDBarGraph
from time import sleep
graph = LEDBarGraph(5, 6, 13, 19, 26, 20)
graph.value = 1 # (1, 1, 1, 1, 1, 1)
sleep(1)
graph.value = 1/2 # (1, 1, 1, 0, 0, 0)
sleep(1)
graph.value = -1/2 # (0, 0, 0, 1, 1, 1)
sleep(1)
graph.value = 1/4 # (1, 0, 0, 0, 0, 0)
sleep(1)
graph.value = -1 # (1, 1, 1, 1, 1, 1)
sleep(1)
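# LEDBarGraph.value maps a fraction in [-1, 1] to a number of lit LEDs:
# positive values fill from the first pin listed, negative values fill from
# the last, so with 6 LEDs a value of 1/2 lights the first three.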
| waveform80/gpio-zero | docs/examples/led_bargraph_1.py | Python | bsd-3-clause | 400 | 0 |
from accasim.experimentation.workload_generator import workload_generator
if __name__ == '__main__':
#===========================================================================
# Workload filepath
#===========================================================================
workload = 'workload.swf'
#==========================================================================
# System config filepath
#==========================================================================
sys_config = 'config.config'
#===========================================================================
# Performance of the computing units
#===========================================================================
performance = { 'core': 3.334 / 2 }
#===========================================================================
# Request limits for each resource type
#===========================================================================
request_limits = {'min':{'core': 1, 'mem': 1000000 // 4}, 'max': {'core': 4, 'mem': 1000000}}
#===========================================================================
# Create the workload generator instance with the basic inputs
#===========================================================================
generator = workload_generator(workload, sys_config, performance, request_limits)
#===========================================================================
# Generate n jobs and save them to the nw filepath
#===========================================================================
n = 100
nw_filepath = 'new_workload.swf'
jobs = generator.generate_jobs(n, nw_filepath)
| cgalleguillosm/accasim | extra/examples/workload_generator-example.py | Python | mit | 1,782 | 0.018519 |
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.preprocessing.sequence import pad_sequences
import numpy as np
import random, sys
'''
Example script to generate haiku text.
It is recommended to run this script on GPU, as recurrent
networks are quite computationally intensive.
If you try this script on new data, make sure your corpus
has at least ~100k characters. ~1M is better.
'''
path = "haiku_all.txt"
text = open(path).read().lower()
print('corpus length:', len(text))
chars = set(text)
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 100
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i : i + maxlen])
next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
print('Vectorization...')
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
print "X.shape: %s, Y.shape: %s" % (X.shape, y.shape)
# build the model: 2 stacked LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(len(chars), 512, return_sequences=False))
model.add(Dropout(0.2))
## Remove above 2 lines and replace by below 2 lines to make 2 layers LSTM.
#model.add(LSTM(len(chars), 512, return_sequences=True))
#model.add(Dropout(0.2))
#model.add(LSTM(512, 512, return_sequences=False))
#model.add(Dropout(0.2))
model.add(Dense(512, len(chars)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# helper function to sample an index from a probability array
def sample(a, temperature=1.0):
a = np.log(a)/temperature
a = np.exp(a)/np.sum(np.exp(a))
return np.argmax(np.random.multinomial(1,a,1))
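# Temperature intuition: dividing the log-probabilities by a temperature below
# 1.0 sharpens the distribution toward the most likely character, while values
# above 1.0 flatten it toward uniform sampling; the diversity values used
# below (0.2 to 1.2) sweep that range.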
# train the model, output generated text after each iteration
def generate_from_model(model, begin_sent=None, diversity_l=[0.2, 0.5, 1.0, 1.2]):
if begin_sent is None:
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in diversity_l:
print
print '----- diversity:', diversity
generated = ''
if begin_sent is None:
sentence = text[start_index : start_index + maxlen]
else:
sentence = begin_sent
generated += sentence
print '----- Generating with seed: "' + sentence + '"'
sys.stdout.write(generated)
tot_lines = 0
tot_chars = 0
while True:
if tot_lines > 3 or tot_chars > 120:
break
x = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x[0, t, char_indices[char]] = 1.
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
tot_chars += 1
generated += next_char
if next_char == '\t':
tot_lines += 1
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print ""
if __name__ == "__main__":
history = model.fit(X, y, batch_size=200, nb_epoch=20)
generate_from_model(model)
"""
for i in xrange(1,4):
history = model.fit(X, y, batch_size=100*i, nb_epoch=20)
generate_from_model(model)
"""
| napsternxg/haiku_rnn | haiku_gen.py | Python | gpl-2.0 | 3,766 | 0.004514 |
from django.http.response import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from urllib.parse import urlencode
from django.views import View
from django.conf import settings
from django.http.request import HttpRequest
from django.db.utils import IntegrityError
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.exceptions import APIException
from users.utils import is_auth_password_time_valid
from users.views import UserVerifyPasswordView
from users.models import User
from common.utils import get_logger, FlashMessageUtil
from common.utils.random import random_string
from common.utils.django import reverse, get_object_or_none
from common.mixins.views import PermissionsMixin
from common.sdk.im.feishu import FeiShu, URL
from common.utils.common import get_request_ip
from authentication import errors
from authentication.mixins import AuthMixin
from authentication.notifications import OAuthBindMessage
logger = get_logger(__file__)
FEISHU_STATE_SESSION_KEY = '_feishu_state'
class FeiShuQRMixin(PermissionsMixin, View):
def dispatch(self, request, *args, **kwargs):
try:
return super().dispatch(request, *args, **kwargs)
except APIException as e:
msg = str(e.detail)
return self.get_failed_response(
'/',
_('FeiShu Error'),
msg
)
def verify_state(self):
state = self.request.GET.get('state')
session_state = self.request.session.get(FEISHU_STATE_SESSION_KEY)
if state != session_state:
return False
return True
def get_verify_state_failed_response(self, redirect_uri):
msg = _("The system configuration is incorrect. Please contact your administrator")
return self.get_failed_response(redirect_uri, msg, msg)
def get_qr_url(self, redirect_uri):
state = random_string(16)
self.request.session[FEISHU_STATE_SESSION_KEY] = state
params = {
'app_id': settings.FEISHU_APP_ID,
'state': state,
'redirect_uri': redirect_uri,
}
url = URL.AUTHEN + '?' + urlencode(params)
return url
@staticmethod
def get_success_response(redirect_url, title, msg):
message_data = {
'title': title,
'message': msg,
'interval': 5,
'redirect_url': redirect_url,
}
return FlashMessageUtil.gen_and_redirect_to(message_data)
@staticmethod
def get_failed_response(redirect_url, title, msg):
message_data = {
'title': title,
'error': msg,
'interval': 5,
'redirect_url': redirect_url,
}
return FlashMessageUtil.gen_and_redirect_to(message_data)
def get_already_bound_response(self, redirect_url):
msg = _('FeiShu is already bound')
response = self.get_failed_response(redirect_url, msg, msg)
return response
class FeiShuQRBindView(FeiShuQRMixin, View):
permission_classes = (IsAuthenticated,)
def get(self, request: HttpRequest):
redirect_url = request.GET.get('redirect_url')
if not is_auth_password_time_valid(request.session):
msg = _('Please verify your password first')
response = self.get_failed_response(redirect_url, msg, msg)
return response
redirect_uri = reverse('authentication:feishu-qr-bind-callback', external=True)
redirect_uri += '?' + urlencode({'redirect_url': redirect_url})
url = self.get_qr_url(redirect_uri)
return HttpResponseRedirect(url)
class FeiShuQRBindCallbackView(FeiShuQRMixin, View):
permission_classes = (IsAuthenticated,)
def get(self, request: HttpRequest):
code = request.GET.get('code')
redirect_url = request.GET.get('redirect_url')
if not self.verify_state():
return self.get_verify_state_failed_response(redirect_url)
user = request.user
if user.feishu_id:
response = self.get_already_bound_response(redirect_url)
return response
feishu = FeiShu(
app_id=settings.FEISHU_APP_ID,
app_secret=settings.FEISHU_APP_SECRET
)
user_id = feishu.get_user_id_by_code(code)
if not user_id:
msg = _('FeiShu query user failed')
response = self.get_failed_response(redirect_url, msg, msg)
return response
try:
user.feishu_id = user_id
user.save()
except IntegrityError as e:
if e.args[0] == 1062:
msg = _('The FeiShu is already bound to another user')
response = self.get_failed_response(redirect_url, msg, msg)
return response
raise e
ip = get_request_ip(request)
OAuthBindMessage(user, ip, _('FeiShu'), user_id).publish_async()
msg = _('Binding FeiShu successfully')
response = self.get_success_response(redirect_url, msg, msg)
return response
class FeiShuEnableStartView(UserVerifyPasswordView):
def get_success_url(self):
referer = self.request.META.get('HTTP_REFERER')
redirect_url = self.request.GET.get("redirect_url")
success_url = reverse('authentication:feishu-qr-bind')
success_url += '?' + urlencode({
'redirect_url': redirect_url or referer
})
return success_url
class FeiShuQRLoginView(FeiShuQRMixin, View):
permission_classes = (AllowAny,)
def get(self, request: HttpRequest):
redirect_url = request.GET.get('redirect_url')
redirect_uri = reverse('authentication:feishu-qr-login-callback', external=True)
redirect_uri += '?' + urlencode({'redirect_url': redirect_url})
url = self.get_qr_url(redirect_uri)
return HttpResponseRedirect(url)
class FeiShuQRLoginCallbackView(AuthMixin, FeiShuQRMixin, View):
permission_classes = (AllowAny,)
def get(self, request: HttpRequest):
code = request.GET.get('code')
redirect_url = request.GET.get('redirect_url')
login_url = reverse('authentication:login')
if not self.verify_state():
return self.get_verify_state_failed_response(redirect_url)
feishu = FeiShu(
app_id=settings.FEISHU_APP_ID,
app_secret=settings.FEISHU_APP_SECRET
)
user_id = feishu.get_user_id_by_code(code)
if not user_id:
            # This error should not occur in the normal flow; it indicates a tampered request
msg = _('Failed to get user from FeiShu')
response = self.get_failed_response(login_url, title=msg, msg=msg)
return response
user = get_object_or_none(User, feishu_id=user_id)
if user is None:
title = _('FeiShu is not bound')
msg = _('Please login with a password and then bind the FeiShu')
response = self.get_failed_response(login_url, title=title, msg=msg)
return response
try:
self.check_oauth2_auth(user, settings.AUTH_BACKEND_FEISHU)
except errors.AuthFailedError as e:
self.set_login_failed_mark()
msg = e.msg
response = self.get_failed_response(login_url, title=msg, msg=msg)
return response
return self.redirect_to_guard_view()
| jumpserver/jumpserver | apps/authentication/views/feishu.py | Python | gpl-3.0 | 7,469 | 0.000538 |
"""
This package supplies tools for working with automated services
connected to a server. It was written with IRC in mind, so it's not
very generic, in that it pretty much assumes a single client connected
to a central server, and it's not easy for a client to add further connections
at runtime (But possible, though you might have to avoid selector.Reactor.loop.
"""
__all__ = [
"irc",
"selector",
"connection",
"irc2num"
]
| kaaveland/anybot | im/__init__.py | Python | gpl-2.0 | 449 | 0.002227 |
import re
import itertools
import subprocess
import collections
def convert_pdf(filename, type='xml'):
commands = {'text': ['pdftotext', '-layout', filename, '-'],
'text-nolayout': ['pdftotext', filename, '-'],
'xml': ['pdftohtml', '-xml', '-stdout', filename],
'html': ['pdftohtml', '-stdout', filename]}
try:
pipe = subprocess.Popen(commands[type], stdout=subprocess.PIPE,
close_fds=True).stdout
except OSError as e:
raise EnvironmentError("error running %s, missing executable? [%s]" %
' '.join(commands[type]), e)
data = pipe.read()
pipe.close()
return data
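# Illustrative use (the path is made up):
# xml_data = convert_pdf('/tmp/bill.pdf', type='xml')
# txt_data = convert_pdf('/tmp/bill.pdf', type='text')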
def clean_spaces(s):
return re.sub('\s+', ' ', s, flags=re.U).strip()
class PlaintextColumns(object):
'''
Parse plain text columns like this into a table:
cols = """
Austin Errington Lawson, L Pryor
Bartlett Forestal Macer Riecken
Battles GiaQuinta Moed Shackleford
Bauer Goodin Moseley Smith, V
Brown,C Hale Niezgodsk Stemler
Candelaria Reardon Harris Pelath Summers
DeLaney Kersey Pierce VanDenburgh
Dvorak Klinker Porter
"""
Usage:
>>> table = PlaintextColumns(cols)
>>> next(table.rows())
('Austin', 'Errington', 'Lawson, L', 'Pryor')
>>> next(table.cols())
('Austin',
'Bartlett',
'Battles',
'Bauer',
'Brown,C',
'Candelaria Reardon',
'DeLaney',
'Dvorak')
>>> list(table.cells())
['Austin', 'Errington', 'Lawson, L', ...]
'''
def __init__(self, text, threshold=3):
'''Threshold is how many times a column boundary (an integer offset
from the beginning of the line) must be found in order to qualify
as a boundary and not an outlier.
'''
self.text = text.strip()
self.threshold = threshold
def _get_column_ends(self):
'''Guess where the ends of the columns lie.
'''
ends = collections.Counter()
for line in self.text.splitlines():
for matchobj in re.finditer('\s{2,}', line.lstrip()):
ends[matchobj.end()] += 1
return ends
def _get_column_boundaries(self):
'''Use the guessed ends to guess the boundaries of the plain
text columns.
'''
# Try to figure out the most common column boundaries.
ends = self._get_column_ends()
if not ends:
# If there aren't even any nontrivial sequences of whitespace
# dividing text, there may be just one column. In which case,
# Return a single span, effectively the whole line.
return [slice(None, None)]
most_common = []
threshold = self.threshold
for k, v in collections.Counter(ends.values()).most_common():
if k >= threshold:
most_common.append(k)
if most_common:
boundaries = []
for k, v in ends.items():
if v in most_common:
boundaries.append(k)
else:
# Here there weren't enough boundaries to guess the most common
# ones, so just use the apparent boundaries. In other words, we
# have only 1 row. Potentially a source of inaccuracy.
boundaries = ends.keys()
# Convert the boundaries into a list of span slices.
boundaries.sort()
last_boundary = boundaries[-1]
boundaries = zip([0] + boundaries, boundaries)
boundaries = list(itertools.starmap(slice, boundaries))
# And get from the last boundary to the line ending.
boundaries.append(slice(last_boundary, None))
return boundaries
@property
def boundaries(self):
_boundaries = getattr(self, '_boundaries', None)
if _boundaries is not None:
return _boundaries
self._boundaries = _boundaries = self._get_column_boundaries()
return _boundaries
def getcells(self, line):
'''Using self.boundaries, extract cells from the given line.
'''
for boundary in self.boundaries:
cell = line.lstrip()[boundary].strip()
if cell:
for cell in re.split('\s{3,}', cell):
yield cell
else:
yield None
def rows(self):
'''Returns an iterator of row tuples.
'''
for line in self.text.splitlines():
yield tuple(self.getcells(line))
def cells(self):
'''Returns an interator of all cells in the table.
'''
for line in self.text.splitlines():
for cell in self.getcells(line):
yield cell
def cols(self):
'''Returns an interator of column tuples.
'''
return itertools.izip(*list(self.rows()))
__iter__ = cells
| sunlightlabs/billy | billy/scrape/utils.py | Python | bsd-3-clause | 5,127 | 0.000585 |
"""Error codes used during the analysis."""
ERR_INPUT_INVALID = {
"name": "ERR_INPUT_INVALID ",
"msg": "Input is invalid."
}
ERR_MODEL_NOT_AVAILABLE = {
"name": "ERR_MODEL_NOT_AVAILABLE",
"msg": "Model does not seem to be available! It should be either trained or loaded "
"before scoring."
}
| sara-02/fabric8-analytics-stack-analysis | util/error/error_codes.py | Python | gpl-3.0 | 322 | 0.003106 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from structures.insertion_sort import insertion_sort
@pytest.fixture
def sorted_list():
return [i for i in xrange(10)]
@pytest.fixture
def reverse_list():
return [i for i in xrange(9, -1, -1)]
@pytest.fixture
def average_list():
return [5, 9, 2, 4, 1, 6, 8, 7, 0, 3]
def test_sorted(sorted_list):
insertion_sort(sorted_list)
assert sorted_list == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
def test_worst(reverse_list):
insertion_sort(reverse_list)
assert reverse_list == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
def test_average(average_list):
insertion_sort(average_list)
assert average_list == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
def test_repeats():
l = [3, 6, 7, 3, 9, 5, 2, 7]
insertion_sort(l)
assert l == [2, 3, 3, 5, 6, 7, 7, 9]
def test_multiple_types():
l = [3, 'foo', 2.8, True, []]
    # CPython 2 orders mixed types deterministically: numbers first, then
    # remaining types by type name
insertion_sort(l)
assert l == [True, 2.8, 3, [], 'foo']
| tlake/data-structures-mk2 | tests/test_insertion_sort.py | Python | mit | 1,008 | 0.003968 |
# Exercise 1
#
# Improve the Critter caretaker program by allowing the user to specify how much food he or she feeds the critter and
# how long he or she plays with the critter. Have these values affect how quickly the critter's hunger and boredom
# levels drop.
#
class Critter(object):
"""A virtual pet"""
def __init__(self, name, hunger = 0, boredom = 0):
self.name = name
self.hunger = hunger
self.boredom = boredom
def __pass_time(self, hunger_val = 1, boredom_val = 1):
self.hunger += hunger_val
self.boredom += boredom_val
@property
def mood(self):
unhappiness = self.hunger + self.boredom
if unhappiness < 5:
m = "happy"
elif 5 <= unhappiness <= 10:
m = "okay"
elif 11 <= unhappiness <= 15:
m = "frustrated"
else:
m = "mad"
return m
def talk(self):
print("I'm", self.name, "and I feel", self.mood, "now.\n")
self.__pass_time()
def eat(self, food = 4):
print("Brruppp. Thank you.")
self.hunger -= food
if self.hunger < 0:
self.hunger = 0
self.__pass_time(boredom_val=food/2)
def play(self, fun = 4):
print("Wheee!")
self.boredom -= fun
if self.boredom < 0:
self.boredom = 0
self.__pass_time(hunger_val=fun/2)
def main():
crit_name = input("What do you want to name your critter?: ")
crit = Critter(crit_name)
choice = None
while choice != "0":
        print("""
        Critter Caretaker
        0 - Quit
        1 - Listen to your critter
        2 - Feed your critter
        3 - Play with your critter
        """)
choice = input("Choice: ")
print()
# exit
if choice == "0":
print("Good-bye.")
# listen to your critter
elif choice == "1":
crit.talk()
# feed your critter
elif choice == "2":
crit.eat(int(input("How much food do you wish to feed the critter? ")))
# play with your critter
elif choice == "3":
crit.play(int(input("How much time do you wish to play with the critter? ")))
# some unknown choice
else:
print("\nSorry, but", choice, "isn't a valid choice.")
main()
("\n\nPress the enter key to exit.")
| dmartinezgarcia/Python-Programming | Chapter 8 - Software Objects/exercise_1.py | Python | gpl-2.0 | 2,396 | 0.00793 |
# encoding: utf-8
"""
Plot-related objects. A plot is known as a chart group in the MS API. A chart
can have more than one plot overlayed on each other, such as a line plot
layered over a bar plot.
"""
from __future__ import absolute_import, print_function, unicode_literals
from .category import Categories
from .datalabel import DataLabels
from ..enum.chart import XL_CHART_TYPE as XL
from ..oxml.ns import qn
from ..oxml.simpletypes import ST_BarDir, ST_Grouping
from .series import SeriesCollection
from ..util import lazyproperty
class _BasePlot(object):
"""
A distinct plot that appears in the plot area of a chart. A chart may
have more than one plot, in which case they appear as superimposed
layers, such as a line plot appearing on top of a bar chart.
"""
def __init__(self, xChart, chart):
super(_BasePlot, self).__init__()
self._element = xChart
self._chart = chart
@lazyproperty
def categories(self):
"""
Returns a |category.Categories| sequence object containing
a |category.Category| object for each of the category labels
associated with this plot. The |category.Category| class derives from
``str``, so the returned value can be treated as a simple sequence of
strings for the common case where all you need is the labels in the
order they appear on the chart. |category.Categories| provides
additional properties for dealing with hierarchical categories when
required.
"""
return Categories(self._element)
@property
def chart(self):
"""
The |Chart| object containing this plot.
"""
return self._chart
@property
def data_labels(self):
"""
|DataLabels| instance providing properties and methods on the
collection of data labels associated with this plot.
"""
dLbls = self._element.dLbls
if dLbls is None:
raise ValueError(
"plot has no data labels, set has_data_labels = True first"
)
return DataLabels(dLbls)
@property
def has_data_labels(self):
"""
Read/write boolean, |True| if the series has data labels. Assigning
|True| causes data labels to be added to the plot. Assigning False
removes any existing data labels.
"""
return self._element.dLbls is not None
@has_data_labels.setter
def has_data_labels(self, value):
"""
Add, remove, or leave alone the ``<c:dLbls>`` child element depending
on current state and assigned *value*. If *value* is |True| and no
``<c:dLbls>`` element is present, a new default element is added with
default child elements and settings. When |False|, any existing dLbls
element is removed.
"""
if bool(value) is False:
self._element._remove_dLbls()
else:
if self._element.dLbls is None:
dLbls = self._element._add_dLbls()
dLbls.showVal.val = True
@lazyproperty
def series(self):
"""
A sequence of |Series| objects representing the series in this plot,
in the order they appear in the plot.
"""
return SeriesCollection(self._element)
@property
def vary_by_categories(self):
"""
Read/write boolean value specifying whether to use a different color
for each of the points in this plot. Only effective when there is
a single series; PowerPoint automatically varies color by series when
more than one series is present.
"""
varyColors = self._element.varyColors
if varyColors is None:
return True
return varyColors.val
@vary_by_categories.setter
def vary_by_categories(self, value):
self._element.get_or_add_varyColors().val = bool(value)
class AreaPlot(_BasePlot):
"""
An area plot.
"""
class Area3DPlot(_BasePlot):
"""
A 3-dimensional area plot.
"""
class BarPlot(_BasePlot):
"""
A bar chart-style plot.
"""
@property
def gap_width(self):
"""
Width of gap between bar(s) of each category, as an integer
percentage of the bar width. The default value for a new bar chart is
150, representing 150% or 1.5 times the width of a single bar.
"""
gapWidth = self._element.gapWidth
if gapWidth is None:
return 150
return gapWidth.val
@gap_width.setter
def gap_width(self, value):
gapWidth = self._element.get_or_add_gapWidth()
gapWidth.val = value
@property
def overlap(self):
"""
Read/write int value in range -100..100 specifying a percentage of
the bar width by which to overlap adjacent bars in a multi-series bar
chart. Default is 0. A setting of -100 creates a gap of a full bar
width and a setting of 100 causes all the bars in a category to be
superimposed. A stacked bar plot has overlap of 100 by default.
"""
overlap = self._element.overlap
if overlap is None:
return 0
return overlap.val
@overlap.setter
def overlap(self, value):
"""
Set the value of the ``<c:overlap>`` child element to *int_value*,
or remove the overlap element if *int_value* is 0.
"""
if value == 0:
self._element._remove_overlap()
return
self._element.get_or_add_overlap().val = value
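# Illustrative use of the two knobs above, assuming an existing bar chart:
#   plot = chart.plots[0]
#   plot.gap_width = 50    # gap shrinks to 50% of a bar's width
#   plot.overlap = 100     # bars within a category are superimposed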
class BubblePlot(_BasePlot):
"""
A bubble chart plot.
"""
@property
def bubble_scale(self):
"""
An integer between 0 and 300 inclusive indicating the percentage of
the default size at which bubbles should be displayed. Assigning
|None| produces the same behavior as assigning `100`.
"""
bubbleScale = self._element.bubbleScale
if bubbleScale is None:
return 100
return bubbleScale.val
@bubble_scale.setter
def bubble_scale(self, value):
bubbleChart = self._element
bubbleChart._remove_bubbleScale()
if value is None:
return
bubbleScale = bubbleChart._add_bubbleScale()
bubbleScale.val = value
class DoughnutPlot(_BasePlot):
"""
    A doughnut plot.
"""
class LinePlot(_BasePlot):
"""
A line chart-style plot.
"""
class PiePlot(_BasePlot):
"""
A pie chart-style plot.
"""
class RadarPlot(_BasePlot):
"""
A radar-style plot.
"""
class XyPlot(_BasePlot):
"""
An XY (scatter) plot.
"""
def PlotFactory(xChart, chart):
"""
Return an instance of the appropriate subclass of _BasePlot based on the
tagname of *xChart*.
"""
try:
PlotCls = {
qn("c:areaChart"): AreaPlot,
qn("c:area3DChart"): Area3DPlot,
qn("c:barChart"): BarPlot,
qn("c:bubbleChart"): BubblePlot,
qn("c:doughnutChart"): DoughnutPlot,
qn("c:lineChart"): LinePlot,
qn("c:pieChart"): PiePlot,
qn("c:radarChart"): RadarPlot,
qn("c:scatterChart"): XyPlot,
}[xChart.tag]
except KeyError:
raise ValueError("unsupported plot type %s" % xChart.tag)
return PlotCls(xChart, chart)
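# Illustrative use: given the raw `<c:barChart>` (etc.) lxml element and its
# owning Chart object, PlotFactory(xChart, chart) returns the matching wrapper.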
class PlotTypeInspector(object):
"""
"One-shot" service object that knows how to identify the type of a plot
as a member of the XL_CHART_TYPE enumeration.
"""
@classmethod
def chart_type(cls, plot):
"""
Return the member of :ref:`XlChartType` that corresponds to the chart
type of *plot*.
"""
try:
chart_type_method = {
"AreaPlot": cls._differentiate_area_chart_type,
"Area3DPlot": cls._differentiate_area_3d_chart_type,
"BarPlot": cls._differentiate_bar_chart_type,
"BubblePlot": cls._differentiate_bubble_chart_type,
"DoughnutPlot": cls._differentiate_doughnut_chart_type,
"LinePlot": cls._differentiate_line_chart_type,
"PiePlot": cls._differentiate_pie_chart_type,
"RadarPlot": cls._differentiate_radar_chart_type,
"XyPlot": cls._differentiate_xy_chart_type,
}[plot.__class__.__name__]
except KeyError:
raise NotImplementedError(
"chart_type() not implemented for %s" % plot.__class__.__name__
)
return chart_type_method(plot)
@classmethod
def _differentiate_area_3d_chart_type(cls, plot):
return {
ST_Grouping.STANDARD: XL.THREE_D_AREA,
ST_Grouping.STACKED: XL.THREE_D_AREA_STACKED,
ST_Grouping.PERCENT_STACKED: XL.THREE_D_AREA_STACKED_100,
}[plot._element.grouping_val]
@classmethod
def _differentiate_area_chart_type(cls, plot):
return {
ST_Grouping.STANDARD: XL.AREA,
ST_Grouping.STACKED: XL.AREA_STACKED,
ST_Grouping.PERCENT_STACKED: XL.AREA_STACKED_100,
}[plot._element.grouping_val]
@classmethod
def _differentiate_bar_chart_type(cls, plot):
barChart = plot._element
if barChart.barDir.val == ST_BarDir.BAR:
return {
ST_Grouping.CLUSTERED: XL.BAR_CLUSTERED,
ST_Grouping.STACKED: XL.BAR_STACKED,
ST_Grouping.PERCENT_STACKED: XL.BAR_STACKED_100,
}[barChart.grouping_val]
if barChart.barDir.val == ST_BarDir.COL:
return {
ST_Grouping.CLUSTERED: XL.COLUMN_CLUSTERED,
ST_Grouping.STACKED: XL.COLUMN_STACKED,
ST_Grouping.PERCENT_STACKED: XL.COLUMN_STACKED_100,
}[barChart.grouping_val]
raise ValueError("invalid barChart.barDir value '%s'" % barChart.barDir.val)
@classmethod
def _differentiate_bubble_chart_type(cls, plot):
def first_bubble3D(bubbleChart):
results = bubbleChart.xpath("c:ser/c:bubble3D")
return results[0] if results else None
bubbleChart = plot._element
bubble3D = first_bubble3D(bubbleChart)
if bubble3D is None:
return XL.BUBBLE
if bubble3D.val:
return XL.BUBBLE_THREE_D_EFFECT
return XL.BUBBLE
@classmethod
def _differentiate_doughnut_chart_type(cls, plot):
doughnutChart = plot._element
explosion = doughnutChart.xpath("./c:ser/c:explosion")
return XL.DOUGHNUT_EXPLODED if explosion else XL.DOUGHNUT
@classmethod
def _differentiate_line_chart_type(cls, plot):
lineChart = plot._element
def has_line_markers():
matches = lineChart.xpath('c:ser/c:marker/c:symbol[@val="none"]')
if matches:
return False
return True
if has_line_markers():
return {
ST_Grouping.STANDARD: XL.LINE_MARKERS,
ST_Grouping.STACKED: XL.LINE_MARKERS_STACKED,
ST_Grouping.PERCENT_STACKED: XL.LINE_MARKERS_STACKED_100,
}[plot._element.grouping_val]
else:
return {
ST_Grouping.STANDARD: XL.LINE,
ST_Grouping.STACKED: XL.LINE_STACKED,
ST_Grouping.PERCENT_STACKED: XL.LINE_STACKED_100,
}[plot._element.grouping_val]
@classmethod
def _differentiate_pie_chart_type(cls, plot):
pieChart = plot._element
explosion = pieChart.xpath("./c:ser/c:explosion")
return XL.PIE_EXPLODED if explosion else XL.PIE
@classmethod
def _differentiate_radar_chart_type(cls, plot):
radarChart = plot._element
radar_style = radarChart.xpath("c:radarStyle")[0].get("val")
def noMarkers():
matches = radarChart.xpath("c:ser/c:marker/c:symbol")
if matches and matches[0].get("val") == "none":
return True
return False
if radar_style is None:
return XL.RADAR
if radar_style == "filled":
return XL.RADAR_FILLED
if noMarkers():
return XL.RADAR
return XL.RADAR_MARKERS
@classmethod
def _differentiate_xy_chart_type(cls, plot):
scatterChart = plot._element
def noLine():
return bool(scatterChart.xpath("c:ser/c:spPr/a:ln/a:noFill"))
def noMarkers():
symbols = scatterChart.xpath("c:ser/c:marker/c:symbol")
if symbols and symbols[0].get("val") == "none":
return True
return False
scatter_style = scatterChart.xpath("c:scatterStyle")[0].get("val")
if scatter_style == "lineMarker":
if noLine():
return XL.XY_SCATTER
if noMarkers():
return XL.XY_SCATTER_LINES_NO_MARKERS
return XL.XY_SCATTER_LINES
if scatter_style == "smoothMarker":
if noMarkers():
return XL.XY_SCATTER_SMOOTH_NO_MARKERS
return XL.XY_SCATTER_SMOOTH
return XL.XY_SCATTER
| scanny/python-pptx | pptx/chart/plot.py | Python | mit | 13,213 | 0.000076 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_fold.util.proto."""
import os
# import google3
import tensorflow as tf
from tensorflow_fold.util import proto_tools
from tensorflow_fold.util import test3_pb2
from tensorflow_fold.util import test_pb2
from google.protobuf import text_format
# Make sure SerializedMessageToTree can see our proto files.
proto_tools.map_proto_source_tree_path("", os.getcwd())
# Note: Tests run in the bazel root directory, which we will use as the root for
# our source protos.
proto_tools.import_proto_file("tensorflow_fold/util/test.proto")
proto_tools.import_proto_file("tensorflow_fold/util/test3.proto")
def MakeCyclicProto(message_str):
return text_format.Parse(message_str, test_pb2.CyclicType())
def MakeCyclicProto3(message_str):
return text_format.Parse(message_str, test3_pb2.CyclicType3())
def MakeOneAtomProto(message_str):
return text_format.Parse(message_str, test_pb2.OneAtom())
class ProtoTest(tf.test.TestCase):
def testSerializedMessageToTree(self):
example = MakeCyclicProto(
"some_same<"
" many_int32: 1"
" many_int32: 2"
" some_same<"
" many_int32: 3"
" many_int32: 4"
" some_bool: false"
" >"
">"
"some_enum: THAT")
result = proto_tools.serialized_message_to_tree(
"tensorflow.fold.CyclicType", example.SerializeToString())
self.assertEqual(result["some_same"]["many_int32"], [1, 2])
self.assertEqual(result["some_same"]["some_same"]["many_int32"], [3, 4])
self.assertEqual(result["some_same"]["some_same"]["some_bool"], False)
self.assertEqual(result["many_bool"], [])
self.assertEqual(result["some_bool"], None)
self.assertEqual(result["some_same"]["many_bool"], [])
self.assertEqual(result["some_same"]["some_bool"], None)
self.assertEqual(result["some_enum"]["name"], "THAT")
self.assertEqual(result["some_enum"]["index"], 1)
self.assertEqual(result["some_enum"]["number"], 1)
def testSerializedMessageToTreeProto3(self):
example = MakeCyclicProto3(
"some_same<"
" many_int32: 1"
" many_int32: 2"
" some_same<"
" many_int32: 3"
" many_int32: 4"
" some_bool: false"
" >"
">"
"some_enum: THAT")
result = proto_tools.serialized_message_to_tree(
"tensorflow.fold.CyclicType3", example.SerializeToString())
self.assertEqual(result["some_same"]["many_int32"], [1, 2])
self.assertEqual(result["some_same"]["some_same"]["many_int32"], [3, 4])
self.assertEqual(result["some_same"]["some_same"]["some_bool"], False)
self.assertEqual(result["many_bool"], [])
self.assertEqual(result["some_bool"], False)
self.assertEqual(result["some_same"]["many_bool"], [])
self.assertEqual(result["some_same"]["some_bool"], False)
self.assertEqual(result["some_enum"]["name"], "THAT")
self.assertEqual(result["some_enum"]["index"], 1)
self.assertEqual(result["some_enum"]["number"], 1)
def testSerializedMessageToTreeOneofEmpty(self):
empty_proto = MakeOneAtomProto("").SerializeToString()
empty_result = proto_tools.serialized_message_to_tree(
"tensorflow.fold.OneAtom", empty_proto)
self.assertEqual(empty_result["atom_type"], None)
self.assertEqual(empty_result["some_int32"], None)
self.assertEqual(empty_result["some_int64"], None)
self.assertEqual(empty_result["some_uint32"], None)
self.assertEqual(empty_result["some_uint64"], None)
self.assertEqual(empty_result["some_double"], None)
self.assertEqual(empty_result["some_float"], None)
self.assertEqual(empty_result["some_bool"], None)
self.assertEqual(empty_result["some_enum"], None)
self.assertEqual(empty_result["some_string"], None)
def testSerializedMessageToTreeOneof(self):
empty_proto = MakeOneAtomProto("some_string: \"x\"").SerializeToString()
empty_result = proto_tools.serialized_message_to_tree(
"tensorflow.fold.OneAtom", empty_proto)
self.assertEqual(empty_result["atom_type"], "some_string")
self.assertEqual(empty_result["some_int32"], None)
self.assertEqual(empty_result["some_int64"], None)
self.assertEqual(empty_result["some_uint32"], None)
self.assertEqual(empty_result["some_uint64"], None)
self.assertEqual(empty_result["some_double"], None)
self.assertEqual(empty_result["some_float"], None)
self.assertEqual(empty_result["some_bool"], None)
self.assertEqual(empty_result["some_enum"], None)
self.assertEqual(empty_result["some_string"], "x")
def testNonConsecutiveEnum(self):
name = "tensorflow.fold.NonConsecutiveEnumMessage"
msg = test_pb2.NonConsecutiveEnumMessage(
the_enum=test_pb2.NonConsecutiveEnumMessage.THREE)
self.assertEqual(
{"the_enum": {"name": "THREE", "index": 1, "number": 3}},
proto_tools.serialized_message_to_tree(name, msg.SerializeToString()))
msg.the_enum = test_pb2.NonConsecutiveEnumMessage.SEVEN
self.assertEqual(
{"the_enum": {"name": "SEVEN", "index": 0, "number": 7}},
proto_tools.serialized_message_to_tree(name, msg.SerializeToString()))
if __name__ == "__main__":
tf.test.main()
| pklfz/fold | tensorflow_fold/util/proto_test.py | Python | apache-2.0 | 5,810 | 0.001721 |
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from hueversion import VERSION
import os
def expand_package_data(src_dirs, strip=""):
ret = []
for src_dir in src_dirs:
for path, dnames, fnames in os.walk(src_dir):
for fname in fnames:
ret.append(os.path.join(path, fname).replace(strip, ""))
return ret
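# Illustrative example: expand_package_data(["src/shell/static"],
# strip="src/shell/") yields package-relative paths such as "static/js/...".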
os.chdir(os.path.dirname(os.path.abspath(__file__)))
setup(
name = "shell",
version = VERSION,
url = 'http://github.com/cloudera/hue',
description = 'Shell interface in Hue',
author = 'Hue',
packages = find_packages('src'),
package_dir = {'': 'src'},
install_requires = ['setuptools', 'desktop'],
entry_points = { 'desktop.sdk.application': 'shell=shell' },
zip_safe = False,
package_data = {
# Include static resources. Package_data doesn't
# deal well with directory globs, so we enumerate
# the files manually.
'shell': expand_package_data(
["src/shell/templates", "src/shell/static"],
"src/shell/")
}
)
| 2013Commons/HUE-SHARK | apps/shell/setup.py | Python | apache-2.0 | 1,754 | 0.017104 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, absolute_import)
from .K2onSilicon import K2onSilicon
| danxhuber/k2epic | checkk2fov/__init__.py | Python | mit | 135 | 0.014815 |
# 96. Unique Binary Search Trees My Submissions QuestionEditorial Solution
# Total Accepted: 84526 Total Submissions: 224165 Difficulty: Medium
# Given n, how many structurally unique BST's (binary search trees) that store values 1...n?
#
# For example,
# Given n = 3, there are a total of 5 unique BST's.
#
# 1 3 3 2 1
# \ / / / \ \
# 3 2 1 1 3 2
# / / \ \
# 2 1 2 3
#
class Solution(object):
def numTrees(self, n):
"""
:type n: int
:rtype: int
"""
A = [0] * (n + 1)
A[0] = 1
A[1] = 1
for i in xrange(2, n+1):
for k in xrange(0, i):
A[i] += A[k]*A[i-1-k]
return A[n]
# 4 4 4 4 4
# / / / / /
# 1 3 3 2 1
# \ / / / \ \
# 3 2 1 1 3 2
# / / \ \
# 2 1 2 3
#
# 1 3 3 2 1 2
# \ / \ / \ / \ \ / \
# 3 2 4 1 4 1 3 2 1 4
# / \ / \ \ \ /
# 2 4 1 2 4 3 3
# \
# 4
#
# Subscribe to see which companies asked this question
# Analysis: with (l, r) denoting l nodes in the left subtree and r in the
# right, the count follows the Catalan recurrence C(n) = sum C(l) * C(r)
# over l + r = n - 1:
# n = 0, 1
# n = 1, 1
# n = 2, 2  = (0,1) + (1,0)
# n = 3, 5  = (0,2) + (1,1) + (2,0) = 2 + 1 + 2
# n = 4, 14 = (0,3) + (1,2) + (2,1) + (3,0) = 5 + 2 + 2 + 5
# n = 5, 42
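# The same counts have a closed form, the Catalan number
# C(n) = (2n)! / ((n+1)! * n!); a quick cross-check, added here as a sketch
# and not part of the original solutions:
from math import factorial
def catalan(n):
    return factorial(2 * n) // (factorial(n + 1) * factorial(n))
assert catalan(4) == 14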
class Solution(object):
def numTrees(self, n):
"""
:type n: int
:rtype: int
"""
if n == 0: return 0
res = [0 for x in xrange(0,n+1)]
res[0], res[1] = 1, 1
for n in xrange(2, n+1):
i, tmp = 0, 0
while i < n:
tmp += res[i] * res[n-1-i]
i += 1
res[n] = tmp
return res[n]
import unittest
class TestSolution(unittest.TestCase):
def test_0(self):
self.assertEqual(Solution().numTrees(3), 5)
def test_1(self):
self.assertEqual(Solution().numTrees(2), 2)
def test_2(self):
self.assertEqual(Solution().numTrees(4), 14)
if __name__ == "__main__":
unittest.main()
| shawncaojob/LC | QUESTIONS/96_unique_binary_search_trees.py | Python | gpl-3.0 | 2,402 | 0.005412 |
# Copyright 2017 Janos Czentye, Balazs Nemeth, Balazs Sonkoly
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Classes for handling the elements of the NF-FG data structure.
"""
import json
import uuid
from collections import Iterable, OrderedDict
from itertools import chain
################################################################################
# ---------- BASE classes of NFFG elements -------------------
################################################################################
class Persistable(object):
"""
Define general persist function for the whole NFFG structure.
"""
__slots__ = ()
def persist (self):
"""
Common function to persist the actual element into a plain text format.
:return: generated empty object fit to JSON
:rtype: dict
"""
return OrderedDict()
def load (self, data, *args, **kwargs):
"""
Common function to fill self with data from JSON data.
:raise: :any:`exceptions.NotImplementedError`
:param data: object structure in JSON
:return: self
"""
pass
@classmethod
def parse (cls, data, *args, **kwargs):
"""
Common function to parse the given JSON object structure as the actual NF-FG
entity type and return a newly created object.
:param data: raw JSON object structure
:type data: object
:return: parsed data as the entity type
:rtype: :any:`Persistable`
"""
return cls().load(data, *args, **kwargs)
def copy (self):
"""
Return the copy of the object. This copy function is meant to use when a new
``NFFG`` object structure is created. It can handles the references pointed
to internal NFFG element in order to avoid unnecessary deep copies. These
references are always None in the copied object which are overwritten by
adder functions in every case.
:return: copied object
:rtype: :any:`Element`
"""
from copy import deepcopy
return deepcopy(self)
class Element(Persistable):
"""
Main base class for NF-FG elements with unique id.
Contains the common functionality.
"""
# Operation constants
OP_CREATE = "create"
OP_REPLACE = "replace"
OP_MERGE = "merge"
OP_REMOVE = "remove"
OP_DELETE = "delete"
# Status constants
STATUS_INIT = "INITIALIZED"
STATUS_PENDING = "PENDING"
STATUS_DEPLOY = "DEPLOYED"
STATUS_RUN = "RUNNING"
STATUS_STOP = "STOPPED"
STATUS_FAIL = "FAILED"
__slots__ = ('id', 'type', 'operation', 'status')
def __init__ (self, id=None, type="ELEMENT", operation=None, status=None):
"""
Init.
:param id: optional identification (generated by default)
:type id: str or int
:param type: explicit object type both for nodes and edges
:type type: str
    :param operation: optional operation marker, e.g. create or delete
    :type operation: str
    :param status: optional status marker, e.g. INITIALIZED
    :type status: str
    :return: None
"""
super(Element, self).__init__()
self.id = id if id is not None else self.generate_unique_id()
self.type = type
self.operation = operation
self.status = status
@staticmethod
def generate_unique_id ():
"""
Generate a unique id for the object based on uuid module: :rfc:`4122`.
:return: unique id
:rtype: str
"""
return str(uuid.uuid1())
def regenerate_id (self):
"""
Regenerate and set id. Useful for object copy.
:return: new id
:rtype: str
"""
self.id = self.generate_unique_id()
return self.id
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
# Need to override
element = super(Element, self).persist()
element['id'] = self.id
if self.operation is not None:
element["operation"] = self.operation
if self.status is not None:
element["status"] = self.status
return element
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
self.id = data['id']
super(Element, self).load(data=data)
self.operation = data.get("operation") # optional
self.status = data.get("status") # optional
return self
def dump (self):
"""
Dump the Element in a pretty format for debugging.
:return: Element in JSON format
:rtype: str
"""
return json.dumps(self.persist(), indent=2, sort_keys=False)
##############################################################################
# dict specific functions
##############################################################################
def __getitem__ (self, item):
"""
Return the attribute of the element given by ``item``.
:param item: attribute name
:type item: str or int
:return: attribute
:rtype: object
"""
if hasattr(self, item):
return getattr(self, item)
else:
raise KeyError(
"%s object has no key: %s" % (self.__class__.__name__, item))
def __setitem__ (self, key, value):
"""
Set the attribute given by ``key`` with ``value``:
:param key: attribute name
:type key: str or int
:param value: new value
:type value: object
:return: new value
:rtype: object
"""
if hasattr(self, key):
return setattr(self, key, value)
else:
raise KeyError(
"%s object has no key: %s" % (self.__class__.__name__, key))
def __contains__ (self, item):
"""
Return true if the given ``item`` is exist.
:param item: searched attribute name
:type item: str or int
:return: given item is exist or not
:rtype: bool
"""
return hasattr(self, item)
def get (self, item, default=None):
"""
Return with the attribute given by ``item``, else ``default``.
:param item: searched attribute name
:type item: str
:param default: default value
:type default: object
:return: found attribute or default
:rtype: object
"""
try:
return self[item]
except KeyError:
return default
def setdefault (self, key, default=None):
"""
Set the attribute given by ``key``. Use the ``default`` value is it is
not given.
:param key: attribute name
:type key: str or int
:param default: default value
:type default: object
:return: None
"""
if key not in self:
self[key] = default
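  # Elements expose a light mapping-style API over their attributes, e.g.
  # (illustrative): elem['id'], elem['status'] = 'RUNNING', and
  # elem.get('no_such_attr', 'fallback') returns 'fallback'.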
def clear (self):
"""
Overrided for safety reasons.
:raise: :any:`exceptions.RuntimeError`
"""
raise RuntimeError("This standard dict functions is not supported by NFFG!")
def update (self, dict2):
"""
Overrided for safety reasons.
:raise: :any:`exceptions.RuntimeError`
"""
raise RuntimeError(
"This standard dict functions is not supported by NFFG! self: %s dict2: "
"%s" % (self, dict2))
class L3Address(Element):
"""
Wrapper class for storing L3 address values.
"""
__slots__ = ('name', 'configure', 'client', 'requested', 'provided')
def __init__ (self, id, name=None, configure=None, client=None,
requested=None, provided=None):
"""
Init.
:param id: optional id
:type id: str or int
:param name: optional name
:type name: str
:param configure: request address
:type configure: bool
:param client: client of the address request
:type client: str
:param requested: requested IP
:type requested: str
:param provided: provided IP
:type provided: str
:return: None
"""
super(L3Address, self).__init__(id=id, type="L3ADDRESS")
self.name = name
self.configure = configure
self.client = client
self.requested = requested
self.provided = provided
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
super(L3Address, self).load(data=data)
self.name = data.get('name')
self.configure = data.get('configure')
self.requested = data.get('requested')
self.provided = data.get('provided')
return self
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
l3 = super(L3Address, self).persist()
if self.name is not None:
l3['name'] = self.name
if self.configure is not None:
l3['configure'] = self.configure
if self.client is not None:
l3['client'] = self.client
if self.requested is not None:
l3['requested'] = self.requested
if self.provided is not None:
l3['provided'] = self.provided
return l3
class L3AddressContainer(Persistable):
"""
Container class for storing L3 address data.
"""
__slots__ = ('container',)
def __init__ (self, container=None):
"""
Init.
:param container: optional container for L3 addresses.
:type container: collection.Iterable
:return: None
"""
super(L3AddressContainer, self).__init__()
self.container = container if container is not None else []
def __getitem__ (self, id):
"""
Return with the :any:`L3Address` given by ``id``.
:param id: L3 address id
:type id: str or int
:return: L3 address
:rtype: :any:`L3Address`
"""
for l3 in self.container:
if l3.id == id:
return l3
raise KeyError("L3 address with id: %s is not defined!" % id)
def __iter__ (self):
"""
Return with an iterator over the container.
:return: iterator
:rtype: collection.Iterable
"""
return iter(self.container)
def __len__ (self):
"""
Return the number of stored :any:`L3Address`.
:return: number of addresses
:rtype: int
"""
return len(self.container)
def __contains__ (self, item):
"""
Return True if address given by ``id`` is exist in the container.
:param item: address object
:type: :any:`L3Address`
:return: found address or not
:rtype: bool
"""
if not isinstance(item, L3Address):
raise RuntimeError(
"L3AddressContainer's operator \"in\" works only with L3Address "
"objects (and not ID-s!)")
return item in self.container
def append (self, item):
"""
Add a new address to the container.
:param item: address object
:type: :any:`L3Address`
:return: added address
:rtype: :any:`L3Address`
"""
self.container.append(item)
return item
def remove (self, item):
"""
Remove L3 address from container.
:param item: address object
:type: :any:`L3Address`
:return: removed address
:rtype: :any:`L3Address`
"""
return self.container.remove(item)
def clear (self):
"""
Remove all the stored address from container.
:return: None
"""
del self.container[:]
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return str(self.container)
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return str(self)
def add_l3address (self, id, name=None, configure=None, client=None,
requested=None, provided=None):
"""
Add a new address to the container based on given :any:`L3Address`
attributes.
:param id: optional id
:type id: str or int
:param name: optional name
:type name: str
:param configure: request address
:type configure: bool
:param client: client of the address request
:type client: str
:param requested: requested IP
:type requested: str
:param provided: provided IP
:type provided: str
:return: None
"""
self.container.append(
L3Address(id, name=name, configure=configure, client=client,
requested=requested, provided=provided))
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: list
"""
return [l3.persist() for l3 in self.container]
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
    for item in data:
      self.add_l3address(id=item['id'], name=item.get('name'),
                         configure=item.get('configure'),
                         client=item.get('client'),
                         requested=item.get('requested'),
                         provided=item.get('provided'))
    return self
class Port(Element):
"""
Class for storing a port of an NF.
"""
# Port type
TYPE = "PORT"
"""Port type"""
ROLE_CONSUMER = "consumer"
ROLE_PROVIDER = "provider"
ROLE_EXTERNAL = "EXTERNAL"
__slots__ = ('__node', 'properties', 'metadata', 'name', 'sap', 'capability',
'technology', 'role', 'delay', 'bandwidth', 'cost', 'qos',
'controller', 'orchestrator', 'l2', 'l3', 'l4')
def __init__ (self, node, id=None, name=None, properties=None, sap=None,
capability=None, technology=None, role=None, delay=None,
bandwidth=None, cost=None, qos=None, controller=None,
orchestrator=None, l2=None, l4=None, metadata=None):
"""
Init.
:param node: container node
:type node: :any:`Node`
:param id: optional id
:type id: str or int
:param properties: supported properties of the port
:type properties: str or iterable(str)
:param name: optional name
:type name: str
    :param capability: optional capabilities
    :type capability: str
:param sap: inter-domain SAP identifier
:type sap: str
:param technology: supported technologies
:type technology: str
:param delay: delay
:type delay: float
:param bandwidth: bandwidth
:type bandwidth: float
:param cost: cost
:type cost: str
:param qos: traffic QoS class
:type qos: str
:param controller: controller URL
:type controller: str
:param orchestrator: orchestrator URL
:type orchestrator: str
:param l2: l2 address
    :type l2: str
:param l4: l4 fields
:type l4: str
:param metadata: metadata related to Node
:type metadata: dict
:return: None
"""
super(Port, self).__init__(id=id, type=self.TYPE)
if not isinstance(node, Node):
raise RuntimeError("Port's container node must be derived from Node!")
self.__node = node
# Set properties list according to given param type
self.properties = OrderedDict(properties if properties else {})
self.metadata = OrderedDict(metadata if metadata else {})
# Virtualizer-related data
self.name = name
self.sap = sap
self.capability = capability
# sap_data
self.technology = technology
# sap_data/role
self.role = role
# sap_data/resources
self.delay = delay
self.bandwidth = bandwidth
self.cost = cost
self.qos = qos
# control
self.controller = controller
self.orchestrator = orchestrator
# addresses
self.l2 = l2
self.l3 = L3AddressContainer()
self.l4 = l4
@property
def node (self):
"""
Return with the container reference.
:return: container reference
:rtype: :any:`Persistable`
"""
return self.__node
def copy (self):
"""
Skip container ``node`` deepcopy in case the :any:`Port` object is copied
directly. Deepcopy called on an upper object has already cloned the
container node when it gets to a Port object and it will skip the re-cloning
due to its internal memoization feature.
:return: copied object
:rtype: :any:`Port`
"""
tmp, self.__node = self.__node, None
clone = super(Port, self).copy()
self.__node = tmp
return clone
@node.deleter
def node (self):
del self.__node
def add_property (self, property, value):
"""
Add a property to the :any:`Port`.
:param property: property
:type property: str
:param value: property value
:type value: str
:return: the Port object to allow function chaining
:rtype: :any:`Port`
"""
self.properties[property] = value
return self
def has_property (self, property):
"""
Return True if :any:`Port` has a property with given `property`.
:param property: property
:type property: str
:return: has a property with given name or not
:rtype: bool
"""
return property in self.properties
def del_property (self, property=None):
"""
Remove the property from the :any:`Port`. If no property is given all the
properties will be removed from the :any:`Port`.
:param property: property name
:type property: str
:return: removed property or None
:rtype: str or None
"""
if property is None:
self.properties.clear()
else:
return self.properties.pop(property, None)
def get_property (self, property):
"""
Return the value of property.
:param property: property
:type property: str
:return: the value of the property
:rtype: str
"""
return self.properties.get(property)
def add_metadata (self, name, value):
"""
Add metadata with the given `name`.
:param name: metadata name
:type name: str
:param value: metadata value
:type value: str
:return: the :any:`Port` object to allow function chaining
:rtype: :any:`Port`
"""
self.metadata[name] = value
return self
def has_metadata (self, name):
"""
Return True if the :any:`Port` has a metadata with the given `name`.
:param name: metadata name
:type name: str
:return: has metadata with given name or not
:rtype: bool
"""
return name in self.metadata
def del_metadata (self, name=None):
"""
Remove the metadata from the :any:`Port`. If no metadata is given all the
metadata will be removed.
:param name: name of the metadata
:type name: str
:return: removed metadata or None
:rtype: str or None
"""
if name is None:
self.metadata.clear()
else:
return self.metadata.pop(name, None)
def get_metadata (self, name):
"""
Return the value of metadata.
:param name: name of the metadata
:type name: str
:return: metadata value
:rtype: str
"""
return self.metadata.get(name)
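  # add_property() and add_metadata() return self, so calls can be chained,
  # e.g. (illustrative): port.add_property("type", "ext").add_metadata("k", "v")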
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
port = super(Port, self).persist()
if self.properties:
port["property"] = self.properties.copy()
if self.name is not None:
port['name'] = self.name
if self.sap is not None:
port['sap'] = self.sap
if self.capability is not None:
port['capability'] = self.capability
    if any(v is not None for v in (self.technology, self.role, self.delay,
                                   self.bandwidth, self.cost, self.qos)):
      port['sap_data'] = {}
      if self.technology is not None:
        port['sap_data']['technology'] = self.technology
      if self.role is not None:
        port['sap_data']['role'] = self.role
      if any(v is not None for v in (self.delay, self.bandwidth, self.cost,
                                     self.qos)):
        port['sap_data']['resources'] = {}
        if self.delay is not None:
          port['sap_data']['resources']['delay'] = self.delay
        if self.bandwidth is not None:
          port['sap_data']['resources']['bandwidth'] = self.bandwidth
        if self.cost is not None:
          port['sap_data']['resources']['cost'] = self.cost
        if self.qos is not None:
          port['sap_data']['resources']['qos'] = self.qos
if any(v is not None for v in (self.controller, self.orchestrator)):
port['control'] = {}
if self.controller is not None:
port['control']['controller'] = self.controller
if self.orchestrator is not None:
port['control']['orchestrator'] = self.orchestrator
if any(v is not None for v in
(self.l2, self.l4, True if self.l3 else None)):
port['addresses'] = {}
if self.l2 is not None:
port['addresses']['l2'] = self.l2
if self.l4 is not None:
port['addresses']['l4'] = self.l4
if len(self.l3):
port['addresses']['l3'] = self.l3.persist()
if self.metadata:
port["metadata"] = self.metadata.copy()
return port
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
super(Port, self).load(data=data)
self.properties = OrderedDict(data.get('property', ()))
self.sap = data.get('sap')
self.name = data.get('name')
self.capability = data.get('capability')
if 'sap_data' in data:
self.technology = data['sap_data'].get('technology')
self.role = data['sap_data'].get('role')
if 'resources' in data['sap_data']:
self.delay = data['sap_data']['resources'].get('delay')
self.bandwidth = data['sap_data']['resources'].get('bandwidth')
self.cost = data['sap_data']['resources'].get('cost')
self.qos = data['sap_data']['resources'].get('qos')
else:
      self.technology = self.role = self.delay = self.bandwidth = None
      self.cost = self.qos = None
if 'control' in data:
self.controller = data['control'].get('controller')
self.orchestrator = data['control'].get('orchestrator')
else:
self.controller = self.orchestrator = None
if 'addresses' in data:
self.l2 = data['addresses'].get('l2')
self.l3.load(data=data['addresses'].get('l3', ()))
self.l4 = data['addresses'].get('l4')
else:
self.l2 = self.l4 = None
self.metadata = OrderedDict(data.get('metadata', ()))
return self
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return "%s(node: %s, id: %s)" % (
self.__class__.__name__, self.node.id, self.id)
class PortContainer(Persistable):
"""
Basic container class for ports.
Implements a Container-like behavior for getting a Port with id:
>>> cont = PortContainer()
>>> ...
>>> cont["port_id"]
"""
__slots__ = ('container',)
def __init__ (self, container=None):
"""
Init.
:param container: use given container for init
:type container: :any:`collections.Container`
"""
self.container = container if container is not None else []
def __getitem__ (self, id):
"""
Return with the :any:`Port` given by ``id``.
:param id: port id
:type id: str or int
:return: port object
:rtype: :any:`Port`
"""
for port in self.container:
if port.id == id:
return port
raise KeyError("Port with id: %s is not defined in: %s!"
% (id, [p.id for p in self.container]))
def __iter__ (self):
"""
Return with an iterator over the container.
:return: iterator
:rtype: collection.Iterable
"""
return iter(self.container)
def __len__ (self):
"""
Return the number of stored :any:`Port`.
:return: number of ports
:rtype: int
"""
return len(self.container)
def __contains__ (self, item):
"""
Return True if port given by ``id`` is exist in the container.
:param item: port object
:type: :any:`Port`
:return: found port or not
:rtype: bool
"""
# this type checking is important because with Port ID input the function
# would silently return False!
if isinstance(item, Port):
return item in self.container
else:
return item in (p.id for p in self.container)
@property
def flowrules (self):
"""
    Return with an iterator over the flowrules stored in the ports.
:return: iterator of flowrules
:rtype: collections.Iterator
"""
return chain(*[port.flowrules for port in self.container])
def append (self, item):
"""
Add new port object to the container.
:param item: port object
:type item: :any:`Port`
:return: added object
:rtype: :any:`Port`
"""
self.container.append(item)
return item
def remove (self, item):
"""
Remove port object from the container.
:param item: port object
:type item: :any:`Port`
:return: None
"""
try:
return self.container.remove(item)
except ValueError:
return
def clear (self):
"""
Remove all the stored objects.
:return: None
"""
del self.container[:]
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return str(self.container)
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return str(self)
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: list
"""
return [port.persist() for port in self.container]
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
pass
class Constraints(Persistable):
"""
Container class for constraints.
"""
__slots__ = ('affinity', 'antiaffinity', 'variable', 'constraint',
'restorability')
def __init__ (self):
"""
Init.
"""
super(Constraints, self).__init__()
self.affinity = OrderedDict()
self.antiaffinity = OrderedDict()
self.variable = OrderedDict()
self.constraint = OrderedDict()
self.restorability = None
def add_affinity (self, id, value):
"""
Set affinity value.
:param id: unique ID
:type id: str or int
:param value: new value
:type value: str or int
:return: new value
:rtype: str or int
"""
self.affinity[id] = value
return value
def has_affinity (self, id):
"""
Return True if affinity value with id is exist.
:param id: unique ID
:type id: str or int
:return: value exits or not
:rtype: bool
"""
return id in self.affinity
def del_affinity (self, id):
"""
Remove affinity value with given id.
:param id: unique ID
:type id: str or int
:return: removed value
:rtype: str or int
"""
return self.affinity.pop(id, None)
def add_antiaffinity (self, id, value):
"""
Set antiaffinity value.
:param id: unique ID
:type id: str or int
:param value: new value
:type value: str or int
:return: new value
:rtype: str or int
"""
self.antiaffinity[id] = value
return value
def has_antiaffinity (self, id):
"""
Return True if antiaffinity value with id is exist.
:param id: unique ID
:type id: str or int
:return: value exits or not
:rtype: bool
"""
return id in self.antiaffinity
def del_antiaffinity (self, id):
"""
Remove antiaffinity value with given id.
:param id: unique ID
:type id: str or int
:return: removed value
:rtype: str or int
"""
return self.antiaffinity.pop(id, None)
def add_variable (self, key, id):
"""
Set variable value.
:param key: unique key
:type key: str or int
:param id: new value
:type id: str or int
:return: new value
:rtype: str or int
"""
self.variable[key] = id
return id
def has_variable (self, key):
"""
Return True if variable value with key is exist.
:param key: unique key
:type key: str or int
:return: value exits or not
:rtype: bool
"""
return key in self.variable
def del_variable (self, key):
"""
Remove variable value with given key.
:param key: unique key
:type key: str or int
:return: removed value
:rtype: str or int
"""
return self.variable.pop(key, None)
def add_constraint (self, id, formula):
"""
Set constraint value.
:param id: unique ID
:type id: str or int
:param formula: new value
:type formula: str or int
:return: new value
:rtype: str or int
"""
self.constraint[id] = formula
return formula
def has_constraint (self, id):
"""
Return True if variable value with key is exist.
:param id: unique ID
:type id: str or int
:return: value exits or not
:rtype: bool
"""
return id in self.constraint
def del_constraint (self, id):
"""
Remove antiaffinity value with given id.
:param id: unique ID
:type id: str or int
:return: removed value
:rtype: str or int
"""
if id in self.constraint:
return self.constraint.pop(id)
else:
return None
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: list
"""
constraints = super(Constraints, self).persist()
if self.affinity:
constraints['affinity'] = self.affinity
if self.antiaffinity:
constraints['antiaffinity'] = self.antiaffinity
if self.variable:
constraints['variable'] = self.variable
if self.constraint:
constraints['constraint'] = self.constraint
if self.restorability:
constraints['restorability'] = self.restorability
return constraints
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
super(Constraints, self).load(data=data)
self.affinity = data.get('affinity', OrderedDict())
self.antiaffinity = data.get('antiaffinity', OrderedDict())
self.variable = data.get('variable', OrderedDict())
self.constraint = data.get('constraint', OrderedDict())
self.restorability = data.get('restorability')
return self
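# Illustrative use of Constraints (ids and values are made up):
#   c = Constraints()
#   c.add_affinity("a1", "nf1")
#   c.add_constraint("c1", "delay < 10")
#   c.persist()  # -> {'affinity': {'a1': 'nf1'}, 'constraint': {'c1': ...}}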
class Node(Element):
"""
Base class for different types of nodes in the NF-FG.
"""
# Class of the contained ports
PORT_CLASS = Port
"""Class of the contained ports"""
# Node type constants:
# Infrastructure node --> abstract node represents one or more physical node
INFRA = "INFRA"
# SAP nodes --> abstract node represents end point/ports of a service
SAP = "SAP"
# Network Function (NF) node --> abstract node represents a virtual function
NF = "NF"
__slots__ = ('name', 'ports', 'metadata', 'constraints')
def __init__ (self, type, id=None, name=None, metadata=None):
"""
Init.
:param type: node type
:type type: str
:param id: optional id
:type id: str or int
:param name: optional name
:type name: str
:param metadata: metadata related to Node
:type metadata: dict
:return: None
"""
super(Node, self).__init__(id=id, type=type)
self.name = name if name is not None else str(id) # optional
self.ports = PortContainer() # list of Ports
self.metadata = OrderedDict(metadata if metadata else {})
self.constraints = Constraints()
@property
def short_name (self):
"""
    Return a generic short name.
:return: short name
:rtype: str
"""
return self.name if self.name else "id: %s" % self.id
def flowrules (self):
"""
    Return with an iterator over the flowrules stored in the ports.
:return: iterator of flowrules
:rtype: collections.Iterator
"""
return self.ports.flowrules
def add_port (self, id=None, name=None, properties=None, sap=None,
capability=None, technology=None, delay=None, bandwidth=None,
cost=None, controller=None, orchestrator=None, l2=None, l4=None,
metadata=None):
"""
Add a port with the given params to the :any:`Node`.
:param id: optional id
:type id: str or int
:param properties: supported properties of the port
:type properties: str or iterable(str)
:param name: optional name
:type name: str
:param sap: inter-domain SAP identifier
:type sap: str
:param capability: optional capabilities
:type capability: str
:param technology: supported technologies
:type technology: str
:param delay: delay
:type delay: float
:param bandwidth: bandwidth
:type bandwidth: float
:param cost: cost
:type cost: str
:param controller: controller URL
:type controller: str
:param orchestrator: orchestrator URL
:type orchestrator: str
:param l2: l2 address
    :type l2: str
:param l4: l4 fields
:type l4: str
:param metadata: metadata related to Node
:type metadata: dict
:return: newly created and stored Port object
:rtype: :any:`Port`
"""
port = Port(node=self, id=id, name=name, properties=properties, sap=sap,
capability=capability, technology=technology, delay=delay,
bandwidth=bandwidth, cost=cost, controller=controller,
orchestrator=orchestrator, l2=l2, l4=l4, metadata=metadata)
self.ports.append(port)
return port
def del_port (self, id):
"""
Remove the port with the given id from the Node.
:param id: port id
:type id: int or str
:return: the actual Port is found and removed or not
:rtype: bool
"""
for port in self.ports:
if port.id == id:
del port.node
return self.ports.remove(port)
return False
def has_port (self, id):
"""
Return True if the :any:`Node` has a port with the given `id`.
:param id: optional id
:type id: str or int
:return: has port with given id or not
:rtype: bool
"""
for p in self.ports:
if p.id == id:
return True
return False
def add_metadata (self, name, value):
"""
Add metadata with the given `name`.
:param name: metadata name
:type name: str
:param value: metadata value
:type value: str
:return: the :any:`Node` object to allow function chaining
:rtype: :any:`Node`
"""
self.metadata[name] = value
return self
def has_metadata (self, name):
"""
Return True if the :any:`Node` has a metadata with the given `name`.
:param name: metadata name
:type name: str
:return: has metadata with given name or not
:rtype: bool
"""
return name in self.metadata
def del_metadata (self, name=None):
"""
    Remove the metadata with the given name from the :any:`Node`. If no name
    is given, all metadata entries will be removed.
:param name: name of the metadata
:type name: str
:return: removed metadata or None
:rtype: str or None
"""
if name is None:
self.metadata.clear()
else:
return self.metadata.pop(name, None)
def get_metadata (self, name):
"""
Return the value of metadata.
:param name: name of the metadata
:type name: str
:return: metadata value
:rtype: str
"""
return self.metadata.get(name)
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
node = super(Node, self).persist()
if self.name is not None:
node["name"] = self.name
ports = self.ports.persist()
if ports:
node["ports"] = ports
if self.metadata:
node["metadata"] = self.metadata.copy()
constraints = self.constraints.persist()
if constraints:
node['constraints'] = constraints
return node
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
super(Node, self).load(data=data)
self.name = data.get('name') # optional
for item in data.get('ports', ()):
port = self.PORT_CLASS(node=self)
port.load(data=item)
self.ports.append(port)
self.metadata = OrderedDict(data.get('metadata', ()))
if 'constraints' in data:
self.constraints.load(data=data['constraints'])
return self
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return "<|ID: %s, Type: %s --> %s|>" % (
self.id, self.type, super(Element, self).__repr__())
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return "%s(id:%s, type:%s)" % (self.__class__.__name__, self.id, self.type)
class Link(Element):
"""
Base class for different types of edges in the NF-FG.
"""
# Edge type constants:
# Static link --> physical link between saps and infras
STATIC = "STATIC"
# Dynamic link --> virtual link between nfs and infras created on demand
DYNAMIC = "DYNAMIC"
# SG next hop --> virtual link to describe connection between elements in SG
SG = "SG"
# Requirement --> virtual link to define constraints between SG elements
REQUIREMENT = "REQUIREMENT"
__slots__ = ('src', 'dst', 'constraints')
def __init__ (self, src, dst, type=None, id=None, constraints=None):
"""
Init.
:param src: source port
:type src: :any:`Port`
:param dst: destination port
:type dst: :any:`Port`
:param type: link type
:type type: str
:param id: optional id
:type id: str or int
:param constraints: optional Constraints object
:type constraints: :class:`Constraints`
:return: None
"""
super(Link, self).__init__(id=id, type=type)
if (src is not None and not isinstance(src, Port)) or \
(dst is not None and not isinstance(dst, Port)):
raise RuntimeError("Src and dst must be Port objects!")
# Reference to src Port object
self.src = src # mandatory
# Reference to dst Port object
self.dst = dst # mandatory
self.constraints = constraints if constraints is not None else Constraints()
def copy (self):
"""
Skip deepcopy of ``src`` and ``dst`` references in case the :any:`Link`
object is copied directly. Deepcopy called on an upper object has already
cloned the references when it gets to a Port object and it will skip the
re-cloning due to its internal memoization feature.
:return: copied object
:rtype: :any:`Link`
"""
tmp_src, tmp_dst = self.src, self.dst
self.src = self.dst = None
clone = super(Link, self).copy()
self.src, self.dst = tmp_src, tmp_dst
return clone
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
link = super(Link, self).persist()
link['src_node'] = self.src.node.id
link['src_port'] = self.src.id
link['dst_node'] = self.dst.node.id
link['dst_port'] = self.dst.id
constraints = self.constraints.persist()
if constraints:
link['constraints'] = constraints
return link
def load (self, data, container=None, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:param container: main container node
:type container: :any:`NFFGModel`
:return: None
"""
if container is None:
raise RuntimeError(
"Container reference is not given for edge endpoint lookup!")
super(Link, self).load(data=data)
self.src = container.get_port(data['src_node'], data['src_port'])
self.dst = container.get_port(data['dst_node'], data['dst_port'])
if self.src is None:
raise RuntimeError("Src not found with params: %s !" % data)
if self.dst is None:
raise RuntimeError("Dst not found with params: %s !" % data)
if 'constraints' in data:
self.constraints.load(data=data['constraints'])
return self
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return "<|ID: %s, Type: %s, src: %s[%s], dst: %s[%s] --> %s|>" % (
self.id, self.type, self.src.node.id, self.src.id, self.dst.node.id,
self.dst.id, super(Element, self).__repr__())
################################################################################
# ---------- NODE AND LINK RESOURCES, ATTRIBUTES -------------------
################################################################################
class DelayMatrix(Persistable):
"""
Delay Matrix keyed by Port IDs.
"""
__slots__ = ('matrix',)
def __init__ (self):
super(DelayMatrix, self).__init__()
self.matrix = OrderedDict()
def persist (self):
"""
Persist object.
:return: JSON representation
    :rtype: dict
"""
res = super(DelayMatrix, self).persist()
for k, v in self.matrix.iteritems():
if not isinstance(v, dict):
continue
for kk, vv in v.iteritems():
if k not in res:
res[k] = OrderedDict()
try:
res[k][kk] = float(vv)
except ValueError:
res[k][kk] = vv
return res
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
self.matrix.update(data)
return self
def is_empty (self):
"""
Check if matrix object is empty or not.
:return: is empty
:rtype: bool
"""
    return sum(len(v) for v in self.matrix.itervalues()) == 0
def add_delay (self, src, dst, delay):
"""
Add delay value with given ports.
:param src: source port object
:type src: :class:`Port`
:param dst: destination port object
:type dst: :class:`Port`
:param delay: delay value between ports
:type delay: int or float
:return: None
"""
if src not in self.matrix:
self.matrix[src] = OrderedDict()
self.matrix[src][dst] = delay
def get_delay (self, src, dst):
"""
Return delay value defined between given ports.
:param src: source port object
:type src: :class:`Port`
:param dst: destination port object
:type dst: :class:`Port`
:return: delay value
:rtype: int or float
"""
    # IDs are always strings in the delay matrix because of the JSON standard
if src in self.matrix:
if dst in self.matrix[src]:
return self.matrix[src][dst]
def del_delay (self, src, dst):
"""
Remove delay value from matrix.
:param src: source port object
:type src: :class:`Port`
:param dst: destination port object
:type dst: :class:`Port`
:return: removed value
:rtype: int or float or None
"""
    # IDs are always strings in the delay matrix because of the JSON standard
if src in self.matrix:
if dst in self.matrix[src]:
return self.matrix[src].pop(dst)
def __contains__ (self, item):
return item in self.matrix
def __getitem__ (self, item):
return self.matrix[item]
def __iter__ (self):
return ((src, dst, self.matrix[src][dst])
for src in self.matrix
for dst in self.matrix[src])
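# Example (illustrative; string ids are used as keys, as after a JSON load):
#
#   m = DelayMatrix()
#   m.add_delay("p1", "p2", 0.5)
#   m.add_delay("p1", "p3", 1.2)
#   for src, dst, delay in m:
#     print src, dst, delay      # p1 p2 0.5  /  p1 p3 1.2
#   m.del_delay("p1", "p3")      # returns 1.2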
class NodeResource(Persistable):
"""
Class for storing resource information for Nodes.
"""
__slots__ = ('cpu', 'mem', 'storage', 'cost', 'zone', 'delay', 'bandwidth')
def __init__ (self, cpu=None, mem=None, storage=None, cost=None, zone=None,
delay=None, bandwidth=None):
"""
Init.
:param cpu: CPU resource
:type cpu: float
:param mem: memory resource
:type mem: float
:param storage: storage resource
:type storage: float
:param cost: cost
:type cost: float
:param zone: zone
:type zone: str
:param delay: delay property of the Node
:type delay: float
:param bandwidth: bandwidth property of the Node
:type bandwidth: float
:return: None
"""
super(NodeResource, self).__init__()
# container: compute
self.cpu = cpu
self.mem = mem
self.storage = storage
# container
self.cost = cost
self.zone = zone
self.delay = delay
self.bandwidth = bandwidth
def subtractNodeRes (self, subtrahend, maximal, link_count=1):
"""
    Subtract the subtrahend nffg_elements.NodeResource object from the current.
    Note: only the delay component is not subtracted; for now we neglect the
    load's influence on the delay. Link count identifies how many times the
    bandwidth should be subtracted. Throws an exception if any field of the
    'current' object would exceed 'maximal' or get below zero.
:param subtrahend: the object to be subtracted from current
:type subtrahend: NodeResource
:param maximal: The maximal value which must not be exceeded.
:type maximal: NodeResource
    :param link_count: how many times the bandwidth component should be
       subtracted.
:type link_count: int
:return: self resource object
:rtype: :any:`NodeResource`
"""
attrlist = ['cpu', 'mem', 'storage', 'bandwidth'] # delay excepted!
if reduce(lambda a, b: a or b, (self[attr] is None for attr in attrlist)):
      raise RuntimeError("Node resource components should always be given! "
                         "One of %s's components is None" % str(self))
if not reduce(lambda a, b: a and b,
(-1e-6 <= self[attr] - subtrahend[attr] <= maximal[
attr] + 1e-6 for attr in attrlist if
attr != 'bandwidth' and subtrahend[attr] is not None)):
raise RuntimeError("Node resource got below zero, or "
"exceeded the maximal value!")
if subtrahend['bandwidth'] is not None:
if not -1e-6 <= self['bandwidth'] - link_count * subtrahend[
'bandwidth'] <= maximal['bandwidth'] + 1e-6:
raise RuntimeError("Internal bandwidth cannot get below "
"zero, or exceed the maximal value!")
for attr in attrlist:
k = 1
if attr == 'bandwidth':
k = link_count
if subtrahend[attr] is not None:
self[attr] -= k * subtrahend[attr]
return self
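  # Example (illustrative sketch of the subtraction semantics; the numbers
  # are placeholders):
  #
  #   avail = NodeResource(cpu=4, mem=8, storage=10, bandwidth=100)
  #   maxres = NodeResource(cpu=4, mem=8, storage=10, bandwidth=100)
  #   need = NodeResource(cpu=1, mem=2, storage=1, bandwidth=10)
  #   avail.subtractNodeRes(need, maxres, link_count=2)
  #   # -> cpu=3, mem=6, storage=9, bandwidth=80; raises RuntimeError if any
  #   #    component would get below zero or exceed the 'maxres' values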
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
res = super(NodeResource, self).persist()
if self.cpu is not None:
res["cpu"] = self.cpu
if self.mem is not None:
res["mem"] = self.mem
if self.storage is not None:
res["storage"] = self.storage
if self.cost is not None:
res["cost"] = self.cost
if self.zone is not None:
res["zone"] = self.zone
if self.delay is not None:
res["delay"] = self.delay
if self.bandwidth is not None:
res["bandwidth"] = self.bandwidth
return res
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
self.cpu = float(data['cpu']) if 'cpu' in data else None
self.mem = float(data['mem']) if 'mem' in data else None
self.storage = float(data['storage']) if 'storage' in data else None
self.cost = data['cost'] if 'cost' in data else None
    self.zone = data['zone'] if 'zone' in data else None
self.delay = float(data['delay']) if 'delay' in data else None
self.bandwidth = float(data['bandwidth']) if 'bandwidth' in data else None
return self
def __getitem__ (self, item):
"""
    Return the resource attribute given by ``item``.
:param item: attribute name
:type item: str
:return: attribute value
:rtype: int or object
"""
if hasattr(self, item):
return getattr(self, item)
else:
raise KeyError(
"%s object has no key: %s" % (self.__class__.__name__, item))
def __setitem__ (self, key, value):
"""
    Set the resource attribute given by ``key`` to ``value``.
:param key: attribute name
:type key: str
:param value: new value
:type value: int or object
:return: None
"""
if hasattr(self, key):
return setattr(self, key, value)
else:
raise KeyError(
"%s object has no key: %s" % (self.__class__.__name__, key))
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return "Resources of %s: cpu: %s, mem: %s, storage: %s, bandwidth: %s, " \
"delay: %s" % (self.__class__.__name__, self.cpu, self.mem,
self.storage, self.bandwidth, self.delay)
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return "cpu: %s mem: %s storage: %s cost: %s zone: %s bandwidth: %s" \
" delay: %s" % (self.cpu, self.mem, self.storage, self.cost,
self.zone, self.bandwidth, self.delay)
  def is_empty (self):
    """
    Return True if no resource values are set (or all are zero).
    :return: whether every resource value is unset or zero
    :rtype: bool
    """
    return not any((self.cpu, self.mem, self.storage, self.cost, self.zone,
                    self.delay, self.bandwidth))
class Flowrule(Element):
"""
Class for storing a flowrule.
"""
__slots__ = ('match', 'action', 'bandwidth', 'delay', 'cost', 'qos',
'external', 'constraints')
def __init__ (self, id=None, match="", action="", bandwidth=None, delay=None,
cost=None, qos=None, external=False, constraints=None):
"""
    Init.
    :param id: optional id
    :type id: str or int
    :param match: matching rule
    :type match: str
    :param action: forwarding action
    :type action: str
    :param bandwidth: bandwidth
    :type bandwidth: float
    :param delay: delay
    :type delay: float
    :param cost: cost
    :type cost: str
    :param qos: traffic QoS class
    :type qos: str
    :param external: mark the flowrule as external --> should not be processed
    :type external: bool
    :param constraints: optional Constraints object
    :type constraints: :class:`Constraints`
    :return: None
"""
super(Flowrule, self).__init__(id=id, type="FLOWRULE")
self.match = match # mandatory
self.action = action # mandatory
self.bandwidth = bandwidth
self.delay = delay
self.cost = cost
self.qos = qos
self.external = external
self.constraints = constraints if constraints is not None else Constraints()
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
flowrule = super(Flowrule, self).persist()
if self.match:
flowrule['match'] = self.match
if self.action:
flowrule['action'] = self.action
if self.bandwidth:
flowrule['bandwidth'] = self.bandwidth
if self.delay:
flowrule['delay'] = self.delay
if self.cost:
flowrule['cost'] = self.cost
if self.qos:
flowrule['qos'] = self.qos
if self.external:
flowrule['external'] = self.external
constraints = self.constraints.persist()
if constraints:
flowrule['constraints'] = constraints
return flowrule
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
super(Flowrule, self).load(data=data)
self.match = data.get('match')
self.action = data.get('action')
self.bandwidth = float(data['bandwidth']) if 'bandwidth' in data else None
self.delay = float(data['delay']) if 'delay' in data else None
self.cost = data.get('cost')
self.qos = data.get('qos')
    self.external = bool(data['external']) if 'external' in data else False
if 'constraints' in data:
self.constraints.load(data=data['constraints'])
return self
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return "Flowrule object:\nmatch: %s\naction: %s\nbandwidth: " \
"%s\ndelay: %s\ncost: %s\nqos: %s\nexternal: %s" \
% (self.match, self.action, self.bandwidth, self.delay, self.cost,
self.qos, self.external)
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return "%s(id:%s, match: %s, action: %s, bandwidth: %s, delay: %s," \
"cost: %s, qos: %s, external: %s)" % (self.__class__.__name__,
self.id, self.match,
self.action, self.bandwidth,
self.delay, self.cost,
self.qos, self.external)
class InfraPort(Port):
"""
Class for storing a port of Infra Node and handles flowrules.
"""
__slots__ = ('flowrules',)
def __init__ (self, node, id=None, name=None, properties=None, sap=None,
capability=None, technology=None, delay=None, bandwidth=None,
cost=None, controller=None, orchestrator=None, l2=None, l4=None,
metadata=None):
"""
Init.
:param node: container node
:type node: :any:`Node`
:param id: optional id
:type id: str or int
:param properties: supported properties of the port
:type properties: str or iterable(str)
:param metadata: metadata related to Node
:type metadata: dict
:return: None
"""
super(InfraPort, self).__init__(node=node, id=id, name=name,
properties=properties, sap=sap,
capability=capability,
technology=technology, delay=delay,
bandwidth=bandwidth, cost=cost,
controller=controller,
orchestrator=orchestrator, l2=l2, l4=l4,
metadata=metadata)
self.flowrules = []
def add_flowrule (self, match, action, bandwidth=None, delay=None, cost=None,
qos=None, id=None, external=False, constraints=None):
"""
Add a flowrule with the given params to the port of an Infrastructure Node.
:param match: matching rule
:type match: str
:param action: forwarding action
:type action: str
:param bandwidth: bandwidth
:type bandwidth: float
    :param delay: delay
    :type delay: float
    :param cost: cost
    :type cost: str
    :param qos: traffic QoS class
    :type qos: str
:param id: specific id of the flowrule
:type id: str or int
:param external: marked as external
:type external: bool
:param constraints: additional constraint object
:type constraints: :class:`Constraints`
:return: newly created and stored flowrule
:rtype: :any:`Flowrule`
"""
flowrule = Flowrule(id=id, match=match, action=action, bandwidth=bandwidth,
delay=delay, cost=cost, qos=qos, external=external,
constraints=constraints)
self.flowrules.append(flowrule)
return flowrule
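  # Example (illustrative; the match/action strings are placeholders whose
  # syntax depends on the orchestration layer):
  #
  #   fr = port.add_flowrule(match="in_port=1;TAG=sap1|nf1|1",
  #                          action="output=2;UNTAG", id="fr1")
  #   port.del_flowrule(id="fr1")   # remove it again by id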
def del_flowrule (self, id=None, match=None, action=None):
"""
Remove the flowrule with the given id or all flowrules which match the given
action/match parameters.
:param id: flowrule id
:type id: int or str
:param match: matching rule
:type match: str
:param action: forwarding action
:type action: str
:return: the actual FlowRule is found and removed or not
:rtype: bool
"""
if id is not None:
for f in self.flowrules:
if f.id == id:
self.flowrules.remove(f)
return True
else:
deletable = []
ret = False
for f in self.flowrules:
if f.match == match or f.action == action:
deletable.append(f)
for f in deletable:
self.flowrules.remove(f)
ret = True
return ret
def clear_flowrules (self):
"""
Delete all the flowrules from the port.
:return: None
"""
del self.flowrules[:]
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
port = super(InfraPort, self).persist()
flowrules = [f.persist() for f in self.flowrules]
if flowrules:
port["flowrules"] = flowrules
return port
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
super(InfraPort, self).load(data=data)
    for fr in data.get('flowrules', ()):
      self.flowrules.append(Flowrule().load(data=fr))
    return self
################################################################################
# ------------------------ NF / SAP / INFRASTRUCTURE NODES -------------------
################################################################################
class NodeNF(Node):
"""
Network Function (NF) nodes in the graph.
"""
__slots__ = ('functional_type', 'deployment_type', 'resources',
'placement_criteria')
def __init__ (self, id=None, name=None, func_type=None, dep_type=None,
res=None):
"""
Init.
:param func_type: functional type (default: "None")
:type func_type: str
:param dep_type: deployment type (default: "None")
:type dep_type: str
:param res: optional NF resources
:type res: :any:`NodeResource`
:return: None
"""
super(NodeNF, self).__init__(id=id, type=Node.NF, name=name)
self.functional_type = func_type # mandatory
# container: specification
self.deployment_type = dep_type
self.resources = res if res is not None else NodeResource()
# container
# Internal attributes for mapping
self.placement_criteria = ()
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
node = super(NodeNF, self).persist()
if self.functional_type is not None:
node["functional_type"] = self.functional_type
specification = OrderedDict()
if self.deployment_type is not None:
specification["deployment_type"] = self.deployment_type
res = self.resources.persist()
if res:
specification["resources"] = res
if specification:
node["specification"] = specification
return node
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
super(NodeNF, self).load(data=data)
self.functional_type = data.get('functional_type')
if 'specification' in data:
self.deployment_type = data['specification'].get('deployment_type')
if 'resources' in data['specification']:
self.resources.load(data['specification']['resources'])
return self
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return "%s(id:%s, type:%s)" % (
self.__class__.__name__, self.id, self.functional_type)
class NodeSAP(Node):
"""
Class for SAP nodes in the NF-FG.
"""
__slots__ = ('binding', 'placement_criteria')
def __init__ (self, id=None, name=None, binding=None, metadata=None):
"""
Init.
:param id: optional id
:type id: str or int
:param name: optional name
:type name: str
:param binding: interface binding
:type binding: str
:param metadata: metadata related to Node
:type metadata: dict
:return: None
"""
super(NodeSAP, self).__init__(id=id, type=Node.SAP, name=name,
metadata=metadata)
    # Interface binding of the SAP
self.binding = binding
# Internal attributes for mapping
self.placement_criteria = ()
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return "SAP(id: %s, name: %s)" % (self.id, self.name)
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return super(NodeSAP, self).__repr__()
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
sap = super(NodeSAP, self).persist()
if self.binding is not None:
sap['binding'] = self.binding
return sap
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
super(NodeSAP, self).load(data=data)
self.binding = data.get('binding')
return self
class NodeInfra(Node):
"""
Class for infrastructure nodes in the NF-FG.
"""
PORT_CLASS = InfraPort
# Defined Infra types
TYPE_BISBIS = "BiSBiS"
TYPE_EE = "EE" # default Execution Environment with NETCONF
  TYPE_STATIC_EE = "STATIC"  # Static EE, probably will never be used
TYPE_SDN_SWITCH = "SDN-SWITCH" # Common OVS switch - can't run NF
# Defined domain type
DEFAULT_DOMAIN = "VIRTUAL"
__slots__ = ('mapping_features', 'domain', 'infra_type', 'supported',
'resources', 'delay_matrix', 'availres', 'weight')
def __init__ (self, id=None, name=None, domain=None, infra_type=None,
supported=None, res=None, mapping_features=None):
"""
Init.
:param mapping_features: dict from features string to bool
:type mapping_features: dict
:param domain: domain of the Infrastructure Node
:type domain: str
:param infra_type: type of the Infrastructure Node
:type infra_type: int or str
:param supported: list of supported functional types
:type supported: list
:param res: optional Infra resources
:type res: :any:`NodeResource`
:return: None
"""
super(NodeInfra, self).__init__(id=id, type=Node.INFRA, name=name)
self.mapping_features = mapping_features if mapping_features else {}
self.domain = domain if domain is not None else self.DEFAULT_DOMAIN
self.infra_type = infra_type if infra_type is not None else \
self.TYPE_BISBIS
# Set supported types according to given param type
if isinstance(supported, basestring):
self.supported = [str(supported), ]
elif isinstance(supported, Iterable):
self.supported = [sup for sup in supported]
elif supported is None:
self.supported = []
# Set resource container
self.resources = res if res is not None else NodeResource()
self.delay_matrix = DelayMatrix()
# Internal attributes for mapping
self.availres = None
self.weight = None
def add_port (self, id=None, name=None, properties=None, sap=None,
capability=None, technology=None, delay=None, bandwidth=None,
cost=None, controller=None, orchestrator=None, l2=None, l4=None,
metadata=None):
"""
Add a port with the given params to the Infrastructure Node.
    Override the basic ``add_port()`` to use :any:`InfraPort` objects.
:param id: optional id
:type id: str or int
:param properties: supported properties of the port
:type properties: str or iterable(str)
:param name: optional name
:type name: str
:param sap: inter-domain SAP identifier
:type sap: str
:param capability: optional capabilities
:type capability: str
:param technology: supported technologies
:type technology: str
:param delay: delay
:type delay: float
:param bandwidth: bandwidth
:type bandwidth: float
:param cost: cost
:type cost: str
:param controller: controller URL
:type controller: str
:param orchestrator: orchestrator URL
:type orchestrator: str
:param l2: l2 address
    :type l2: str
:param l4: l4 fields
:type l4: str
:param metadata: metadata related to Node
:type metadata: dict
:return: newly created and stored Port object
:rtype: :any:`InfraPort`
"""
port = InfraPort(self, id=id, name=name, properties=properties, sap=sap,
capability=capability, technology=technology, delay=delay,
bandwidth=bandwidth, cost=cost, controller=controller,
orchestrator=orchestrator, l2=l2, l4=l4, metadata=metadata)
self.ports.append(port)
return port
def add_supported_type (self, functional_type):
"""
Add a supported functional type or list of types to the Infrastructure Node.
:param functional_type: the functional type
:type functional_type: str or list or tuple
:return: the Node object to allow function chaining
:rtype: :any:`NodeInfra`
"""
if isinstance(functional_type, basestring):
self.supported.append(functional_type)
elif isinstance(functional_type, Iterable):
self.supported.extend(functional_type)
else:
raise RuntimeError("Not supported parameter type!")
return self
def has_supported_type (self, functional_type):
"""
    Return True if the Infra Node supports the given `functional_type`.
:param functional_type: functional type name
:type functional_type: str
:return: has the given functional type or not
:rtype: bool
"""
for ft in self.supported:
if ft == functional_type:
return True
return False
def del_supported_type (self, functional_type=None):
"""
    Remove the given functional type from the Infrastructure Node. If no type
    is given then all supported types will be removed.
:param functional_type: the functional type
:type functional_type: str
:return: None
"""
if functional_type is None:
self.supported[:] = []
else:
self.supported.remove(functional_type)
def has_enough_resource (self, res):
"""
Checks whether this :any:`NodeInfra` has at least 'res' resources available.
    :param res: resource object
    :type res: :any:`NodeResource`
:return: has enough resource or not
:rtype: bool
"""
    if self.availres is None:
raise RuntimeError("Available resources not yet calculated for Infra %s!"
" Call calculate_available_node_res function first on "
"the containing NFFG instance!" % self.id)
try:
from copy import deepcopy
# do not do the actual subtraction!
availres = deepcopy(self.availres)
# throws RuntimeError if it couldn't be subtracted.
availres.subtractNodeRes(res, self.resources)
return True
except RuntimeError:
return False
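  # Example (sketch): 'availres' must be filled in first by the containing
  # NFFG (see the calculate_available_node_res function mentioned above);
  # afterwards a candidate request can be checked without mutating the node:
  #
  #   need = NodeResource(cpu=2, mem=4, storage=1, bandwidth=10)
  #   if infra.has_enough_resource(need):
  #     pass  # safe to map the NF onto this infra node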
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
node = super(NodeInfra, self).persist()
if self.domain is not None:
node["domain"] = self.domain
node["type"] = self.infra_type
supported = [sup for sup in self.supported]
if supported:
node['supported'] = supported
res = self.resources.persist()
if res:
node["resources"] = res
if self.mapping_features:
node['mapping_features'] = self.mapping_features.copy()
if not self.delay_matrix.is_empty():
node['delay_matrix'] = self.delay_matrix.persist()
return node
def load (self, data, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:return: None
"""
super(NodeInfra, self).load(data=data)
self.domain = data.get('domain', self.DEFAULT_DOMAIN) # optional
self.infra_type = data['type']
if 'supported' in data:
self.supported = data['supported']
if 'resources' in data:
self.resources.load(data['resources'])
if 'mapping_features' in data:
self.mapping_features = data['mapping_features']
if 'delay_matrix' in data:
self.delay_matrix.load(data['delay_matrix'])
return self
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return "Infra(id: %s, name: %s, type: %s)" % (
self.id, self.name, self.infra_type)
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return super(NodeInfra, self).__repr__()
################################################################################
# ---------- SG REQUIREMENTS / SG NEXT_HOPS / INFRASTRUCTURE LINKS -----------
################################################################################
class EdgeLink(Link):
"""
Class for static and dynamic links in the NF-FG.
Represent a static or dynamic link.
"""
__slots__ = ('backward', 'delay', 'bandwidth', 'cost', 'qos',
'availbandwidth', 'weight')
def __init__ (self, src=None, dst=None, type=None, id=None, backward=False,
delay=None,
bandwidth=None, cost=None, qos=None):
"""
Init.
:param src: source port
:type src: :any:`Port`
:param dst: destination port
:type dst: :any:`Port`
:param type: type of the link (default: Link.STATIC)
:type type: str
:param id: optional link id
:type id: str or int
    :param backward: the link is a backward link compared to another Link
:type backward: bool
:param delay: delay resource
:type delay: float
:param bandwidth: bandwidth resource
:type bandwidth: float
:param cost: cost
:type cost: str
:param qos: traffic QoS class
:type qos: str
:return: None
"""
type = type if type is not None else Link.STATIC
super(EdgeLink, self).__init__(src=src, dst=dst, type=type, id=id)
    # Signal if the link is a backward link compared to another existing
    # Link with the same src and dst Node
self.backward = backward # always False by default
self.delay = delay # optional
self.bandwidth = bandwidth # optional
self.cost = cost
self.qos = qos
# Internal attributes for mapping
self.availbandwidth = None
self.weight = None
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
link = super(EdgeLink, self).persist()
if self.delay is not None:
link["delay"] = self.delay
if self.bandwidth is not None:
link["bandwidth"] = self.bandwidth
if self.cost is not None:
link["cost"] = self.cost
if self.qos is not None:
link["qos"] = self.qos
if self.backward:
link["backward"] = self.backward
return link
def load (self, data, container=None, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:param container: main container object
:type container: :any:`NFFGModel`
:return: None
"""
if container is None:
raise RuntimeError(
"Container reference is not given for edge endpoint lookup!")
for link in container.edge_links:
if link.id == data['id']:
raise RuntimeError("ID conflict during EdgeLink loading: %s" % link.id)
super(EdgeLink, self).load(data=data, container=container)
self.delay = float(data['delay']) if 'delay' in data else None
self.bandwidth = float(data['bandwidth']) if 'bandwidth' in data else None
self.cost = data.get('cost')
self.qos = data.get('qos')
self.backward = data.get('backward', False)
return self
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
    return "EdgeLink(id: %s, src: %s[%s], dst: %s[%s], type: %s, backward: " \
           "%s, delay: %s, bandwidth: %s, cost: %s, qos: %s)" % (
self.id, self.src.node.id, self.src.id, self.dst.node.id,
self.dst.id, self.type, self.backward, self.delay, self.bandwidth,
self.cost, self.qos)
def __repr__ (self):
"""
Return with specific string representation.
:return: specific representation
:rtype: str
"""
return "<|ID: %s, Type: %s, Back: %s, src: %s[%s], dst: %s[%s] --> %s|>" % (
self.id, self.type, self.backward, self.src.node.id, self.src.id,
self.dst.node.id, self.dst.id, super(Element, self).__repr__())
class EdgeSGLink(Link):
"""
Class for links of SG.
Represent an edge between SG elements.
"""
__slots__ = ('flowclass', 'tag_info', 'delay', 'bandwidth',
'additional_actions')
def __init__ (self, src=None, dst=None, id=None, flowclass=None,
tag_info=None,
delay=None, bandwidth=None, constraints=None,
additional_actions=None):
"""
Init.
:param src: source port
:type src: :any:`Port`
:param dst: destination port
:type dst: :any:`Port`
:param id: optional id
:type id: str or int
:param flowclass: flowclass of SG next hop link a.k.a a match
:type flowclass: str
:param tag_info: tag info
:type tag_info: str
:param delay: requested delay on the SG next hop
:type delay: float
:param bandwidth: requested bandwidth on the SG next hop
:type bandwidth: float
:param constraints: optional Constraints object
:type constraints: :class:`Constraints`
    :param additional_actions: additional (non-traffic-steering) actions in a
       flowrule
    :type additional_actions: str
:return: None
"""
super(EdgeSGLink, self).__init__(src=src, dst=dst, type=Link.SG, id=id,
constraints=constraints)
self.flowclass = flowclass # flowrule without action
self.tag_info = tag_info
self.delay = delay
self.bandwidth = bandwidth
self.additional_actions = additional_actions
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
link = super(EdgeSGLink, self).persist()
if self.flowclass is not None:
link["flowclass"] = self.flowclass
if self.tag_info is not None:
link["tag_info"] = self.tag_info
if self.delay is not None:
link["delay"] = self.delay
if self.bandwidth is not None:
link["bandwidth"] = self.bandwidth
if self.additional_actions is not None:
link["additional_actions"] = self.additional_actions
return link
def load (self, data, container=None, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:param container: main container object
:type container: :any:`NFFGModel`
:return: None
"""
if container is None:
raise RuntimeError(
"Container reference is not given for edge endpoint lookup!")
for link in container.edge_sg_nexthops:
if link.id == data['id']:
raise RuntimeError(
"ID conflict during EdgeSGLink loading: %s" % link.id)
super(EdgeSGLink, self).load(data=data, container=container)
self.flowclass = data.get('flowclass')
self.tag_info = data.get('tag_info')
self.additional_actions = data.get('additional_actions')
self.delay = float(data['delay']) if 'delay' in data else None
self.bandwidth = float(data['bandwidth']) if 'bandwidth' in data else None
return self
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
return "SGLink(id: %s, src: %s[%s], dst: %s[%s], tag: %s, delay: %s, " \
"bandwidth: %s)" % (
self.id, self.src.node.id, self.src.id, self.dst.node.id,
self.dst.id, self.tag_info, self.delay, self.bandwidth)
class EdgeReq(Link):
"""
  Class for constraints on networking parameters between SG elements.
  Class for requirements between arbitrary NF nodes.
"""
__slots__ = ('delay', 'bandwidth', 'sg_path')
def __init__ (self, src=None, dst=None, id=None, delay=None, bandwidth=None,
sg_path=None):
"""
Init.
:param src: source port
:type src: :any:`Port`
:param dst: destination port
:type dst: :any:`Port`
:param id: optional id
:type id: str or int
:param delay: delay resource
:type delay: float
:param bandwidth: bandwidth resource
:type bandwidth: float
:param sg_path: list of ids of sg_links represents end-to-end requirement
    :type sg_path: list or tuple
:return: None
"""
super(EdgeReq, self).__init__(src=src, dst=dst, type=Link.REQUIREMENT,
id=id)
self.delay = delay # optional
self.bandwidth = bandwidth # optional
# Set sg_path types according to given param type
if isinstance(sg_path, basestring):
self.sg_path = [str(sg_path), ]
elif isinstance(sg_path, Iterable):
self.sg_path = [p for p in sg_path]
elif sg_path is None:
self.sg_path = []
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
link = super(EdgeReq, self).persist()
if self.delay is not None:
link["delay"] = self.delay
if self.bandwidth is not None:
link["bandwidth"] = self.bandwidth
sg_path = self.sg_path[:]
if sg_path:
link['sg_path'] = sg_path
return link
def load (self, data, container=None, *args, **kwargs):
"""
Instantiate object from JSON.
:param data: JSON data
:type data: dict
:param container: main container object
:type container: :any:`NFFGModel`
:return: None
"""
if container is None:
raise RuntimeError(
"Container reference is not given for edge endpoint lookup!")
for link in container.edge_reqs:
if link.id == data['id']:
raise RuntimeError("ID conflict during EdgeReq loading: %s" % link.id)
super(EdgeReq, self).load(data=data, container=container)
self.delay = float(data['delay']) if 'delay' in data else None
self.bandwidth = float(data['bandwidth']) if 'bandwidth' in data else None
if 'sg_path' in data:
self.sg_path = data['sg_path']
return self
def __str__ (self):
"""
Return with string representation.
:return: string representation
:rtype: str
"""
    return "ReqLink(id: %s, src: %s[%s], dst: %s[%s], path: %s, delay: %s, " \
"bandwidth: %s)" % (
self.id, self.src.node.id, self.src.id, self.dst.node.id,
self.dst.id, self.sg_path, self.delay, self.bandwidth)
################################################################################
# --------========== MAIN CONTAINER STARTS HERE =========-------------
################################################################################
class NFFGParseError(RuntimeError):
"""
Exception class for specific parsing errors.
"""
pass
class NFFGModel(Element):
"""
Wrapper class for a single NF-FG.
Network Function Forwarding Graph (NF-FG) data model.
"""
# Default version
VERSION = "1.0"
"""Default version"""
# Namespace
NAMESPACE = "http://csikor.tmit.bme.hu/netconf/unify/nffg"
"""Namespace"""
# prefix
PREFIX = "nffg"
"""prefix"""
# Organization
ORGANIZATION = "BME-TMIT"
"""Organization"""
# Description
DESCRIPTION = "Network Function Forwarding Graph (NF-FG) data model"
"""Description"""
# Container type
TYPE = "NFFG"
__slots__ = ('name', 'service_id', 'version', 'metadata', 'mode', 'node_nfs',
'node_saps', 'node_infras', 'edge_links', 'edge_sg_nexthops',
'edge_reqs')
def __init__ (self, id=None, name=None, service_id=None, metadata=None,
mode=None, status=None, version=None):
"""
Init.
:param id: optional NF-FG identifier (generated by default)
:type id: str or int
:param name: optional NF-FG name
:type name: str
:param service_id: service id this NFFG is originated from
:type service_id: str or int
:param version: optional version (default: 1.0)
:type version: str
:return: None
"""
super(NFFGModel, self).__init__(id=id, type=self.TYPE, status=status)
self.name = name
self.service_id = service_id
self.version = version if version is not None else self.VERSION
self.metadata = OrderedDict(metadata if metadata else ())
self.mode = mode
self.node_nfs = []
self.node_saps = []
self.node_infras = []
self.edge_links = []
self.edge_sg_nexthops = []
self.edge_reqs = []
@property
def nodes (self):
"""
    Return all the nodes in the Container as a list.
:return: nodes
:rtype: list
"""
# shallow copy
nodes = self.node_nfs[:]
nodes.extend(self.node_saps)
nodes.extend(self.node_infras)
return nodes
@property
def edges (self):
"""
Return all the edges in the Container as a list.
:return: edges
:rtype: list
"""
# shallow copy
edges = self.edge_links[:]
edges.extend(self.edge_reqs)
edges.extend(self.edge_sg_nexthops)
return edges
def get_port (self, node_id, port_id):
"""
Return the Port reference according to the given Node and Port ids.
:param node_id: node id
:type node_id: str
:param port_id: port id
:type port_id: str
:return: port object
:rtype: :any:`Port`
"""
for node in self.nodes:
if node.id == node_id:
for port in node.ports:
if port.id == port_id:
return port
return None
def add_nf (self, **kwargs):
"""
Create and store a NF Node with the given parameters.
:return: the created NF
:rtype: :any:`NodeNF`
"""
nf = NodeNF(**kwargs)
for node in self.node_nfs:
if node.id == nf.id:
        raise RuntimeError(
          "NodeNF with id: %s already exists in the container!" % node.id)
self.node_nfs.append(nf)
return nf
def del_nf (self, id):
"""
Remove the NF Node with the given id.
:param id: NF id
    :type id: str
:return: the actual Node is found and removed or not
:rtype: bool
"""
for node in self.node_nfs:
if node.id == id:
self.node_nfs.remove(node)
return True
def add_sap (self, **kwargs):
"""
Create and store a SAP Node with the given parameters.
:return: the created SAP
:rtype: :any:`NodeSAP`
"""
sap = NodeSAP(**kwargs)
for node in self.node_saps:
if node.id == sap.id:
        raise RuntimeError(
          "NodeSAP with id: %s already exists in the container!" % node.id)
self.node_saps.append(sap)
return sap
def del_sap (self, id):
"""
Remove the SAP Node with the given id.
:param id: SAP id
    :type id: str
:return: the actual Node is found and removed or not
:rtype: bool
"""
for node in self.node_saps:
if node.id == id:
self.node_saps.remove(node)
return True
def add_infra (self, **kwargs):
"""
Create and store an Infrastructure Node with the given parameters.
:return: the created Infra
:rtype: :any:`NodeInfra`
"""
infra = NodeInfra(**kwargs)
for node in self.node_infras:
if node.id == infra.id:
        raise RuntimeError(
          "NodeInfra with id: %s already exists in the container!" % node.id)
self.node_infras.append(infra)
return infra
def del_infra (self, id):
"""
Remove Infrastructure Node with the given id.
:param id: Infra id
    :type id: str
:return: the actual Node is found and removed or not
:rtype: bool
"""
for node in self.node_infras:
if node.id == id:
self.node_infras.remove(node)
return True
def add_link (self, src, dst, **kwargs):
"""
Create and store a Link Edge with the given src and dst nodes.
    :param src: source port
    :type src: :any:`Port`
    :param dst: destination port
    :type dst: :any:`Port`
:return: the created edge
:rtype: :any:`EdgeLink`
"""
link = EdgeLink(src=src, dst=dst, **kwargs)
for edge in self.edge_links:
if edge.src.id == src.id and edge.dst.id == dst.id:
raise RuntimeError(
"EdgeLink with src(%s) and dst(%s) endpoints already exist in the "
"container!" % (src.id, dst.id))
self.edge_links.append(link)
return link
def del_link (self, src, dst):
"""
Remove Link Edge with given src and dst nodes.
    :param src: source port
    :type src: :any:`Port`
    :param dst: destination port
    :type dst: :any:`Port`
:return: the actual Edge is found and removed or not
:rtype: bool
"""
for edge in self.edge_links:
if edge.src.id == src.id and edge.dst.id == dst.id:
self.edge_links.remove(edge)
return True
def add_sg_hop (self, src, dst, **kwargs):
"""
Create and store an SG next hop Edge with the given src and dst nodes.
    :param src: source port
    :type src: :any:`Port`
    :param dst: destination port
    :type dst: :any:`Port`
:return: the created edge
:rtype: :any:`EdgeSGLink`
"""
hop = EdgeSGLink(src=src, dst=dst, **kwargs)
for edge in self.edge_sg_nexthops:
if edge.src.id == src.id and edge.dst.id == dst.id:
raise RuntimeError(
"EdgeSGLink with src(%s) and dst(%s) endpoints already exist in the "
"container!" % (src.id, dst.id))
self.edge_sg_nexthops.append(hop)
return hop
def del_sg_hop (self, src, dst):
"""
Remove SG next hop Edge with given src and dst nodes.
    :param src: source port
    :type src: :any:`Port`
    :param dst: destination port
    :type dst: :any:`Port`
:return: the actual Edge is found and removed or not
:rtype: bool
"""
for edge in self.edge_sg_nexthops:
if edge.src.id == src.id and edge.dst.id == dst.id:
self.edge_sg_nexthops.remove(edge)
return True
def add_req (self, src, dst, **kwargs):
"""
Create and store a Requirement Edge with the given src and dst nodes.
    :param src: source port
    :type src: :any:`Port`
    :param dst: destination port
    :type dst: :any:`Port`
:return: the created edge
:rtype: :any:`EdgeReq`
"""
req = EdgeReq(src=src, dst=dst, **kwargs)
for edge in self.edge_reqs:
if edge.src.id == src.id and edge.dst.id == dst.id:
raise RuntimeError(
"EdgeReq with src(%s) and dst(%s) endpoints already exist in the "
"container!" % (src.id, dst.id))
    self.edge_reqs.append(req)
return req
def del_req (self, src, dst):
"""
Remove Requirement Edge with given src and dst nodes.
    :param src: source port
    :type src: :any:`Port`
    :param dst: destination port
    :type dst: :any:`Port`
:return: the actual Edge is found and removed or not
:rtype: bool
"""
for edge in self.edge_reqs:
if edge.src.id == src.id and edge.dst.id == dst.id:
        self.edge_reqs.remove(edge)
return True
def persist (self):
"""
Persist object.
:return: JSON representation
:rtype: dict
"""
super(NFFGModel, self).persist()
nffg = OrderedDict(parameters=OrderedDict(id=self.id))
if self.name is not None:
nffg["parameters"]["name"] = self.name
if self.service_id is not None:
nffg["parameters"]["service_id"] = self.service_id
nffg["parameters"]["version"] = self.version
if self.metadata:
nffg["parameters"]["metadata"] = self.metadata
if self.mode:
nffg['parameters']['mode'] = self.mode
if self.node_nfs:
nffg["node_nfs"] = [nf.persist() for nf in self.node_nfs]
if self.node_saps:
nffg["node_saps"] = [sap.persist() for sap in self.node_saps]
if self.node_infras:
nffg["node_infras"] = [infra.persist() for infra in self.node_infras]
if self.edge_links:
nffg["edge_links"] = [link.persist() for link in self.edge_links]
if self.edge_sg_nexthops:
nffg["edge_sg_nexthops"] = [sg.persist() for sg in self.edge_sg_nexthops]
if self.edge_reqs:
nffg["edge_reqs"] = [req.persist() for req in self.edge_reqs]
return nffg
def load (self, raw_data, *args, **kwargs):
"""
Read the given JSON object structure and try to convert to an NF-FG
representation as an :any:`NFFGModel`.
    :param raw_data: raw data in JSON
:type raw_data: str
:return: the constructed NF-FG representation
:rtype: :any:`NFFGModel`
"""
# Converter function to avoid unicode
def unicode_to_str (input):
"""
Converter function to avoid unicode.
:param input: data part
:type input: unicode
:return: converted data
:rtype: str
"""
if isinstance(input, dict):
return OrderedDict(
[(unicode_to_str(key), unicode_to_str(value)) for key, value in
input.iteritems()])
elif isinstance(input, list):
return [unicode_to_str(element) for element in input]
elif isinstance(input, unicode):
# return input.encode('utf-8').replace(' ', '_')
return input.encode('utf-8')
else:
return input
try:
# Load from plain text
data = json.loads(raw_data, object_hook=unicode_to_str)
# Create container and fill container fields
container = NFFGModel(
id=data['parameters'].get('id'), # mandatory
name=data['parameters'].get('name'), # can be None
service_id=data['parameters'].get('service_id'), # can be None
metadata=data['parameters'].get('metadata'),
mode=data['parameters'].get('mode'),
status=data['parameters'].get('status'),
version=data['parameters'].get('version')) # mandatory
# Fill Container lists
for n in data.get('node_nfs', ()):
container.node_nfs.append(NodeNF.parse(data=n))
for n in data.get('node_saps', ()):
container.node_saps.append(NodeSAP.parse(data=n))
for n in data.get('node_infras', ()):
container.node_infras.append(NodeInfra.parse(data=n))
for e in data.get('edge_links', ()):
container.edge_links.append(EdgeLink.parse(data=e, container=container))
for e in data.get('edge_sg_nexthops', ()):
container.edge_sg_nexthops.append(
EdgeSGLink().parse(data=e, container=container))
for e in data.get('edge_reqs', ()):
container.edge_reqs.append(EdgeReq.parse(data=e, container=container))
except KeyError as e:
raise RuntimeError("Not a valid NFFGModel format!", e)
except ValueError as e:
raise NFFGParseError("Parsed data is not valid JSON: %s" % e)
return container
def dump (self):
"""
Dump the container in plain text based on JSON structure.
:return: NF-FG representation as plain text
:rtype: str
"""
return json.dumps(self.persist(), indent=2, sort_keys=False)
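# Example (illustrative end-to-end sketch; ids and values are placeholders):
# build a minimal model with one SAP, one Infra node and a static link
# between them, then serialize it to JSON.
#
#   model = NFFGModel(id="test-nffg")
#   sap = model.add_sap(id="sap1")
#   infra = model.add_infra(id="infra1", infra_type=NodeInfra.TYPE_BISBIS)
#   sp, ip = sap.add_port(id=1), infra.add_port(id=1)
#   model.add_link(sp, ip, id="link1", delay=0.1, bandwidth=100)
#   text = model.dump()                # JSON text
#   restored = NFFGModel().load(text)  # parse it back into a container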
| hsnlab/nffg | nffg_elements.py | Python | apache-2.0 | 90,210 | 0.008081 |
"""
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
from collections import Counter, Iterator, Mapping, OrderedDict, namedtuple
from itertools import chain, count, product
from string import ascii_uppercase
from django.core.exceptions import (
EmptyResultSet, FieldDoesNotExist, FieldError,
)
from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections
from django.db.models.aggregates import Count
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import Col, Ref
from django.db.models.fields.related_lookups import MultiColSource
from django.db.models.lookups import Lookup
from django.db.models.query_utils import (
Q, check_rel_lookup_compatibility, refs_expression,
)
from django.db.models.sql.constants import (
INNER, LOUTER, ORDER_DIR, ORDER_PATTERN, SINGLE,
)
from django.db.models.sql.datastructures import (
BaseTable, Empty, Join, MultiJoin,
)
from django.db.models.sql.where import (
AND, OR, ExtraWhere, NothingNode, WhereNode,
)
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.tree import Node
__all__ = ['Query', 'RawQuery']
def get_field_names_from_opts(opts):
return set(chain.from_iterable(
(f.name, f.attname) if f.concrete else (f.name,)
for f in opts.get_fields()
))
def get_children_from_q(q):
for child in q.children:
if isinstance(child, Node):
yield from get_children_from_q(child)
else:
yield child
JoinInfo = namedtuple(
'JoinInfo',
('final_field', 'targets', 'opts', 'joins', 'path')
)
class RawQuery:
"""A single raw SQL query."""
def __init__(self, sql, using, params=None):
self.params = params or ()
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.annotation_select = {}
def chain(self, using):
return self.clone(using)
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.column_name_converter
return [converter(column_meta[0])
for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
@property
def params_type(self):
return dict if isinstance(self.params, Mapping) else tuple
def __str__(self):
return self.sql % self.params_type(self.params)
def _execute_query(self):
connection = connections[self.using]
# Adapt parameters to the database, as much as possible considering
# that the target type isn't known. See #17755.
params_type = self.params_type
adapter = connection.ops.adapt_unknown_value
if params_type is tuple:
params = tuple(adapter(val) for val in self.params)
elif params_type is dict:
params = {key: adapter(val) for key, val in self.params.items()}
else:
raise RuntimeError("Unexpected params type: %s" % params_type)
self.cursor = connection.cursor()
self.cursor.execute(self.sql, params)
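# Example (illustrative; table and column names are placeholders):
#
#   q = RawQuery("SELECT id, name FROM app_author WHERE id = %s",
#                using="default", params=(1,))
#   q.get_columns()   # -> ['id', 'name'] (executes the query)
#   rows = list(q)    # iterates over the raw result rows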
class Query:
"""A single SQL query."""
alias_prefix = 'T'
subq_aliases = frozenset([alias_prefix])
compiler = 'SQLCompiler'
def __init__(self, model, where=WhereNode):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# types they are. The key is the alias of the joined table (possibly
# the table name) and the value is a Join-like object (see
# sql.datastructures.Join for more information).
self.alias_map = OrderedDict()
# Sometimes the query contains references to aliases in outer queries (as
# a result of split_exclude). Correct alias quoting needs to know these
# aliases too.
self.external_aliases = set()
self.table_map = {} # Maps table names to list of aliases.
self.default_cols = True
self.default_ordering = True
self.standard_ordering = True
self.used_aliases = set()
self.filter_is_sticky = False
self.subquery = False
# SQL-related attributes
# Select and related select clauses are expressions to use in the
# SELECT clause of the query.
# The select is used for cases where we want to set up the select
# clause to contain other than default fields (values(), subqueries...)
# Note that annotations go to annotations dictionary.
self.select = ()
self.where = where()
self.where_class = where
# The group_by attribute can have one of the following forms:
# - None: no group by at all in the query
# - A tuple of expressions: group by (at least) those expressions.
# String refs are also allowed for now.
# - True: group by all select fields of the model
# See compiler.get_group_by() for details.
self.group_by = None
self.order_by = ()
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.distinct = False
self.distinct_fields = ()
self.select_for_update = False
self.select_for_update_nowait = False
self.select_for_update_skip_locked = False
self.select_for_update_of = ()
self.select_related = False
        # Arbitrary limit for select_related to prevent infinite recursion.
self.max_depth = 5
# Holds the selects defined by a call to values() or values_list()
# excluding annotation_select and extra_select.
self.values_select = ()
# SQL annotation-related attributes
# The _annotations will be an OrderedDict when used. Due to the cost
# of creating OrderedDict this attribute is created lazily (in
# self.annotations property).
self._annotations = None # Maps alias -> Annotation Expression
self.annotation_select_mask = None
self._annotation_select_cache = None
# Set combination attributes
self.combinator = None
self.combinator_all = False
self.combined_queries = ()
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
# The _extra attribute is an OrderedDict, lazily created similarly to
# .annotations
self._extra = None # Maps col_alias -> (col_sql, params).
self.extra_select_mask = None
self._extra_select_cache = None
self.extra_tables = ()
self.extra_order_by = ()
# A tuple that is a set of model field names and either True, if these
# are the fields to defer, or False if these are the only fields to
# load.
self.deferred_loading = (frozenset(), True)
self._filtered_relations = {}
@property
def extra(self):
if self._extra is None:
self._extra = OrderedDict()
return self._extra
@property
def annotations(self):
if self._annotations is None:
self._annotations = OrderedDict()
return self._annotations
@property
def has_select_fields(self):
return bool(self.select or self.annotation_select_mask or self.extra_select_mask)
@cached_property
def base_table(self):
for alias in self.alias_map:
return alias
def __str__(self):
"""
Return the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Return the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
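    # A hedged usage sketch (not part of Django; ``Author`` is a
    # hypothetical model):
    #
    #     qs = Author.objects.filter(name='bob')
    #     sql, params = qs.query.sql_with_params()
    #     # sql    -> 'SELECT ... WHERE "app_author"."name" = %s'
    #     # params -> ('bob',)
    #     str(qs.query)  # the same SQL with params interpolated (roughly)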
def __deepcopy__(self, memo):
"""Limit the amount of work when a Query is deepcopied."""
result = self.clone()
memo[id(self)] = result
return result
def _prepare(self, field):
return self
def get_compiler(self, using=None, connection=None):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
return connection.ops.compiler(self.compiler)(self, connection, using)
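    # Illustrative only: a compiler is the object that turns this Query
    # into SQL, e.g. (assuming the 'default' connection alias):
    #
    #     compiler = query.get_compiler(using='default')
    #     sql, params = compiler.as_sql()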
def get_meta(self):
"""
Return the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
def clone(self):
"""
        Return a copy of the current Query. A lightweight alternative
        to deepcopy().
"""
obj = Empty()
obj.__class__ = self.__class__
# Copy references to everything.
obj.__dict__ = self.__dict__.copy()
# Clone attributes that can't use shallow copy.
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.external_aliases = self.external_aliases.copy()
obj.table_map = self.table_map.copy()
obj.where = self.where.clone()
obj._annotations = self._annotations.copy() if self._annotations is not None else None
if self.annotation_select_mask is None:
obj.annotation_select_mask = None
else:
obj.annotation_select_mask = self.annotation_select_mask.copy()
# _annotation_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both annotations and
# _annotation_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._annotation_select_cache = None
obj._extra = self._extra.copy() if self._extra is not None else None
if self.extra_select_mask is None:
obj.extra_select_mask = None
else:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is None:
obj._extra_select_cache = None
else:
obj._extra_select_cache = self._extra_select_cache.copy()
if 'subq_aliases' in self.__dict__:
obj.subq_aliases = self.subq_aliases.copy()
obj.used_aliases = self.used_aliases.copy()
obj._filtered_relations = self._filtered_relations.copy()
# Clear the cached_property
try:
del obj.base_table
except AttributeError:
pass
return obj
def chain(self, klass=None):
"""
Return a copy of the current Query that's ready for another operation.
The klass argument changes the type of the Query, e.g. UpdateQuery.
"""
obj = self.clone()
if klass and obj.__class__ != klass:
obj.__class__ = klass
if not obj.filter_is_sticky:
obj.used_aliases = set()
obj.filter_is_sticky = False
if hasattr(obj, '_setup_query'):
obj._setup_query()
return obj
def relabeled_clone(self, change_map):
clone = self.clone()
clone.change_aliases(change_map)
return clone
def rewrite_cols(self, annotation, col_cnt):
# We must make sure the inner query has the referred columns in it.
# If we are aggregating over an annotation, then Django uses Ref()
# instances to note this. However, if we are annotating over a column
        # of a related model, then it might be that the column isn't part of the
# SELECT clause of the inner query, and we must manually make sure
# the column is selected. An example case is:
# .aggregate(Sum('author__awards'))
# Resolving this expression results in a join to author, but there
# is no guarantee the awards column of author is in the select clause
# of the query. Thus we must manually add the column to the inner
# query.
orig_exprs = annotation.get_source_expressions()
new_exprs = []
for expr in orig_exprs:
# FIXME: These conditions are fairly arbitrary. Identify a better
# method of having expressions decide which code path they should
# take.
if isinstance(expr, Ref):
                # It's already a Ref to a subquery (see resolve_ref() for
# details)
new_exprs.append(expr)
elif isinstance(expr, (WhereNode, Lookup)):
# Decompose the subexpressions further. The code here is
# copied from the else clause, but this condition must appear
# before the contains_aggregate/is_summary condition below.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
elif isinstance(expr, Col) or (expr.contains_aggregate and not expr.is_summary):
# Reference to column. Make sure the referenced column
# is selected.
col_cnt += 1
col_alias = '__col%d' % col_cnt
self.annotations[col_alias] = expr
self.append_annotation_mask([col_alias])
new_exprs.append(Ref(col_alias, expr))
else:
# Some other expression not referencing database values
# directly. Its subexpression might contain Cols.
new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
new_exprs.append(new_expr)
annotation.set_source_expressions(new_exprs)
return annotation, col_cnt
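    # A hedged example (hypothetical models): for
    # Book.objects.annotate(n=Count('chapters')).aggregate(Avg('price')),
    # Avg('price')'s source expression is a plain column, so rewrite_cols()
    # selects it in the inner query under an alias like '__col1' and
    # rewrites the outer expression to Ref('__col1', ...).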
def get_aggregation(self, using, added_aggregate_names):
"""
Return the dictionary with the values of the existing aggregations.
"""
if not self.annotation_select:
return {}
has_limit = self.low_mark != 0 or self.high_mark is not None
has_existing_annotations = any(
annotation for alias, annotation
in self.annotations.items()
if alias not in added_aggregate_names
)
# Decide if we need to use a subquery.
#
# Existing annotations would cause incorrect results as get_aggregation()
# must produce just one result and thus must not use GROUP BY. But we
# aren't smart enough to remove the existing annotations from the
# query, so those would force us to use GROUP BY.
#
# If the query has limit or distinct, or uses set operations, then
# those operations must be done in a subquery so that the query
# aggregates on the limit and/or distinct results instead of applying
# the distinct and limit after the aggregation.
if (isinstance(self.group_by, tuple) or has_limit or has_existing_annotations or
self.distinct or self.combinator):
from django.db.models.sql.subqueries import AggregateQuery
outer_query = AggregateQuery(self.model)
inner_query = self.clone()
inner_query.select_for_update = False
inner_query.select_related = False
if not has_limit and not self.distinct_fields:
                # Queries with distinct_fields need ordering and when a limit
                # is applied we must take the slice from the ordered query.
                # Otherwise no ordering is needed.
inner_query.clear_ordering(True)
if not inner_query.distinct:
# If the inner query uses default select and it has some
# aggregate annotations, then we must make sure the inner
# query is grouped by the main model's primary key. However,
# clearing the select clause can alter results if distinct is
# used.
if inner_query.default_cols and has_existing_annotations:
inner_query.group_by = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
inner_query.default_cols = False
relabels = {t: 'subquery' for t in inner_query.alias_map}
relabels[None] = 'subquery'
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
col_cnt = 0
for alias, expression in list(inner_query.annotation_select.items()):
if expression.is_summary:
expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
outer_query.annotations[alias] = expression.relabeled_clone(relabels)
del inner_query.annotations[alias]
            # Make sure the annotation_select won't use cached results.
inner_query.set_annotation_mask(inner_query.annotation_select_mask)
if inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask:
# In case of Model.objects[0:3].count(), there would be no
# field selected in the inner query, yet we must use a subquery.
# So, make sure at least one field is selected.
inner_query.select = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
try:
outer_query.add_subquery(inner_query, using)
except EmptyResultSet:
return {
alias: None
for alias in outer_query.annotation_select
}
else:
outer_query = self
self.select = ()
self.default_cols = False
self._extra = {}
outer_query.clear_ordering(True)
outer_query.clear_limits()
outer_query.select_for_update = False
outer_query.select_related = False
compiler = outer_query.get_compiler(using)
result = compiler.execute_sql(SINGLE)
if result is None:
result = [None] * len(outer_query.annotation_select)
converters = compiler.get_converters(outer_query.annotation_select.values())
result = next(compiler.apply_converters((result,), converters))
return dict(zip(outer_query.annotation_select, result))
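    # Hedged sketch of the subquery path above: combining a slice with an
    # aggregate, e.g. Book.objects.all()[:5] then .aggregate(Sum('price'))
    # (hypothetical model), compiles to roughly:
    #
    #     SELECT SUM(price) FROM (SELECT ... LIMIT 5) subquery
    #
    # because the limit must apply before the aggregation.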
def get_count(self, using):
"""
Perform a COUNT() query using the current filter constraints.
"""
obj = self.clone()
obj.add_annotation(Count('*'), alias='__count', is_summary=True)
number = obj.get_aggregation(using, ['__count'])['__count']
if number is None:
number = 0
return number
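    # Illustrative: QuerySet.count() funnels through get_count(), so
    # Author.objects.filter(active=True).count() (hypothetical model)
    # effectively runs SELECT COUNT(*) with the same WHERE clause and
    # reads back the single '__count' aggregate.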
def has_filters(self):
return self.where
def has_results(self, using):
q = self.clone()
if not q.distinct:
if q.group_by is True:
q.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
q.set_group_by()
q.clear_select_clause()
q.clear_ordering(True)
q.set_limits(high=1)
compiler = q.get_compiler(using=using)
return compiler.has_results()
def combine(self, rhs, connector):
"""
        Merge the 'rhs' query into the current one (with any 'rhs' effects
        being applied *after* (that is, "to the right of") anything in the
        current query). 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
assert self.model == rhs.model, \
"Cannot combine queries on two different base models."
assert self.can_filter(), \
"Cannot combine queries once a slice has been taken."
assert self.distinct == rhs.distinct, \
"Cannot combine a unique query with a non-unique query."
assert self.distinct_fields == rhs.distinct_fields, \
"Cannot combine queries with different distinct fields."
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
conjunction = (connector == AND)
# Determine which existing joins can be reused. When combining the
# query with AND we must recreate all joins for m2m filters. When
# combining with OR we can reuse joins. The reason is that in AND
# case a single row can't fulfill a condition like:
# revrel__col=1 & revrel__col=2
# But, there might be two different related rows matching this
# condition. In OR case a single True is enough, so single row is
# enough, too.
#
# Note that we will be creating duplicate joins for non-m2m joins in
# the AND case. The results will be correct but this creates too many
# joins. This is something that could be fixed later on.
reuse = set() if conjunction else set(self.alias_map)
# Base table must be present in the query - this is the same
# table on both sides.
self.get_initial_alias()
joinpromoter = JoinPromoter(connector, 2, False)
joinpromoter.add_votes(
j for j in self.alias_map if self.alias_map[j].join_type == INNER)
rhs_votes = set()
# Now, add the joins from rhs query into the new query (skipping base
# table).
rhs_tables = list(rhs.alias_map)[1:]
for alias in rhs_tables:
join = rhs.alias_map[alias]
# If the left side of the join was already relabeled, use the
# updated alias.
join = join.relabeled_clone(change_map)
new_alias = self.join(join, reuse=reuse)
if join.join_type == INNER:
rhs_votes.add(new_alias)
# We can't reuse the same join again in the query. If we have two
            # distinct joins for the same connection in the rhs query, then the
# combined query must have two joins, too.
reuse.discard(new_alias)
if alias != new_alias:
change_map[alias] = new_alias
if not rhs.alias_refcount[alias]:
# The alias was unused in the rhs query. Unref it so that it
# will be unused in the new query, too. We have to add and
# unref the alias so that join promotion has information of
# the join type for the unused alias.
self.unref_alias(new_alias)
joinpromoter.add_votes(rhs_votes)
joinpromoter.update_join_types(self)
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
w = rhs.where.clone()
w.relabel_aliases(change_map)
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
if rhs.select:
self.set_select([col.relabeled_clone(change_map) for col in rhs.select])
else:
self.select = ()
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self._extra and rhs._extra:
raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.")
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by or self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
def deferred_to_data(self, target, callback):
"""
Convert the self.deferred_loading data structure to an alternate data
        structure, describing the fields that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialized on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
The "callback" is a function that is called whenever a (model, field)
        pair needs to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
orig_opts = self.get_meta()
seen = {}
must_include = {orig_opts.concrete_model: {orig_opts.pk}}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model._meta.concrete_model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
if name in self._filtered_relations:
name = self._filtered_relations[name].relation_name
source = opts.get_field(name)
if is_reverse_o2o(source):
cur_model = source.related_model
else:
cur_model = source.remote_field.model
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# (if it's not a reverse relation) to the things we select.
if not is_reverse_o2o(source):
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field = opts.get_field(parts[-1])
is_reverse_object = field.auto_created and not field.concrete
model = field.related_model if is_reverse_object else field.model
model = model._meta.concrete_model
if model == opts.model:
model = cur_model
if not is_reverse_o2o(field):
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in seen.items():
for field in model._meta.local_fields:
if field in values:
continue
m = field.model._meta.concrete_model
add_to_dict(workset, m, field)
for model, values in must_include.items():
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in workset.items():
callback(target, model, values)
else:
for model, values in must_include.items():
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
seen.setdefault(model, set())
for model, values in seen.items():
callback(target, model, values)
def table_alias(self, table_name, create=False, filtered_relation=None):
"""
Return a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = filtered_relation.alias if filtered_relation is not None else table_name
self.table_map[table_name] = [alias]
self.alias_refcount[alias] = 1
return alias, True
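    # Illustrative alias sequence (assuming the default alias_prefix 'T'):
    # the first use of table "app_book" gets the alias "app_book" itself;
    # a later, non-reusable join to the same table gets an alias such as
    # "T3", numbered from len(self.alias_map) + 1.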
def ref_alias(self, alias):
"""Increases the reference count for this alias."""
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
"""Decreases the reference count for this alias."""
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases):
"""
        Recursively promote the join type of the given aliases and their
        children to an outer join. A join is promoted only if it is nullable
        or the parent join is an outer join.
        The children are promoted to avoid join chains containing
        a LOUTER b INNER c. So, if we currently have a INNER b INNER c and
        a->b is promoted, then b->c must be promoted automatically as well;
        otherwise the promotion of a->b doesn't actually change anything in
        the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type is None:
# This is the base table (first FROM entry) - this table
# isn't really joined at all in the query, so we should not
# alter its join type.
continue
# Only the first alias (skipped above) should have None join_type
assert self.alias_map[alias].join_type is not None
parent_alias = self.alias_map[alias].parent_alias
parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER
already_louter = self.alias_map[alias].join_type == LOUTER
if ((self.alias_map[alias].nullable or parent_louter) and
not already_louter):
self.alias_map[alias] = self.alias_map[alias].promote()
# Join type of 'alias' changed, so re-examine all aliases that
# refer to this one.
aliases.extend(
join for join in self.alias_map
if self.alias_map[join].parent_alias == alias and join not in aliases
)
def demote_joins(self, aliases):
"""
Change join type from LOUTER to INNER for all joins in aliases.
Similarly to promote_joins(), this method must ensure no join chains
containing first an outer, then an inner join are generated. If we
are demoting b->c join in chain a LOUTER b LOUTER c then we must
        demote a->b automatically; otherwise the demotion of b->c doesn't
        actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type == LOUTER:
self.alias_map[alias] = self.alias_map[alias].demote()
parent_alias = self.alias_map[alias].parent_alias
if self.alias_map[parent_alias].join_type == INNER:
aliases.append(parent_alias)
def reset_refcounts(self, to_counts):
"""
Reset reference counts for aliases so that they match the value passed
in `to_counts`.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def change_aliases(self, change_map):
"""
Change the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
assert set(change_map).isdisjoint(change_map.values())
# 1. Update references in "select" (normal columns plus aliases),
# "group by" and "where".
self.where.relabel_aliases(change_map)
if isinstance(self.group_by, tuple):
self.group_by = tuple([col.relabeled_clone(change_map) for col in self.group_by])
self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
if self._annotations:
self._annotations = OrderedDict(
(key, col.relabeled_clone(change_map)) for key, col in self._annotations.items())
# 2. Rename the alias in the internal table/alias datastructures.
for old_alias, new_alias in change_map.items():
if old_alias not in self.alias_map:
continue
alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
self.alias_map[new_alias] = alias_data
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
self.external_aliases = {change_map.get(alias, alias)
for alias in self.external_aliases}
def bump_prefix(self, outer_query):
"""
        Change the alias prefix to the next letter in the alphabet in such a
        way that the outer query's aliases and this query's aliases will not
conflict. Even tables that previously had no alias will get an alias
after this call.
"""
def prefix_gen():
"""
Generate a sequence of characters in alphabetical order:
-> 'A', 'B', 'C', ...
When the alphabet is finished, the sequence will continue with the
Cartesian product:
-> 'AA', 'AB', 'AC', ...
"""
alphabet = ascii_uppercase
prefix = chr(ord(self.alias_prefix) + 1)
yield prefix
for n in count(1):
seq = alphabet[alphabet.index(prefix):] if prefix else alphabet
for s in product(seq, repeat=n):
yield ''.join(s)
prefix = None
if self.alias_prefix != outer_query.alias_prefix:
# No clashes between self and outer query should be possible.
return
local_recursion_limit = 127 # explicitly avoid infinite loop
for pos, prefix in enumerate(prefix_gen()):
if prefix not in self.subq_aliases:
self.alias_prefix = prefix
break
if pos > local_recursion_limit:
raise RuntimeError(
'Maximum recursion depth exceeded: too many subqueries.'
)
self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases)
change_map = OrderedDict()
for pos, alias in enumerate(self.alias_map):
new_alias = '%s%d' % (self.alias_prefix, pos)
change_map[alias] = new_alias
self.change_aliases(change_map)
def get_initial_alias(self):
"""
Return the first alias for this query, after increasing its reference
count.
"""
if self.alias_map:
alias = self.base_table
self.ref_alias(alias)
else:
alias = self.join(BaseTable(self.get_meta().db_table, None))
return alias
def count_active_tables(self):
"""
Return the number of tables in this query with a non-zero reference
count. After execution, the reference counts are zeroed, so tables
added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, join, reuse=None, reuse_with_filtered_relation=False):
"""
Return an alias for the 'join', either reusing an existing alias for
that join or creating a new one. 'join' is either a
sql.datastructures.BaseTable or Join.
The 'reuse' parameter can be either None which means all joins are
reusable, or it can be a set containing the aliases that can be reused.
The 'reuse_with_filtered_relation' parameter is used when computing
FilteredRelation instances.
A join is always created as LOUTER if the lhs alias is LOUTER to make
sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new
joins are created as LOUTER if the join is nullable.
"""
if reuse_with_filtered_relation and reuse:
reuse_aliases = [
a for a, j in self.alias_map.items()
if a in reuse and j.equals(join, with_filtered_relation=False)
]
else:
reuse_aliases = [
a for a, j in self.alias_map.items()
if (reuse is None or a in reuse) and j == join
]
if reuse_aliases:
self.ref_alias(reuse_aliases[0])
return reuse_aliases[0]
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(join.table_name, create=True, filtered_relation=join.filtered_relation)
if join.join_type:
if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
join_type = LOUTER
else:
join_type = INNER
join.join_type = join_type
join.table_alias = alias
self.alias_map[alias] = join
return alias
def join_parent_model(self, opts, model, alias, seen):
"""
Make sure the given 'model' is joined in the query. If 'model' isn't
a parent of 'opts' or if it is None this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
of model -> alias of existing joins. It must also contain a mapping
of None -> some alias. This will be returned in the no-op case.
"""
if model in seen:
return seen[model]
chain = opts.get_base_chain(model)
if not chain:
return alias
curr_opts = opts
for int_model in chain:
if int_model in seen:
curr_opts = int_model._meta
alias = seen[int_model]
continue
            # Proxy models have elements in their base chain with no
            # parents; assign the new options object and skip to the
            # next base in that case.
if not curr_opts.parents[int_model]:
curr_opts = int_model._meta
continue
link_field = curr_opts.get_ancestor_link(int_model)
join_info = self.setup_joins([link_field.name], curr_opts, alias)
curr_opts = int_model._meta
alias = seen[int_model] = join_info.joins[-1]
return alias or seen[None]
def add_annotation(self, annotation, alias, is_summary=False):
"""Add a single annotation expression to the Query."""
annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None,
summarize=is_summary)
self.append_annotation_mask([alias])
self.annotations[alias] = annotation
def resolve_expression(self, query, *args, **kwargs):
clone = self.clone()
# Subqueries need to use a different set of aliases than the outer query.
clone.bump_prefix(query)
clone.subquery = True
# It's safe to drop ordering if the queryset isn't using slicing,
# distinct(*fields) or select_for_update().
if (self.low_mark == 0 and self.high_mark is None and
not self.distinct_fields and
not self.select_for_update):
clone.clear_ordering(True)
return clone
def as_sql(self, compiler, connection):
return self.get_compiler(connection=connection).as_sql()
def resolve_lookup_value(self, value, can_reuse, allow_joins):
if hasattr(value, 'resolve_expression'):
value = value.resolve_expression(self, reuse=can_reuse, allow_joins=allow_joins)
elif isinstance(value, (list, tuple)):
# The items of the iterable may be expressions and therefore need
# to be resolved independently.
for sub_value in value:
if hasattr(sub_value, 'resolve_expression'):
sub_value.resolve_expression(self, reuse=can_reuse, allow_joins=allow_joins)
return value
def solve_lookup_type(self, lookup):
"""
Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').
"""
lookup_splitted = lookup.split(LOOKUP_SEP)
if self._annotations:
expression, expression_lookups = refs_expression(lookup_splitted, self.annotations)
if expression:
return expression_lookups, (), expression
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)]
if len(lookup_parts) > 1 and not field_parts:
raise FieldError(
                'Invalid lookup "%s" for model "%s".' %
(lookup, self.get_meta().model.__name__)
)
return lookup_parts, field_parts, False
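    # A hedged example (hypothetical models/fields):
    #     solve_lookup_type('author__name__icontains')
    #     -> (['icontains'], ['author', 'name'], False)
    # For a lookup that references an annotation, e.g. 'total__gt' with an
    # existing 'total' annotation, the annotation expression is returned:
    #     -> (['gt'], (), <the 'total' expression>)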
def check_query_object_type(self, value, opts, field):
"""
Check whether the object passed while querying is of the correct type.
If not, raise a ValueError specifying the wrong object.
"""
if hasattr(value, '_meta'):
if not check_rel_lookup_compatibility(value._meta.model, opts, field):
raise ValueError(
'Cannot query "%s": Must be "%s" instance.' %
(value, opts.object_name))
def check_related_objects(self, field, value, opts):
"""Check the type of object passed to query relations."""
if field.is_relation:
# Check that the field and the queryset use the same model in a
# query like .filter(author=Author.objects.all()). For example, the
# opts would be Author's (from the author field) and value.model
# would be Author.objects.all() queryset's .model (Author also).
# The field is the related field on the lhs side.
if (isinstance(value, Query) and not value.has_select_fields and
not check_rel_lookup_compatibility(value.model, opts, field)):
raise ValueError(
'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' %
(value.model._meta.object_name, opts.object_name)
)
elif hasattr(value, '_meta'):
self.check_query_object_type(value, opts, field)
elif hasattr(value, '__iter__'):
for v in value:
self.check_query_object_type(v, opts, field)
def build_lookup(self, lookups, lhs, rhs):
"""
        Try to extract transforms and lookup from the given lhs.
        The lhs value is something that works like SQLExpression.
        The rhs value is what the lookup is going to compare against.
        'lookups' is a list of names to extract using get_lookup()
and get_transform().
"""
# __exact is the default lookup if one isn't given.
if not lookups:
lookups = ['exact']
for name in lookups[:-1]:
lhs = self.try_transform(lhs, name)
# First try get_lookup() so that the lookup takes precedence if the lhs
# supports both transform and lookup for the name.
lookup_class = lhs.get_lookup(lookups[-1])
if not lookup_class:
if lhs.field.is_relation:
raise FieldError('Related Field got invalid lookup: {}'.format(lookups[-1]))
# A lookup wasn't found. Try to interpret the name as a transform
# and do an Exact lookup against it.
lhs = self.try_transform(lhs, lookups[-1])
lookup_class = lhs.get_lookup('exact')
if not lookup_class:
return
lookup = lookup_class(lhs, rhs)
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value.
if lookup.rhs is None:
if lookup.lookup_name not in ('exact', 'iexact'):
raise ValueError("Cannot use None as a query value")
return lhs.get_lookup('isnull')(lhs, True)
# For Oracle '' is equivalent to null. The check must be done at this
# stage because join promotion can't be done in the compiler. Using
# DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.
# A similar thing is done in is_nullable(), too.
if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
lookup.lookup_name == 'exact' and lookup.rhs == ''):
return lhs.get_lookup('isnull')(lhs, True)
return lookup
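    # Hedged examples of the special cases above:
    #     build_lookup(['exact'], col, None) -> an 'isnull' lookup (IS NULL)
    #     build_lookup(['gt'], col, None)    -> ValueError
    # and on Oracle, col__exact='' is also rewritten to an IS NULL check.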
def try_transform(self, lhs, name):
"""
Helper method for build_lookup(). Try to fetch and initialize
a transform for name parameter from lhs.
"""
transform_class = lhs.get_transform(name)
if transform_class:
return transform_class(lhs)
else:
raise FieldError(
"Unsupported lookup '%s' for %s or join on the field not "
"permitted." %
(name, lhs.output_field.__class__.__name__))
def build_filter(self, filter_expr, branch_negated=False, current_negated=False,
can_reuse=None, allow_joins=True, split_subq=True,
reuse_with_filtered_relation=False):
"""
Build a WhereNode for a single filter clause but don't add it
to this Query. Query.add_q() will then add this filter to the where
Node.
The 'branch_negated' tells us if the current branch contains any
negations. This will be used to determine if subqueries are needed.
The 'current_negated' is used to determine if the current filter is
negated or not and this will be used to determine if IS NULL filtering
is needed.
The difference between current_negated and branch_negated is that
branch_negated is set on first negation, but current_negated is
flipped for each negation.
        Note that add_filter will not do any negating itself; that is done
        higher up in the code by add_q().
The 'can_reuse' is a set of reusable joins for multijoins.
If 'reuse_with_filtered_relation' is True, then only joins in can_reuse
will be reused.
The method will create a filter clause that can be added to the current
query. However, if the filter isn't added to the query then the caller
is responsible for unreffing the joins used.
"""
if isinstance(filter_expr, dict):
raise FieldError("Cannot parse keyword query as dict")
arg, value = filter_expr
if not arg:
raise FieldError("Cannot parse keyword query %r" % arg)
lookups, parts, reffed_expression = self.solve_lookup_type(arg)
if not getattr(reffed_expression, 'filterable', True):
raise NotSupportedError(
reffed_expression.__class__.__name__ + ' is disallowed in '
'the filter clause.'
)
if not allow_joins and len(parts) > 1:
raise FieldError("Joined field references are not permitted in this query")
pre_joins = self.alias_refcount.copy()
value = self.resolve_lookup_value(value, can_reuse, allow_joins)
used_joins = {k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)}
clause = self.where_class()
if reffed_expression:
condition = self.build_lookup(lookups, reffed_expression, value)
clause.add(condition, AND)
return clause, []
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = not branch_negated or not split_subq
try:
join_info = self.setup_joins(
parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many,
reuse_with_filtered_relation=reuse_with_filtered_relation,
)
# Prevent iterator from being consumed by check_related_objects()
if isinstance(value, Iterator):
value = list(value)
self.check_related_objects(join_info.final_field, value, join_info.opts)
# split_exclude() needs to know which joins were generated for the
# lookup parts
self._lookup_joins = join_info.joins
except MultiJoin as e:
return self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
can_reuse, e.names_with_path)
# Update used_joins before trimming since they are reused to determine
# which joins could be later promoted to INNER.
used_joins.update(join_info.joins)
targets, alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path)
if can_reuse is not None:
can_reuse.update(join_list)
if join_info.final_field.is_relation:
# No support for transforms for relational fields
num_lookups = len(lookups)
if num_lookups > 1:
raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0]))
if len(targets) == 1:
col = targets[0].get_col(alias, join_info.final_field)
else:
col = MultiColSource(alias, targets, join_info.targets, join_info.final_field)
else:
col = targets[0].get_col(alias, join_info.final_field)
condition = self.build_lookup(lookups, col, value)
lookup_type = condition.lookup_name
clause.add(condition, AND)
require_outer = lookup_type == 'isnull' and condition.rhs is True and not current_negated
if current_negated and (lookup_type != 'isnull' or condition.rhs is False):
require_outer = True
if (lookup_type != 'isnull' and (
self.is_nullable(targets[0]) or
self.alias_map[join_list[-1]].join_type == LOUTER)):
# The condition added here will be SQL like this:
# NOT (col IS NOT NULL), where the first NOT is added in
# upper layers of code. The reason for addition is that if col
# is null, then col != someval will result in SQL "unknown"
# which isn't the same as in Python. The Python None handling
# is wanted, and it can be gotten by
# (col IS NULL OR col != someval)
# <=>
# NOT (col IS NOT NULL AND col = someval).
lookup_class = targets[0].get_lookup('isnull')
clause.add(lookup_class(targets[0].get_col(alias, join_info.targets[0]), False), AND)
return clause, used_joins if not require_outer else ()
def add_filter(self, filter_clause):
self.add_q(Q(**{filter_clause[0]: filter_clause[1]}))
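    # Illustrative: add_filter(('name__exact', 'bob')) (hypothetical field)
    # is just shorthand for add_q(Q(name__exact='bob')).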
def add_q(self, q_object):
"""
A preprocessor for the internal _add_q(). Responsible for doing final
join promotion.
"""
# For join promotion this case is doing an AND for the added q_object
# and existing conditions. So, any existing inner join forces the join
# type to remain inner. Existing outer joins can however be demoted.
        # (Consider the case where rel_a is LOUTER and rel_a__col=1 is added:
        # if rel_a doesn't produce any rows, then the whole condition must
        # fail. So, demotion is OK.)
existing_inner = {a for a in self.alias_map if self.alias_map[a].join_type == INNER}
clause, _ = self._add_q(q_object, self.used_aliases)
if clause:
self.where.add(clause, AND)
self.demote_joins(existing_inner)
def _add_q(self, q_object, used_aliases, branch_negated=False,
current_negated=False, allow_joins=True, split_subq=True):
"""Add a Q-object to the current filter."""
connector = q_object.connector
current_negated = current_negated ^ q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = self.where_class(connector=connector,
negated=q_object.negated)
joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated)
for child in q_object.children:
if isinstance(child, Node):
child_clause, needed_inner = self._add_q(
child, used_aliases, branch_negated,
current_negated, allow_joins, split_subq)
joinpromoter.add_votes(needed_inner)
else:
child_clause, needed_inner = self.build_filter(
child, can_reuse=used_aliases, branch_negated=branch_negated,
current_negated=current_negated, allow_joins=allow_joins,
split_subq=split_subq,
)
joinpromoter.add_votes(needed_inner)
if child_clause:
target_clause.add(child_clause, connector)
needed_inner = joinpromoter.update_join_types(self)
return target_clause, needed_inner
def build_filtered_relation_q(self, q_object, reuse, branch_negated=False, current_negated=False):
"""Add a FilteredRelation object to the current filter."""
connector = q_object.connector
current_negated ^= q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = self.where_class(connector=connector, negated=q_object.negated)
for child in q_object.children:
if isinstance(child, Node):
child_clause = self.build_filtered_relation_q(
child, reuse=reuse, branch_negated=branch_negated,
current_negated=current_negated,
)
else:
child_clause, _ = self.build_filter(
child, can_reuse=reuse, branch_negated=branch_negated,
current_negated=current_negated,
allow_joins=True, split_subq=False,
reuse_with_filtered_relation=True,
)
target_clause.add(child_clause, connector)
return target_clause
def add_filtered_relation(self, filtered_relation, alias):
filtered_relation.alias = alias
lookups = dict(get_children_from_q(filtered_relation.condition))
for lookup in chain((filtered_relation.relation_name,), lookups):
lookup_parts, field_parts, _ = self.solve_lookup_type(lookup)
shift = 2 if not lookup_parts else 1
if len(field_parts) > (shift + len(lookup_parts)):
raise ValueError(
"FilteredRelation's condition doesn't support nested "
"relations (got %r)." % lookup
)
self._filtered_relations[filtered_relation.alias] = filtered_relation
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
"""
        Walk the list of names and turn them into PathInfo tuples. A single
name in 'names' can generate multiple PathInfos (m2m, for example).
'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' is as for setup_joins().
If fail_on_missing is set to True, then a name that can't be resolved
will generate a FieldError.
Return a list of PathInfo tuples. In addition return the final field
(the last used join field) and target (which is a field guaranteed to
contain the same value as the final field). Finally, return those names
that weren't found (which are likely transforms and the final lookup).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == 'pk':
name = opts.pk.name
field = None
filtered_relation = None
try:
field = opts.get_field(name)
except FieldDoesNotExist:
if name in self.annotation_select:
field = self.annotation_select[name].output_field
elif name in self._filtered_relations and pos == 0:
filtered_relation = self._filtered_relations[name]
field = opts.get_field(filtered_relation.relation_name)
if field is not None:
# Fields that contain one-to-many relations with a generic
# model (like a GenericForeignKey) cannot generate reverse
# relations and therefore cannot be used for reverse querying.
if field.is_relation and not field.related_model:
raise FieldError(
"Field %r does not generate an automatic reverse "
"relation and therefore cannot be used for reverse "
"querying. If it is a GenericForeignKey, consider "
"adding a GenericRelation." % name
)
try:
model = field.model._meta.concrete_model
except AttributeError:
# QuerySet.annotate() may introduce fields that aren't
# attached to a model.
model = None
else:
# We didn't find the current field, so move position back
# one step.
pos -= 1
if pos == -1 or fail_on_missing:
field_names = list(get_field_names_from_opts(opts))
available = sorted(
field_names + list(self.annotation_select) +
list(self._filtered_relations)
)
raise FieldError("Cannot resolve keyword '%s' into field. "
"Choices are: %s" % (name, ", ".join(available)))
break
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if model is not opts.model:
path_to_parent = opts.get_path_to_parent(model)
if path_to_parent:
path.extend(path_to_parent)
cur_names_with_path[1].extend(path_to_parent)
opts = path_to_parent[-1].to_opts
if hasattr(field, 'get_path_info'):
pathinfos = field.get_path_info(filtered_relation)
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
if fail_on_missing and pos + 1 != len(names):
raise FieldError(
"Cannot resolve keyword %r into field. Join on '%s'"
" not permitted." % (names[pos + 1], name))
break
return path, final_field, targets, names[pos + 1:]
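    # A hedged sketch (hypothetical models): starting from Book's options,
    # names=['author', 'name'] yields one PathInfo for the Book->Author
    # join, final_field/targets pointing at Author's 'name' field, and no
    # leftover names; names=['author', 'name', 'lower'] would instead
    # return ['lower'] as the final (transform/lookup) component.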
def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True,
reuse_with_filtered_relation=False):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are starting from), 'alias' is the alias for
the table to start the joining from.
The 'can_reuse' defines the reverse foreign key joins we can reuse. It
can be None in which case all joins are reusable or a set of aliases
that can be reused. Note that non-reverse foreign keys are always
reusable when using setup_joins().
        If 'reuse_with_filtered_relation' is True, only the aliases in
        'can_reuse' are considered for reuse, and joins are compared without
        taking their filtered relation into account.
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
Return the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins and the
field path travelled to generate the joins.
The target field is the field containing the concrete value. Final
field can be something different, for example foreign key pointing to
that value. Final field is needed for example in some value
conversions (convert 'obj' in fk__id=obj to pk val using the foreign
key field for example).
"""
joins = [alias]
# First, generate the path for the names
path, final_field, targets, rest = self.names_to_path(
names, opts, allow_many, fail_on_missing=True)
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
for join in path:
if join.filtered_relation:
filtered_relation = join.filtered_relation.clone()
table_alias = filtered_relation.alias
else:
filtered_relation = None
table_alias = None
opts = join.to_opts
if join.direct:
nullable = self.is_nullable(join.join_field)
else:
nullable = True
connection = Join(
opts.db_table, alias, table_alias, INNER, join.join_field,
nullable, filtered_relation=filtered_relation,
)
reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None
alias = self.join(
connection, reuse=reuse,
reuse_with_filtered_relation=reuse_with_filtered_relation,
)
joins.append(alias)
if filtered_relation:
filtered_relation.path = joins[:]
return JoinInfo(final_field, targets, opts, joins, path)
def trim_joins(self, targets, joins, path):
"""
        The 'targets' parameter contains the final fields being joined to,
        'joins' is the full list of join aliases. The 'path' contains the
        PathInfos used to create the joins.
Return the final target field and table alias and the new active
joins.
Always trim any direct join if the target column is already in the
previous table. Can't trim reverse joins as it's unknown if there's
anything on the other side of the join.
"""
joins = joins[:]
for pos, info in enumerate(reversed(path)):
if len(joins) == 1 or not info.direct:
break
if info.filtered_relation:
break
join_targets = {t.column for t in info.join_field.foreign_related_fields}
cur_targets = {t.column for t in targets}
if not cur_targets.issubset(join_targets):
break
targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets}
targets = tuple(targets_dict[t.column] for t in targets)
self.unref_alias(joins.pop())
return targets, joins[-1], joins
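    # Illustrative: Book.objects.filter(author__id=3) (hypothetical models)
    # sets up a join to author, but trim_joins() removes it again because
    # the target column is already present as "book"."author_id" on the
    # previous table, so the returned alias falls back to the book table.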
def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):
if not allow_joins and LOOKUP_SEP in name:
raise FieldError("Joined field references are not permitted in this query")
if name in self.annotations:
if summarize:
# Summarize currently means we are doing an aggregate() query
# which is executed as a wrapped subquery if any of the
# aggregate() elements reference an existing annotation. In
# that case we need to return a Ref to the subquery's annotation.
return Ref(name, self.annotation_select[name])
else:
return self.annotation_select[name]
else:
field_list = name.split(LOOKUP_SEP)
join_info = self.setup_joins(field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse)
targets, _, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path)
if len(targets) > 1:
raise FieldError("Referencing multicolumn fields with F() objects "
"isn't supported")
if reuse is not None:
reuse.update(join_list)
col = targets[0].get_col(join_list[-1], join_info.targets[0])
return col
def split_exclude(self, filter_expr, prefix, can_reuse, names_with_path):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
As an example we could have original filter ~Q(child__name='foo').
We would get here with filter_expr = child__name, prefix = child and
can_reuse is a set of joins usable for filters in the original query.
We will turn this into equivalent of:
WHERE NOT (pk IN (SELECT parent_id FROM thetable
WHERE name = 'foo' AND parent_id IS NOT NULL))
        It might be worth considering using WHERE NOT EXISTS, as that has
        saner null handling and is easier for the backend's optimizer to
        handle.
"""
# Generate the inner query.
query = Query(self.model)
query.add_filter(filter_expr)
query.clear_ordering(True)
# Try to have as simple as possible subquery -> trim leading joins from
# the subquery.
trimmed_prefix, contains_louter = query.trim_start(names_with_path)
# Add extra check to make sure the selected field will not be null
# since we are adding an IN <subquery> clause. This prevents the
# database from tripping over IN (...,NULL,...) selects and returning
        # nothing.
col = query.select[0]
select_field = col.target
alias = col.alias
if self.is_nullable(select_field):
lookup_class = select_field.get_lookup('isnull')
lookup = lookup_class(select_field.get_col(alias), False)
query.where.add(lookup, AND)
if alias in can_reuse:
pk = select_field.model._meta.pk
            # Need to add a restriction so that the outer query's filters
            # are in effect for the subquery, too.
query.bump_prefix(self)
lookup_class = select_field.get_lookup('exact')
# Note that the query.select[0].alias is different from alias
# due to bump_prefix above.
lookup = lookup_class(pk.get_col(query.select[0].alias),
pk.get_col(alias))
query.where.add(lookup, AND)
query.external_aliases.add(alias)
condition, needed_inner = self.build_filter(
('%s__in' % trimmed_prefix, query),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
if contains_louter:
or_null_condition, _ = self.build_filter(
('%s__isnull' % trimmed_prefix, True),
current_negated=True, branch_negated=True, can_reuse=can_reuse)
condition.add(or_null_condition, OR)
# Note that the end result will be:
# (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
# This might look crazy but due to how IN works, this seems to be
# correct. If the IS NOT NULL check is removed then outercol NOT
# IN will return UNKNOWN. If the IS NULL check is removed, then if
# outercol IS NULL we will not match the row.
return condition, needed_inner
def set_empty(self):
self.where.add(NothingNode(), AND)
def is_empty(self):
return any(isinstance(c, NothingNode) for c in self.where.children)
def set_limits(self, low=None, high=None):
"""
Adjust the limits on the rows retrieved. Use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, convert them to the appropriate offset and limit values.
Apply any limits passed in here to the existing constraints. Add low
to the current low value and clamp both to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
if self.low_mark == self.high_mark:
self.set_empty()
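    # Worked example of the clamping arithmetic: after qs[5:10]
    # (low_mark=5, high_mark=10), a further [2:4] calls
    # set_limits(low=2, high=4), giving high_mark = min(10, 5 + 4) = 9 and
    # low_mark = min(9, 5 + 2) = 7, i.e. rows 7 and 8 of the original
    # result.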
def clear_limits(self):
"""Clear any existing limits."""
self.low_mark, self.high_mark = 0, None
def has_limit_one(self):
return self.high_mark is not None and (self.high_mark - self.low_mark) == 1
def can_filter(self):
"""
Return True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def clear_select_clause(self):
"""Remove all fields from SELECT clause."""
self.select = ()
self.default_cols = False
self.select_related = False
self.set_extra_mask(())
self.set_annotation_mask(())
def clear_select_fields(self):
"""
Clear the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = ()
self.values_select = ()
def set_select(self, cols):
self.default_cols = False
self.select = tuple(cols)
def add_distinct_fields(self, *field_names):
"""
Add and resolve the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Add the given (model) fields to the select set. Add the field names in
the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
cols = []
for name in field_names:
                # Join promotion note - we must not remove any rows here, so
                # if there are no existing joins, use an outer join.
join_info = self.setup_joins(name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m)
targets, final_alias, joins = self.trim_joins(
join_info.targets,
join_info.joins,
join_info.path,
)
for target in targets:
cols.append(target.get_col(final_alias))
if cols:
self.set_select(cols)
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
if LOOKUP_SEP in name:
# For lookups spanning over relationships, show the error
# from the model on which the lookup failed.
raise
else:
names = sorted(
list(get_field_names_from_opts(opts)) + list(self.extra) +
list(self.annotation_select) + list(self._filtered_relations)
)
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
def add_ordering(self, *ordering):
"""
Add items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or OrderBy
expressions.
If 'ordering' is empty, clear all ordering from the query.
"""
errors = []
for item in ordering:
if not hasattr(item, 'resolve_expression') and not ORDER_PATTERN.match(item):
errors.append(item)
if getattr(item, 'contains_aggregate', False):
raise FieldError(
'Using an aggregate in order_by() without also including '
'it in annotate() is not allowed: %s' % item
)
if errors:
raise FieldError('Invalid order_by arguments: %s' % errors)
if ordering:
self.order_by += ordering
else:
self.default_ordering = False
def clear_ordering(self, force_empty):
"""
Remove any ordering settings. If 'force_empty' is True, there will be
no ordering in the resulting query (not even the model's default).
"""
self.order_by = ()
self.extra_order_by = ()
if force_empty:
self.default_ordering = False
def set_group_by(self):
"""
Expand the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
group_by = list(self.select)
if self.annotation_select:
for alias, annotation in self.annotation_select.items():
for col in annotation.get_group_by_cols():
group_by.append(col)
self.group_by = tuple(group_by)
def add_select_related(self, fields):
"""
Set up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
if isinstance(self.select_related, bool):
field_dict = {}
else:
field_dict = self.select_related
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Add data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = OrderedDict()
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
entry = force_text(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
if pos == 0 or entry[pos - 1] != '%':
entry_params.append(next(param_iter))
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
# This is order preserving, since self.extra_select is an OrderedDict.
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
def clear_deferred_loading(self):
"""Remove any fields from the deferred loading set."""
self.deferred_loading = (frozenset(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. Add the new field names to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
# format, so that we can use a set datastructure. We do the foo__bar
# splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
self.deferred_loading = existing.difference(field_names), False
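    # Hedged set-algebra sketch: qs.defer('a').defer('b') stores
    # ({'a', 'b'}, True) (both deferred), while qs.only('a', 'b').defer('b')
    # removes 'b' from the immediate-load set, leaving ({'a'}, False).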
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, remove
those names from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if 'pk' in field_names:
field_names.remove('pk')
field_names.add(self.get_meta().pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = frozenset(field_names), False
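    # Illustrative sketch of how the two methods above interact (assuming a
    # default 'id' primary key): starting from deferred_loading ==
    # (frozenset(), True),
    #   add_deferred_loading(['name'])        -> ({'name'}, True)
    #   add_immediate_loading(['pk', 'name']) -> ({'id'}, False)
    # i.e. the previously deferred 'name' is dropped from the new immediate
    # set, and only the primary key is loaded immediately.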
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, return a dictionary mapping
models to a set of names in those fields that will be loaded. If a
model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, return an empty dictionary.
"""
# We cache this because we call this function multiple times
# (compiler.fill_related_selections, query.iterator)
try:
return self._loaded_field_names_cache
except AttributeError:
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
self._loaded_field_names_cache = collection
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""Callback used by get_deferred_field_names()."""
target[model] = {f.attname for f in fields}
def set_annotation_mask(self, names):
"""Set the mask of annotations that will be returned by the SELECT."""
if names is None:
self.annotation_select_mask = None
else:
self.annotation_select_mask = set(names)
self._annotation_select_cache = None
def append_annotation_mask(self, names):
if self.annotation_select_mask is not None:
self.set_annotation_mask(self.annotation_select_mask.union(names))
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT.
Don't remove them from the Query since they might be used later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def set_values(self, fields):
self.select_related = False
self.clear_deferred_loading()
self.clear_select_fields()
if self.group_by is True:
self.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
self.set_group_by()
self.clear_select_fields()
if fields:
field_names = []
extra_names = []
annotation_names = []
if not self._extra and not self._annotations:
# Shortcut - if there are no extra or annotations, then
# the values() clause must be just field names.
field_names = list(fields)
else:
self.default_cols = False
for f in fields:
if f in self.extra_select:
extra_names.append(f)
elif f in self.annotation_select:
annotation_names.append(f)
else:
field_names.append(f)
self.set_extra_mask(extra_names)
self.set_annotation_mask(annotation_names)
else:
field_names = [f.attname for f in self.model._meta.concrete_fields]
self.values_select = tuple(field_names)
self.add_fields(field_names, True)
@property
def annotation_select(self):
"""
Return the OrderedDict of aggregate columns that are not masked and
should be used in the SELECT clause. Cache this result for performance.
"""
if self._annotation_select_cache is not None:
return self._annotation_select_cache
elif not self._annotations:
return {}
elif self.annotation_select_mask is not None:
self._annotation_select_cache = OrderedDict(
(k, v) for k, v in self.annotations.items()
if k in self.annotation_select_mask
)
return self._annotation_select_cache
else:
return self.annotations
@property
def extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
if not self._extra:
return {}
elif self.extra_select_mask is not None:
self._extra_select_cache = OrderedDict(
(k, v) for k, v in self.extra.items()
if k in self.extra_select_mask
)
return self._extra_select_cache
else:
return self.extra
def trim_start(self, names_with_path):
"""
Trim joins from the start of the join path. The candidates for trim
are the PathInfos in names_with_path structure that are m2m joins.
Also set the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Return a lookup usable for doing outerq.filter(lookup=self) and a
boolean indicating if the joins in the prefix contain a LEFT OUTER join.
_"""
all_paths = []
for _, paths in names_with_path:
all_paths.extend(paths)
contains_louter = False
# Trim and operate only on tables that were generated for
# the lookup part of the query. That is, avoid trimming
# joins generated for F() expressions.
lookup_tables = [
t for t in self.alias_map
if t in self._lookup_joins or t == self.base_table
]
for trimmed_paths, path in enumerate(all_paths):
if path.m2m:
break
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
contains_louter = True
alias = lookup_tables[trimmed_paths]
self.unref_alias(alias)
        # The path.join_field is a Rel, let's get the other side's field
join_field = path.join_field.field
# Build the filter prefix.
paths_in_prefix = trimmed_paths
trimmed_prefix = []
for name, path in names_with_path:
if paths_in_prefix - len(path) < 0:
break
trimmed_prefix.append(name)
paths_in_prefix -= len(path)
trimmed_prefix.append(
join_field.foreign_related_fields[0].name)
trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Let's still see if we can trim the first join from the inner query
# (that is, self). We can't do this for LEFT JOINs because we would
# miss those rows that have nothing on the outer side.
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type != LOUTER:
select_fields = [r[0] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths + 1]
self.unref_alias(lookup_tables[trimmed_paths])
extra_restriction = join_field.get_extra_restriction(
self.where_class, None, lookup_tables[trimmed_paths + 1])
if extra_restriction:
self.where.add(extra_restriction, AND)
else:
# TODO: It might be possible to trim more joins from the start of the
# inner query if it happens to have a longer join chain containing the
            # values in select_fields. Let's punt this one for now.
select_fields = [r[1] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths]
# The found starting point is likely a Join instead of a BaseTable reference.
# But the first entry in the query's FROM clause must not be a JOIN.
for table in self.alias_map:
if self.alias_refcount[table] > 0:
self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table)
break
self.set_select([f.get_col(select_alias) for f in select_fields])
return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
Check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
if connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and field.empty_strings_allowed:
return True
else:
return field.null
def get_order_dir(field, default='ASC'):
"""
Return the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == '-':
return field[1:], dirn[1]
return field, dirn[0]
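# Examples (illustrative):
#   get_order_dir('-created')             -> ('created', 'DESC')
#   get_order_dir('name')                 -> ('name', 'ASC')
#   get_order_dir('name', default='DESC') -> ('name', 'DESC')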
def add_to_dict(data, key, value):
"""
Add "value" to the set of values for "key", whether or not "key" already
exists.
"""
if key in data:
data[key].add(value)
else:
data[key] = {value}
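# Example (illustrative):
#   data = {}
#   add_to_dict(data, 'k', 1)
#   add_to_dict(data, 'k', 2)
#   data == {'k': {1, 2}}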
def is_reverse_o2o(field):
"""
Check if the given field is reverse-o2o. The field is expected to be some
sort of relation field or related object.
"""
return field.is_relation and field.one_to_one and not field.concrete
class JoinPromoter:
"""
A class to abstract away join promotion problems for complex filter
conditions.
"""
def __init__(self, connector, num_children, negated):
self.connector = connector
self.negated = negated
if self.negated:
if connector == AND:
self.effective_connector = OR
else:
self.effective_connector = AND
else:
self.effective_connector = self.connector
self.num_children = num_children
# Maps of table alias to how many times it is seen as required for
# inner and/or outer joins.
self.votes = Counter()
def add_votes(self, votes):
"""
Add single vote per item to self.votes. Parameter can be any
iterable.
"""
self.votes.update(votes)
def update_join_types(self, query):
"""
Change join types so that the generated query is as efficient as
possible, but still correct. So, change as many joins as possible
to INNER, but don't make OUTER joins INNER if that could remove
results from the query.
"""
to_promote = set()
to_demote = set()
# The effective_connector is used so that NOT (a AND b) is treated
# similarly to (a OR b) for join promotion.
for table, votes in self.votes.items():
# We must use outer joins in OR case when the join isn't contained
# in all of the joins. Otherwise the INNER JOIN itself could remove
# valid results. Consider the case where a model with rel_a and
# rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
            # if the rel_a join doesn't produce any results or is null (for
            # example a reverse foreign key or a null value in a direct
            # foreign key), and
# there is a matching row in rel_b with col=2, then an INNER join
# to rel_a would remove a valid match from the query. So, we need
# to promote any existing INNER to LOUTER (it is possible this
# promotion in turn will be demoted later on).
if self.effective_connector == 'OR' and votes < self.num_children:
to_promote.add(table)
# If connector is AND and there is a filter that can match only
# when there is a joinable row, then use INNER. For example, in
# rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
# as join output, then the col=1 or col=2 can't match (as
# NULL=anything is always false).
# For the OR case, if all children voted for a join to be inner,
# then we can use INNER for the join. For example:
# (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
# then if rel_a doesn't produce any rows, the whole condition
# can't match. Hence we can safely use INNER join.
if self.effective_connector == 'AND' or (
self.effective_connector == 'OR' and votes == self.num_children):
to_demote.add(table)
# Finally, what happens in cases where we have:
# (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
# Now, we first generate the OR clause, and promote joins for it
# in the first if branch above. Both rel_a and rel_b are promoted
# to LOUTER joins. After that we do the AND case. The OR case
# voted no inner joins but the rel_a__col__gte=0 votes inner join
# for rel_a. We demote it back to INNER join (in AND case a single
# vote is enough). The demotion is OK, if rel_a doesn't produce
# rows, then the rel_a__col__gte=0 clause can't be true, and thus
# the whole clause must be false. So, it is safe to use INNER
# join.
# Note that in this example we could just as well have the __gte
# clause and the OR clause swapped. Or we could replace the __gte
# clause with an OR clause containing rel_a__col=1|rel_a__col=2,
# and again we could safely demote to INNER.
query.promote_joins(to_promote)
query.demote_joins(to_demote)
return to_demote
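# Illustrative sketch of the voting logic above (hypothetical aliases):
#   promoter = JoinPromoter(OR, num_children=2, negated=False)
#   promoter.add_votes(['rel_a'])  # first child requires rel_a
#   promoter.add_votes(['rel_b'])  # second child requires rel_b
# Each alias got 1 vote out of 2 children, so update_join_types() promotes
# both joins to LOUTER; had every child voted for the same alias, that join
# would have been demoted to INNER instead.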
| uranusjr/django | django/db/models/sql/query.py | Python | bsd-3-clause | 96,523 | 0.000912 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup
this_dir = os.path.abspath(os.path.dirname(__file__))
VERSIONFILE = os.path.join(this_dir, "textx", "__init__.py")
VERSION = None
for line in open(VERSIONFILE, "r").readlines():
if line.startswith('__version__'):
VERSION = line.split('"')[1]
if not VERSION:
raise RuntimeError('No version defined in textx.__init__.py')
if sys.argv[-1].startswith('publish'):
if os.system("pip list | grep twine"):
print("twine not installed.\nUse `pip install twine`.\nExiting.")
sys.exit()
os.system("python setup.py sdist bdist_wheel")
if sys.argv[-1] == 'publishtest':
os.system("twine upload -r test dist/*")
else:
os.system("twine upload dist/*")
print("You probably want to also tag the version now:")
print(" git tag -a {0} -m 'version {0}'".format(VERSION))
print(" git push --tags")
sys.exit()
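# Typical invocations of the publish helper above (assuming twine is
# installed and ~/.pypirc is configured):
#   python setup.py publishtest   # upload to the 'test' index
#   python setup.py publish       # upload to PyPI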
setup(version=VERSION)
| igordejanovic/textX | setup.py | Python | mit | 1,005 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.rbac_policies \
import forms as rbac_policy_forms
from openstack_dashboard.dashboards.admin.rbac_policies \
import tables as rbac_policy_tables
from openstack_dashboard.dashboards.admin.rbac_policies \
import tabs as rbac_policy_tabs
class IndexView(tables.DataTableView):
table_class = rbac_policy_tables.RBACPoliciesTable
page_title = _("RBAC Policies")
@memoized.memoized_method
def _get_tenants(self):
try:
tenants, has_more = api.keystone.tenant_list(self.request)
except Exception:
tenants = []
msg = _("Unable to retrieve information about the "
"policies' projects.")
exceptions.handle(self.request, msg)
tenant_dict = OrderedDict([(t.id, t.name) for t in tenants])
return tenant_dict
def _get_networks(self):
try:
networks = api.neutron.network_list(self.request)
except Exception:
networks = []
msg = _("Unable to retrieve information about the "
"policies' networks.")
exceptions.handle(self.request, msg)
return dict((n.id, n.name) for n in networks)
def _get_qos_policies(self):
qos_policies = []
try:
if api.neutron.is_extension_supported(self.request,
extension_alias='qos'):
qos_policies = api.neutron.policy_list(self.request)
except Exception:
msg = _("Unable to retrieve information about the "
"policies' qos policies.")
exceptions.handle(self.request, msg)
return dict((q.id, q.name) for q in qos_policies)
def get_data(self):
try:
rbac_policies = api.neutron.rbac_policy_list(self.request)
except Exception:
rbac_policies = []
messages.error(self.request,
_("Unable to retrieve RBAC policies."))
if rbac_policies:
tenant_dict = self._get_tenants()
network_dict = self._get_networks()
qos_policy_dict = self._get_qos_policies()
for p in rbac_policies:
# Set tenant name and object name
p.tenant_name = tenant_dict.get(p.tenant_id, p.tenant_id)
p.target_tenant_name = tenant_dict.get(p.target_tenant,
p.target_tenant)
if p.object_type == "network":
p.object_name = network_dict.get(p.object_id, p.object_id)
elif p.object_type == "qos_policy":
p.object_name = qos_policy_dict.get(p.object_id,
p.object_id)
return rbac_policies
class CreateView(forms.ModalFormView):
template_name = 'admin/rbac_policies/create.html'
form_id = "create_rbac_policy_form"
form_class = rbac_policy_forms.CreatePolicyForm
submit_label = _("Create RBAC Policy")
submit_url = reverse_lazy("horizon:admin:rbac_policies:create")
success_url = reverse_lazy("horizon:admin:rbac_policies:index")
page_title = _("Create A RBAC Policy")
class UpdateView(forms.ModalFormView):
context_object_name = 'rbac_policies'
template_name = 'admin/rbac_policies/update.html'
form_class = rbac_policy_forms.UpdatePolicyForm
form_id = "update_rbac_policy_form"
submit_label = _("Save Changes")
submit_url = 'horizon:admin:rbac_policies:update'
success_url = reverse_lazy('horizon:admin:rbac_policies:index')
page_title = _("Update RBAC Policy")
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
args = (self.kwargs['rbac_policy_id'],)
context["rbac_policy_id"] = self.kwargs['rbac_policy_id']
context["submit_url"] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
rbac_policy_id = self.kwargs['rbac_policy_id']
try:
return api.neutron.rbac_policy_get(self.request, rbac_policy_id)
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve rbac policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
rbac_policy = self._get_object()
return {'rbac_policy_id': rbac_policy['id'],
'target_tenant': rbac_policy['target_tenant']}
class DetailView(tabs.TabView):
tab_group_class = rbac_policy_tabs.RBACDetailsTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ rbac_policy.id }}"
| NeCTAR-RC/horizon | openstack_dashboard/dashboards/admin/rbac_policies/views.py | Python | apache-2.0 | 5,711 | 0 |
"""
Predicting potential dopants
"""
import warnings
import numpy as np
from pymatgen.analysis.structure_prediction.substitution_probability import (
SubstitutionPredictor,
)
from pymatgen.core.periodic_table import Element, Species
def get_dopants_from_substitution_probabilities(structure, num_dopants=5, threshold=0.001, match_oxi_sign=False):
"""
Get dopant suggestions based on substitution probabilities.
Args:
structure (Structure): A pymatgen structure decorated with
oxidation states.
num_dopants (int): The number of suggestions to return for
n- and p-type dopants.
threshold (float): Probability threshold for substitutions.
match_oxi_sign (bool): Whether to force the dopant and original species
to have the same sign of oxidation state. E.g. If the original site
is in a negative charge state, then only negative dopants will be
returned.
Returns:
(dict): Dopant suggestions, given as a dictionary with keys "n_type" and
"p_type". The suggestions for each doping type are given as a list of
        dictionaries, each with the keys:
- "probability": The probability of substitution.
- "dopant_species": The dopant species.
- "original_species": The substituted species.
"""
els_have_oxi_states = [hasattr(s, "oxi_state") for s in structure.species]
if not all(els_have_oxi_states):
raise ValueError("All sites in structure must have oxidation states to predict dopants.")
sp = SubstitutionPredictor(threshold=threshold)
subs = [sp.list_prediction([s]) for s in set(structure.species)]
subs = [
{
"probability": pred["probability"],
"dopant_species": list(pred["substitutions"].keys())[0],
"original_species": list(pred["substitutions"].values())[0],
}
for species_preds in subs
for pred in species_preds
]
subs.sort(key=lambda x: x["probability"], reverse=True)
return _get_dopants(subs, num_dopants, match_oxi_sign)
def get_dopants_from_shannon_radii(bonded_structure, num_dopants=5, match_oxi_sign=False):
"""
Get dopant suggestions based on Shannon radii differences.
Args:
bonded_structure (StructureGraph): A pymatgen structure graph
decorated with oxidation states. For example, generated using the
CrystalNN.get_bonded_structure() method.
        num_dopants (int): The number of suggestions to return for
n- and p-type dopants.
match_oxi_sign (bool): Whether to force the dopant and original species
to have the same sign of oxidation state. E.g. If the original site
is in a negative charge state, then only negative dopants will be
returned.
Returns:
(dict): Dopant suggestions, given as a dictionary with keys "n_type" and
"p_type". The suggestions for each doping type are given as a list of
        dictionaries, each with the keys:
- "radii_diff": The difference between the Shannon radii of the species.
- "dopant_spcies": The dopant species.
- "original_species": The substituted species.
"""
# get a list of all Species for all elements in all their common oxid states
all_species = [Species(el, oxi) for el in Element for oxi in el.common_oxidation_states]
# get a series of tuples with (coordination number, specie)
cn_and_species = {
(
bonded_structure.get_coordination_of_site(i),
bonded_structure.structure[i].specie,
)
for i in range(bonded_structure.structure.num_sites)
}
cn_to_radii_map = {}
possible_dopants = []
for cn, species in cn_and_species:
cn_roman = _int_to_roman(cn)
try:
species_radius = species.get_shannon_radius(cn_roman)
except KeyError:
warnings.warn(f"Shannon radius not found for {species} with coordination number {cn}.\nSkipping...")
continue
if cn not in cn_to_radii_map:
cn_to_radii_map[cn] = _shannon_radii_from_cn(all_species, cn_roman, radius_to_compare=species_radius)
shannon_radii = cn_to_radii_map[cn]
possible_dopants += [
{
"radii_diff": p["radii_diff"],
"dopant_species": p["species"],
"original_species": species,
}
for p in shannon_radii
]
possible_dopants.sort(key=lambda x: abs(x["radii_diff"]))
return _get_dopants(possible_dopants, num_dopants, match_oxi_sign)
def _get_dopants(substitutions, num_dopants, match_oxi_sign):
"""
Utility method to get n- and p-type dopants from a list of substitutions.
"""
n_type = [
pred
for pred in substitutions
if pred["dopant_species"].oxi_state > pred["original_species"].oxi_state
and (
not match_oxi_sign
or np.sign(pred["dopant_species"].oxi_state) == np.sign(pred["original_species"].oxi_state)
)
]
p_type = [
pred
for pred in substitutions
if pred["dopant_species"].oxi_state < pred["original_species"].oxi_state
and (
not match_oxi_sign
or np.sign(pred["dopant_species"].oxi_state) == np.sign(pred["original_species"].oxi_state)
)
]
return {"n_type": n_type[:num_dopants], "p_type": p_type[:num_dopants]}
def _shannon_radii_from_cn(species_list, cn_roman, radius_to_compare=0):
"""
Utility func to get Shannon radii for a particular coordination number.
    As the Shannon radius depends on charge state and coordination number,
species without an entry for a particular coordination number will
be skipped.
Args:
species_list (list): A list of Species to get the Shannon radii for.
cn_roman (str): The coordination number as a roman numeral. See
Species.get_shannon_radius for more details.
radius_to_compare (float, optional): If set, the data will be returned
with a "radii_diff" key, containing the difference between the
shannon radii and this radius.
Returns:
(list of dict): The Shannon radii for all Species in species. Formatted
as a list of dictionaries, with the keys:
- "species": The species with charge state.
- "radius": The Shannon radius for the species.
- "radius_diff": The difference between the Shannon radius and the
radius_to_compare optional argument.
"""
shannon_radii = []
for s in species_list:
try:
radius = s.get_shannon_radius(cn_roman)
shannon_radii.append(
{
"species": s,
"radius": radius,
"radii_diff": radius - radius_to_compare,
}
)
except KeyError:
pass
return shannon_radii
def _int_to_roman(number):
"""Utility method to convert an int (less than 20) to a roman numeral."""
roman_conv = [(10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I")]
result = []
for (arabic, roman) in roman_conv:
(factor, number) = divmod(number, arabic)
result.append(roman * factor)
if number == 0:
break
return "".join(result)
| vorwerkc/pymatgen | pymatgen/analysis/structure_prediction/dopant_predictor.py | Python | mit | 7,420 | 0.001887 |
import t
class b08(t.Test):
class TestResource(t.Resource):
def is_authorized(self, req, rsp):
if req.headers.get('authorization') == 'yay':
return True
return 'oauth'
def to_html(self, req, rsp):
return "nom nom"
def test_ok(self):
self.req.headers['authorization'] = 'yay'
self.go()
t.eq(self.rsp.status, '200 OK')
t.eq(self.rsp.body, 'nom nom')
def test_not_ok(self):
self.go()
t.eq(self.rsp.status, '401 Unauthorized')
t.eq(self.rsp.headers['www-authenticate'], 'oauth')
t.eq(self.rsp.body, '') | benoitc/pywebmachine | tests/decisions/b08_test.py | Python | mit | 664 | 0.00753 |
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.utils.translation import get_language
import django_browserid.views
import waffle
from flicks.base import regions
from flicks.base.util import redirect
from flicks.users.forms import UserProfileForm
from flicks.users.tasks import newsletter_subscribe
from flicks.videos.models import Video, Vote
@login_required
def profile(request):
"""Display and process the profile creation form."""
form = UserProfileForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
profile = form.save(commit=False)
profile.user = request.user
profile.locale = get_language()
profile.save()
if form.cleaned_data['mailing_list_signup']:
format = form.cleaned_data['mailing_list_format']
newsletter_subscribe.delay(request.user.email,
source_url=request.build_absolute_uri(),
format=format)
return redirect('flicks.videos.upload')
return render(request, 'users/profile.html', {
'form': form,
'regions': regions,
})
class Verify(django_browserid.views.Verify):
def login_success(self, *args, **kwargs):
"""
Extend successful login to check if the user was attempting to vote for
a video, and create the vote if they were.
"""
response = super(Verify, self).login_success(*args, **kwargs)
if not waffle.flag_is_active(self.request, 'voting'):
return response
try:
video_id = self.request.session['vote_video']
video = Video.objects.get(id=video_id)
Vote.objects.get_or_create(user=self.request.user, video=video)
del self.request.session['vote_video']
# Set cookie so the JavaScript knows they successfully voted.
response.set_cookie('just_voted', '1', max_age=3600, httponly=False)
except (Video.DoesNotExist, ValueError):
# Avoid retrying on an invalid video.
del self.request.session['vote_video']
except KeyError:
pass # Do nothing if the key never existed.
return response
def login_failure(self, *args, **kwargs):
"""
Extend login failure so that if login fails, the user's attempts to
vote for a video are cancelled.
"""
try:
del self.request.session['vote_video']
except KeyError:
pass
return super(Verify, self).login_failure(*args, **kwargs)
| mozilla/firefox-flicks | flicks/users/views.py | Python | bsd-3-clause | 2,642 | 0.000379 |
# Copyright (C) 2018 DataArt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
class Notification(object):
"""Notification class."""
DEVICE_ID_KEY = 'deviceId'
ID_KEY = 'id'
NOTIFICATION_KEY = 'notification'
PARAMETERS_KEY = 'parameters'
TIMESTAMP_KEY = 'timestamp'
def __init__(self, notification):
self._device_id = notification[self.DEVICE_ID_KEY]
self._id = notification[self.ID_KEY]
self._notification = notification[self.NOTIFICATION_KEY]
self._parameters = notification[self.PARAMETERS_KEY]
self._timestamp = notification[self.TIMESTAMP_KEY]
@property
def device_id(self):
return self._device_id
@property
def id(self):
return self._id
@property
def notification(self):
return self._notification
@property
def parameters(self):
return self._parameters
@property
def timestamp(self):
return self._timestamp
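# Minimal usage sketch (hypothetical payload):
#   n = Notification({'deviceId': 'dev-1', 'id': 42,
#                     'notification': 'temperature',
#                     'parameters': {'celsius': 21.5},
#                     'timestamp': '2018-01-01T00:00:00'})
#   n.device_id -> 'dev-1'
#   n.parameters -> {'celsius': 21.5}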
| devicehive/devicehive-python | devicehive/notification.py | Python | apache-2.0 | 1,546 | 0 |
#!/usr/bin/env python
"""
This is the main module driver for Ross Flieger-Allison's
python-utils module.
"""
__author__ = "Ross Flieger-Allison"
__date__ = "23-10-2015"
__version__ = "1.0.0"
| rfoxfa/python-utils | utils/__init__.py | Python | gpl-2.0 | 199 | 0.020101 |
from muntjac.api import VerticalLayout, OptionGroup, Label
from muntjac.data.property import IValueChangeListener
class OptionGroupsExample(VerticalLayout, IValueChangeListener):
_cities = ['Berlin', 'Brussels', 'Helsinki', 'Madrid', 'Oslo',
'Paris', 'Stockholm']
def __init__(self):
super(OptionGroupsExample, self).__init__()
self.setSpacing(True)
# 'Shorthand' constructor - also supports data binding using Containers
citySelect = OptionGroup('Please select a city', self._cities)
# user can not 'unselect'
citySelect.setNullSelectionAllowed(False)
# select this by default
citySelect.select('Berlin')
# send the change to the server at once
citySelect.setImmediate(True)
# react when the user selects something
citySelect.addListener(self, IValueChangeListener)
self.addComponent(citySelect)
self.addComponent(Label('<h3>Multi-selection</h3>',
Label.CONTENT_XHTML))
# Create the multiselect option group
# 'Shorthand' constructor - also supports data binding using Containers
citySelect = OptionGroup('Please select cities', self._cities)
citySelect.setMultiSelect(True) # FIXME: multi-select
# user can not 'unselect'
citySelect.setNullSelectionAllowed(False)
# select this by default
citySelect.select('Berlin')
# send the change to the server at once
citySelect.setImmediate(True)
# react when the user selects something
citySelect.addListener(self, IValueChangeListener)
self.addComponent(citySelect)
# Shows a notification when a selection is made. The listener will be
# called whenever the value of the component changes, i.e when the user
# makes a new selection.
def valueChange(self, event):
v = event.getProperty().getValue()
if isinstance(v, set):
v = list(v)
self.getWindow().showNotification('Selected city: %s' % v)
| rwl/muntjac | muntjac/demo/sampler/features/selects/OptionGroupsExample.py | Python | apache-2.0 | 2,051 | 0.000975 |
from __future__ import print_function
num = 17
test = 2
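# Trial division: the else branch of the while loop runs only when the loop
# finishes without hitting 'break', i.e. when no divisor was found.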
while test < num:
if num % test == 0 and num != test:
print(num,'equals',test, '*', num/test)
print(num,'is not a prime number')
break
test = test + 1
else:
print(num,'is a prime number!')
| sjm-ec/cbt-python | Units/06-Loops/GoodExample3.py | Python | gpl-2.0 | 257 | 0.042802 |
import unittest
converter = __import__("obj-to-sm-conversion")
model = """
# Blender v2.71 (sub 0) OBJ File:
# www.blender.org
mtllib object.mtl
o Cube
v 1.000000 -1.000000 -1.000000
v 1.000000 -1.000000 1.000000
v -1.000000 -1.000000 1.000000
v -1.000000 -1.000000 -1.000000
v 1.000000 1.000000 -0.999999
v 0.999999 1.000000 1.000001
v -1.000000 1.000000 1.000000
v -1.000000 1.000000 -1.000000
v 0.493105 -0.493106 2.246419
v -0.493106 -0.493106 2.246419
v 0.493105 0.493105 2.246419
v -0.493106 0.493105 2.246419
v 0.493105 -0.493106 3.738037
v -0.493106 -0.493106 3.738037
v 0.493104 0.493105 3.738037
v -0.493107 0.493105 3.738037
v 0.493105 -0.493106 4.284467
v -0.493107 -0.493106 4.284467
v 0.493104 0.493105 4.284468
v -0.493107 0.493105 4.284467
v 0.493104 1.012896 3.738037
v -0.493107 1.012896 3.738037
v 0.493104 1.343554 4.284468
v -0.493107 1.343554 4.284467
v 0.493105 1.845343 3.234304
v -0.493106 1.845343 3.234304
v 0.493105 2.176001 3.780735
v -0.493106 2.176001 3.780734
v 0.570207 -1.571936 -0.570207
v 0.570207 -1.571936 0.570207
v -0.570207 -1.571936 0.570207
v -0.570207 -1.571936 -0.570208
v 0.570207 -3.115134 -0.570207
v 0.570207 -3.115134 0.570207
v -0.570207 -3.115134 0.570207
v -0.570207 -3.115134 -0.570208
vn -0.799400 -0.600800 -0.000000
vn 0.000000 1.000000 0.000000
vn 1.000000 -0.000000 0.000000
vn -0.000000 0.926300 0.376700
vn -1.000000 -0.000000 -0.000000
vn 0.000000 0.000000 -1.000000
vn -0.926300 -0.000000 0.376700
vn 0.926300 0.000000 0.376700
vn 0.000000 -0.926300 0.376700
vn 0.000000 -1.000000 0.000000
vn -0.000000 -0.000000 1.000000
vn 0.000000 0.855600 -0.517700
vn -0.000000 0.517700 0.855600
vn 0.000000 -0.517700 -0.855600
vn -0.000000 -0.600800 0.799400
vn 0.000000 -0.600800 -0.799400
vn 0.799400 -0.600800 0.000000
usemtl Material
s off
f 4//1 32//1 31//1
f 8//2 7//2 6//2
f 1//3 5//3 6//3
f 7//4 12//4 11//4
f 7//5 8//5 4//5
f 1//6 4//6 8//6
f 12//2 16//2 15//2
f 7//7 3//7 10//7
f 2//8 6//8 11//8
f 2//9 9//9 10//9
f 16//5 20//5 24//5
f 12//5 10//5 14//5
f 9//3 11//3 15//3
f 9//10 13//10 14//10
f 17//11 19//11 20//11
f 16//5 14//5 18//5
f 15//3 19//3 17//3
f 13//10 17//10 18//10
f 22//5 24//5 28//5
f 15//3 21//3 23//3
f 19//11 23//11 24//11
f 16//6 22//6 21//6
f 26//12 28//12 27//12
f 23//3 21//3 25//3
f 23//13 27//13 28//13
f 22//14 26//14 25//14
f 32//5 36//5 35//5
f 3//15 31//15 30//15
f 1//16 29//16 32//16
f 2//17 30//17 29//17
f 34//10 35//10 36//10
f 31//11 35//11 34//11
f 29//6 33//6 36//6
f 29//3 30//3 34//3
f 3//1 4//1 31//1
f 5//2 8//2 6//2
f 2//3 1//3 6//3
f 6//4 7//4 11//4
f 3//5 7//5 4//5
f 5//6 1//6 8//6
f 11//2 12//2 15//2
f 12//7 7//7 10//7
f 9//8 2//8 11//8
f 3//9 2//9 10//9
f 22//5 16//5 24//5
f 16//5 12//5 14//5
f 13//3 9//3 15//3
f 10//10 9//10 14//10
f 18//11 17//11 20//11
f 20//5 16//5 18//5
f 13//3 15//3 17//3
f 14//10 13//10 18//10
f 26//5 22//5 28//5
f 19//3 15//3 23//3
f 20//11 19//11 24//11
f 15//6 16//6 21//6
f 25//12 26//12 27//12
f 27//3 23//3 25//3
f 24//13 23//13 28//13
f 21//14 22//14 25//14
f 31//5 32//5 35//5
f 2//15 3//15 30//15
f 4//16 1//16 32//16
f 1//17 2//17 29//17
f 33//10 34//10 36//10
f 30//11 31//11 34//11
f 32//6 29//6 36//6
f 33//3 29//3 34//3
"""
class TestConvertFunctions(unittest.TestCase):
def test_conversion(self):
global model
(format, faces, vertexes, normals, texture) = converter.convert_to_objects(model)
self.assertEqual(len(faces), 68)
self.assertEqual(len(vertexes), 36)
self.assertEqual(len(normals), 17)
self.assertEqual(len(texture), 0)
self.assertEqual(format, 'vn')
return 0
| stbd/stoolbox | tests/obj-to-sm-test/conversion-test.py | Python | mit | 3,598 | 0.000556 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61968.Common.Document import Document
class Agreement(Document):
"""Formal agreement between two parties defining the terms and conditions for a set of services. The specifics of the services are, in turn, defined via one or more service agreements.Formal agreement between two parties defining the terms and conditions for a set of services. The specifics of the services are, in turn, defined via one or more service agreements.
"""
def __init__(self, signDate='', validityInterval=None, *args, **kw_args):
"""Initialises a new 'Agreement' instance.
@param signDate: Date this agreement was consummated among associated persons and/or organisations.
@param validityInterval: Date and time interval this agreement is valid (from going into effect to termination).
"""
#: Date this agreement was consummated among associated persons and/or organisations.
self.signDate = signDate
self.validityInterval = validityInterval
super(Agreement, self).__init__(*args, **kw_args)
_attrs = ["signDate"]
_attr_types = {"signDate": str}
_defaults = {"signDate": ''}
_enums = {}
_refs = ["validityInterval"]
_many_refs = []
# Date and time interval this agreement is valid (from going into effect to termination).
validityInterval = None
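# Minimal usage sketch (hypothetical values):
#   a = Agreement(signDate='2011-06-01')
#   a.signDate -> '2011-06-01'
#   a.validityInterval is None until one is assigned.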
| rwl/PyCIM | CIM15/IEC61968/Common/Agreement.py | Python | mit | 2,456 | 0.003257 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-03 19:17
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dmax_website', '0009_auto_20160103_1911'),
]
operations = [
migrations.RenameField(
model_name='projectitem',
old_name='project_abbreviation',
new_name='abbreviation',
),
]
| maxwelld90/personal_web | django_project/personal_web/dmax_website/migrations/0010_auto_20160103_1917.py | Python | gpl-2.0 | 457 | 0 |
"""
Unit tests and test utilities for django Organice.
NOTE: Having an __init__ file in test directories is bad practice according to
py.test recommendations:
http://pytest.org/latest/goodpractises.html#choosing-a-test-layout-import-rules
However, this makes relative imports work in test modules (e.g. helper from ``utils.py``).
"""
# NOTE 1: This file makes the 'test' folder importable! (i.e. `import tests`) Not good.
# Though, the test folder is pruned by MANIFEST.in, hence it's not installed anywhere.
# TODO: Consider inlining the tests into the package, or find a solution without relative imports.
# NOTE 2: The import of `DjangoSettingsManager` for probe_values_in_list() makes the
# test.utils dependent on an installed version of Organice. Also tests are run with
# helpers from the unit under test! No, not good.
# TODO: Make tests and test helpers independent from the implementation being tested.
| Organice/django-organice | tests/units/__init__.py | Python | apache-2.0 | 920 | 0.007609 |
"""A symbolic AI that forms decisions by using a decision tree."""
import random
from .interface import IPlayerController
from model.card import Card
class SymbolicAI_PC(IPlayerController):
"""player controller that returns resolutes its choices using a decision tree.
It is called symbolic because it is not actually an AI"""
    def __init__(self, player, args, container):
        super().__init__(player, args)
def pollDraft(self, state):
"""Function that returns the choice of a stack from the draw field"""
"""Tries to pick a varied hand"""
# Bind in the percieved fields
percievedField = state.deck.percieveCardField()
# just return if there are no fields left
if len(percievedField) == 0:
return None
# if only one stack left, return that one
if len(percievedField) == 1:
return percievedField[0].index
###### if more than one choice left:
# get cards that you already have (your hand)
hand = self.player.getHand()
handTrac = [c for c in hand if c.getType() == Card.Type.tractor]
handNormLow = [c for c in hand if c.getType() == Card.Type.normal and c.getValue() <= 8]
handNormHigh = [c for c in hand if c.getType() == Card.Type.normal and c.getValue() >= 3]
handRep = [c for c in hand if c.getType() == Card.Type.repulsor]
# order field options on card type
tractors = [f for f in percievedField if f.card.getType() == Card.Type.tractor]
normalHighs = [f for f in percievedField if f.card.getType() == Card.Type.normal and f.card.getValue() < 7]
normalLows = [f for f in percievedField if f.card.getType() == Card.Type.normal and f.card.getValue() >= 3]
repulsors = [f for f in percievedField if f.card.getType() == Card.Type.repulsor]
# if there are tractors available, and you don't have one in your hand
if len(tractors) > 0 and len(handTrac) == 0:
return tractors[0].index
# if there are repulsors, but you dont have them in your hand
if len(repulsors) > 0 and len(handRep) == 0:
return repulsors[0].index
# get lowest normal that plays first
if len(normalLows) > 0 and len(handNormLow) == 0:
lowFirstSorted = sorted(normalLows, key = lambda x:x.card.getName()[0])# sort on first letter
return lowFirstSorted[0].index
# get highest normal that plays first
if len(normalHighs) > 0 and len(handNormHigh) == 0:
highFirstSorted = sorted(normalHighs, key = lambda x:x.card.getName()[0])# sort on first letter
return highFirstSorted[0].index
# if nothin else works, just take a random field
randomField = random.choice(percievedField)
return randomField.index
def pollPlay(self, state):
"""Function that returns which card the PC want to play"""
"""chooses mainly on player-direction and cardtype. It does not take into account
the cardNames (and thus playing-timing) or more complex ship-configurations
or one-step-ahead-strategies"""
# get player hand
hand = self.player.getHand()
# on empty hand
if len(hand) == 0:
return None
# on 1 card in hand, play only card left
if len(hand) == 1:
return hand[0]
##### on more cards, make a choice between them
# order options on card type
tractors = [c for c in hand if c.getType() == Card.Type.tractor]
normals = [c for c in hand if c.getType() == Card.Type.normal]
repulsors = [c for c in hand if c.getType() == Card.Type.repulsor]
# find closest ship
targetName = state.getTarget(self.player.getName())
# if no closest ship, the player is Stuck
if targetName is None:
# if available, try to play a tractor
if len(tractors) > 0:
return tractors[0]
# otherwise, just play some card
else:
return random.choice(hand)
# there is a closest ship: find moving direction
target = state.getShip(targetName).ship
distance = target.getPos() - self.player.getPos()
# moving forward...
if distance > 0:
# so choose highest-value normal card
if len(normals) > 0:
orderedNormals = sorted(normals, key = lambda x: x.getValue() )
return orderedNormals[0]
# if there are no normal cards, use tractor or lowest repulsor
else:
if len(tractors) > 0:
                    # use a tractor (does not matter which one since they are similar)
return tractors[0]
# since then hand is not empty, there are only repulsors left
else:
# chooce lowest repulsor
orderedRepulsors = sorted(repulsors, key = lambda x: -x.getValue() )
return orderedRepulsors[0]
# moving backward...
else: # if distance <= 0:
# so choose highest-value repulsor card
if len(repulsors) > 0:
orderedRepulsors = sorted(repulsors, key = lambda x: x.getValue() )
return orderedRepulsors[0]
# if there are no repulsor cards, use tractor or lowest normal
else:
if len(tractors) > 0:
                    # use a tractor (does not matter which one since they are similar)
return tractors[0]
# since then hand is not empty, there are only normals left
else:
# chooce lowest normal
orderedNormals = sorted(normals, key = lambda x: -x.getValue() )
return orderedNormals[0]
def pollEmergencyStop(self, state):
"""Function that returns the choice of using the emergency stop as a boolean.
Right now the choice is rather egocentric; no other-player-bullying is done."""
# get closest ship
targetName = state.getTarget(self.player.getName())
if targetName is None:
# player is stuck, don't waste ES!
return False
if self._playedCard.getType() == Card.Type.tractor:
            # choice of using ES with a tractor card type is complex... so don't
return False
# distance to closest ship (sign equals direction)
target = state.getShip(targetName).ship
distance = target.getPos() - self.player.getPos()
if distance < 0 and self._playedCard.getType() == Card.Type.normal:
# going in normal direction with closest ship just behind you: use ES
return True
if distance > 0 and self._playedCard.getType() == Card.Type.repulsor:
# getting repulsed with closest ship just behind you: use ES
return True
# return default
return False
def announceWinner(self, state):
"""Function that updates the PC after the last turn"""
return None
def informReveal(self, cards):
"""The definitive set of played cards in a round are shown to the player"""
self.log.info("Random ai informed about %s" % cards)
self._reveal = cards
self._playedCard = [c for c in cards if cards[c] == self.player.getName()][0] # find unique card owned by player
def isHuman(self):
"""The board need to be able to find the human player, which this function eill help with"""
return False
| DrSLDR/mgmag-proj | gravitas/controller/symbolic.py | Python | mit | 7,657 | 0.007575 |
import logging
from .. import exceptions
from ..plan import COMPLETE, Plan
from ..status import NotSubmittedStatus, NotUpdatedStatus
from . import build
import difflib
import json
logger = logging.getLogger(__name__)
def diff_dictionaries(old_dict, new_dict):
"""Diffs two single dimension dictionaries
Returns the number of changes and an unordered list
expressing the common entries and changes.
Args:
old_dict(dict): old dictionary
new_dict(dict): new dictionary
    Returns:
        list: [changes, output] where
            changes (int): number of changed records
            output (list): entries of [<change type>, <key>, <value>],
                where <change type> is '+', '-' or ' '
"""
old_set = set(old_dict)
new_set = set(new_dict)
added_set = new_set - old_set
removed_set = old_set - new_set
common_set = old_set & new_set
changes = 0
output = []
for key in added_set:
changes += 1
output.append(["+", key, new_dict[key]])
for key in removed_set:
changes += 1
output.append(["-", key, old_dict[key]])
for key in common_set:
if str(old_dict[key]) != str(new_dict[key]):
changes += 1
output.append(["-", key, old_dict[key]])
output.append(["+", key, new_dict[key]])
else:
output.append([" ", key, new_dict[key]])
return [changes, output]
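# Example (illustrative; the order of the output rows is not guaranteed
# because the key sets are unordered):
#   diff_dictionaries({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
#   -> [3, [['+', 'c', 4], ['-', 'a', 1], ['-', 'b', 2], ['+', 'b', 3]]]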
def print_diff_parameters(parameter_diff):
"""Handles the printing of differences in parameters.
Args:
parameter_diff (list): A list dictionaries detailing the differences
between two parameters returned by
:func:`stacker.actions.diff.diff_dictionaries`
"""
print """--- Old Parameters
+++ New Parameters
******************"""
for line in parameter_diff:
print "%s%s = %s" % (line[0], line[1], line[2])
def diff_parameters(old_params, new_params):
"""Compares the old vs. new parameters and prints a "diff"
If there are no changes, we print nothing.
Args:
        old_params(dict): old parameters
new_params(dict): new parameters
Returns:
list: A list of differences
"""
[changes, diff] = diff_dictionaries(old_params, new_params)
if changes == 0:
return []
return diff
def print_stack_changes(stack_name, new_stack, old_stack, new_params,
old_params):
"""Prints out the paramters (if changed) and stack diff"""
from_file = "old_%s" % (stack_name,)
to_file = "new_%s" % (stack_name,)
lines = difflib.context_diff(
old_stack, new_stack,
fromfile=from_file, tofile=to_file)
template_changes = list(lines)
if not template_changes:
print "*** No changes to template ***"
else:
param_diffs = diff_parameters(old_params, new_params)
print_diff_parameters(param_diffs)
print "".join(template_changes)
class Action(build.Action):
""" Responsible for diff'ing CF stacks in AWS and on disk
Generates the build plan based on stack dependencies (these dependencies
are determined automatically based on references to output values from
other stacks).
The plan is then used to pull the current CloudFormation template from
AWS and compare it to the generated templated based on the current
config.
"""
def _normalize_json(self, template):
"""Normalizes our template for diffing
Args:
template(str): json string representing the template
Returns:
            list: lines of the normalized json template
"""
obj = json.loads(template)
json_str = json.dumps(obj, sort_keys=True, indent=4)
result = []
lines = json_str.split("\n")
for line in lines:
result.append(line + "\n")
return result
def _print_new_stack(self, stack, parameters):
"""Prints out the parameters & stack contents of a new stack"""
print "New template parameters:"
for param in sorted(parameters,
key=lambda param: param['ParameterKey']):
print "%s = %s" % (param['ParameterKey'], param['ParameterValue'])
print "\nNew template contents:"
print "".join(stack)
def _diff_stack(self, stack, **kwargs):
"""Handles the diffing a stack in CloudFormation vs our config"""
if not build.should_submit(stack):
return NotSubmittedStatus()
if not build.should_update(stack):
return NotUpdatedStatus()
# get the current stack template & params from AWS
try:
[old_template, old_params] = self.provider.get_stack_info(
stack.fqn)
except exceptions.StackDoesNotExist:
old_template = None
old_params = {}
stack.resolve_variables(self.context, self.provider)
# generate our own template & params
new_template = stack.blueprint.rendered
parameters = self.build_parameters(stack)
new_params = dict()
for p in parameters:
new_params[p['ParameterKey']] = p['ParameterValue']
new_stack = self._normalize_json(new_template)
print "============== Stack: %s ==============" % (stack.name,)
# If this is a completely new template dump our params & stack
if not old_template:
self._print_new_stack(new_stack, parameters)
else:
# Diff our old & new stack/parameters
old_stack = self._normalize_json(old_template)
print_stack_changes(stack.name, new_stack, old_stack, new_params,
old_params)
return COMPLETE
def _generate_plan(self):
plan = Plan(description="Diff stacks")
stacks = self.context.get_stacks_dict()
dependencies = self._get_dependencies()
for stack_name in self.get_stack_execution_order(dependencies):
plan.add(
stacks[stack_name],
run_func=self._diff_stack,
requires=dependencies.get(stack_name),
)
return plan
def run(self, *args, **kwargs):
plan = self._generate_plan()
debug_plan = self._generate_plan()
debug_plan.outline(logging.DEBUG)
logger.info("Diffing stacks: %s", ", ".join(plan.keys()))
plan.execute()
"""Don't ever do anything for pre_run or post_run"""
def pre_run(self, *args, **kwargs):
pass
def post_run(self, *args, **kwargs):
pass
| mhahn/stacker | stacker/actions/diff.py | Python | bsd-2-clause | 6,533 | 0 |
# -*- coding: utf-8 -*-
#
# Copyright © 2013-2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
'''
Extras API endpoints for the Flask application.
'''
import flask
import requests
import pkgdb2.lib as pkgdblib
from pkgdb2 import SESSION, APP
from pkgdb2.api import API
def request_wants_json():
""" Return weather a json output was requested. """
best = flask.request.accept_mimetypes \
.best_match(['application/json', 'text/html'])
return best == 'application/json' and \
flask.request.accept_mimetypes[best] > \
flask.request.accept_mimetypes['text/html']
#@pkgdb.CACHE.cache_on_arguments(expiration_time=3600)
def _bz_acls_cached(name=None, out_format='text'):
'''Return the package attributes used by bugzilla.
    :kwarg name: Name of the bugzilla collection to gather data on.
    :kwarg out_format: Specify if the output is text or json.
Note: The data returned by this function is for the way the current
Fedora bugzilla is setup as of (2007/6/25). In the future, bugzilla
may change to have separate products for each collection-version.
When that happens we'll have to change what this function returns.
The returned data looks like this:
bugzillaAcls[collection][package].attribute
attribute is one of:
:owner: FAS username for the owner
:qacontact: if the package has a special qacontact, their userid
is listed here
:summary: Short description of the package
:cclist: list of FAS userids that are watching the package
'''
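    # Illustrative shape of the json output (hypothetical values):
    #   {'bugzillaAcls': {'Fedora': {'pkg': {
    #        'owner': 'alice',
    #        'cclist': {'groups': [], 'people': ['bob']},
    #        'qacontact': None,
    #        'summary': 'A package'}}},
    #    'title': 'Fedora Package Database -- Bugzilla ACLs'}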
packages = pkgdblib.bugzilla(
session=SESSION,
name=name)
output = []
if out_format == 'json':
output = {'bugzillaAcls': {},
'title': 'Fedora Package Database -- Bugzilla ACLs'}
for clt in sorted(packages):
for pkg in sorted(packages[clt]):
if out_format == 'json':
user = []
group = []
for ppl in packages[clt][pkg]['cc'].split(','):
if ppl.startswith('group::'):
group.append(ppl.replace('group::', '@').encode('UTF-8'))
elif ppl:
user.append(ppl.encode('UTF-8'))
poc = packages[clt][pkg]['poc']
if poc.startswith('group::'):
poc = poc.replace('group::', '@')
if clt not in output['bugzillaAcls']:
output['bugzillaAcls'][clt.encode('UTF-8')] = {}
output['bugzillaAcls'][clt][pkg.encode('UTF-8')] = {
'owner': poc.encode('UTF-8'),
'cclist': {
'groups': group,
'people': user,
},
'qacontact': None,
'summary': packages[clt][pkg]['summary'].encode('UTF-8')
}
else:
output.append(
'%(collection)s|%(name)s|%(summary)s|%(poc)s|%(qa)s'
'|%(cc)s' % (packages[clt][pkg])
)
return output
#@pkgdb.CACHE.cache_on_arguments(expiration_time=3600)
def _bz_notify_cache(
name=None, version=None, eol=False, out_format='text', acls=None):
'''List of usernames that should be notified of changes to a package.
For the collections specified we want to retrieve all of the owners,
watchbugzilla, and watchcommits accounts.
    :kwarg name: Set to a collection name to filter the results for that collection
:kwarg version: Set to a collection version to further filter results
for a single version
:kwarg eol: Set to True if you want to include end of life
distributions
    :kwarg out_format: Specify if the output is text or json.
'''
packages = pkgdblib.notify(
session=SESSION,
eol=eol,
name=name,
version=version,
acls=acls)
output = []
if out_format == 'json':
output = {'packages': {},
'eol': eol,
'name': name,
'version': version,
'title': 'Fedora Package Database -- Notification List'}
for package in sorted(packages):
if out_format == 'json':
output['packages'][package] = packages[package].split(',')
else:
output.append('%s|%s\n' % (package, packages[package]))
return output
#@pkgdb.CACHE.cache_on_arguments(expiration_time=3600)
def _vcs_acls_cache(out_format='text', eol=False):
'''Return ACLs for the version control system.
    :kwarg out_format: Specify if the output is text or json.
:kwarg eol: A boolean specifying whether to include information about
End Of Life collections or not. Defaults to ``False``.
'''
packages = pkgdblib.vcs_acls(
session=SESSION, eol=eol, oformat=out_format,
skip_pp=APP.config.get('PKGS_NOT_PROVENPACKAGER', None))
output = []
if out_format == 'json':
output = {'packageAcls': packages,
'title': 'Fedora Package Database -- VCS ACLs'}
else:
for package in sorted(packages):
for branch in sorted(packages[package]):
if packages[package][branch]['group']:
packages[package][branch]['group'] += ','
output.append(
'avail | %(group)s%(user)s | rpms/%(name)s/%(branch)s'
% (packages[package][branch]))
return output
@API.route('/bugzilla/')
@API.route('/bugzilla')
def api_bugzilla():
'''
Bugzilla information
--------------------
Return the package attributes used by bugzilla.
::
/api/bugzilla
    :kwarg collection: Name of the bugzilla collection to gather data on.
    :kwarg format: Specify if the output is text or json.
Note: The data returned by this function is for the way the current
Fedora bugzilla is setup as of (2007/6/25). In the future, bugzilla
may change to have separate products for each collection-version.
When that happens we'll have to change what this function returns.
The returned data looks like this::
bugzillaAcls[collection][package].attribute
attribute is one of:
:owner: FAS username for the owner
:qacontact: if the package has a special qacontact, their userid
is listed here
:summary: Short description of the package
:cclist: list of FAS userids that are watching the package
'''
name = flask.request.args.get('collection', None)
out_format = flask.request.args.get('format', 'text')
if out_format not in ('text', 'json'):
out_format = 'text'
if request_wants_json():
out_format = 'json'
intro = r"""# Package Database VCS Acls
# Text Format
# Collection|Package|Description|Owner|Initial QA|Initial CCList
# Backslashes (\) are escaped as \u005c Pipes (|) are escaped as \u007c
"""
acls = _bz_acls_cached(name, out_format)
if out_format == 'json':
return flask.jsonify(acls)
else:
return flask.Response(
intro + "\n".join(acls),
content_type="text/plain;charset=UTF-8"
)
@API.route('/notify/')
@API.route('/notify')
def api_notify():
'''
Notification information
------------------------
List of usernames that have commit or approveacls ACL for each package.
::
/api/notify
For the collections specified retrieve all of the users having at least
one of the following ACLs for each package: commit, approveacls.
    :kwarg name: Set to a collection name to filter the results for that collection
:kwarg version: Set to a collection version to further filter results
for a single version
:kwarg eol: Set to True if you want to include end of life
distributions
    :kwarg format: Specify if the output is text or json.
'''
name = flask.request.args.get('name', None)
version = flask.request.args.get('version', None)
eol = flask.request.args.get('eol', False)
out_format = flask.request.args.get('format', 'text')
if out_format not in ('text', 'json'):
out_format = 'text'
if request_wants_json():
out_format = 'json'
output = _bz_notify_cache(
name, version, eol, out_format,
acls=['commit', 'approveacls', 'watchcommits'])
if out_format == 'json':
return flask.jsonify(output)
else:
return flask.Response(
output,
content_type="text/plain;charset=UTF-8"
)
@API.route('/notify/all/')
@API.route('/notify/all')
def api_notify_all():
'''
Notification information 2
--------------------------
List of usernames that should be notified of changes to a package.
::
/api/notify/all
    For the collections specified we want to retrieve all of the users
    having at least one ACL for each package.
    :kwarg name: Set to a collection name to filter the results for that
        collection
:kwarg version: Set to a collection version to further filter results
for a single version
:kwarg eol: Set to True if you want to include end of life
distributions
    :kwarg format: Specify if the output is text or json.
'''
name = flask.request.args.get('name', None)
version = flask.request.args.get('version', None)
eol = flask.request.args.get('eol', False)
out_format = flask.request.args.get('format', 'text')
if out_format not in ('text', 'json'):
out_format = 'text'
if request_wants_json():
out_format = 'json'
output = _bz_notify_cache(name, version, eol, out_format, acls='all')
if out_format == 'json':
return flask.jsonify(output)
else:
return flask.Response(
output,
content_type="text/plain;charset=UTF-8"
)
@API.route('/vcs/')
@API.route('/vcs')
def api_vcs():
'''
Version Control System ACLs
---------------------------
Return ACLs for the version control system.
::
/api/vcs
    :kwarg format: Specify if the output is text or json.
:kwarg eol: A boolean specifying whether to include information about
End Of Life collections or not. Defaults to ``False``.
'''
intro = """# VCS ACLs
# avail|@groups,users|rpms/Package/branch
"""
out_format = flask.request.args.get('format', 'text')
eol = flask.request.args.get('eol', False)
if out_format not in ('text', 'json'):
out_format = 'text'
if request_wants_json():
out_format = 'json'
acls = _vcs_acls_cache(out_format, eol=eol)
if out_format == 'json':
return flask.jsonify(acls)
else:
return flask.Response(
intro + "\n".join(acls),
content_type="text/plain;charset=UTF-8"
)
@API.route('/critpath/')
@API.route('/critpath')
def api_critpath():
'''
Critical path packages
----------------------
    Return the list of packages marked as critpath for some or all active
    releases of Fedora.
::
/api/critpath
:kwarg branches: Return the list of packages marked as critpath in the
specified branch(es).
    :kwarg format: Specify if the output is text or json.
'''
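    # Text-format output sketch (illustrative branch/package names,
    # derived from the rendering loop below):
    #   == f21 ==
    #   * kernel
    #   * glibc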
out_format = flask.request.args.get('format', 'text')
branches = flask.request.args.getlist('branches')
if out_format not in ('text', 'json'):
out_format = 'text'
if request_wants_json():
out_format = 'json'
output = {}
if not branches:
active_collections = pkgdblib.search_collection(
SESSION, '*', status='Under Development')
active_collections.extend(
pkgdblib.search_collection(SESSION, '*', status='Active'))
else:
active_collections = []
for branch in branches:
active_collections.extend(
pkgdblib.search_collection(SESSION, branch)
)
for collection in active_collections:
if collection.name != 'Fedora':
continue
pkgs = pkgdblib.get_critpath_packages(
SESSION, branch=collection.branchname)
if not pkgs:
continue
output[collection.branchname] = [pkg.package.name for pkg in pkgs]
if out_format == 'json':
output = {"pkgs": output}
return flask.jsonify(output)
else:
output_str = []
        keys = sorted(output, reverse=True)
for key in keys:
output_str.append("== %s ==\n" % key)
for pkg in output[key]:
output_str.append("* %s\n" % pkg)
return flask.Response(
''.join(output_str),
content_type="text/plain;charset=UTF-8"
)
@API.route('/pendingacls/')
@API.route('/pendingacls')
def api_pendingacls():
'''
Pending ACLs requests
---------------------
    Return the list of ACL requests that are ``Awaiting Approval``.
::
/api/pendingacls
:kwarg username: Return the list of pending ACL requests requiring
action from the specified user.
    :kwarg format: Specify if the output is text or json.
'''
out_format = flask.request.args.get('format', 'text')
username = flask.request.args.get('username', None)
if out_format not in ('text', 'json'):
out_format = 'text'
if request_wants_json():
out_format = 'json'
output = {}
pending_acls = pkgdblib.get_pending_acl_user(
SESSION, username)
if out_format == 'json':
output = {"pending_acls": pending_acls}
output['total_requests_pending'] = len(pending_acls)
return flask.jsonify(output)
else:
pending_acls.sort(key=lambda it: it['package'])
output = [
"# Number of requests pending: %s" % len(pending_acls)]
for entry in pending_acls:
output.append(
"%(package)s:%(collection)s has %(user)s waiting for "
"%(acl)s" % (entry))
return flask.Response(
'\n'.join(output),
content_type="text/plain;charset=UTF-8"
)
@API.route('/groups/')
@API.route('/groups')
def api_groups():
'''
List group maintainer
---------------------
    Return the list of FAS groups which have ACLs on one or more packages.
::
/api/groups
    :kwarg format: Specify if the output is text or json.
'''
out_format = flask.request.args.get('format', 'text')
if out_format not in ('text', 'json'):
out_format = 'text'
if request_wants_json():
out_format = 'json'
output = {}
groups = pkgdblib.get_groups(SESSION)
if out_format == 'json':
output = {"groups": groups}
output['total_groups'] = len(groups)
return flask.jsonify(output)
else:
output = [
"# Number of groups: %s" % len(groups)]
for entry in sorted(groups):
output.append("%s" % (entry))
return flask.Response(
'\n'.join(output),
content_type="text/plain;charset=UTF-8"
)
@API.route('/monitored/')
@API.route('/monitored')
def api_monitored():
'''
List packages monitored
-----------------------
Return the list of packages in pkgdb that have been flagged to be
monitored by `anitya <http://release-monitoring.org>`_.
::
/api/monitored
:kwarg format: Specify if the output is text or json (default: text).
'''
out_format = flask.request.args.get('format', 'text')
if out_format not in ('text', 'json'):
out_format = 'text'
if request_wants_json():
out_format = 'json'
output = {}
pkgs = pkgdblib.get_monitored_package(SESSION)
if out_format == 'json':
output = {"packages": [pkg.name for pkg in pkgs]}
output['total_packages'] = len(pkgs)
return flask.jsonify(output)
else:
output = [
"# Number of packages: %s" % len(pkgs)]
for pkg in pkgs:
output.append("%s" % (pkg.name))
return flask.Response(
'\n'.join(output),
content_type="text/plain;charset=UTF-8"
)
@API.route('/koschei/')
@API.route('/koschei')
def api_koschei():
'''
List packages monitored by koschei
----------------------------------
Return the list of packages in pkgdb that have been flagged to be
monitored by `koschei <https://apps.fedoraproject.org/koschei>`_.
::
/api/koschei
:kwarg format: Specify if the output is text or json (default: text).
'''
out_format = flask.request.args.get('format', 'text')
if out_format not in ('text', 'json'):
out_format = 'text'
if request_wants_json():
out_format = 'json'
output = {}
pkgs = pkgdblib.get_koschei_monitored_package(SESSION)
if out_format == 'json':
output = {"packages": [pkg.name for pkg in pkgs]}
output['total_packages'] = len(pkgs)
return flask.jsonify(output)
else:
output = [
"# Number of packages: %s" % len(pkgs)]
for pkg in pkgs:
output.append("%s" % (pkg.name))
return flask.Response(
'\n'.join(output),
content_type="text/plain;charset=UTF-8"
)
@API.route('/dead/package/<pkg_name>/<clt_name>')
def api_dead_package(pkg_name, clt_name):
'''
    Return the content of the dead.package file
    -------------------------------------------
Retired packages should have in their git a ``dead.package`` file
    containing the explanation of why the package was retired.
This method calls cgit to return that explanation.
::
/api/dead/package/acheck/master
'''
req = requests.get(
'http://pkgs.fedoraproject.org/cgit/%s.git/plain/'
'dead.package?h=%s' % (pkg_name, clt_name)
)
return flask.Response(
req.text,
content_type="text/plain;charset=UTF-8",
status=req.status_code,
)
@API.route('/retired/')
@API.route('/retired')
def api_retired():
'''
List packages retired
---------------------
Return the list of packages in pkgdb that have been retired on all
Fedora or EPEL collections.
::
/api/retired
:kwarg collection: Either `Fedora` or `Fedora EPEL` or any other
collection name (default: Fedora)
:kwarg format: Specify if the output is text or json (default: text).
'''
collection = flask.request.args.get('collection', 'Fedora')
out_format = flask.request.args.get('format', 'text')
if out_format not in ('text', 'json'):
out_format = 'text'
if request_wants_json():
out_format = 'json'
output = {}
pkgs = pkgdblib.get_retired_packages(SESSION, collection=collection)
if out_format == 'json':
output = {
"packages": [pkg.name for pkg in pkgs],
"total_packages": len(pkgs),
"collection": collection,
}
return flask.jsonify(output)
else:
output = [
"# Number of packages: %s" % len(pkgs),
"# collection: %s" % collection]
for pkg in pkgs:
output.append("%s" % (pkg.name))
return flask.Response(
'\n'.join(output),
content_type="text/plain;charset=UTF-8"
)
| crobinso/pkgdb2 | pkgdb2/api/extras.py | Python | gpl-2.0 | 20,250 | 0.000198 |
import json
from quizzer.domain.answer import Answer
from quizzer.domain.grade import Grade
from quizzer.domain.questions import *
__author__ = 'David Moreno García'
def deserialize_answers(json_string):
"""
Deserializes the JSON representation received as arguments to a map of student ids to Answer objects.
:param json_string: JSON representation of the answers objects
:return: a map of student ids to Answer objects
"""
answers = dict()
if json_string:
data = json.loads(json_string)
if 'items' in data:
for item in data['items']:
try:
answers[item['studentId']] = [Answer(answer['question'], answer['value']) for answer in
item['answers']]
except KeyError:
pass
return answers
def deserialize_grades(json_string):
"""
Deserializes the JSON representation received as arguments to a map of student ids to Grade objects.
:param json_string: JSON representation of the grades objects
:return: a map of student ids to Grade objects
"""
grades = dict()
if json_string:
data = json.loads(json_string)
if 'scores' in data:
for grade in data['scores']:
if 'studentId' in grade and 'value' in grade:
grades[grade['studentId']] = Grade(grade['studentId'], grade['value'])
return grades
def deserialize_multichoice(hash):
"""
Deserialize a Multichoice question
:param hash: HashMap containing the question data
:return: question created
"""
question = MultichoiceQuestion(hash['id'], hash['questionText'])
if 'alternatives' in hash:
for alternative in hash['alternatives']:
if 'code' in alternative and 'text' in alternative and 'value' in alternative:
question.add_alternative(alternative['code'], alternative['text'], alternative['value'])
return question
def deserialize_numerical(hash):
"""
Deserialize a Numerical question
:param hash: HashMap containing the question data
:return: question created
"""
question = NumericalQuestion(hash['id'], hash['questionText'])
if 'correct' in hash:
question.correct = hash['correct']
if 'valueOk' in hash:
question.value_correct = hash['valueOk']
if 'valueFailed' in hash:
question.value_incorrect = hash['valueFailed']
return question
def deserialize_true_false(hash):
"""
Deserialize a True/False question
:param hash: HashMap containing the question data
:return: question created
"""
question = TrueFalseQuestion(hash['id'], hash['questionText'])
if 'correct' in hash:
question.correct = hash['correct']
if 'valueOK' in hash:
question.value_correct = hash['valueOK']
if 'valueFailed' in hash:
question.value_incorrect = hash['valueFailed']
if 'feedback' in hash:
question.feedback = hash['feedback']
return question
# Hash used to decide what method to call based on the question type
question_type = {
'multichoice': deserialize_multichoice,
'numerical': deserialize_numerical,
'truefalse': deserialize_true_false
}
def deserialize_questions(json_string):
"""
Deserializes the JSON representation received as arguments to a map of questions ids to Question objects.
:param json_string: JSON representation of the questions objects
:return: a map of questions ids to Question objects
"""
questions = dict()
if json_string:
data = json.loads(json_string)
if 'questions' in data:
for question in data['questions']:
try:
if 'id' in question and 'questionText' in question:
questions[question['id']] = question_type[question['type']](question)
except KeyError:
pass
return questions
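if __name__ == '__main__':
    # Minimal usage sketch (hypothetical question data; assumes the quizzer
    # package is importable). The sample JSON mirrors the keys consumed by
    # the deserializers above.
    sample = json.dumps({
        'questions': [
            {'id': 'q1', 'type': 'truefalse', 'questionText': '2 + 2 = 4?',
             'correct': True, 'valueOK': 1.0, 'valueFailed': -0.5},
        ]
    })
    print(deserialize_questions(sample))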
| davidmogar/quizzer-python | quizzer/deserializers/assessment_deserializer.py | Python | mit | 4,007 | 0.002496 |
#!/usr/bin/env python
"""
This script plots various quantities.
"""
from __future__ import division, print_function
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import argparse
import os
ylabels = {"cl": r"$C_l$", "cd": r"$C_d$", "cl/cd": r"$C_l/C_d$", "k": "$k$",
"omega": r"$\omega$", "epsilon": r"$\epsilon$"}
def plot_foil_perf(quantity="cl/cd", foil="0012", Re=2e5):
df = pd.read_csv("processed/NACA{}_{:.1e}.csv".format(foil, Re))
plt.figure()
if quantity == "cl/cd":
q = df.cl/df.cd
else:
q = df[quantity]
plt.plot(df.alpha_deg, q, "-o")
plt.xlabel(r"$\alpha$ (deg)")
plt.ylabel(ylabels[quantity])
plt.grid(True)
plt.tight_layout()
if __name__ == "__main__":
try:
import seaborn
seaborn.set(style="white", context="notebook", font_scale=1.5)
except ImportError:
print("Could not import seaborn for plot styling. Try")
print("\n conda install seaborn\n\nor")
print("\n pip install seaborn\n")
parser = argparse.ArgumentParser(description="Plotting results")
parser.add_argument("quantity", nargs="?", default="cl/cd",
help="Which quantity to plot",
choices=["cl", "cd", "cl/cd", "k", "omega", "epsilon"])
parser.add_argument("--foil", "-f", help="Foil", default="0012")
parser.add_argument("--Reynolds", "-R", help="Reynolds number", default=2e5)
parser.add_argument("--save", "-s", action="store_true", help="Save plots")
parser.add_argument("--noshow", action="store_true", default=False,
help="Do not show")
args = parser.parse_args()
plot_foil_perf(args.quantity, args.foil, float(args.Reynolds))
if args.save:
if not os.path.isdir("figures"):
os.mkdir("figures")
plt.savefig("figures/{}.pdf".format(args.quantity))
if not args.noshow:
plt.show()
| karasinski/NACAFoil-OpenFOAM | plot.py | Python | gpl-3.0 | 1,969 | 0.004571 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "json_schema.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| richardjmarini/JsonSchema | manage.py | Python | gpl-2.0 | 254 | 0 |
"""
https://en.wikipedia.org/wiki/Square_root_of_a_matrix
B is the sqrt of a matrix A if B*B = A
"""
import numpy as np
from scipy.linalg import sqrtm
from scipy.stats import special_ortho_group
def denman_beaver(A, n=50):
Y = A
Z = np.eye(len(A))
for i in range(n):
Yn = 0.5*(Y + np.linalg.inv(Z))
Zn = 0.5*(Z + np.linalg.inv(Y))
Y = Yn
Z = Zn
return (Y, Z)
def babylonian(A, n=50):
X = np.eye(len(A))
for i in range(n):
X = 0.5*(X + np.dot(A, np.linalg.inv(X)))
return X
def gen_random_matrix(n):
return np.random.rand(n, n)
def gen_rotation_matrix(n):
return special_ortho_group.rvs(n)*np.random.randint(-100, 101)
def gen_symmetric_matrix(n):
A = np.random.randint(-10, 11, size=(n, n))
A = 0.5*(A + A.T)
return A
def test(title, gen_matrix, size, iters):
print("Testing {} matrix".format(title))
for i in range(1, size):
for j in range(iters):
try:
A = gen_matrix(i)
d = np.linalg.det(A)
Y, _ = denman_beaver(A)
X = babylonian(A)
Z = sqrtm(A)
print("{}x{} matrix (det {})".format(i, i, d))
print(A)
print("Denman Beaver")
print(np.dot(Y, Y))
print("Babylonian")
print(np.dot(X, X))
print("Scipy")
print(np.dot(Z, Z))
print()
except:
pass
# iteration methods above tend to fail on random and symmetric matrices
test("random", gen_random_matrix, 5, 10)
test("symmetric", gen_symmetric_matrix, 5, 10)
# for rotation matrices, the iteration methods work
test("rotation", gen_rotation_matrix, 5, 10)
| qeedquan/misc_utilities | math/matrix-sqrt.py | Python | mit | 1,781 | 0.005053 |
from django.db import models
from django.db.models import permalink
from django.utils.translation import ugettext_lazy as _
from tagging.fields import TagField
from django.utils import timezone
class Post(models.Model):
STATUS_DRAFT = 1
STATUS_PUBLIC = 2
TEXT_CUT = "===cut==="
STATUS_CHOICES = (
(STATUS_DRAFT, _('Draft')),
(STATUS_PUBLIC, _('Public')),
)
title = models.CharField(_('title'), max_length=255)
slug = models.SlugField(_('slug'), unique=True)
text = models.TextField(_('text'), help_text="<a href='http://daringfireball.net/projects/markdown/syntax'>Markdown</a>")
status = models.IntegerField(_('status'), choices=STATUS_CHOICES, default=1)
created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
tag = TagField()
    def save(self, *args, **kwargs):
        if not self.created_at:
            self.created_at = timezone.now()
        super(Post, self).save(*args, **kwargs)
def __unicode__(self):
return self.title
@property
def get_text_cut(self):
return u'%s' % self.text.split(Post.TEXT_CUT)[0]
@property
def get_text(self):
return u'%s' % self.text.replace(Post.TEXT_CUT, "")
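    # Illustrative behaviour: with text = "intro ===cut=== body",
    # get_text_cut returns "intro " and get_text returns "intro  body".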
@permalink
def get_absolute_url(self):
        return ('blog_post_detail', None, {'slug': self.slug})
| tedor/home-blog | blog/models.py | Python | bsd-3-clause | 1,381 | 0.005793 |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
"""Memory allocation algorithm for vertex arrays and buffers.
The region allocator is used to allocate vertex indices within a vertex
domain's multiple buffers. ("Buffer" refers to any abstract buffer presented
by `pyglet.graphics.vertexbuffer`.)
The allocator will at times request more space from the buffers. The current
policy is to double the buffer size when there is not enough room to fulfil an
allocation. The buffer is never resized smaller.
The allocator maintains references to free space only; it is the caller's
responsibility to maintain the allocated regions.
"""
# Common cases:
# -regions will be the same size (instances of same object, e.g. sprites)
# -regions will not usually be resized (only exception is text)
# -alignment of 4 vertices (glyphs, sprites, images, ...)
#
# Optimise for:
# -keeping regions adjacent, reduce the number of entries in glMultiDrawArrays
# -finding large blocks of allocated regions quickly (for drawing)
# -finding block of unallocated space is the _uncommon_ case!
#
# Decisions:
# -don't over-allocate regions to any alignment -- this would require more
# work in finding the allocated spaces (for drawing) and would result in
# more entries in glMultiDrawArrays
# -don't move blocks when they truncate themselves. try not to allocate the
# space they freed too soon (they will likely need grow back into it later,
# and growing will usually require a reallocation).
# -allocator does not track individual allocated regions. Trusts caller
# to provide accurate (start, size) tuple, which completely describes
# a region from the allocator's point of view.
# -this means that compacting is probably not feasible, or would be hideously
# expensive
class AllocatorMemoryException(Exception):
"""The buffer is not large enough to fulfil an allocation.
Raised by `Allocator` methods when the operation failed due to lack of
buffer space. The buffer should be increased to at least
requested_capacity and then the operation retried (guaranteed to
pass second time).
"""
def __init__(self, requested_capacity):
self.requested_capacity = requested_capacity
class Allocator:
"""Buffer space allocation implementation."""
def __init__(self, capacity):
"""Create an allocator for a buffer of the specified capacity.
:Parameters:
`capacity` : int
Maximum size of the buffer.
"""
self.capacity = capacity
# Allocated blocks. Start index and size in parallel lists.
#
# # = allocated, - = free
#
# 0 3 5 15 20 24 40
# |###--##########-----####----------------------|
#
# starts = [0, 5, 20]
# sizes = [3, 10, 4]
#
# To calculate free blocks:
# for i in range(0, len(starts)):
# free_start[i] = starts[i] + sizes[i]
# free_size[i] = starts[i+1] - free_start[i]
# free_size[i+1] = self.capacity - free_start[-1]
self.starts = list()
self.sizes = list()
def set_capacity(self, size):
"""Resize the maximum buffer size.
        The capacity cannot be reduced.
:Parameters:
`size` : int
New maximum size of the buffer.
"""
assert size > self.capacity
self.capacity = size
def alloc(self, size):
"""Allocate memory in the buffer.
Raises `AllocatorMemoryException` if the allocation cannot be
fulfilled.
:Parameters:
`size` : int
Size of region to allocate.
:rtype: int
:return: Starting index of the allocated region.
"""
assert size >= 0
if size == 0:
return 0
# return start
# or raise AllocatorMemoryException
if not self.starts:
if size <= self.capacity:
self.starts.append(0)
self.sizes.append(size)
return 0
else:
raise AllocatorMemoryException(size)
# Allocate in a free space
free_start = self.starts[0] + self.sizes[0]
for i, (alloc_start, alloc_size) in \
enumerate(zip(self.starts[1:], self.sizes[1:])):
# Danger!
# i is actually index - 1 because of slicing above...
# starts[i] points to the block before this free space
# starts[i+1] points to the block after this free space, and is
# always valid.
free_size = alloc_start - free_start
if free_size == size:
# Merge previous block with this one (removing this free space)
self.sizes[i] += free_size + alloc_size
del self.starts[i + 1]
del self.sizes[i + 1]
return free_start
elif free_size > size:
# Increase size of previous block to intrude into this free
# space.
self.sizes[i] += size
return free_start
free_start = alloc_start + alloc_size
# Allocate at end of capacity
free_size = self.capacity - free_start
if free_size >= size:
self.sizes[-1] += size
return free_start
raise AllocatorMemoryException(self.capacity + size - free_size)
def realloc(self, start, size, new_size):
"""Reallocate a region of the buffer.
This is more efficient than separate `dealloc` and `alloc` calls, as
the region can often be resized in-place.
Raises `AllocatorMemoryException` if the allocation cannot be
fulfilled.
:Parameters:
`start` : int
Current starting index of the region.
`size` : int
Current size of the region.
`new_size` : int
New size of the region.
"""
assert size >= 0 and new_size >= 0
if new_size == 0:
if size != 0:
self.dealloc(start, size)
return 0
elif size == 0:
return self.alloc(new_size)
# return start
# or raise AllocatorMemoryException
# Truncation is the same as deallocating the tail cruft
if new_size < size:
self.dealloc(start + new_size, size - new_size)
return start
# Find which block it lives in
for i, (alloc_start, alloc_size) in \
enumerate(zip(*(self.starts, self.sizes))):
p = start - alloc_start
if p >= 0 and size <= alloc_size - p:
break
if not (p >= 0 and size <= alloc_size - p):
print(list(zip(self.starts, self.sizes)))
print(start, size, new_size)
print(p, alloc_start, alloc_size)
assert p >= 0 and size <= alloc_size - p, 'Region not allocated'
if size == alloc_size - p:
# Region is at end of block. Find how much free space is after
# it.
is_final_block = i == len(self.starts) - 1
if not is_final_block:
free_size = self.starts[i + 1] - (start + size)
else:
free_size = self.capacity - (start + size)
# TODO If region is an entire block being an island in free space,
# can possibly extend in both directions.
if free_size == new_size - size and not is_final_block:
# Merge block with next (region is expanded in place to
# exactly fill the free space)
self.sizes[i] += free_size + self.sizes[i + 1]
del self.starts[i + 1]
del self.sizes[i + 1]
return start
elif free_size > new_size - size:
# Expand region in place
self.sizes[i] += new_size - size
return start
# The block must be repositioned. Dealloc then alloc.
# But don't do this! If alloc fails, we've already silently dealloc'd
# the original block.
# self.dealloc(start, size)
# return self.alloc(new_size)
# It must be alloc'd first. We're not missing an optimisation
# here, because if freeing the block would've allowed for the block to
# be placed in the resulting free space, one of the above in-place
# checks would've found it.
result = self.alloc(new_size)
self.dealloc(start, size)
return result
def dealloc(self, start, size):
"""Free a region of the buffer.
:Parameters:
`start` : int
Starting index of the region.
`size` : int
Size of the region.
"""
assert size >= 0
if size == 0:
return
assert self.starts
# Find which block needs to be split
for i, (alloc_start, alloc_size) in \
enumerate(zip(*(self.starts, self.sizes))):
p = start - alloc_start
if p >= 0 and size <= alloc_size - p:
break
# Assert we left via the break
assert p >= 0 and size <= alloc_size - p, 'Region not allocated'
if p == 0 and size == alloc_size:
# Remove entire block
del self.starts[i]
del self.sizes[i]
elif p == 0:
# Truncate beginning of block
self.starts[i] += size
self.sizes[i] -= size
elif size == alloc_size - p:
# Truncate end of block
self.sizes[i] -= size
else:
# Reduce size of left side, insert block at right side
# $ = dealloc'd block, # = alloc'd region from same block
#
# <------8------>
# <-5-><-6-><-7->
# 1 2 3 4
# #####$$$$$#####
#
# 1 = alloc_start
# 2 = start
# 3 = start + size
# 4 = alloc_start + alloc_size
# 5 = start - alloc_start = p
# 6 = size
# 7 = {8} - ({5} + {6}) = alloc_size - (p + size)
# 8 = alloc_size
#
self.sizes[i] = p
self.starts.insert(i + 1, start + size)
self.sizes.insert(i + 1, alloc_size - (p + size))
def get_allocated_regions(self):
"""Get a list of (aggregate) allocated regions.
The result of this method is ``(starts, sizes)``, where ``starts`` is
a list of starting indices of the regions and ``sizes`` their
corresponding lengths.
:rtype: (list, list)
"""
# return (starts, sizes); len(starts) == len(sizes)
return (self.starts, self.sizes)
def get_fragmented_free_size(self):
"""Returns the amount of space unused, not including the final
free block.
:rtype: int
"""
if not self.starts:
return 0
# Variation of search for free block.
total_free = 0
free_start = self.starts[0] + self.sizes[0]
for i, (alloc_start, alloc_size) in \
enumerate(zip(self.starts[1:], self.sizes[1:])):
total_free += alloc_start - free_start
free_start = alloc_start + alloc_size
return total_free
def get_free_size(self):
"""Return the amount of space unused.
:rtype: int
"""
if not self.starts:
return self.capacity
free_end = self.capacity - (self.starts[-1] + self.sizes[-1])
return self.get_fragmented_free_size() + free_end
def get_usage(self):
"""Return fraction of capacity currently allocated.
:rtype: float
"""
return 1. - self.get_free_size() / float(self.capacity)
def get_fragmentation(self):
"""Return fraction of free space that is not expandable.
:rtype: float
"""
free_size = self.get_free_size()
if free_size == 0:
return 0.
return self.get_fragmented_free_size() / float(self.get_free_size())
def _is_empty(self):
return not self.starts
def __str__(self):
return 'allocs=' + repr(list(zip(self.starts, self.sizes)))
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, str(self))
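if __name__ == '__main__':
    # Minimal usage sketch (not part of pyglet's public API): allocate three
    # adjacent regions, free the middle one, and inspect the statistics.
    allocator = Allocator(100)
    a = allocator.alloc(10)   # -> 0
    b = allocator.alloc(20)   # -> 10 (extends the same block in place)
    c = allocator.alloc(30)   # -> 30
    allocator.dealloc(b, 20)  # splits the block: [(0, 10), (30, 30)]
    print(allocator)                      # allocs=[(0, 10), (30, 30)]
    print(allocator.get_usage())          # 0.4
    print(allocator.get_fragmentation())  # ~0.333 (20 of 60 free bytes)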
| bitcraft/pyglet | pyglet/graphics/allocation.py | Python | bsd-3-clause | 14,197 | 0 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import django_filters
from django.forms import TextInput
from django.utils.translation import gettext_lazy as _
from base.models.entity_version import EntityVersion
class EntityVersionFilter(django_filters.FilterSet):
acronym = django_filters.CharFilter(
lookup_expr='icontains', label=_("Acronym"),
widget=TextInput(attrs={'style': "text-transform:uppercase"})
)
title = django_filters.CharFilter(lookup_expr='icontains', label=_("Title"), )
class Meta:
model = EntityVersion
fields = ["entity_type"]
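# Typical django-filter usage (illustrative, assuming a request object):
#   f = EntityVersionFilter(request.GET, queryset=EntityVersion.objects.all())
#   f.qs  # the filtered queryset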
| uclouvain/OSIS-Louvain | base/forms/entity.py | Python | agpl-3.0 | 1,833 | 0.001092 |
#! /usr/bin/python
import cv2
import sys
from os import path, getenv
PPRZ_SRC = getenv("PAPARAZZI_SRC", path.normpath(path.join(path.dirname(path.abspath(__file__)), '../../../')))
sys.path.append(PPRZ_SRC + "/sw/ext/pprzlink/lib/v1.0/python")
from pprzlink.ivy import IvyMessagesInterface
from pprzlink.message import PprzMessage
class RtpViewer:
frame = None
mouse = dict()
def __init__(self, src):
# Create the video capture device
self.cap = cv2.VideoCapture(src)
# Start the ivy interface
self.ivy = IvyMessagesInterface("RTPviewer", start_ivy=False)
self.ivy.start()
# Create a named window and add a mouse callback
cv2.namedWindow('rtp')
cv2.setMouseCallback('rtp', self.on_mouse)
def run(self):
# Start an 'infinite' loop
while True:
# Read a frame from the video capture
ret, self.frame = self.cap.read()
# Quit if frame could not be retrieved or 'q' is pressed
if not ret or cv2.waitKey(1) & 0xFF == ord('q'):
break
# Run the computer vision function
self.cv()
def cv(self):
# If a selection is happening
if self.mouse.get('start'):
# Draw a rectangle indicating the region of interest
cv2.rectangle(self.frame, self.mouse['start'], self.mouse['now'], (0, 255, 0), 2)
# Show the image in a window
cv2.imshow('rtp', self.frame)
def on_mouse(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
self.mouse['start'] = (x, y)
if event == cv2.EVENT_RBUTTONDOWN:
self.mouse['start'] = None
if event == cv2.EVENT_MOUSEMOVE:
self.mouse['now'] = (x, y)
if event == cv2.EVENT_LBUTTONUP:
# If mouse start is defined, a region has been selected
if not self.mouse.get('start'):
return
# Obtain mouse start coordinates
sx, sy = self.mouse['start']
# Create a new message
msg = PprzMessage("datalink", "VIDEO_ROI")
msg['ac_id'] = None
msg['startx'] = sx
msg['starty'] = sy
msg['width'] = abs(x - sx)
msg['height'] = abs(y - sy)
msg['downsized_width'] = self.frame.shape[1]
# Send message via the ivy interface
self.ivy.send_raw_datalink(msg)
# Reset mouse start
self.mouse['start'] = None
def cleanup(self):
# Shutdown ivy interface
self.ivy.shutdown()
if __name__ == '__main__':
viewer = RtpViewer("rtp_viewer.sdp")
if not viewer.cap.isOpened():
viewer.cleanup()
sys.exit("Can't open video stream")
viewer.run()
viewer.cleanup()
| TomasDuro/paparazzi | sw/tools/rtp_viewer/rtp_viewer.py | Python | gpl-2.0 | 2,844 | 0.001758 |
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_response_errors,
webapi_request_fields)
from djblets.webapi.errors import (DOES_NOT_EXIST, INVALID_FORM_DATA,
NOT_LOGGED_IN, PERMISSION_DENIED)
from reviewboard.reviews.models import Screenshot
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.resources import resources
from reviewboard.webapi.resources.base_screenshot_comment import \
BaseScreenshotCommentResource
class ReviewScreenshotCommentResource(BaseScreenshotCommentResource):
"""Provides information on screenshots comments made on a review.
If the review is a draft, then comments can be added, deleted, or
changed on this list. However, if the review is already published,
then no changes can be made.
"""
allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
policy_id = 'review_screenshot_comment'
model_parent_key = 'review'
def get_queryset(self, request, review_id, *args, **kwargs):
q = super(ReviewScreenshotCommentResource, self).get_queryset(
request, *args, **kwargs)
return q.filter(review=review_id)
@webapi_check_local_site
@webapi_login_required
@webapi_request_fields(
required=dict({
'screenshot_id': {
'type': int,
'description': 'The ID of the screenshot being commented on.',
},
'x': {
'type': int,
'description': 'The X location for the comment.',
},
'y': {
'type': int,
'description': 'The Y location for the comment.',
},
'w': {
'type': int,
'description': 'The width of the comment region.',
},
'h': {
'type': int,
'description': 'The height of the comment region.',
},
}, **BaseScreenshotCommentResource.REQUIRED_CREATE_FIELDS),
optional=BaseScreenshotCommentResource.OPTIONAL_CREATE_FIELDS,
allow_unknown=True,
)
def create(self, request, screenshot_id, *args, **kwargs):
"""Creates a screenshot comment on a review.
This will create a new comment on a screenshot as part of a review.
The comment contains text and dimensions for the area being commented
on.
"""
try:
review_request = \
resources.review_request.get_object(request, *args, **kwargs)
review = resources.review.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
if not resources.review.has_modify_permissions(request, review):
return self.get_no_access_error(request)
try:
screenshot = Screenshot.objects.get(pk=screenshot_id,
review_request=review_request)
except ObjectDoesNotExist:
return INVALID_FORM_DATA, {
'fields': {
'screenshot_id': ['This is not a valid screenshot ID'],
}
}
new_comment = self.create_comment(
review=review,
screenshot=screenshot,
fields=('screenshot', 'x', 'y', 'w', 'h'),
**kwargs)
review.screenshot_comments.add(new_comment)
return 201, {
self.item_result_key: new_comment,
}
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
@webapi_request_fields(
optional=dict({
'x': {
'type': int,
'description': 'The X location for the comment.',
},
'y': {
'type': int,
'description': 'The Y location for the comment.',
},
'w': {
'type': int,
'description': 'The width of the comment region.',
},
'h': {
'type': int,
'description': 'The height of the comment region.',
},
}, **BaseScreenshotCommentResource.OPTIONAL_UPDATE_FIELDS),
allow_unknown=True
)
def update(self, request, *args, **kwargs):
"""Updates a screenshot comment.
This can update the text or region of an existing comment. It
can only be done for comments that are part of a draft review.
"""
try:
resources.review_request.get_object(request, *args, **kwargs)
review = resources.review.get_object(request, *args, **kwargs)
screenshot_comment = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
# Determine whether or not we're updating the issue status.
if self.should_update_issue_status(screenshot_comment, **kwargs):
return self.update_issue_status(request, self, *args, **kwargs)
if not resources.review.has_modify_permissions(request, review):
return self.get_no_access_error(request)
self.update_comment(screenshot_comment, ('x', 'y', 'w', 'h'), **kwargs)
return 200, {
self.item_result_key: screenshot_comment,
}
@webapi_check_local_site
@augment_method_from(BaseScreenshotCommentResource)
def delete(self, *args, **kwargs):
"""Deletes the comment.
This will remove the comment from the review. This cannot be undone.
Only comments on draft reviews can be deleted. Attempting to delete
a published comment will return a Permission Denied error.
Instead of a payload response on success, this will return :http:`204`.
"""
pass
@webapi_check_local_site
@augment_method_from(BaseScreenshotCommentResource)
def get_list(self, *args, **kwargs):
"""Returns the list of screenshot comments made on a review."""
pass
review_screenshot_comment_resource = ReviewScreenshotCommentResource()
| beol/reviewboard | reviewboard/webapi/resources/review_screenshot_comment.py | Python | mit | 6,387 | 0 |
#!/usr/bin/env python
import os
import sys
PROJECT_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__))
)
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dj.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| vojtatranta/django-is-core | example/manage.py | Python | lgpl-3.0 | 323 | 0 |
# Copyright (c) 2014 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from u2fval.model import Device
from u2fval.core.controller import U2FController
from u2fval.core.jsobjects import (
RegisterRequestData, RegisterResponseData, AuthenticateRequestData,
AuthenticateResponseData)
from u2fval.core.exc import U2fException, BadInputException
from M2Crypto import X509
from webob.dec import wsgify
from webob import exc, Response
from cachetools import lru_cache, LRUCache
import json
import logging
log = logging.getLogger(__name__)
__all__ = ['create_application']
def u2f_error(e):
server_e = exc.HTTPBadRequest()
server_e.body = e.json
server_e.content_type = 'application/json'
return server_e
class U2FServerApplication(object):
def __init__(self, session, memstore, metadata, allow_untrusted=False):
self._session = session
self._memstore = memstore
self._metadata = metadata
self._require_trusted = not allow_untrusted
@wsgify
def __call__(self, request):
client_name = request.environ.get('REMOTE_USER')
if not client_name:
raise u2f_error(BadInputException('Client not specified'))
try:
resp = self.client(request, client_name)
if not isinstance(resp, Response):
resp = Response(json.dumps(resp),
content_type='application/json')
return resp
except Exception as e:
self._session.rollback()
if isinstance(e, U2fException):
e = u2f_error(e)
elif isinstance(e, exc.HTTPException):
pass
else:
log.exception('Server error')
e = exc.HTTPServerError(e.message)
raise e
finally:
self._session.commit()
@lru_cache(maxsize=16)
def _get_controller(self, client_name):
return U2FController(self._session, self._memstore, client_name,
self._metadata, self._require_trusted)
def client(self, request, client_name):
user_id = request.path_info_pop()
controller = self._get_controller(client_name)
if not user_id:
if request.method == 'GET':
return controller.get_trusted_facets()
else:
raise exc.HTTPMethodNotAllowed
return self.user(request, controller, user_id.encode('utf-8'))
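    # URL layout dispatched below (as implemented in `user`/`device`):
    #   /<user>               -> list descriptors (GET) / delete user (DELETE)
    #   /<user>/register      -> registration flow (GET/POST)
    #   /<user>/authenticate  -> authentication flow (GET/POST)
    #   /<user>/<handle>      -> single-device operations (GET/POST/DELETE)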
def user(self, request, controller, user_id):
if request.path_info_peek():
page = request.path_info_pop()
if page == 'register':
return self.register(request, controller, user_id)
elif page == 'authenticate':
return self.authenticate(request, controller, user_id)
else:
return self.device(request, controller, user_id, page)
if request.method == 'GET':
return controller.get_descriptors(user_id)
elif request.method == 'DELETE':
controller.delete_user(user_id)
return exc.HTTPNoContent()
else:
raise exc.HTTPMethodNotAllowed
def register(self, request, controller, user_id):
if request.method == 'GET':
register_requests, sign_requests = controller.register_start(
user_id)
return RegisterRequestData(
registerRequests=register_requests,
authenticateRequests=sign_requests
)
elif request.method == 'POST':
data = RegisterResponseData(request.body)
try:
handle = controller.register_complete(user_id,
data.registerResponse)
except KeyError:
raise exc.HTTPBadRequest
controller.set_props(handle, data.properties)
return controller.get_descriptor(user_id, handle)
else:
raise exc.HTTPMethodNotAllowed
def authenticate(self, request, controller, user_id):
if request.method == 'GET':
sign_requests = controller.authenticate_start(user_id)
return AuthenticateRequestData(
authenticateRequests=sign_requests
)
elif request.method == 'POST':
data = AuthenticateResponseData(request.body)
try:
handle = controller.authenticate_complete(
user_id, data.authenticateResponse)
except KeyError:
raise BadInputException('Malformed request')
except ValueError as e:
log.exception('Error in authenticate')
raise BadInputException(e.message)
controller.set_props(handle, data.properties)
return controller.get_descriptor(user_id, handle)
else:
raise exc.HTTPMethodNotAllowed
def device(self, request, controller, user_id, handle):
try:
if request.method == 'GET':
return controller.get_descriptor(user_id, handle)
elif request.method == 'POST':
props = json.loads(request.body)
controller.set_props(handle, props)
return controller.get_descriptor(user_id, handle)
elif request.method == 'DELETE':
controller.unregister(handle)
return exc.HTTPNoContent()
else:
raise exc.HTTPMethodNotAllowed
except ValueError as e:
raise exc.HTTPNotFound(e.message)
class MetadataCache(object):
def __init__(self, provider, maxsize=64):
self._provider = provider
self._cache = LRUCache(maxsize=maxsize)
def get_attestation(self, device_or_cert):
if isinstance(device_or_cert, Device):
device = device_or_cert
if device.certificate_id not in self._cache:
cert = X509.load_cert_der_string(device.certificate.der)
attestation = self._provider.get_attestation(cert)
self._cache[device.certificate_id] = attestation
return self._cache[device.certificate_id]
else:
return self._provider.get_attestation(device_or_cert)
def get_metadata(self, device):
attestation = self.get_attestation(device)
if attestation:
metadata = {}
if attestation.vendor_info:
metadata['vendor'] = attestation.vendor_info
if attestation.device_info:
metadata['device'] = attestation.device_info
return metadata
return None
def create_application(settings):
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
engine = create_engine(settings['db'], echo=False)
Session = sessionmaker(bind=engine)
session = Session()
from u2flib_server.attestation import MetadataProvider, create_resolver
import os
data = settings['metadata']
# If pointing to an empty or non-existant directory, set to None so that
# built-in metadata is used.
if isinstance(data, basestring) \
and not os.path.isfile(data) \
and (not os.path.isdir(data) or len(os.listdir(data)) == 0):
data = None
metadata = MetadataCache(MetadataProvider(create_resolver(data)))
if settings['mc']:
from u2fval.core.transactionmc import MemcachedStore
memstore = MemcachedStore(settings['mc_hosts'])
else:
from u2fval.core.transactiondb import DBStore
memstore = DBStore(session)
return U2FServerApplication(session, memstore, metadata,
settings['allow_untrusted'])
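# Deployment sketch (hypothetical settings dict; adapt to your config):
#   settings = {'db': 'sqlite:///:memory:', 'metadata': None,
#               'mc': False, 'mc_hosts': [], 'allow_untrusted': True}
#   application = create_application(settings)  # serve with any WSGI server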
| moreati/u2fval | u2fval/core/api.py | Python | bsd-2-clause | 9,048 | 0 |
"""Add autograph, interview, and travel plan checklist items
Revision ID: a1a5bd54b2aa
Revises: f619fbd56912
Create Date: 2017-09-21 07:17:46.817443
"""
# revision identifiers, used by Alembic.
revision = 'a1a5bd54b2aa'
down_revision = 'f619fbd56912'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
import residue
try:
is_sqlite = op.get_context().dialect.name == 'sqlite'
except Exception:
is_sqlite = False
if is_sqlite:
op.get_context().connection.execute('PRAGMA foreign_keys=ON;')
utcnow_server_default = "(datetime('now', 'utc'))"
else:
utcnow_server_default = "timezone('utc', current_timestamp)"
def sqlite_column_reflect_listener(inspector, table, column_info):
"""Adds parenthesis around SQLite datetime defaults for utcnow."""
if column_info['default'] == "datetime('now', 'utc')":
column_info['default'] = utcnow_server_default
sqlite_reflect_kwargs = {
'listeners': [('column_reflect', sqlite_column_reflect_listener)]
}
# ===========================================================================
# HOWTO: Handle alter statements in SQLite
#
# def upgrade():
# if is_sqlite:
# with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
# batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False)
# else:
# op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False)
#
# ===========================================================================
def upgrade():
op.create_table('guest_autograph',
sa.Column('id', residue.UUID(), nullable=False),
sa.Column('guest_id', residue.UUID(), nullable=False),
sa.Column('num', sa.Integer(), server_default='0', nullable=False),
sa.Column('length', sa.Integer(), server_default='60', nullable=False),
sa.ForeignKeyConstraint(['guest_id'], ['guest_group.id'], name=op.f('fk_guest_autograph_guest_id_guest_group')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_guest_autograph')),
sa.UniqueConstraint('guest_id', name=op.f('uq_guest_autograph_guest_id'))
)
op.create_table('guest_interview',
sa.Column('id', residue.UUID(), nullable=False),
sa.Column('guest_id', residue.UUID(), nullable=False),
sa.Column('will_interview', sa.Boolean(), server_default='False', nullable=False),
sa.Column('email', sa.Unicode(), server_default='', nullable=False),
sa.Column('direct_contact', sa.Boolean(), server_default='False', nullable=False),
sa.ForeignKeyConstraint(['guest_id'], ['guest_group.id'], name=op.f('fk_guest_interview_guest_id_guest_group')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_guest_interview')),
sa.UniqueConstraint('guest_id', name=op.f('uq_guest_interview_guest_id'))
)
op.create_table('guest_travel_plans',
sa.Column('id', residue.UUID(), nullable=False),
sa.Column('guest_id', residue.UUID(), nullable=False),
sa.Column('modes', sa.Unicode(), server_default='', nullable=False),
sa.Column('modes_text', sa.Unicode(), server_default='', nullable=False),
sa.Column('details', sa.Unicode(), server_default='', nullable=False),
sa.ForeignKeyConstraint(['guest_id'], ['guest_group.id'], name=op.f('fk_guest_travel_plans_guest_id_guest_group')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_guest_travel_plans')),
sa.UniqueConstraint('guest_id', name=op.f('uq_guest_travel_plans_guest_id'))
)
def downgrade():
op.drop_table('guest_travel_plans')
op.drop_table('guest_interview')
op.drop_table('guest_autograph')
| magfest/ubersystem | alembic/versions/a1a5bd54b2aa_add_autograph_interview_and_travel_plan_.py | Python | agpl-3.0 | 3,669 | 0.010902 |
# -*- coding: utf-8 -*-
# * Copyright (C) 2012-2014 Croissance Commune
# * Authors:
# * Arezki Feth <f.a@majerti.fr>;
# * Miotte Julien <j.m@majerti.fr>;
# * TJEBBES Gaston <g.t@majerti.fr>
#
# This file is part of Autonomie : Progiciel de gestion de CAE.
#
# Autonomie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Autonomie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Autonomie. If not, see <http://www.gnu.org/licenses/>.
"""
Base tools for administrable options
"""
from sqlalchemy import (
Column,
Integer,
String,
Boolean,
ForeignKey,
)
from sqlalchemy.util import classproperty
from sqlalchemy.sql.expression import func
from autonomie_base.utils.ascii import camel_case_to_name
from autonomie_base.models.base import (
DBBASE,
default_table_args,
DBSESSION,
)
from autonomie.forms import (
get_hidden_field_conf,
EXCLUDED,
)
class ConfigurableOption(DBBASE):
"""
Base class for options
"""
__table_args__ = default_table_args
id = Column(
Integer,
primary_key=True,
info={'colanderalchemy': get_hidden_field_conf()}
)
label = Column(
String(100),
info={'colanderalchemy': {'title': u'Libellé'}},
nullable=False,
)
active = Column(
Boolean(),
default=True,
info={'colanderalchemy': EXCLUDED}
)
order = Column(
Integer,
default=0,
info={'colanderalchemy': get_hidden_field_conf()}
)
type_ = Column(
'type_',
String(30),
nullable=False,
info={'colanderalchemy': EXCLUDED}
)
@classproperty
def __mapper_args__(cls):
name = cls.__name__
if name == 'ConfigurableOption':
return {
'polymorphic_on': 'type_',
'polymorphic_identity': 'configurable_option'
}
else:
return {'polymorphic_identity': camel_case_to_name(name)}
@classmethod
def query(cls, *args):
query = super(ConfigurableOption, cls).query(*args)
query = query.filter(ConfigurableOption.active == True)
query = query.order_by(ConfigurableOption.order)
return query
def __json__(self, request):
return dict(
id=self.id,
label=self.label,
active=self.active,
)
def move_up(self):
"""
Move the current instance up in the category's order
"""
order = self.order
if order > 0:
new_order = order - 1
self.__class__.insert(self, new_order)
def move_down(self):
"""
Move the current instance down in the category's order
"""
order = self.order
new_order = order + 1
self.__class__.insert(self, new_order)
@classmethod
def get_next_order(cls):
"""
:returns: The next available order
:rtype: int
"""
query = DBSESSION().query(func.max(cls.order)).filter_by(active=True)
query = query.filter_by(
type_=cls.__mapper_args__['polymorphic_identity']
)
query = query.first()
if query is not None and query[0] is not None:
result = query[0] + 1
else:
result = 0
return result
@classmethod
def _query_active_items(cls):
"""
Build a query to collect active items of the current class
:rtype: :class:`sqlalchemy.Query`
"""
return DBSESSION().query(cls).filter_by(
type_=cls.__mapper_args__['polymorphic_identity']
).filter_by(active=True)
@classmethod
def insert(cls, item, new_order):
"""
Place the item at the given index
:param obj item: The item to move
:param int new_order: The new index of the item
"""
query = cls._query_active_items()
items = query.filter(cls.id != item.id).order_by(cls.order).all()
items.insert(new_order, item)
for index, item in enumerate(items):
item.order = index
DBSESSION().merge(item)
@classmethod
def reorder(cls):
"""
Regenerate order attributes
"""
items = cls._query_active_items().order_by(cls.order).all()
for index, item in enumerate(items):
item.order = index
DBSESSION().merge(item)
def get_id_foreignkey_col(foreignkey_str):
"""
Return an id column as a foreignkey with correct colander configuration
foreignkey_str
The foreignkey our id is pointing to
"""
column = Column(
"id",
Integer,
ForeignKey(foreignkey_str),
primary_key=True,
info={'colanderalchemy': get_hidden_field_conf()},
)
return column
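# Illustrative subclass sketch (hypothetical, not defined in this module):
# concrete option types subclass ConfigurableOption and get a polymorphic
# identity derived from the camel-cased class name.
#
# class PaymentModeOption(ConfigurableOption):
#     __tablename__ = 'payment_mode_option'
#     id = get_id_foreignkey_col('configurable_option.id')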
| CroissanceCommune/autonomie | autonomie/models/options.py | Python | gpl-3.0 | 5,322 | 0.000188 |
# coding: utf-8
""" Compile publication data for astronomy journals over the last 10 years. """
from __future__ import division, print_function
__author__ = "Andy Casey <acasey@mso.anu.edu.au>"
# Standard library
import json
# Module specific
import ads
if __name__ == "__main__":
# Let's select the years and journals we want to compare
years = (1993, 2013)
journals = [ # (Scraped from Wikipedia)
# "AIAA Journal",
"Astrobiology",
"Astronomical Journal",
"Astronomical Review",
# "Astronomische Nachrichten",
"Astronomy and Astrophysics",
# "Astronomy and Computing",
# "Astronomy & Geophysics",
"Astronomy Letters",
"Astronomy Now",
"Astronomy Reports",
"Astroparticle Physics",
"The Astrophysical Journal",
# "The Astrophysical Journal Letters",
"The Astrophysical Journal Supplement Series",
"Astrophysics and Space Science",
"Celestial Mechanics and Dynamical Astronomy",
"Classical and Quantum Gravity",
# "Connaissance des Temps",
"Cosmic Research",
# "Earth, Moon, and Planets",
"Earth and Planetary Science Letters",
"General Relativity and Gravitation",
"Geophysical Research Letters",
"Icarus",
"International Astronomical Union Circular",
"International Journal of Astrobiology",
"Journal of the British Interplanetary Society",
"Journal of Cosmology",
"Journal of Cosmology and Astroparticle Physics",
"Journal of Geophysical Research",
# "Journal for the History of Astronomy",
# "Journal of the Korean Astronomical Society",
# "Journal of the Royal Astronomical Society of Canada",
# "Meteoritics & Planetary Science",
"Monthly Notices of the Royal Astronomical Society",
# "Nature Geoscience",
"New Astronomy",
"The Observatory",
"Planetary and Space Science",
# "Publications of the Astronomical Society of Japan",
"Publications of the Astronomical Society of the Pacific",
"Solar Physics",
"Space Science Reviews",
]
publication_data = []
for journal in journals:
# Initiate the dictionary for this journal
journal_data = {
"name": journal,
"articles": [],
"total": 0
}
for year in range(years[0], years[1] + 1):
# Perform the query
# We actually don't want all the results, we just want the metadata
# which tells us how many publications there were
q = ads.SearchQuery(q="pub:\"{journal}\" year:{year}".format(journal=journal, year=year), fl=['id'], rows=1)
q.execute()
num = int(q.response.numFound)
print("{journal} had {num} publications in {year}"
.format(journal=journal, num=num, year=year))
# Save this data
journal_data["articles"].append([year, num])
journal_data["total"] += num
# Let's only save it if there were actually any publications
if journal_data["total"] > 0:
publication_data.append(journal_data)
sorted_publication_data = []
totals = [journal["total"] for journal in publication_data]
indices = sorted(range(len(totals)),key=totals.__getitem__)
for index in indices:
sorted_publication_data.append(publication_data[index])
# Save the data
with open('journal-publications.json', 'w') as fp:
json.dump(sorted_publication_data, fp, indent=2)
| jonnybazookatone/ads | examples/journal-publications-over-time/journals.py | Python | mit | 3,651 | 0.004656 |
#!bin/python
# TSV to Dublin Core/McMaster Repository conversion tool
# Matt McCollow <mccollo@mcmaster.ca>, 2011
# Nick Ruest <ruestn@mcmaster.ca>, 2011
from DublinCore import DublinCore
import csv
from sys import argv
from xml.dom.minidom import Document
from os.path import basename
DC_NS = 'http://purl.org/dc/elements/1.1/'
XSI_NS = 'http://www.w3.org/2001/XMLSchema-instance'
MACREPO_NS = 'http://repository.mcmaster.ca/schema/macrepo/elements/1.0/'
class TabFile(object):
""" A dialect for the csv.DictReader constructor """
delimiter = '\t'
def parse(fn):
""" Parse a TSV file """
try:
fp = open(fn)
fields = fp.readline().rstrip('\n').split('\t')
tsv = csv.DictReader(fp, fieldnames=fields, dialect=TabFile)
for row in tsv:
dc = makedc(row)
writefile(row['dc:identifier'], dc)
xml = makexml(row)
writefile(row['dc:identifier'], xml)
except IOError as (errno, strerror):
print "Error ({0}): {1}".format(errno, strerror)
raise SystemExit
fp.close()
def makedc(row):
""" Generate a Dublin Core XML file from a TSV """
metadata = DublinCore()
metadata.Contributor = row.get('dc:contributor', '')
metadata.Coverage = row.get('dc:coverage', '')
metadata.Creator = row.get('dc:creator', '')
metadata.Date = row.get('dc:date', '')
metadata.Description = row.get('dc:description', '')
metadata.Format = row.get('dc:format', '')
metadata.Identifier = row.get('dc:identifier', '')
metadata.Language = row.get('dc:language', '')
metadata.Publisher = row.get('dc:publisher', '')
metadata.Relation = row.get('dc:relation', '').split('|')
metadata.Rights = row.get('dc:rights', '')
metadata.Source = row.get('dc:source', '')
metadata.Subject = row.get('dc:subject', '')
metadata.Title = row.get('dc:title', '')
return metadata
def makexml(row):
""" Generate an XML file conforming to the macrepo schema from a TSV """
doc = Document()
root = doc.createElement('metadata')
root.setAttribute('xmlns:xsi', XSI_NS)
root.setAttribute('xmlns:macrepo', MACREPO_NS)
doc.appendChild(root)
oldnid = doc.createElement('macrepo:oldNid')
oldnid.appendChild(doc.createTextNode(row.get('macrepo:oldNid', '')))
root.appendChild(oldnid)
notes = doc.createElement('macrepo:notes')
notes.appendChild(doc.createTextNode(row.get('macrepo:notes', '')))
root.appendChild(notes)
scale = doc.createElement('macrepo:scale')
scale.appendChild(doc.createTextNode(row.get('macrepo:scale', '')))
root.appendChild(scale)
return doc
def writefile(name, obj):
	""" Writes Dublin Core or Macrepo XML object to a file """
	if isinstance(obj, DublinCore):
		fp = open(name + '-DC.xml', 'w')
		fp.write(obj.makeXML(DC_NS))
	elif isinstance(obj, Document):
		fp = open(name + '-macrepo.xml', 'w')
		fp.write(obj.toprettyxml())
	else:
		return
	fp.close()
def chkarg(arg):
""" Was a TSV file specified? """
return False if len(arg) < 2 else True
def usage():
""" Print a nice usage message """
print "Usage: bin/python " + basename(__file__) + " <filename>.tsv"
if __name__ == "__main__":
if chkarg(argv):
parse(argv[1])
else:
usage()
| mmccollow/TSV-Convert | tsv-convert.py | Python | gpl-2.0 | 3,059 | 0.024191 |
import base64
import hashlib
import logging
import math
import time
from django.conf import settings
from ebaysuds import TradingAPI
from suds.plugin import PluginContainer
from suds.sax.parser import Parser
logging.basicConfig()
log = logging.getLogger(__name__)
class UnrecognisedPayloadTypeError(Exception):
pass
class NotificationValidationError(Exception):
pass
class TimestampOutOfBounds(NotificationValidationError):
pass
class InvalidSignature(NotificationValidationError):
pass
def ebay_timestamp_string(datetime_obj):
# convert python datetime obj to string representation used by eBay
# appears to be a bug in suds - eBay's milliseconds are loaded into python datetime
# as microseconds so the datetime_obj we get from suds is not accurate to the data
return '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s.%(millisecond)sZ' % {
'year': '%04d' % datetime_obj.year,
'month': '%02d' % datetime_obj.month,
'day': '%02d' % datetime_obj.day,
'hour': '%02d' % datetime_obj.hour,
'minute': '%02d' % datetime_obj.minute,
'second': '%02d' % datetime_obj.second,
'millisecond': '%03d' % datetime_obj.microsecond# don't need to x1000 as we're omitting three digits of zero-padding
}
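# Illustrative example (values assumed, not from the eBay docs):
# datetime(2013, 5, 1, 12, 30, 45, 123) formats to '2013-05-01T12:30:45.123Z',
# writing the (mislabelled) microseconds field straight through as
# milliseconds, per the suds quirk noted above.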
class NotificationHandler(object):
def __init__(self, wsdl_url=None, token=None, sandbox=False, _validate=True):
es_kwargs = {
'sandbox': sandbox,
}
if wsdl_url is not None:
es_kwargs['wsdl_url'] = wsdl_url
if token is not None:
es_kwargs['token'] = token
self.client = TradingAPI(**es_kwargs)
self.saxparser = Parser()
self._validate = _validate
def decode(self, payload_type, message):
try:
payload_method = getattr(self.client.sudsclient.service, payload_type)
except AttributeError:
raise UnrecognisedPayloadTypeError('Unrecognised payload type: %s' % payload_type)
# don balaclava, hijack a suds SoapClient instance to decode our payload for us
sc_class = payload_method.clientclass({})
soapclient = sc_class(self.client.sudsclient, payload_method.method)
# copy+pasted from SoapClient.send :(
plugins = PluginContainer(soapclient.options.plugins)
ctx = plugins.message.received(reply=message)
result = soapclient.succeeded(soapclient.method.binding.input, ctx.reply)
# `result` only contains the soap:Body of the response (parsed into objects)
# but the signature we need is in the soap:Header element
signature = self._parse_signature(message)
if not self._validate or self.validate(result, signature):
return result
def _parse_signature(self, message):
xml = self.saxparser.parse(string=message)
return xml.getChild("Envelope").getChild("Header").getChild('RequesterCredentials').getChild('NotificationSignature').text
def validate(self, message, signature):
"""
As per:
http://developer.ebay.com/DevZone/XML/docs/WebHelp/wwhelp/wwhimpl/common/html/wwhelp.htm?context=eBay_XML_API&file=WorkingWithNotifications-Receiving_Platform_Notifications.html
"""
timestamp_str = ebay_timestamp_string(message.Timestamp)
floattime = time.mktime(message.Timestamp.timetuple())
if not settings.DEBUG:
# check timestamp is within 10 minutes of current time
diff_seconds = math.fabs(time.time() - floattime)
if diff_seconds > 600:
raise TimestampOutOfBounds("Payload timestamp was %s seconds away from current time." % diff_seconds)
# make hash
m = hashlib.md5()
m.update(timestamp_str)
m.update(self.client.config.get('keys', 'dev_id'))
m.update(self.client.config.get('keys', 'app_id'))
m.update(self.client.config.get('keys', 'cert_id'))
computed_hash = base64.standard_b64encode(m.digest())
if computed_hash != signature:
raise InvalidSignature("%s != %s" % (computed_hash, signature))
return True
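# Minimal usage sketch (hypothetical payload type and body; a real endpoint
# would pull both from the incoming HTTP POST):
#
#     handler = NotificationHandler(sandbox=True)
#     result = handler.decode('ItemSold', request_body)
#     # raises a NotificationValidationError subclass on bad payloads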
| anentropic/django-ebaysync | ebaysync/notifications.py | Python | lgpl-3.0 | 4,183 | 0.004542 |
SECRET_KEY = 'fake-key'
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
}
INSTALLED_APPS = [
"django_nose",
"tests",
]
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
'--with-coverage',
'--cover-package=search_views',
]
| bianchimro/django-search-views | tests/settings.py | Python | mit | 289 | 0.00346 |
"""ProvidedInstance provider async mode tests."""
import asyncio
from dependency_injector import containers, providers
from pytest import mark, raises
from .common import RESOURCE1, init_resource
@mark.asyncio
async def test_provided_attribute():
class TestClient:
def __init__(self, resource):
self.resource = resource
class TestService:
def __init__(self, resource):
self.resource = resource
class TestContainer(containers.DeclarativeContainer):
resource = providers.Resource(init_resource, providers.Object(RESOURCE1))
client = providers.Factory(TestClient, resource=resource)
service = providers.Factory(TestService, resource=client.provided.resource)
container = TestContainer()
instance1, instance2 = await asyncio.gather(
container.service(),
container.service(),
)
assert instance1.resource is RESOURCE1
assert instance2.resource is RESOURCE1
assert instance1.resource is instance2.resource
@mark.asyncio
async def test_provided_attribute_error():
async def raise_exception():
raise RuntimeError()
class TestContainer(containers.DeclarativeContainer):
client = providers.Factory(raise_exception)
container = TestContainer()
with raises(RuntimeError):
await container.client.provided.attr()
@mark.asyncio
async def test_provided_attribute_undefined_attribute():
class TestClient:
def __init__(self, resource):
self.resource = resource
class TestContainer(containers.DeclarativeContainer):
resource = providers.Resource(init_resource, providers.Object(RESOURCE1))
client = providers.Factory(TestClient, resource=resource)
container = TestContainer()
with raises(AttributeError):
await container.client.provided.attr()
@mark.asyncio
async def test_provided_item():
class TestClient:
def __init__(self, resource):
self.resource = resource
def __getitem__(self, item):
return getattr(self, item)
class TestService:
def __init__(self, resource):
self.resource = resource
class TestContainer(containers.DeclarativeContainer):
resource = providers.Resource(init_resource, providers.Object(RESOURCE1))
client = providers.Factory(TestClient, resource=resource)
service = providers.Factory(TestService, resource=client.provided["resource"])
container = TestContainer()
instance1, instance2 = await asyncio.gather(
container.service(),
container.service(),
)
assert instance1.resource is RESOURCE1
assert instance2.resource is RESOURCE1
assert instance1.resource is instance2.resource
@mark.asyncio
async def test_provided_item_error():
async def raise_exception():
raise RuntimeError()
class TestContainer(containers.DeclarativeContainer):
client = providers.Factory(raise_exception)
container = TestContainer()
with raises(RuntimeError):
await container.client.provided["item"]()
@mark.asyncio
async def test_provided_item_undefined_item():
class TestContainer(containers.DeclarativeContainer):
resource = providers.Resource(init_resource, providers.Object(RESOURCE1))
client = providers.Factory(dict, resource=resource)
container = TestContainer()
with raises(KeyError):
await container.client.provided["item"]()
@mark.asyncio
async def test_provided_method_call():
class TestClient:
def __init__(self, resource):
self.resource = resource
def get_resource(self):
return self.resource
class TestService:
def __init__(self, resource):
self.resource = resource
class TestContainer(containers.DeclarativeContainer):
resource = providers.Resource(init_resource, providers.Object(RESOURCE1))
client = providers.Factory(TestClient, resource=resource)
service = providers.Factory(TestService, resource=client.provided.get_resource.call())
container = TestContainer()
instance1, instance2 = await asyncio.gather(
container.service(),
container.service(),
)
assert instance1.resource is RESOURCE1
assert instance2.resource is RESOURCE1
assert instance1.resource is instance2.resource
@mark.asyncio
async def test_provided_method_call_parent_error():
async def raise_exception():
raise RuntimeError()
class TestContainer(containers.DeclarativeContainer):
client = providers.Factory(raise_exception)
container = TestContainer()
with raises(RuntimeError):
await container.client.provided.method.call()()
@mark.asyncio
async def test_provided_method_call_error():
class TestClient:
def method(self):
raise RuntimeError()
class TestContainer(containers.DeclarativeContainer):
client = providers.Factory(TestClient)
container = TestContainer()
with raises(RuntimeError):
await container.client.provided.method.call()()
| ets-labs/python-dependency-injector | tests/unit/providers/async/test_provided_instance_py36.py | Python | bsd-3-clause | 5,113 | 0.001565 |
#
# The Python Imaging Library.
# $Id: CurImagePlugin.py,v 1.2 2007/06/17 14:12:14 robertoconnor Exp $
#
# Windows Cursor support for PIL
#
# notes:
# uses BmpImagePlugin.py to read the bitmap data.
#
# history:
# 96-05-27 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.1"
import string
import Image, BmpImagePlugin
#
# --------------------------------------------------------------------
def i16(c):
return ord(c[0]) + (ord(c[1])<<8)
def i32(c):
return ord(c[0]) + (ord(c[1])<<8) + (ord(c[2])<<16) + (ord(c[3])<<24)
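# Both helpers decode little-endian integers from raw bytes, e.g. (assumed):
# i16('\x02\x00') == 2 and i32('\x01\x00\x00\x00') == 1.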
def _accept(prefix):
return prefix[:4] == "\0\0\2\0"
##
# Image plugin for Windows Cursor files.
class CurImageFile(BmpImagePlugin.BmpImageFile):
format = "CUR"
format_description = "Windows Cursor"
def _open(self):
offset = self.fp.tell()
# check magic
s = self.fp.read(6)
if not _accept(s):
            raise SyntaxError, "not a CUR file"
# pick the largest cursor in the file
m = ""
for i in range(i16(s[4:])):
s = self.fp.read(16)
if not m:
m = s
elif ord(s[0]) > ord(m[0]) and ord(s[1]) > ord(m[1]):
m = s
#print "width", ord(s[0])
#print "height", ord(s[1])
#print "colors", ord(s[2])
#print "reserved", ord(s[3])
#print "hotspot x", i16(s[4:])
#print "hotspot y", i16(s[6:])
#print "bytes", i32(s[8:])
#print "offset", i32(s[12:])
# load as bitmap
self._bitmap(i32(m[12:]) + offset)
# patch up the bitmap height
self.size = self.size[0], self.size[1]/2
d, e, o, a = self.tile[0]
self.tile[0] = d, (0,0)+self.size, o, a
return
#
# --------------------------------------------------------------------
Image.register_open("CUR", CurImageFile, _accept)
Image.register_extension("CUR", ".cur")
| arpruss/plucker | plucker_desktop/installer/osx/application_bundle_files/Resources/parser/python/vm/PIL/CurImagePlugin.py | Python | gpl-2.0 | 2,171 | 0.00783 |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dense Prediction Cell class that can be evolved in semantic segmentation.
DensePredictionCell is used as a `layer` in semantic segmentation whose
architecture is determined by the `config`, a dictionary specifying
the architecture.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
from third_party.deeplab.core import utils
slim = contrib_slim
# Local constants.
_META_ARCHITECTURE_SCOPE = 'meta_architecture'
_CONCAT_PROJECTION_SCOPE = 'concat_projection'
_OP = 'op'
_CONV = 'conv'
_PYRAMID_POOLING = 'pyramid_pooling'
_KERNEL = 'kernel'
_RATE = 'rate'
_GRID_SIZE = 'grid_size'
_TARGET_SIZE = 'target_size'
_INPUT = 'input'
def dense_prediction_cell_hparams():
"""DensePredictionCell HParams.
Returns:
A dictionary of hyper-parameters used for dense prediction cell with keys:
- reduction_size: Integer, the number of output filters for each operation
inside the cell.
- dropout_on_concat_features: Boolean, apply dropout on the concatenated
features or not.
- dropout_on_projection_features: Boolean, apply dropout on the projection
features or not.
- dropout_keep_prob: Float, when `dropout_on_concat_features' or
`dropout_on_projection_features' is True, the `keep_prob` value used
in the dropout operation.
- concat_channels: Integer, the concatenated features will be
channel-reduced to `concat_channels` channels.
- conv_rate_multiplier: Integer, used to multiply the convolution rates.
This is useful in the case when the output_stride is changed from 16
to 8, we need to double the convolution rates correspondingly.
"""
return {
'reduction_size': 256,
'dropout_on_concat_features': True,
'dropout_on_projection_features': False,
'dropout_keep_prob': 0.9,
'concat_channels': 256,
'conv_rate_multiplier': 1,
}
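# For instance (assumed usage, per the conv_rate_multiplier note above),
# doubling the rates when output_stride drops from 16 to 8 could look like:
#
#     hparams = {'conv_rate_multiplier': 2}
#     cell = DensePredictionCell(config=my_config, hparams=hparams)  # my_config is hypothetical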
class DensePredictionCell(object):
"""DensePredictionCell class used as a 'layer' in semantic segmentation."""
def __init__(self, config, hparams=None):
"""Initializes the dense prediction cell.
Args:
config: A dictionary storing the architecture of a dense prediction cell.
hparams: A dictionary of hyper-parameters, provided by users. This
dictionary will be used to update the default dictionary returned by
dense_prediction_cell_hparams().
Raises:
ValueError: If `conv_rate_multiplier` has value < 1.
"""
self.hparams = dense_prediction_cell_hparams()
if hparams is not None:
self.hparams.update(hparams)
self.config = config
# Check values in hparams are valid or not.
if self.hparams['conv_rate_multiplier'] < 1:
raise ValueError('conv_rate_multiplier cannot have value < 1.')
def _get_pyramid_pooling_arguments(
self, crop_size, output_stride, image_grid, image_pooling_crop_size=None):
"""Gets arguments for pyramid pooling.
Args:
crop_size: A list of two integers, [crop_height, crop_width] specifying
whole patch crop size.
output_stride: Integer, output stride value for extracted features.
image_grid: A list of two integers, [image_grid_height, image_grid_width],
specifying the grid size of how the pyramid pooling will be performed.
image_pooling_crop_size: A list of two integers, [crop_height, crop_width]
specifying the crop size for image pooling operations. Note that we
decouple whole patch crop_size and image_pooling_crop_size as one could
perform the image_pooling with different crop sizes.
Returns:
A list of (resize_value, pooled_kernel)
"""
resize_height = utils.scale_dimension(crop_size[0], 1. / output_stride)
resize_width = utils.scale_dimension(crop_size[1], 1. / output_stride)
# If image_pooling_crop_size is not specified, use crop_size.
if image_pooling_crop_size is None:
image_pooling_crop_size = crop_size
pooled_height = utils.scale_dimension(
image_pooling_crop_size[0], 1. / (output_stride * image_grid[0]))
pooled_width = utils.scale_dimension(
image_pooling_crop_size[1], 1. / (output_stride * image_grid[1]))
return ([resize_height, resize_width], [pooled_height, pooled_width])
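  # Worked example (assuming the usual DeepLab scale_dimension, i.e.
  # int((dim - 1) * scale + 1)): crop_size=[513, 513], output_stride=16 and
  # image_grid=[1, 1] yield resize size [33, 33] and pooled kernel [33, 33].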
def _parse_operation(self, config, crop_size, output_stride,
image_pooling_crop_size=None):
"""Parses one operation.
When 'operation' is 'pyramid_pooling', we compute the required
hyper-parameters and save in config.
Args:
config: A dictionary storing required hyper-parameters for one
operation.
crop_size: A list of two integers, [crop_height, crop_width] specifying
whole patch crop size.
output_stride: Integer, output stride value for extracted features.
image_pooling_crop_size: A list of two integers, [crop_height, crop_width]
specifying the crop size for image pooling operations. Note that we
decouple whole patch crop_size and image_pooling_crop_size as one could
perform the image_pooling with different crop sizes.
Returns:
A dictionary stores the related information for the operation.
"""
if config[_OP] == _PYRAMID_POOLING:
(config[_TARGET_SIZE],
config[_KERNEL]) = self._get_pyramid_pooling_arguments(
crop_size=crop_size,
output_stride=output_stride,
image_grid=config[_GRID_SIZE],
image_pooling_crop_size=image_pooling_crop_size)
return config
def build_cell(self,
features,
output_stride=16,
crop_size=None,
image_pooling_crop_size=None,
weight_decay=0.00004,
reuse=None,
is_training=False,
fine_tune_batch_norm=False,
scope=None):
"""Builds the dense prediction cell based on the config.
Args:
features: Input feature map of size [batch, height, width, channels].
output_stride: Int, output stride at which the features were extracted.
crop_size: A list [crop_height, crop_width], determining the input
features resolution.
image_pooling_crop_size: A list of two integers, [crop_height, crop_width]
specifying the crop size for image pooling operations. Note that we
decouple whole patch crop_size and image_pooling_crop_size as one could
perform the image_pooling with different crop sizes.
weight_decay: Float, the weight decay for model variables.
reuse: Reuse the model variables or not.
is_training: Boolean, is training or not.
fine_tune_batch_norm: Boolean, fine-tuning batch norm parameters or not.
scope: Optional string, specifying the variable scope.
Returns:
Features after passing through the constructed dense prediction cell with
shape = [batch, height, width, channels] where channels are determined
by `reduction_size` returned by dense_prediction_cell_hparams().
Raises:
ValueError: Use Convolution with kernel size not equal to 1x1 or 3x3 or
the operation is not recognized.
"""
batch_norm_params = {
'is_training': is_training and fine_tune_batch_norm,
'decay': 0.9997,
'epsilon': 1e-5,
'scale': True,
}
hparams = self.hparams
with slim.arg_scope(
[slim.conv2d, slim.separable_conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
padding='SAME',
stride=1,
reuse=reuse):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
with tf.variable_scope(scope, _META_ARCHITECTURE_SCOPE, [features]):
depth = hparams['reduction_size']
branch_logits = []
for i, current_config in enumerate(self.config):
scope = 'branch%d' % i
current_config = self._parse_operation(
config=current_config,
crop_size=crop_size,
output_stride=output_stride,
image_pooling_crop_size=image_pooling_crop_size)
tf.logging.info(current_config)
if current_config[_INPUT] < 0:
operation_input = features
else:
operation_input = branch_logits[current_config[_INPUT]]
if current_config[_OP] == _CONV:
if current_config[_KERNEL] == [1, 1] or current_config[
_KERNEL] == 1:
branch_logits.append(
slim.conv2d(operation_input, depth, 1, scope=scope))
else:
conv_rate = [r * hparams['conv_rate_multiplier']
for r in current_config[_RATE]]
branch_logits.append(
utils.split_separable_conv2d(
operation_input,
filters=depth,
kernel_size=current_config[_KERNEL],
rate=conv_rate,
weight_decay=weight_decay,
scope=scope))
elif current_config[_OP] == _PYRAMID_POOLING:
pooled_features = slim.avg_pool2d(
operation_input,
kernel_size=current_config[_KERNEL],
stride=[1, 1],
padding='VALID')
pooled_features = slim.conv2d(
pooled_features,
depth,
1,
scope=scope)
pooled_features = tf.image.resize_bilinear(
pooled_features,
current_config[_TARGET_SIZE],
align_corners=True)
# Set shape for resize_height/resize_width if they are not Tensor.
resize_height = current_config[_TARGET_SIZE][0]
resize_width = current_config[_TARGET_SIZE][1]
if isinstance(resize_height, tf.Tensor):
resize_height = None
if isinstance(resize_width, tf.Tensor):
resize_width = None
pooled_features.set_shape(
[None, resize_height, resize_width, depth])
branch_logits.append(pooled_features)
else:
raise ValueError('Unrecognized operation.')
# Merge branch logits.
concat_logits = tf.concat(branch_logits, 3)
if self.hparams['dropout_on_concat_features']:
concat_logits = slim.dropout(
concat_logits,
keep_prob=self.hparams['dropout_keep_prob'],
is_training=is_training,
scope=_CONCAT_PROJECTION_SCOPE + '_dropout')
concat_logits = slim.conv2d(concat_logits,
self.hparams['concat_channels'],
1,
scope=_CONCAT_PROJECTION_SCOPE)
if self.hparams['dropout_on_projection_features']:
concat_logits = slim.dropout(
concat_logits,
keep_prob=self.hparams['dropout_keep_prob'],
is_training=is_training,
scope=_CONCAT_PROJECTION_SCOPE + '_dropout')
return concat_logits
| googleinterns/wss | third_party/deeplab/core/dense_prediction_cell.py | Python | apache-2.0 | 12,180 | 0.003859 |
# Task 2. Variant 1.
# Write a program that prints the saying by Euripides that you like best. Do not forget that the author must be mentioned on a separate line.
# Anisimova M.L.
# 02.09.2016
print("Our life is a struggle.")
print("\t\t Euripides")
input("Press Enter to exit.")
| Mariaanisimova/pythonintask | BIZa/2015/Anisimova_M_L/task_2_1.py | Python | apache-2.0 | 574 | 0.008523 |
import json
from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPError
from .exceptions import SparkPostAPIException
class TornadoTransport(object):
@gen.coroutine
def request(self, method, uri, headers, **kwargs):
if "data" in kwargs:
kwargs["body"] = kwargs.pop("data")
client = AsyncHTTPClient()
try:
response = yield client.fetch(uri, method=method, headers=headers,
**kwargs)
except HTTPError as ex:
raise SparkPostAPIException(ex.response)
if response.code == 204:
raise gen.Return(True)
if response.code == 200:
result = None
try:
result = json.loads(response.body.decode("utf-8"))
            except (ValueError, UnicodeDecodeError):
pass
if result:
if 'results' in result:
raise gen.Return(result['results'])
raise gen.Return(result)
raise SparkPostAPIException(response)
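# Minimal usage sketch (hypothetical endpoint URI; run inside a Tornado
# coroutine so the generator-based request() can be yielded):
#
#     transport = TornadoTransport()
#     results = yield transport.request(
#         "GET", "https://api.sparkpost.com/api/v1/metrics",
#         headers={"Authorization": "API_KEY"})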
| thonkify/thonkify | src/lib/sparkpost/tornado/base.py | Python | mit | 1,048 | 0.000954 |
#!/F3/core/run_it.py
# This file is used for creating a script
# Author : Ismail Sunni/@ismailsunni
# Created : 2012-04-06
import MySQLdb # accessing mysql database
from xlwt import Workbook # for writing in excel
import xlrd # for reading excel
from tempfile import TemporaryFile
import util as util
import tweet_model as tm
import preprocess as pp
from db_control import db_conn
def main_sql_to_excel():
"""Read from database then write in excel"""
# To do
# read database
# database variable
db_host = 'localhost'
db_user = 'root'
db_password = ''
db_name = 'rimus'
conn = MySQLdb.connect(db_host, db_user, db_password, db_name)
cursor = conn.cursor()
query = "SELECT * FROM `tweets`"
try:
cursor.execute(query)
result = cursor.fetchall()
# return result
except Exception, e:
util.debug('db_control.read error' + str(e))
conn.rollback()
result = None
# write to excel
book = Workbook()
activeSheet = book.add_sheet('tweets')
i = 1
activeSheet.write(i, 0, 'No')
activeSheet.write(i, 1, 'Tweet Id')
activeSheet.write(i, 2, 'Username')
activeSheet.write(i, 3, 'Created')
activeSheet.write(i, 4, 'Text')
from random import sample
result = sample(result, 3000)
i += 1
try:
for row in result:
activeSheet.write(i, 0, str(i - 1))
activeSheet.write(i, 1, str(row[0]))
activeSheet.write(i, 2, str(row[7]))
activeSheet.write(i, 3, row[3].__str__())
activeSheet.write(i, 4, pp.normalize_character(row[1]))
i += 1
# print i
if i >= 50002:
break
book.save('test_data_training2.xls')
book.save(TemporaryFile())
except Exception, e:
util.debug(str(e))
def main_excel_to_sql():
book = xlrd.open_workbook('test_data_training2.xls')
sheet = book.sheet_by_name('tweets')
tweets = []
for row in range(sheet.nrows):
if sheet.row_values(row)[5] == 1:
new_data = {}
new_data['id'] = int(sheet.row_values(row)[1])
new_data['sentiment'] = int(sheet.row_values(row)[4])
tweets.append(new_data)
# new_db = new db_conn()
print tweets
def move_data():
book = xlrd.open_workbook('data_training_TA_Ismail Sunni.xls')
sheet = book.sheet_by_name('tweets')
tweets = []
k = 0
for row in range(sheet.nrows):
if sheet.row_values(row)[6] == 3:
tweets.append(sheet.row_values(row))
conn = db_conn()
i = 0
for tweet in tweets:
query = "INSERT INTO " + conn.dev_table + "( `tweet_id`, `tweet_text`, `created_at`, `sentiment`) VALUES (" + str(tweet[1]) + ", '" + tweet[4] + "', '" + tweet[3] + "', '" + str(int(tweet[5])) +"')"
# print query
if conn.insert(query) == True:
i += 1
print i
def ultimate_function():
book = xlrd.open_workbook('data_training_TA_Ismail Sunni.xls')
sheet = book.sheet_by_name('tweets')
tweets = []
for row in range(sheet.nrows):
if sheet.row_values(row)[6] == 3:
tweets.append(sheet.row_values(row))
conn = db_conn()
i = 0
j = 0
for tweet in tweets:
query = "UPDATE " + conn.test_table + " SET `sentiment`=" + str(int(tweet[5])) + ", `dev_tweet`= 1 WHERE `tweet_id`="+str(tweet[1])
if conn.update(query) == True:
i += 1
else:
j += 1
print i
print j
def reset_data():
conn = db_conn()
query = "UPDATE " + conn.test_table + " SET `dev_tweet` = 0"
return conn.update(query)
if __name__ == '__main__':
print reset_data()
ultimate_function()
| ismailsunni/f3-factor-finder | core/run_it.py | Python | gpl-2.0 | 3,452 | 0.032445 |
"""Mock module for Sphinx autodoc."""
class FFI(object):
NULL = NotImplemented
I_AM_FAKE = True # This is used for the documentation of "default"
def cdef(self, _):
pass
def dlopen(self, _):
return FakeLibrary()
class FakeLibrary(object):
# from portaudio.h:
paFloat32 = paInt32 = paInt24 = paInt16 = paInt8 = paUInt8 = NotImplemented
paFramesPerBufferUnspecified = 0
def Pa_Initialize(self):
return 0
def Pa_Terminate(self):
return 0
# from stdio.h:
def fopen(*args, **kwargs):
return NotImplemented
def fclose(*args):
pass
| dholl/python-sounddevice | doc/fake_cffi.py | Python | mit | 636 | 0 |
import subprocess, os, zipfile, requests
## Function Download
def download(url, fichier):
fileName = fichier
req = requests.get(url)
file = open(fileName, 'wb')
for chunk in req.iter_content(100000):
file.write(chunk)
file.close()
print("The download is finish !")
## Function Unzip
def unzip(source , destination):
with zipfile.ZipFile(source) as zf:
zf.extractall(destination)
nameinfo = open("name.info", "r")
ServerName = nameinfo.readline().rstrip()
Version = nameinfo.readline().rstrip()
VersionServer = nameinfo.readline().rstrip()
nameinfo.close()
subprocess.call(['java', '-jar', ServerName +'.jar'])
fichier = open("eula.txt", "w")
fichier.write("eula = true")
fichier.close()
if not os.path.exists("world"):
print("Whitch type of Minecraft server you want to create ?")
a=input("[1] Pre-Build (Map and Plugin) Spigot Server [2] Blanc Spigot Server [3] Semi-Build (Plugin pre installed, blanc map) : ")
if a == '1':
print(VersionServer)
if VersionServer == '1.9' or VersionServer == '1.8' or VersionServer == '1.7.10':
download('https://raw.githubusercontent.com/dinnozap/MinecraftServerMaker/master/world.zip', 'world.zip')
unzip('world.zip', '')
if not os.path.exists("plugins"):
os.mkdir("plugins")
download('https://hub.spigotmc.org/jenkins/job/Spigot-Essentials/lastSuccessfulBuild/artifact/Essentials/target/Essentials-2.x-SNAPSHOT.jar', 'plugins/essentials.jar')
download('https://www.spigotmc.org/resources/sexymotd.2474/download?version=73466', 'plugins/motd.jar')
subprocess.call(['java', '-jar', ServerName +'.jar'])
elif a=='2':
subprocess.call(['java', '-jar', ServerName +'.jar'])
elif a=='3':
if not os.path.exists("plugins"):
os.mkdir("plugins")
download('https://hub.spigotmc.org/jenkins/job/Spigot-Essentials/lastSuccessfulBuild/artifact/Essentials/target/Essentials-2.x-SNAPSHOT.jar', 'plugins/essentials.jar')
download('https://www.spigotmc.org/resources/sexymotd.2474/download?version=73466', 'plugins/motd.jar')
subprocess.call(['java', '-jar', ServerName +'.jar'])
| dinnozap/MinecraftServerMaker | launch.py | Python | apache-2.0 | 2,078 | 0.039461 |
import scipy.misc, numpy as np, os, sys
def save_img(out_path, img):
img = np.clip(img, 0, 255).astype(np.uint8)
scipy.misc.imsave(out_path, img)
def scale_img(style_path, style_scale):
    scale = float(style_scale)
    o0, o1, o2 = scipy.misc.imread(style_path, mode='RGB').shape
    new_shape = (int(o0 * scale), int(o1 * scale), o2)
    style_target = get_img(style_path, img_size=new_shape)
    return style_target
def get_img(src, img_size=False):
    img = scipy.misc.imread(src, mode='RGB')
if not (len(img.shape) == 3 and img.shape[2] == 3):
img = np.dstack((img,img,img))
if img_size != False:
img = scipy.misc.imresize(img, img_size)
return img
def exists(p, msg):
assert os.path.exists(p), msg
def list_files(in_path):
files = []
for (dirpath, dirnames, filenames) in os.walk(in_path):
files.extend(filenames)
break
return files
| gmittal/prisma | server/src/utils.py | Python | mit | 975 | 0.017436 |
import plyvel
import ast
import hashlib
import os
import sys
import threading
from processor import print_log, logger
from utils import (
bc_address_to_hash_160,
Hash,
bytes8_to_int,
bytes4_to_int,
int_to_bytes8,
int_to_hex8,
int_to_bytes4,
int_to_hex4
)
"""
Patricia tree for hashing unspents
"""
# increase this when database needs to be updated
GENESIS_HASH = '00000c492bf73490420868bc577680bfc4c60116e7e85343bc624787c21efa4c'
DB_VERSION = 3
KEYLENGTH = 56 # 20 + 32 + 4
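# A full leaf key is hash160(address) (20 bytes) + txid (32 bytes) +
# output index (4 bytes); inner nodes of the tree are keyed by prefixes.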
class Node(object):
def __init__(self, s):
self.k = int(s[0:32].encode('hex'), 16)
self.s = s[32:]
if self.k == 0 and self.s:
print "init error", len(self.s), "0x%0.64X" % self.k
raise BaseException("z")
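    # Layout note: a node serializes as a 32-byte bitmap `k` (one bit per
    # possible child byte) followed by one 40-byte record per set bit,
    # each record being a 32-byte hash plus an 8-byte integer value.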
def serialized(self):
k = "0x%0.64X" % self.k
k = k[2:].decode('hex')
assert len(k) == 32
return k + self.s
def has(self, c):
return (self.k & (1 << (ord(c)))) != 0
def is_singleton(self, key):
assert self.s != ''
return len(self.s) == 40
def get_singleton(self):
for i in xrange(256):
if self.k == (1 << i):
return chr(i)
raise BaseException("get_singleton")
def indexof(self, c):
assert self.k != 0 or self.s == ''
x = 0
for i in xrange(ord(c)):
if (self.k & (1 << i)) != 0:
x += 40
return x
def get(self, c):
x = self.indexof(c)
ss = self.s[x:x + 40]
_hash = ss[0:32]
value = bytes8_to_int(ss[32:40])
return _hash, value
def set(self, c, h, value):
if h is None:
h = chr(0) * 32
vv = int_to_bytes8(value)
item = h + vv
assert len(item) == 40
if self.has(c):
self.remove(c)
x = self.indexof(c)
self.s = self.s[0:x] + item + self.s[x:]
self.k |= (1 << ord(c))
assert self.k != 0
def remove(self, c):
x = self.indexof(c)
self.k &= ~(1 << ord(c))
self.s = self.s[0:x] + self.s[x + 40:]
def get_hash(self, x, parent):
if x:
assert self.k != 0
skip_string = x[len(parent) + 1:] if x != '' else ''
x = 0
v = 0
hh = ''
for i in xrange(256):
if (self.k & (1 << i)) != 0:
ss = self.s[x:x + 40]
hh += ss[0:32]
v += bytes8_to_int(ss[32:40])
x += 40
try:
_hash = Hash(skip_string + hh)
except:
_hash = None
if x:
assert self.k != 0
return _hash, v
@classmethod
def from_dict(cls, d):
k = 0
s = ''
for i in xrange(256):
if chr(i) in d:
k += 1 << i
h, value = d[chr(i)]
if h is None: h = chr(0) * 32
vv = int_to_bytes8(value)
item = h + vv
assert len(item) == 40
s += item
k = "0x%0.64X" % k # 32 bytes
k = k[2:].decode('hex')
assert len(k) == 32
out = k + s
return Node(out)
class DB(object):
def __init__(self, path, name, cache_size):
self.db = plyvel.DB(os.path.join(path, name), create_if_missing=True,
compression=None, lru_cache_size=cache_size)
self.batch = self.db.write_batch()
self.cache = {}
self.lock = threading.Lock()
def put(self, key, s):
self.batch.put(key, s)
self.cache[key] = s
def get(self, key):
s = self.cache.get(key)
if s == 'deleted':
return None
if s is None:
with self.lock:
s = self.db.get(key)
return s
def delete(self, key):
self.batch.delete(key)
self.cache[key] = 'deleted'
def close(self):
self.db.close()
def write(self):
with self.lock:
self.batch.write()
self.batch.clear()
self.cache.clear()
def get_next(self, key):
with self.lock:
i = self.db.iterator(start=key)
k, _ = i.next()
return k
class Storage(object):
def __init__(self, config, shared, test_reorgs):
self.shared = shared
self.hash_list = {}
self.parents = {}
self.skip_batch = {}
self.test_reorgs = test_reorgs
# init path
self.dbpath = config.get('leveldb', 'path')
if not os.path.exists(self.dbpath):
os.mkdir(self.dbpath)
try:
self.db_utxo = DB(self.dbpath, 'utxo',
config.getint('leveldb', 'utxo_cache'))
self.db_hist = DB(self.dbpath, 'hist',
config.getint('leveldb', 'hist_cache'))
self.db_addr = DB(self.dbpath, 'addr',
config.getint('leveldb', 'addr_cache'))
self.db_undo = DB(self.dbpath, 'undo', None)
except:
logger.error('db init', exc_info=True)
self.shared.stop()
try:
self.last_hash, self.height, db_version = ast.literal_eval(
self.db_undo.get('height'))
except:
print_log('Initializing database')
self.height = 0
self.last_hash = GENESIS_HASH
self.pruning_limit = config.getint('leveldb', 'pruning_limit')
db_version = DB_VERSION
self.put_node('', Node.from_dict({}))
# check version
if db_version != DB_VERSION:
print_log(
"Your database '%s' is deprecated. "
"Please create a new database" % self.dbpath)
self.shared.stop()
return
# pruning limit
try:
self.pruning_limit = ast.literal_eval(self.db_undo.get('limit'))
except:
self.pruning_limit = config.getint('leveldb', 'pruning_limit')
            self.db_undo.put('limit', repr(self.pruning_limit))
# reorg limit
try:
self.reorg_limit = ast.literal_eval(
self.db_undo.get('reorg_limit'))
except:
self.reorg_limit = config.getint('leveldb', 'reorg_limit')
self.db_undo.put('reorg_limit', repr(self.reorg_limit))
# compute root hash
root_node = self.get_node('')
self.root_hash, coins = root_node.get_hash('', None)
# print stuff
print_log("Database version %d." % db_version)
print_log(
"Pruning limit for spent outputs is %d." % self.pruning_limit)
print_log("Reorg limit is %d blocks." % self.reorg_limit)
print_log("Blockchain height", self.height)
print_log("UTXO tree root hash:", self.root_hash.encode('hex'))
print_log("Coins in database:", coins)
# convert between bitcoin addresses and 20 bytes keys used for storage.
@staticmethod
def address_to_key(addr):
return bc_address_to_hash_160(addr)
def get_skip(self, key):
o = self.skip_batch.get(key)
if o is not None:
return o
k = self.db_utxo.get_next(key)
assert k.startswith(key)
return k[len(key):]
def set_skip(self, key, skip):
self.skip_batch[key] = skip
def get_proof(self, addr):
key = self.address_to_key(addr)
k = self.db_utxo.get_next(key)
p = self.get_path(k)
p.append(k)
out = []
for item in p:
v = self.db_utxo.get(item)
out.append((item.encode('hex'), v.encode('hex')))
return out
def get_balance(self, addr):
key = self.address_to_key(addr)
k = self.db_utxo.get_next(key)
if not k.startswith(key):
return 0
p = self.get_parent(k)
d = self.get_node(p)
letter = k[len(p)]
return d.get(letter)[1]
def listunspent(self, addr):
key = self.address_to_key(addr)
if key is None:
raise BaseException('Invalid Bitcoin address', addr)
out = []
with self.db_utxo.lock:
for k, v in self.db_utxo.db.iterator(start=key):
if not k.startswith(key):
break
if len(k) == KEYLENGTH:
txid = k[20:52].encode('hex')
txpos = bytes4_to_int(k[52:56])
h = bytes4_to_int(v[8:12])
v = bytes8_to_int(v[0:8])
out.append({'tx_hash': txid, 'tx_pos': txpos, 'height': h,
'value': v})
if len(out) == 1000:
print_log('max utxo reached', addr)
break
out.sort(key=lambda x: x['height'])
return out
def get_history(self, addr):
out = []
o = self.listunspent(addr)
for item in o:
out.append((item['height'], item['tx_hash']))
h = self.db_hist.get(addr)
while h:
item = h[0:80]
h = h[80:]
txi = item[0:32].encode('hex')
hi = bytes4_to_int(item[36:40])
txo = item[40:72].encode('hex')
ho = bytes4_to_int(item[76:80])
out.append((hi, txi))
out.append((ho, txo))
# uniqueness
out = set(out)
# sort by height then tx_hash
out = sorted(out)
return map(lambda x: {'height': x[0], 'tx_hash': x[1]}, out)
def get_address(self, txi):
return self.db_addr.get(txi)
def get_undo_info(self, height):
s = self.db_undo.get("undo_info_%d" % (height % self.reorg_limit))
if s is None:
print_log("no undo info for ", height)
return eval(s)
def write_undo_info(self, height, bitcoind_height, undo_info):
if height > bitcoind_height - self.reorg_limit or self.test_reorgs:
self.db_undo.put("undo_info_%d" % (height % self.reorg_limit),
repr(undo_info))
@staticmethod
def common_prefix(word1, word2):
max_len = min(len(word1), len(word2))
for i in xrange(max_len):
if word2[i] != word1[i]:
index = i
break
else:
index = max_len
return word1[0:index]
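    # e.g. common_prefix('abcd', 'abxy') == 'ab'; add_key uses this to find
    # where a new leaf key diverges from an existing child key.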
def put_node(self, key, node):
self.db_utxo.put(key, node.serialized())
def get_node(self, key):
s = self.db_utxo.get(key)
if s is None:
return
return Node(s)
def add_key(self, target, value, height):
assert len(target) == KEYLENGTH
path = self.get_path(target, new=True)
if path is True:
return
parent = path[-1]
parent_node = self.get_node(parent)
n = len(parent)
c = target[n]
if parent_node.has(c):
h, v = parent_node.get(c)
skip = self.get_skip(parent + c)
child = parent + c + skip
assert not target.startswith(child)
prefix = self.common_prefix(child, target)
index = len(prefix)
if len(child) == KEYLENGTH:
# if it's a leaf, get hash and value of new_key from parent
d = Node.from_dict({
target[index]: (None, 0),
child[index]: (h, v)
})
else:
# if it is not a leaf, update its hash because skip_string changed
child_node = self.get_node(child)
h, v = child_node.get_hash(child, prefix)
d = Node.from_dict({
target[index]: (None, 0),
child[index]: (h, v)
})
self.set_skip(prefix + target[index], target[index + 1:])
self.set_skip(prefix + child[index], child[index + 1:])
self.put_node(prefix, d)
path.append(prefix)
self.parents[child] = prefix
# update parent skip
new_skip = prefix[n + 1:]
self.set_skip(parent + c, new_skip)
parent_node.set(c, None, 0)
self.put_node(parent, parent_node)
else:
# add new letter to parent
skip = target[n + 1:]
self.set_skip(parent + c, skip)
parent_node.set(c, None, 0)
self.put_node(parent, parent_node)
# write the new leaf
s = (int_to_hex8(value) + int_to_hex4(height)).decode('hex')
self.db_utxo.put(target, s)
# the hash of a leaf is the txid
_hash = target[20:52]
self.update_node_hash(target, path, _hash, value)
def update_node_hash(self, node, path, _hash, value):
c = node
for x in path[::-1]:
self.parents[c] = x
c = x
self.hash_list[node] = (_hash, value)
def update_hashes(self):
nodes = {} # nodes to write
for i in xrange(KEYLENGTH, -1, -1):
for node in self.hash_list.keys():
if len(node) != i:
continue
node_hash, node_value = self.hash_list.pop(node)
parent = self.parents[node] if node != '' else ''
if i != KEYLENGTH and node_hash is None:
n = self.get_node(node)
node_hash, node_value = n.get_hash(node, parent)
assert node_hash is not None
if node == '':
self.root_hash = node_hash
self.root_value = node_value
assert self.root_hash is not None
break
# read parent
d = nodes.get(parent)
if d is None:
d = self.get_node(parent)
assert d is not None
# write value into parent
letter = node[len(parent)]
d.set(letter, node_hash, node_value)
nodes[parent] = d
# iterate
grandparent = self.parents[parent] if parent != '' else None
parent_hash, parent_value = d.get_hash(parent, grandparent)
if parent_hash is not None:
self.hash_list[parent] = (parent_hash, parent_value)
for k, v in nodes.iteritems():
self.put_node(k, v)
# cleanup
assert self.hash_list == {}
self.parents = {}
self.skip_batch = {}
def get_path(self, target, new=False):
x = self.db_utxo.get(target)
if not new and x is None:
raise BaseException('key not in tree', target.encode('hex'))
if new and x is not None:
# raise BaseException('key already in tree', target.encode('hex'))
# occurs at block 91880 (duplicate txid)
print_log('key already in tree', target.encode('hex'))
return True
remaining = target
key = ''
path = []
while key != target:
node = self.get_node(key)
if node is None:
break
# raise # should never happen
path.append(key)
c = remaining[0]
if not node.has(c):
break
skip = self.get_skip(key + c)
key = key + c + skip
if not target.startswith(key):
break
remaining = target[len(key):]
return path
def delete_key(self, leaf):
path = self.get_path(leaf)
s = self.db_utxo.get(leaf)
self.db_utxo.delete(leaf)
if leaf in self.hash_list:
del self.hash_list[leaf]
parent = str(path[-1])
letter = leaf[len(parent)]
parent_node = self.get_node(parent)
parent_node.remove(letter)
# remove key if it has a single child
if parent_node.is_singleton(parent):
self.db_utxo.delete(parent)
if parent in self.hash_list:
del self.hash_list[parent]
l = parent_node.get_singleton()
_hash, value = parent_node.get(l)
skip = self.get_skip(parent + l)
otherleaf = parent + l + skip
# update skip value in grand-parent
gp = path[-2]
gp_items = self.get_node(gp)
letter = otherleaf[len(gp)]
new_skip = otherleaf[len(gp) + 1:]
gp_items.set(letter, None, 0)
self.set_skip(gp + letter, new_skip)
self.put_node(gp, gp_items)
# note: k is not necessarily a leaf
if len(otherleaf) == KEYLENGTH:
ss = self.db_utxo.get(otherleaf)
_hash, value = otherleaf[20:52], bytes8_to_int(ss[0:8])
else:
_hash, value = None, None
self.update_node_hash(otherleaf, path[:-1], _hash, value)
else:
self.put_node(parent, parent_node)
_hash, value = None, None
self.update_node_hash(parent, path[:-1], _hash, value)
return s
def get_parent(self, x):
p = self.get_path(x)
return p[-1]
def get_root_hash(self):
return self.root_hash if self.root_hash else ''
def batch_write(self):
for db in [self.db_utxo, self.db_addr, self.db_hist, self.db_undo]:
db.write()
def close(self):
for db in [self.db_utxo, self.db_addr, self.db_hist, self.db_undo]:
db.close()
def save_height(self, block_hash, block_height):
self.db_undo.put('height',
repr((block_hash, block_height, DB_VERSION)))
def add_to_history(self, addr, tx_hash, tx_pos, value, tx_height):
key = self.address_to_key(addr)
txo = (tx_hash + int_to_hex4(tx_pos)).decode('hex')
# write the new history
self.add_key(key + txo, value, tx_height)
# backlink
self.db_addr.put(txo, addr)
def revert_add_to_history(self, addr, tx_hash, tx_pos, value, tx_height):
key = self.address_to_key(addr)
txo = (tx_hash + int_to_hex4(tx_pos)).decode('hex')
# delete
self.delete_key(key + txo)
# backlink
self.db_addr.delete(txo)
def get_utxo_value(self, addr, txi):
key = self.address_to_key(addr)
leaf = key + txi
s = self.db_utxo.get(leaf)
value = bytes8_to_int(s[0:8])
return value
def set_spent(self, addr, txi, txid, index, height, undo):
key = self.address_to_key(addr)
leaf = key + txi
s = self.delete_key(leaf)
value = bytes8_to_int(s[0:8])
in_height = bytes4_to_int(s[8:12])
undo[leaf] = value, in_height
# delete backlink txi-> addr
self.db_addr.delete(txi)
# add to history
s = self.db_hist.get(addr)
if s is None: s = ''
txo = (txid + int_to_hex4(index) + int_to_hex4(height)).decode('hex')
s += txi + int_to_bytes4(in_height) + txo
s = s[-80 * self.pruning_limit:]
self.db_hist.put(addr, s)
def revert_set_spent(self, addr, txi, undo):
key = self.address_to_key(addr)
leaf = key + txi
# restore backlink
self.db_addr.put(txi, addr)
v, height = undo.pop(leaf)
self.add_key(leaf, v, height)
# revert add to history
s = self.db_hist.get(addr)
# s might be empty if pruning limit was reached
if not s:
return
assert s[-80:-44] == txi
s = s[:-80]
self.db_hist.put(addr, s)
def import_transaction(self, txid, tx, block_height, touched_addr):
# contains the list of pruned items for each address in the tx;
# also, 'prev_addr' is a list of prev addresses
undo = {
'prev_addr': []}
prev_addr = []
for i, x in enumerate(tx.get('inputs')):
txi = (
x.get('prevout_hash') + int_to_hex4(
x.get('prevout_n'))).decode(
'hex')
addr = self.get_address(txi)
if addr is not None:
self.set_spent(addr, txi, txid, i, block_height, undo)
touched_addr.add(addr)
prev_addr.append(addr)
undo['prev_addr'] = prev_addr
# here I add only the outputs to history;
# maybe I want to add inputs too (that's in the other loop)
for x in tx.get('outputs'):
addr = x.get('address')
if addr is None: continue
self.add_to_history(addr, txid, x.get('index'), x.get('value'),
block_height)
touched_addr.add(addr)
return undo
def revert_transaction(self, txid, tx, block_height, touched_addr, undo):
for x in reversed(tx.get('outputs')):
addr = x.get('address')
if addr is None: continue
self.revert_add_to_history(addr, txid, x.get('index'),
x.get('value'), block_height)
touched_addr.add(addr)
prev_addr = undo.pop('prev_addr')
for i, x in reversed(list(enumerate(tx.get('inputs')))):
addr = prev_addr[i]
if addr is not None:
txi = (x.get('prevout_hash') + int_to_hex4(
x.get('prevout_n'))).decode('hex')
self.revert_set_spent(addr, txi, undo)
touched_addr.add(addr)
assert undo == {}
| Verbalist/electrum-server | src/storage.py | Python | mit | 21,577 | 0.000556 |
from string import capwords
from django.db import models
CAPWORDS_ATTRS = ('name', 'firstname')
class PatientManager(models.Manager):
"""
custum patient manger to modifie create and update
"""
attrs = CAPWORDS_ATTRS
# paremeter to capwords
# def create_patient(self, name=None, firstname=None, birthdate=None):
# """
# every patient creatient must use this
# """
# if not name:
# raise ValueError('Must Include a name when adding a Patient')
# if not firstname:
# raise ValueError('Must Include a firstname when adding a Patient')
# if not birthdate:
# raise ValueError('Must Include a birthdate when adding a Patient')
# patient = self.model(
# name = name,
# firstname= firstname,
# birthdate = birthdate
# )
# print('hello')
# patient.save(using=self.db)
# return patient
def create(self, **kwargs):
"""
enhancement
"""
        # capwords certain fields
for i in self.attrs:
kwargs[i] = capwords(kwargs[i])
# recall base create
return super(PatientManager, self).create(**kwargs)
class Patient(models.Model):
"""
    Base class of patient.
    Requires only 3 fields: name, firstname, birthdate
"""
attrs = CAPWORDS_ATTRS
# required Field
name = models.CharField(max_length=50)
firstname = models.CharField(max_length=50)
birthdate = models.DateField()
    sexe = models.BooleanField(default=True) # True if female else False
# non required fields
street = models.CharField(blank=True, max_length=200, default="")
postalcode = models.CharField(blank=True, max_length=5, default="")
city = models.CharField(max_length=200, blank=True, default="")
phonenumber = models.CharField(blank=True, max_length=20, default="")
email = models.EmailField(blank=True, max_length=100, default="")
alive = models.BooleanField(default=True)
objects = PatientManager()
def __str__(self):
"""
nice printing Firstname Name
"""
return self.firstname + ' ' + self.name
def save(self, *args, **kwargs):
"""
        customizing save method, adds:
        - force capwords for name and firstname
"""
for i in self.attrs:
setattr(self, i, capwords(getattr(self, i)))
super(Patient, self).save(*args, **kwargs)
"""
fields to add:
date of death
deceased
declared primary care physician (médecin traitant)
miscellaneous notes
"""
| jgirardet/unolog | unolog/patients/models.py | Python | gpl-3.0 | 2,704 | 0.001113 |
import six
def force_unicode(value):
if not isinstance(value, six.string_types):
return six.text_type(value)
try:
return value.decode('utf-8')
except (AttributeError, UnicodeEncodeError):
return value
def force_bytes(value):
if not isinstance(value, six.string_types):
value = force_unicode(value)
try:
return value.encode('utf-8')
except (AttributeError, UnicodeDecodeError):
return value
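# Illustrative round-trip (assumed, matching the UTF-8 handling above):
# force_unicode(b'caf\xc3\xa9') == u'caf\xe9' and
# force_bytes(u'caf\xe9') == b'caf\xc3\xa9'; non-strings are first
# coerced through six.text_type.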
| Eksmo/itunes-iap | itunesiap/utils.py | Python | bsd-2-clause | 466 | 0 |
"""Defines a workspace that is needed by a task"""
from __future__ import unicode_literals
class TaskWorkspace(object):
"""Represents a workspace needed by a task
"""
def __init__(self, name, mode, volume_name=None):
"""Creates a task workspace
:param name: The name of the workspace
:type name: string
:param mode: The mode to use for the workspace, either 'ro' or 'rw'
:type mode: string
:param volume_name: The name to use for the workspace's volume
:type volume_name: string
"""
self.name = name
self.mode = mode
self.volume_name = volume_name
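# Example (hypothetical values): a read-only workspace backed by a named volume:
#
#     ws = TaskWorkspace('input-data', 'ro', volume_name='input-data-vol')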
| ngageoint/scale | scale/job/execution/configuration/workspace.py | Python | apache-2.0 | 653 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2018-03-23 18:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('d4s2_api', '0016_email_group_to_set'),
]
operations = [
migrations.AlterUniqueTogether(
name='emailtemplate',
unique_together=set([('template_set', 'template_type')]),
),
migrations.RemoveField(
model_name='historicalemailtemplate',
name='group',
),
migrations.AlterField(
model_name='emailtemplate',
name='template_set',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='d4s2_api.EmailTemplateSet'),
),
migrations.RemoveField(
model_name='emailtemplate',
name='group',
),
]
| Duke-GCB/DukeDSHandoverService | d4s2_api/migrations/0017_auto_20180323_1833.py | Python | mit | 944 | 0.001059 |
# Configuration file for ipython-notebook.
c = get_config()
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# NotebookApp will inherit config from: BaseIPythonApplication, Application
# The IPython profile to use.
# c.NotebookApp.profile = u'default'
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# The IP address the notebook server will listen on.
# c.NotebookApp.ip = '127.0.0.1'
# The base URL for the notebook server
# c.NotebookApp.base_project_url = '/'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.NotebookApp.verbose_crash = False
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.NotebookApp.copy_config_files = False
# The base URL for the kernel server
# c.NotebookApp.base_kernel_url = '/'
# The port the notebook server will listen on.
# c.NotebookApp.port = 8888
# Whether to overwrite existing config files when copying
# c.NotebookApp.overwrite = False
# Whether to prevent editing/execution of notebooks.
# c.NotebookApp.read_only = False
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
# c.NotebookApp.open_browser = True
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = u''
# The hostname for the websocket server.
# c.NotebookApp.websocket_host = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.NotebookApp.ipython_dir = u'/home/ivan/.config/ipython'
# Set the log level by value or name.
# c.NotebookApp.log_level = 20
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from IPython.lib import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = u''
# The Logging format template
# c.NotebookApp.log_format = '[%(name)s] %(message)s'
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = u''
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.webapp_settings = {}
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = u''
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: KernelApp, BaseIPythonApplication,
# Application, InteractiveShellApp
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.zmq.displayhook.ZMQDisplayHook'
# Set the IP or interface on which the kernel will listen.
# c.IPKernelApp.ip = '127.0.0.1'
#
# c.IPKernelApp.parent_appname = u''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# set the stdin (DEALER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.zmq.iostream.OutStream'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The IPython profile to use.
# c.IPKernelApp.profile = u'default'
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security-
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# If true, an 'import *' is done from numpy and pylab, when using pylab
# c.IPKernelApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = u'/home/ivan/.config/ipython'
# ONLY USED ON WINDOWS Interrupt this process when the parent is signalled.
# c.IPKernelApp.interrupt = 0
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# Enable GUI event loop integration ('qt', 'wx', 'gtk', 'glut', 'pyglet',
# 'osx').
# c.IPKernelApp.gui = None
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s] %(message)s'
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'Linux'
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.ZMQInteractiveShell.logstart = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Start logging to the given file in append mode.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Username for the Session. Default is your system username.
# c.Session.username = 'ivan'
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The UUID identifying this session.
# c.Session.session = u''
# execution key, for extra authentication.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The name of the unpacker for deserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# path to file containing execution key.
# c.Session.keyfile = ''
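#
# For example, to use the msgpack serializer described above (a sketch; it
# assumes the msgpack package is installed on both ends of the connection):
#
# c.Session.packer = 'msgpack.packb'
# c.Session.unpacker = 'msgpack.unpackb'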
#------------------------------------------------------------------------------
# InlineBackend configuration
#------------------------------------------------------------------------------
# An object to store configuration of the inline backend.
# The image format for figures with the inline backend.
# c.InlineBackend.figure_format = 'png'
# Close all figures at the end of each cell.
#
# When True, ensures that each cell starts with no active figures, but it also
# means that one must keep track of references in order to edit or redraw
# figures in subsequent cells. This mode is ideal for the notebook, where
# residual plots from other cells might be surprising.
#
# When False, one must call figure() to create new figures. This means that
# gcf() and getfigs() can reference figures created in other cells, and the
# active figure can continue to be edited with pylab/pyplot methods that
# reference the current active figure. This mode facilitates iterative editing
# of figures, and behaves most consistently with other matplotlib backends, but
# figure barriers between cells must be explicit.
# c.InlineBackend.close_figures = True
# Subset of matplotlib rcParams that should be different for the inline backend.
# c.InlineBackend.rc = {'font.size': 10, 'savefig.dpi': 72, 'figure.figsize': (6.0, 4.0), 'figure.subplot.bottom': 0.125}
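#
# For instance, to render larger inline figures at a higher DPI (the values
# here are illustrative, not recommendations):
#
# c.InlineBackend.rc = {'figure.figsize': (10.0, 6.0), 'savefig.dpi': 110}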
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
# MappingKernelManager will inherit config from: MultiKernelManager
# The max raw message size accepted from the browser over a WebSocket
# connection.
# c.MappingKernelManager.max_msg_size = 65536
# Kernel heartbeat interval in seconds.
# c.MappingKernelManager.time_to_dead = 3.0
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MappingKernelManager.kernel_manager_class = 'IPython.zmq.blockingkernelmanager.BlockingKernelManager'
# Delay (in seconds) before sending first heartbeat.
# c.MappingKernelManager.first_beat = 5.0
#------------------------------------------------------------------------------
# NotebookManager configuration
#------------------------------------------------------------------------------
# Automatically create a Python script when saving the notebook.
#
# For easier use of import, %run and %load across notebooks, a <notebook-
# name>.py script will be created next to any <notebook-name>.ipynb on each
# save. This can also be set with the short `--script` flag.
# c.NotebookManager.save_script = False
# The directory to use for notebooks.
# c.NotebookManager.notebook_dir = u'/home/ivan'
| nisavid/home | .config/ipython/profile_simple/ipython_notebook_config.py | Python | unlicense | 17,850 | 0.003361 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
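# Example (illustrative): partition(lambda x: x % 2, [1, 2, 3, 4]) returns
# ([1, 3], [2, 4]) -- the elements satisfying the predicate come first.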
class predictionCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: not a.keyword.value in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
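# As a sketch of the rewrite above: for a method registered in METHOD_TO_PARAMS
# as {'get_thing': ('name',)} (a hypothetical entry -- the dict ships empty
# here), the transformer turns
#     client.get_thing('x', retry=r)
# into
#     client.get_thing(request={'name': 'x'}, retry=r)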
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=predictionCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the prediction client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
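# Example invocation (paths are illustrative):
#   python fixup_prediction_v1beta1_keywords.py -d ./old_src -o ./fixed_src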
| sasha-gitg/python-aiplatform | schema/predict/prediction/scripts/fixup_prediction_v1beta1_keywords.py | Python | apache-2.0 | 5,910 | 0.001184 |
import PandaProd.Producer.opts
PandaProd.Producer.opts.options.config = 'Autumn18'
from PandaProd.Producer.prod import process
| cpausmit/Kraken | pandaf/014/mc.py | Python | mit | 129 | 0.007752 |
# Lint as: python2, python3
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for training."""
import math
import tensorflow as tf
from tensorflow.contrib import slim
# Custom import
from third_party.deeplab.core import preprocess_utils
def _div_maybe_zero(total_loss, num_present):
"""Normalizes the total loss with the number of present pixels."""
return tf.cast(num_present > 0, tf.float32) * tf.math.divide(
total_loss, tf.maximum(1e-5, num_present))
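# Example (illustrative): with total_loss=6.0 and num_present=0.0 the result is
# 0.0 rather than inf; with num_present=3.0 it is the ordinary 6.0 / 3.0 = 2.0.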
net_to_stride_to_endpoints_name = {
'xception_65': {
4: 'xception_65/entry_flow/block1',
8: 'xception_65/entry_flow/block2',
## All stride=16 below
13: 'xception_65/entry_flow/block3',
14: 'xception_65/middle_flow/block1',
15: 'xception_65/exit_flow/block1',
16: 'xception_65/exit_flow/block2',
},
'resnet_v1_50': {
8: 'resnet_v1_50/block1',
## All stride=16 below
14: 'resnet_v1_50/block2',
15: 'resnet_v1_50/block3',
16: 'resnet_v1_50/block4',
},
'resnet_v1_101': {
8: 'resnet_v1_101/block1',
## All stride=16 below
14: 'resnet_v1_101/block2',
15: 'resnet_v1_101/block3',
16: 'resnet_v1_101/block4',
},
}
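# Example lookup (illustrative):
#   net_to_stride_to_endpoints_name['xception_65'][8]
#   -> 'xception_65/entry_flow/block2', the end_points key of the stride-8
#   feature map for the Xception-65 backbone.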
def compute_cam_v2(
end_points,
logits,
cls_label,
num_class=21,
use_attention=True,
attention_dim=128,
strides=(15, 16),
is_training=True,
valid_mask=None,
net='xception_65',
):
"""Compute Grad-CAM.
Args:
end_points: Network end_points (dict).
logits: Cls logits with shape [N, #classes-1] (multi-label, no bg)
cls_label: Ground truth image-level label
num_class: Number of classes including background
use_attention: Using self-attention to refine or not. If not, then no
learnable parameters
attention_dim: Embedding space dimension for key and query used in the
self-attention module
strides: Use feature maps from which stride to compute pixel similarity for
Grad-CAM refinement
is_training: Indicate training or inference mode
valid_mask: To identify the valid region of the input. It is used to avoid
attending to padding regions
net: Specify which network is used
Returns:
A tuple (out_cam, out_att_cam): the Grad-CAMs with a background channel, and
the self-attention-refined CAM logits (None when use_attention is False).
"""
# Sanity check: Make sure strides are sorted
strides = sorted(list(strides))[::-1]
# Always use the last stride layer to compute Grad-CAM
conv_layer = end_points[net_to_stride_to_endpoints_name[net][strides[0]]]
cams = []
# Can we speed up this part?
for c in range(num_class-1):
grads = tf.gradients(logits[:, c], conv_layer)
weights = tf.reduce_mean(grads, axis=(1, 2))
weighted_grads = weights * conv_layer
curr_cams = tf.nn.relu(tf.reduce_sum(weighted_grads, axis=3))
cams.append(curr_cams)
cams = tf.stack(cams, axis=-1)
cls_label = tf.reshape(cls_label, [-1, 1, 1, num_class - 1])
cams = cls_label * cams
# Normalize to [0, 1]
cams = _div_maybe_zero(
cams, tf.reduce_max(cams, axis=(1, 2), keepdims=True))
out_cam = tf.stop_gradient(cams, name='stride_{}/cam'.format(strides[0]))
if not use_attention:
out_att_cam = None
else:
valid_mask = tf.compat.v1.image.resize_nearest_neighbor(
valid_mask, preprocess_utils.resolve_shape(out_cam, 4)[1:3])
out_att_cam = compute_self_att_v2(
end_points,
out_cam,
num_class,
attention_dim,
strides,
is_training,
linformer=False,
valid_mask=valid_mask,
net=net)
# Add bg score
bg = 1 - tf.reduce_max(out_cam, axis=3, keepdims=True)
out_cam = tf.concat([bg, out_cam], axis=-1)
return out_cam, out_att_cam
def compute_self_att_v2(
end_points,
logits,
num_class=21,
attention_dim=128,
strides=(15, 16),
is_training=True,
linformer=True,
valid_mask=None,
factor=8,
downsample_type='nearest',
net='xception_65'):
"""Compute self-attention for segmentation head.
Args:
end_points: Network end_points (dict).
logits: The input seed for refinement. Used as ``value'' in self-attention.
Can be either logits, probability, or score map.
num_class: Number of classes including background
attention_dim: Embedding space dimension for key and query used in the
self-attention module
strides: Use feature maps from which stride to compute pixel similarity
is_training: Indicate training or inference mode
linformer: Adopt the idea from https://arxiv.org/abs/2006.04768 to reduce
memory usage in self-attention computation. But instead of learning the
downsample function, we use deterministic image downsample functions
valid_mask: To identify the valid region of the input. It is used to avoid
attending to padding regions
factor: Downsample factor used in linformer mode
downsample_type: Use which downsample method to reduce the memory usage. Can
be either 'nearest' or 'bilinear'. Default: 'nearest'
net: Specify which network is used
Returns:
The self-attention-refined logits with shape [N, H, W, num_class].
"""
# Sanity check: Make sure strides are sorted
strides = sorted(list(strides))[::-1]
conv_layer_list = []
for stride in strides:
conv_layer = end_points[net_to_stride_to_endpoints_name[net][stride]]
conv_layer_list.append(conv_layer)
# Resize to seed resolution first
h, w = preprocess_utils.resolve_shape(logits, 4)[1:3]
conv_layer_list = [
tf.compat.v1.image.resize_bilinear(
conv, (h, w), align_corners=True)
for conv in conv_layer_list
]
conv_layer_merged = tf.concat(conv_layer_list, axis=-1)
conv_layer_merged = tf.stop_gradient(conv_layer_merged)
score = tf.stop_gradient(logits)
# This tells us what input it is (decoder logits or Grad-CAM)
value_dim = tf.shape(score)[-1]
# Only valid when we use Linformer style to reduce size for key and value
if downsample_type == 'bilinear':
resize_fn = tf.compat.v1.image.resize_bilinear
else:
resize_fn = tf.compat.v1.image.resize_nearest_neighbor
scope = 'hyper_column'
with tf.variable_scope(scope):
with slim.arg_scope([slim.conv2d],
activation_fn=None,
normalizer_fn=None,
biases_initializer=None,
reuse=tf.AUTO_REUSE):
k = slim.conv2d(
conv_layer_merged, attention_dim, [1, 1], scope='key')
q = slim.conv2d(
conv_layer_merged, attention_dim, [1, 1], scope='query')
q = tf.reshape(q, [-1, h * w, attention_dim])
if valid_mask is not None:
valid_mask_q = tf.reshape(valid_mask, [-1, h * w, 1])
# Adopt idea from Linformer (https://arxiv.org/abs/2006.04768) to reduce the
# memory usage. Instead of learning a downsample function, we use determinstic
# image downsample methods (nearest neighbor or bilinear) to reduce the size
# of key and value.
if linformer:
k = resize_fn(
k, ((h // factor + 1), (w // factor + 1)), align_corners=True)
k = tf.reshape(k,
[-1, (h // factor + 1) * (w // factor + 1), attention_dim])
if valid_mask is not None:
valid_mask_k = tf.compat.v1.image.resize_nearest_neighbor(
valid_mask, ((h // factor + 1), (w // factor + 1)))
valid_mask_k = tf.reshape(
tf.cast(valid_mask_k, tf.float32),
[-1, (h // factor + 1) * (w // factor + 1), 1])
else:
k = tf.reshape(k, [-1, h * w, attention_dim])
valid_mask_k = tf.reshape(valid_mask, [-1, h * w, 1])
matmul_qk = tf.matmul(q, k, transpose_b=True)
scaled_att_logits = matmul_qk / math.sqrt(attention_dim)
# Masking
if valid_mask is not None:
final_mask = tf.matmul(valid_mask_q, valid_mask_k, transpose_b=True)
scaled_att_logits += (1 - final_mask) * -1e9
att_weights = tf.nn.softmax(scaled_att_logits, axis=-1)
if linformer:
value = resize_fn(
score, ((h // factor + 1), (w // factor + 1)), align_corners=True)
value = tf.reshape(value,
[-1, (h // factor + 1) * (w // factor + 1), value_dim])
else:
value = tf.reshape(score, [-1, h * w, value_dim])
att_score = tf.matmul(att_weights, value)
att_score = tf.reshape(att_score, tf.shape(score))
## Add skip-connection and 1x1 conv to convert score back to logit
att_score += score
if value_dim != num_class:
# Set an initial score for the background class. Since the score range of a
# class is [0, 2] after skip-connection, we use 2 minus the max class
# probability to set the initial background score for each pixel.
bg = 2 - tf.reduce_max(att_score, axis=3, keepdims=True)
att_score = tf.concat([bg, att_score], axis=-1)
out_att_logits = slim.conv2d(
att_score,
num_class, [1, 1],
scope='pixel_normalization',
activation_fn=None,
normalizer_fn=slim.batch_norm,
normalizer_params={'is_training': is_training},
reuse=tf.AUTO_REUSE)
return out_att_logits
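# For intuition, the core of compute_self_att_v2 is plain scaled dot-product
# attention with an additive mask. A minimal NumPy sketch (illustrative only,
# not used by the code above):
#
#   import numpy as np
#
#   def scaled_dot_attention(q, k, v, mask):     # q: [n, d], k: [m, d], v: [m, c]
#       logits = q @ k.T / np.sqrt(q.shape[-1])  # [n, m]
#       logits += (1 - mask) * -1e9              # suppress invalid key positions
#       w = np.exp(logits - logits.max(-1, keepdims=True))
#       w /= w.sum(-1, keepdims=True)            # softmax over keys
#       return w @ v                             # [n, c]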
def compute_cam(
end_points,
logits,
cls_label,
num_class=21,
use_attention=True,
attention_dim=128,
strides=(15, 16),
is_training=True,
valid_mask=None,
net='xception_65'
):
"""Compute Grad-CAM.
Args:
end_points: Network end_points (dict).
logits: Cls logits with shape [N, #classes-1] (multi-label, no bg)
cls_label: Ground truth image-level label
num_class: Number of classes including background
use_attention: Using self-attention to refine or not. If not, then no
learnable parameters
attention_dim: Embedding space dimension for key and query used in the
self-attention module
strides: Use feature maps from which stride to compute pixel similarity for
Grad-CAM refinement
is_training: Indicate training or inference mode (for compatibility only,
not used in this function)
valid_mask: To identify the valid region of the input. It is used to avoid
attending to padding regions
net: Specify which network is used
Returns:
A tuple (out_cam, out_att_cam): the per-class Grad-CAMs (no background channel
here), and the self-attention-refined CAM logits (None when use_attention is
False).
"""
# Sanity check: Make sure strides are sorted
strides = sorted(list(strides))[::-1]
# Always use the last stride layer to compute Grad-CAM
conv_layer = end_points[net_to_stride_to_endpoints_name[net][strides[0]]]
cams = []
# Can we speed up this part?
for c in range(num_class-1):
grads = tf.gradients(logits[:, c], conv_layer)
weights = tf.reduce_mean(grads, axis=(1, 2))
weighted_grads = weights * conv_layer
curr_cams = tf.nn.relu(tf.reduce_sum(weighted_grads, axis=3))
cams.append(curr_cams)
cams = tf.stack(cams, axis=-1)
cls_label = tf.reshape(cls_label, [-1, 1, 1, num_class - 1])
cams = cls_label * cams
# Normalize to [0, 1]
cams = _div_maybe_zero(
cams, tf.reduce_max(cams, axis=(1, 2), keepdims=True))
out_cam = tf.stop_gradient(cams, name='stride_{}/cam'.format(strides[0]))
if not use_attention:
out_att_cam = None
else:
valid_mask = tf.compat.v1.image.resize_nearest_neighbor(
valid_mask, preprocess_utils.resolve_shape(out_cam, 4)[1:3])
conv_layer_list = []
for stride in strides:
conv_layer = end_points[net_to_stride_to_endpoints_name[net][stride]]
conv_layer_list.append(conv_layer)
# Resize to seed resolution first
h, w = preprocess_utils.resolve_shape(out_cam, 4)[1:3]
conv_layer_list = [
tf.compat.v1.image.resize_bilinear(
conv, (h, w), align_corners=True)
for conv in conv_layer_list
]
conv_layer_merged = tf.concat(conv_layer_list, axis=-1)
conv_layer_merged = tf.stop_gradient(conv_layer_merged)
score = tf.stop_gradient(out_cam)
scope = 'hyper_column'
with tf.variable_scope(scope):
with slim.arg_scope([slim.conv2d],
activation_fn=None,
normalizer_fn=None,
biases_initializer=None,
reuse=tf.AUTO_REUSE):
k = slim.conv2d(
conv_layer_merged, attention_dim, [1, 1], scope='key')
q = slim.conv2d(
conv_layer_merged, attention_dim, [1, 1], scope='query')
q = tf.reshape(q, [-1, h * w, attention_dim])
if valid_mask is not None:
valid_mask_q = tf.reshape(valid_mask, [-1, h * w, 1])
k = tf.reshape(k, [-1, h * w, attention_dim])
valid_mask_k = tf.reshape(valid_mask, [-1, h * w, 1])
matmul_qk = tf.matmul(q, k, transpose_b=True)
scaled_att_logits = matmul_qk / math.sqrt(attention_dim)
# Masking
if valid_mask is not None:
final_mask = tf.matmul(valid_mask_q, valid_mask_k, transpose_b=True)
scaled_att_logits += (1 - final_mask) * -1e9
att_weights = tf.nn.softmax(scaled_att_logits, axis=-1)
value = tf.reshape(score, [-1, h * w, num_class-1])
att_score = tf.matmul(att_weights, value)
att_score = tf.reshape(att_score, tf.shape(score))
## Add 1x1 conv to convert score back to logit
bg = 1 - tf.reduce_max(att_score, axis=3, keepdims=True)
att_score = tf.concat([bg, att_score], axis=-1)
out_att_cam = slim.conv2d(
att_score,
num_class, [1, 1],
scope='pixel_normalization',
activation_fn=None,
normalizer_fn=None,
reuse=tf.AUTO_REUSE)
return out_cam, out_att_cam
| googleinterns/wss | core/train_utils_core.py | Python | apache-2.0 | 13,925 | 0.007038 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import err
import utils as U
# needed for testing
import random as rand
import time
# FIX heap duplicates used by different ksp!
import heapq
from heapq import heappush, heappop
from itertools import count
from collections import defaultdict
from blessings import Terminal
term = Terminal()
def graph_factory(graph_type):
if graph_type == 'nx':
global nx
import networkx as nx
return GraphNX()
elif graph_type == 'gt':
global gt
import graph_tool.all as gt
return GraphGT()
else:
raise err.Fatal('unknown graph library requested: {}'.format(graph_type))
class GraphGT(object):
@staticmethod
def compare(G1, G2):
raise NotImplementedError
@staticmethod
def compare_nodes(n1, n2):
raise NotImplementedError
@staticmethod
def compare_edges():
raise NotImplementedError
def __init__(self, G=None, Type=None):
# unused maxVert
self.maxVertices = 0
# create a Di-graph if not created already
if G is None:
self.G = gt.Graph()
self.Type = 'test_no'
else:
self.G = G
self.Type = Type
self.node_vertex_dict = {}
self.edge_attr_dict = self.G.new_edge_property('object')
def check_n_add_n_get(self, n):
v = self.node_vertex_dict.get(n)
# if node does not exist in the graph
if v is None:
# allocate a new vertex
v = self.G.add_vertex()
# add it to the dictionary for future
self.node_vertex_dict[n] = v
return v
def nodes(self):
# why is this needed?
raise NotImplementedError
def add_edge(
self,
n1,
n2,
attr_val=None,
):
v1 = self.check_n_add_n_get(n1)
v2 = self.check_n_add_n_get(n2)
e = self.G.add_edge(v1, v2)
self.edge_attr_dict[e] = attr_val
def add_edge_wt(
self,
v1,
v2,
weight,
):
raise NotImplementedError
self.G.add_edge(v1, v2, w=weight)
def add_node(self, n):
raise NotImplementedError # Actually, its just not tested...
self.check_n_add_n_get(n)
return
############################## UNFINISHED FROM HERE
def get_path_attr_list(self, path):
raise NotImplementedError
attr_list = []
for (v1, v2) in U.pairwise(path):
attr_list.append(self.G[v1][v2]['attr'])
return attr_list
# Actually draws the graph!! Need to rewrite get_path_generator() from
# scratch for gt. Also, destroys the passed in graph (oops) :D
# Hence, use this function only for debugging!!
# # TODO: Fix it, of course?
def get_path_generator(
self,
source_list,
sink_list,
max_depth=None,
):
print 'WARNING: This is actually a plotting function!!!'
num_source_nodes = len(source_list)
num_sink_nodes = len(sink_list)
# super_source_vertex = g.add_vertex()
# super_sink_vertex = g.add_vertex()
super_source_vertex = 'super_source_vertex'
super_sink_vertex = 'super_sink_vertex'
edge_list = zip([super_source_vertex] * num_source_nodes, source_list)
for e in edge_list:
self.add_edge(*e)
edge_list = zip(sink_list, [super_sink_vertex] * num_sink_nodes)
for e in edge_list:
self.add_edge(*e)
g = self.G
pos = gt.arf_layout(g, max_iter=0)
gt.graph_draw(g, pos=pos, vertex_text=self.G.vertex_index)
time.sleep(1000)
print 'exiting'
exit()
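# NOTE: everything below this exit() is unreachable leftover from the NetworkX
# version of this method (H, nx and the dummy_super_* nodes are undefined here).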
gt.graph_draw(self.G, vertex_text=self.G.vertex_index)
time.sleep(1000)
# print edge_list
# Add edges:
# \forall sink \in sink_list. sink -> super sink node
edge_list = zip(sink_list, [dummy_super_sink_node] * num_sink_nodes)
H.add_edges_from(edge_list)
# print edge_list
# print '='*80
# TODO: WHY?
# Switching this on with def path_gen(), results in empty path and no further results!!
# Explanation required!
# for path in nx.all_simple_paths(H, dummy_super_source_node, dummy_super_sink_node):
# print path
# print '='*80
# TODO: how to do this with lambda?
# Also, is this indeed correct?
def path_gen():
for i in nx.all_simple_paths(H, dummy_super_source_node,
dummy_super_sink_node):
# Remove the first (super source)
# and the last element (super sink)
yield i[1:-1]
# return lambda: [yield i[1:-1] for i in nx.all_simple_paths(H,
# dummy_super_source_node, dummy_super_sink_node)]
return path_gen()
def neighbors(self, node):
raise NotImplementedError
return self.G.neighbors(node)
def draw(self, pos_dict=None):
raise NotImplementedError
nx.draw_networkx(self.G, pos=pos_dict, labels=pos_dict,
with_labels=True)
def __contains__(self, key):
raise NotImplementedError
return key in self.G
def __repr__(self):
raise NotImplementedError
s = ''
s += '''==== Nodes ==== {} '''.format(self.G.nodes())
s += '''==== Edges ==== {} '''.format(self.G.edges())
return s
class GraphNX(object):
@staticmethod
def compare(G1, G2):
G1 = G1.G
G2 = G2.G
G1_nodes_set = set(G1.nodes())
G2_nodes_set = set(G2.nodes())
G1_edges_set = set(G1.edges())
G2_edges_set = set(G2.edges())
G1_in_G2_nodes = G1_nodes_set.issubset(G2_nodes_set)
G2_in_G1_nodes = G2_nodes_set.issubset(G1_nodes_set)
G1_in_G2_edges = G1_edges_set.issubset(G2_edges_set)
G2_in_G1_edges = G2_edges_set.issubset(G1_edges_set)
G1_in_G2 = G1_in_G2_nodes and G1_in_G2_edges
G2_in_G1 = G2_in_G1_nodes and G2_in_G1_edges
print 'G1_in_G2_nodes: {}, G1_in_G2_edges: {}'.format(G1_in_G2_nodes,
G1_in_G2_edges)
print 'G2_in_G1_nodes: {}, G2_in_G1_edges: {}'.format(G2_in_G1_nodes,
G2_in_G1_edges)
print '''G1_nodes_set - G2_nodes_set
{}
'''.format(G1_nodes_set
- G2_nodes_set)
G1_and_G2_are_equal = G1_in_G2 and G2_in_G1
print 'G1_in_G2: {}, G2_in_G1: {}\n'.format(G1_in_G2, G2_in_G1)
return G1_and_G2_are_equal
@staticmethod
def compare_nodes(n1, n2):
raise NotImplementedError
@staticmethod
def compare_edges():
raise NotImplementedError
def __init__(self, G=None, Type=None):
# unused maxVert
self.maxVertices = 0
self.ctr = 0
# create a Di-graph if not created already
if G is None:
self.G = nx.DiGraph()
self.Type = 'test_no'
else:
self.G = G
self.Type = Type
def nodes(self):
return self.G.nodes()
def add_edge(self, v1, v2, ci=None, pi=None, weight=1):
self.G.add_edge(v1, v2, weight=weight, ci=ci, pi=pi)
self.ctr += 1
if self.ctr % 1000 == 0:
with term.location(x=100, y=term.height-10):
print(term.green('nodes={}, edges={}'
.format(
self.G.number_of_nodes(),
self.G.number_of_edges())))
def add_edges_from(self, edge_list, ci=None, pi=None, weight=1):
self.G.add_edges_from(edge_list, weight=weight, ci=ci, pi=pi)
def add_node(self, v):
self.G.add_node(v)
def get_path_attr_list(self, path, attrs):
attr_map = defaultdict(list)
for (v1, v2) in U.pairwise(path):
for attr in attrs:
attr_map[attr].append(self.G[v1][v2][attr])
return attr_map
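# Example (illustrative): for a path [a, b, c] and attrs ['ci', 'pi'] this
# returns {'ci': [ci(a,b), ci(b,c)], 'pi': [pi(a,b), pi(b,c)]} as a
# defaultdict(list), reading each attribute off the traversed edges.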
# ###################### KSP 1 ##################################################
# https://gist.github.com/guilhermemm/d4623c574d4bccb6bf0c
# __author__ = 'Guilherme Maia <guilhermemm@gmail.com>'
# __all__ = ['k_shortest_paths']
def k_shortest_paths(
self,
G,
source,
target,
k=1,
weight='weight',
):
"""Returns the k-shortest paths from source to target in a weighted graph G.
Parameters
----------
G : NetworkX graph
source : node
Starting node
target : node
Ending node
k : integer, optional (default=1)
The number of shortest paths to find
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight
Returns
-------
lengths, paths : lists
Returns a tuple with two lists.
The first list stores the length of each k-shortest path.
The second list stores each k-shortest path.
Raises
------
NetworkXNoPath
If no path exists between source and target.
Examples
--------
>>> G=nx.complete_graph(5)
>>> print(k_shortest_paths(G, 0, 4, 4))
([1, 2, 2, 2], [[0, 4], [0, 1, 4], [0, 2, 4], [0, 3, 4]])
Notes
------
Edge weight attributes must be numerical and non-negative.
Distances are calculated as sums of weighted edges traversed.
"""
if source == target:
return ([0], [[source]])
(length, path) = nx.single_source_dijkstra(G, source, target,
weight=weight)
if target not in length:
raise nx.NetworkXNoPath('node %s not reachable from %s' % (source,
target))
lengths = [length[target]]
paths = [path[target]]
c = count()
B = []
# Is deep copy really required?
# Fails due to embedded Ctype objects which can not be pickled
# # G_original = G.copy()
# Swapping with shallow copy...will it work?
G_original = G
if nx.is_directed(G_original):
G = nx.DiGraph(G_original)
else:
G = nx.Graph(G_original)
######################################
#TODO: wrap this up somehow
print ''
print term.move_up + term.move_up
######################################
print 'getting K:{} paths...'.format(k),
for i in range(1, k):
with term.location():
print i
for j in range(len(paths[-1]) - 1):
spur_node = paths[-1][j]
root_path = (paths[-1])[:j + 1]
edges_removed = []
for c_path in paths:
if len(c_path) > j and root_path == c_path[:j + 1]:
u = c_path[j]
v = c_path[j + 1]
if G.has_edge(u, v):
edge_attr = G.edge[u][v]
G.remove_edge(u, v)
edges_removed.append((u, v, edge_attr))
for n in range(len(root_path) - 1):
node = root_path[n]
# out-edges
for (u, v, edge_attr) in G.edges_iter(node, data=True):
# print 'lala1: {} -> {}'.format(u,v)
G.remove_edge(u, v)
edges_removed.append((u, v, edge_attr))
if G.is_directed():
# in-edges
for (u, v, edge_attr) in G.in_edges_iter(node,
data=True):
# print 'lala2: {} -> {}'.format(u,v)
G.remove_edge(u, v)
edges_removed.append((u, v, edge_attr))
(spur_path_length, spur_path) = nx.single_source_dijkstra(G,
spur_node, target, weight=weight)
if target in spur_path and spur_path[target]:
total_path = root_path[:-1] + spur_path[target]
total_path_length = self.get_path_length(G_original,
root_path, weight) + spur_path_length[target]
heappush(B, (total_path_length, next(c), total_path))
for e in edges_removed:
(u, v, edge_attr) = e
G.add_edge(u, v, edge_attr)
if B:
(l, _, p) = heappop(B)
lengths.append(l)
paths.append(p)
else:
break
return (lengths, paths)
def get_path_length(
self,
G,
path,
weight='weight',
):
length = 0
if len(path) > 1:
for i in range(len(path) - 1):
u = path[i]
v = path[i + 1]
length += G.edge[u][v].get(weight, 1)
return length
# ################################### KSP2 ###########################
# ######################## GREG BERNSTEIN #######################
# https://groups.google.com/forum/#!topic/networkx-discuss/0niVmZZXxGA
def ksp_gregBern(
self,
G,
source,
target,
k=1,
weight='weight',
):
YKSP = YenKShortestPaths(G, weight)
path_list = []
wp0 = YKSP.findFirstShortestPath(source, target)
p0 = wp0.nodeList
path_list.append(p0)
print 'getting K:{} paths...'.format(k)
for i in range(k):
wpi = YKSP.getNextShortestPath()
if wpi is None:
break
pi = wpi.nodeList
path_list.append(pi)
print i, ' '
# print path_list
return path_list
# ################################### KSP END ###########################
def get_path_generator(
self,
source_list,
sink_list,
max_depth,
max_paths
):
# Create a shallow copy of the graph
H = nx.DiGraph(self.G)
# All modifications are now done on this shallow copy H
# Define super source and sink nodes
# A Super source node has a directed edge to each source node in the
# source_list
# Similarily, a Super sink node has a directed edge from each sink node
# in the sink_list
dummy_super_source_node = 'source'
dummy_super_sink_node = 'sink'
num_source_nodes = len(source_list)
num_sink_nodes = len(sink_list)
# increment max_depth by 2 to accommodate edges from 'super source' and
# to 'super sink'
max_depth += 2
# Add edges:
# \forall source \in source_list. super source node -> source
edge_list = zip([dummy_super_source_node] * num_source_nodes,
source_list)
H.add_edges_from(edge_list, weight=1)
# print edge_list
# Add edges:
# \forall sink \in sink_list. sink -> super sink node
edge_list = zip(sink_list, [dummy_super_sink_node] * num_sink_nodes)
H.add_edges_from(edge_list, weight=1)
# print edge_list
# print '='*80
# TODO: WHY?
# Switching this on with def path_gen(), results in empty path and no further results!!
# Explanation required!
# for path in nx.all_simple_paths(H, dummy_super_source_node, dummy_super_sink_node):
# print path
# print '='*80
# TODO: how to do this with lambda?
# Also, is this indeed correct?
def path_gen():
# all_shortest_paths
# all_simple_paths
#
#K = 100
K = max_paths
(len_list, path_list) = self.k_shortest_paths(H,
dummy_super_source_node,
dummy_super_sink_node,
k=K)
# path_list = self.ksp_gregBern(H, dummy_super_source_node,
# dummy_super_sink_node,
# k=K)
# using simple paths
# for i in nx.all_simple_paths(H, dummy_super_source_node,
# dummy_super_sink_node,
# cutoff=max_depth):
# using all sohrtest paths
# for i in nx.all_shortest_paths(H, dummy_super_source_node, dummy_super_sink_node):
# Remove the first (super source)
# and the last element (super sink)
for p in path_list:
l = len(p)
#print l, max_depth
if l <= max_depth:
yield p[1:-1]
# return lambda: [yield i[1:-1] for i in nx.all_simple_paths(H,
# dummy_super_source_node, dummy_super_sink_node)]
return path_gen()
def neighbors(self, node):
return self.G.neighbors(node)
def draw(self, pos_dict=None):
nx.draw_networkx(self.G, pos=pos_dict, labels=pos_dict,
with_labels=True)
def __contains__(self, key):
return key in self.G
def __repr__(self):
s = ''
s += '''==== Nodes ==== {} '''.format(self.G.nodes())
s += '''==== Edges ==== {} '''.format(self.G.edges())
return s
def random_graph(
num_nodes,
num_edges,
dim,
seed=None,
directed=False,
):
raise NotImplementedError('modify to work with new abstraction object')
G = nx.gnm_random_graph(num_nodes, num_edges, seed, directed)
abstract_G = nx.DiGraph()
# create a mapping from G to abstract_G
graph_map = {}
# inane pos_dict for plotting only
pos_dict = {}
# create the nodes of abstract_G
N_MAX = 5
for n in G:
n_ = (n, rand.randint(0, N_MAX))
pos_dict[n_] = n_
graph_map[n] = n_
abstract_G.add_node(n_)
# add edges to abstract_G
edge_list = G.edges()
edge_list_ = map(lambda e: (graph_map[e[0]], graph_map[e[1]]), edge_list)
abstract_G.add_edges_from(edge_list_)
# nx.draw_networkx(abstract_G, pos=pos_dict, with_labels=True)
# plt.show()
return Graph(abstract_G, 'test_random')
# generates custom graphs to to test the correctness of the abstraction
def custom_graph(ID, num_dim):
if num_dim != 2:
raise NotImplementedError
if ID == 1:
return csg1()
else:
raise NotImplementedError
def csg1():
G = nx.DiGraph()
MAX_N = 10
init_state = (0, 0)
final_state = (MAX_N + 1, MAX_N + 1)
edge_list = [((n, n), (n + 1, n + 1)) for n in range(1, MAX_N)]
G.add_edge(init_state, edge_list[0][0])
G.add_edges_from(edge_list)
G.add_edge(edge_list[-1][-1], final_state)
return GraphNX(G, 'test_csg1')
# print G.nodes()
# print G.edges()
# plt.figure()
# Graph(G).draw_2d()
# plt.show()
# import networkx as nx
# import heapq
# from ModifiedDijkstra import ModifiedDijkstra
class YenKShortestPaths(object):
"""
This is a straightforward implementation of Yen's K shortest loopless
path algorithm. No attempt has been made to perform any optimization that
have been suggested in the literature. Our main goal was to have a
functioning K-shortest path algorithm. This implementation should work
for both undirected and directed graphs. However it has only been tested
so far against undirected graphs.
"""
def __init__(
self,
graph,
weight='weight',
cap='capacity',
):
"""
Constructor
"""
self.wt = weight
self.cap = cap
self.g = graph
self.pathHeap = [] # Use the heapq module functions heappush(pathHeap, item) and heappop(pathHeap)
self.pathList = [] # Contains WeightedPath objects
self.deletedEdges = set()
self.deletedNodes = set()
self.kPath = None
# Make a copy of the graph tempG that we can manipulate
if isinstance(graph, nx.Graph):
# self.tempG = graph.copy()
if nx.is_directed(graph):
self.tempG = nx.DiGraph(graph)
else:
self.tempG = nx.Graph(graph)
else:
self.tempG = None
def findFirstShortestPath(self, source, dest):
"""
This function is called to initialize the k-shortest path algorithm.
It also finds the shortest path in the network.
You can use this function to restart the algorithm at anytime with
possibly different source and destination values.
@param source The beginning node of the path.
@param dest The termination node of the path.
@return The shortest path, or None if the path doesn't exist.
"""
# Used to initialize or reinitialize the algorithm
# Computes the shortest path via Dijsktra
self.kPath = None
self.pathHeap = []
self.pathList = []
self.source = source
self.dest = dest
# Compute the shortest path
# nodeList = nx.dijkstra_path(self.g, source, dest, self.wt)
alg = ModifiedDijkstra(self.g, self.wt)
nodeList = alg.getPath(source, dest, as_nodes=True)
if len(nodeList) == 0:
return None
deletedLinks = set()
self.kPath = WeightedPath(nodeList, deletedLinks, self.g, wt=self.wt,
cap=self.cap)
self.kPath.dNode = source
self.pathList.append(self.kPath)
return self.kPath
def getNextShortestPath(self):
"""
Use this function to compute successive shortest paths. Each one will have
a length (cost) greater than or equal to that of the previously generated path.
Returns None if no more paths can be found.
You must first call findFirstShortestPath(source, dest) to initialize the
algorithm and set the source and destination node.
@return the next shortest path (or the next longer path depending on
how you want to think about things).
"""
if self.kPath == None:
raise UserWarning('Must call findFirstShortestPath before this method or no path exists'
)
# Iterate over all the nodes in kPath from dNode to the node before the destination
# and add candidate paths to the path heap.
kNodes = self.kPath.nodeList
index = kNodes.index(self.kPath.dNode)
curNode = kNodes[index]
while curNode != self.dest:
self._removeEdgesNodes(curNode)
candidate = self._computeCandidatePath(curNode)
self._restoreGraph()
if candidate != None:
heapq.heappush(self.pathHeap, candidate)
index = index + 1
curNode = kNodes[index]
if len(self.pathHeap) == 0:
return None
p = heapq.heappop(self.pathHeap) # after iterations contains next shortest path
self.pathList.append(p)
self.kPath = p # updates the kth path
return p
def _removeEdgesNodes(self, curNode):
"""
Remove all nodes from source to the node before the current node in kPath.
Delete the edge between curNode and the next node in kPath
Delete any edges previously deleted in kPath starting at curNode
add all deleted edges to the deleted edge list.
"""
# Figure out all edges to be removed first then take them out of the temp graph
# then remove all the nodes from the temp graph.
# At the start the temp graph is equal to the initial graph.
self.deletedEdges = set()
self.deletedNodes = set()
kNodes = self.kPath.nodeList
index = 0
tempNode = kNodes[index]
index += 1
while tempNode != curNode:
edges = self.tempG.edges(tempNode)
if len(edges) != 0:
for edge in edges:
self.deletedEdges.add(edge)
self.tempG.remove_edge(edge[0], edge[1])
#
self.deletedNodes.add(tempNode)
self.tempG.remove_node(tempNode)
tempNode = kNodes[index]
index += 1
# Also need to remove those old deleted edges that start on curNode
oldDelEdges = self.kPath.deletedEdges
if self.g.is_directed():
outEdges = self.g.out_edges(curNode)
else:
outEdges = self.g.edges(curNode)
# outEdges = self.g.edges(curNode)
for e in outEdges:
if e in oldDelEdges:
self.deletedEdges.add(e)
self.tempG.remove_edge(e[0], e[1])
# Now delete the edge from the curNode to the next in the path
tempNode = kNodes[index]
e = (curNode, tempNode)
self.deletedEdges.add(e)
self.tempG.remove_edge(curNode, tempNode)
def _computeCandidatePath(self, curNode):
"""
Compute the shortest path on the modified graph and then
combines with the portion of kPath from the source up through
the deviation node
"""
# DijkstraShortestPath alg = new DijkstraShortestPath(tempG, wt);
# List<E> ePath = alg.getPath(curNode, dest);
# nodeList = nx.dijkstra_path(self.tempG, curNode, self.dest, self.wt)
alg = ModifiedDijkstra(self.tempG, self.wt)
nodeList = alg.getPath(curNode, self.dest, as_nodes=True)
# Trying this out...
if nodeList == None:
return None
# Get first part of the path from kPath
nodePath = []
if curNode in self.kPath.nodeList:
index = self.kPath.nodeList.index(curNode)
nodePath = self.kPath.nodeList[0:index]
nodePath.extend(nodeList)
wp = WeightedPath(nodePath, self.deletedEdges, self.g, wt=self.wt,
cap=self.cap)
wp.dNode = curNode
return wp
def _restoreGraph(self):
"""
Using the internal deleted node and deleted edge containers
restores the temp graph to match the graph g.
"""
# self.tempG = self.g.copy()
if nx.is_directed(self.g):
self.tempG = nx.DiGraph(self.g)
else:
self.tempG = nx.Graph(self.g)
self.deletedEdges = []
self.deletedNodes = []
class WeightedPath(object):
"""Used internally by the Yen k-shortest path algorithm.
Also return to user as a result.
"""
def __init__(
self,
pathNodeList,
deletedEdges,
g,
wt='weight',
cap='capacity',
):
"""
Constructor
"""
self.nodeList = pathNodeList
self.deletedEdges = set(deletedEdges)
self.g = g
self.wt = wt
self.dNode = None # The deflection node
self.cost = 0.0
self.capacity = float('inf')
# print "WtPath pathNodeList: {}".format(pathNodeList)
for i in range(len(pathNodeList) - 1):
self.cost = self.cost + g[pathNodeList[i]][pathNodeList[i + 1]][wt]
################################# ADI: Do we need this? ##############################
#### if not cap == None:
#### self.capacity = min(self.capacity, g[pathNodeList[i]][pathNodeList[i+1]][cap])
#### else:
#### self.capacity = None
######################################################################################
def __cmp__(self, other):
if other == None:
return -1
return cmp(self.cost, other.cost)
def __str__(self):
return 'nodeList: {}, cost: {}, capacity: {}'.format(self.nodeList,
self.cost, self.capacity)
class ModifiedDijkstra(object):
"""
The Modified Dijkstra algorithm from "Survivable Networks" by Ramesh Bhandari.
This algorithm works with graphs that can have directed or undirected links.
In addition, this algorithm can correctly function in some cases of negative
arc lengths that arise in the disjoint path computations.
Works with graphs, *g*, in NetworkX format. Specifically Graph and
DiGraph classes.
"""
def __init__(self, g, wt='weight'):
"""
Constructor. Parameter *g* is a NetworkX Graph or DiGraph instance.
The *wt* keyword argument sets the link attribute to be used in computing
the path length.
"""
self.dist = {} # A map from nodes to their labels (float)
self.predecessor = {} # A map from a node to a node
self.g = g
self.wt = wt
edges = g.edges()
# Set the value for infinite distance in the graph
self.inf = 0.0
for e in edges:
self.inf += abs(g[e[0]][e[1]][wt])
self.inf += 1.0
def getPath(
self,
source,
dest,
as_nodes=False,
):
"""
Computes the shortest path in the graph between the given *source* and *dest*
node (strings). Returns the path as a list of links (default) or as a list of
nodes by setting the *as_nodes* keyword argument to *True*.
"""
self.dist = {} # A map from nodes to their labels (float)
self.predecessor = {} # A map from a node to a node
# Initialize the distance labels to "infinity"
vertices = self.g.nodes()
for vertex in vertices:
self.dist[vertex] = self.inf
self.predecessor[vertex] = source
# Further set up the distance from the source to itself and
# to all one hops away.
self.dist[source] = 0.0
if self.g.is_directed():
outEdges = self.g.out_edges([source])
else:
outEdges = self.g.edges([source])
for edge in outEdges:
self.dist[edge[1]] = self.g[edge[0]][edge[1]][self.wt]
s = set(vertices)
s.remove(source)
currentMin = self._findMinNode(s)
if currentMin == None:
return None
s.remove(currentMin)
while currentMin != dest and len(s) != 0 and currentMin != None:
if self.g.is_directed():
outEdges = self.g.out_edges([currentMin])
else:
outEdges = self.g.edges([currentMin])
for edge in outEdges:
opposite = edge[1]
if self.dist[currentMin] + self.g[edge[0]][edge[1]][self.wt] \
< self.dist[opposite]:
self.dist[opposite] = self.dist[currentMin] \
+ self.g[edge[0]][edge[1]][self.wt]
self.predecessor[opposite] = currentMin
s.add(opposite)
currentMin = self._findMinNode(s)
# print "Current min node {}, s = {}".format(currentMin, s)
if currentMin == None:
return None
s.remove(currentMin)
# Compute the path as a list of edges
currentNode = dest
predNode = self.predecessor.get(dest)
node_list = [dest]
done = False
path = []
while not done:
path.append((predNode, currentNode))
currentNode = predNode
predNode = self.predecessor[predNode]
node_list.append(currentNode)
done = currentNode == source
node_list.reverse()
if as_nodes:
return node_list
else:
return path
def _findMinNode(self, s):
"""
Finds the vertex with the minimum distance label in the set "s".
returns the minimum vertex
"""
minNode = None
minVal = self.inf
for vertex in s:
if self.dist[vertex] < minVal:
minVal = self.dist[vertex]
minNode = vertex
return minNode
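# A minimal usage sketch (graph and weights are illustrative):
#
#   g = nx.Graph()
#   g.add_edge('a', 'b', weight=1.0)
#   g.add_edge('b', 'c', weight=2.0)
#   ModifiedDijkstra(g, 'weight').getPath('a', 'c', as_nodes=True)
#   # -> ['a', 'b', 'c']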
| zutshi/S3CAMX | src/graph.py | Python | bsd-2-clause | 32,399 | 0.00213 |
# Generated by Django 2.2 on 2020-06-20 15:17
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="A",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("col", models.CharField(max_length=10, null=True)),
],
),
]
| 3YOURMIND/django-migration-linter | tests/test_project/app_make_not_null_with_django_default/migrations/0001_initial.py | Python | apache-2.0 | 661 | 0 |
# coding: utf-8
# python 3.5
import Orange
from orangecontrib.associate.fpgrowth import *
import pandas as pd
import numpy as np
import sys
import os
from collections import defaultdict
from itertools import chain
from itertools import combinations
from itertools import compress
from itertools import product
from sklearn.metrics import accuracy_score
from multiprocessing import Pool
from multiprocessing import freeze_support
# Global Setting
DIR_UCI = '/mnt/data/uci'
# ------------------------------------------------------
# Rule Class
# ------------------------------------------------------
class Rule :
def __init__(self):
self.value = list()
self.consequent = list()
self.support = float()
self.conf = float()
def setValue(self, values) :
self.value = values
def setConsequent(self, consequents) :
self.consequent = consequents
def setSupport(self, supports) :
self.support = supports
def setConf(self, confidence) :
self.conf = confidence
def getValue(self) :
return(self.value)
def getConsequent(self) :
return(self.consequent)
def getSupport(self) :
return(self.support)
def getSupportD(self) :
return(self.support * len(self.value))
def getConf(self) :
return(self.conf)
def output(self) :
print("value:" + str(self.value))
print("consequent:" + str(self.consequent))
print("support:" + str(self.support))
print("conf:" + str(self.conf))
# ======================================================
# Can the class be identified from any P attribute values appearing in the rules?
# ======================================================
def getPerIdentifiedClass(rules, p) :
attribute_values = [rule.getValue() for rule in rules]
attribute_values = list(chain.from_iterable(attribute_values))
attribute_values = list(set(attribute_values))
combi_attribute_values = combinations(attribute_values,p)
count = 0
bunbo = 0
for combi in combi_attribute_values :
bunbo += 1
rules_target = []
for rule in rules :
matching_count = len(list(set(combi) & set(rule.getValue())))
if matching_count == len(list(combi)) :
rules_target.append(rule)
        # If rules_target is empty, exclude this combination from the evaluation
if len(rules_target) == 0:
bunbo -= 1
#
else :
consequents = [rule.getConsequent() for rule in rules_target]
if len(list(set(consequents))) == 1:
count += 1
if bunbo == 0:
ans = 0
else:
ans = (float(count) / float(bunbo))
return(ans)
# ======================================================
# Whether the rule explains (covers) the target object
# ======================================================
def isExplainRule(obj, rule) :
matching_count = len(list(set(obj) & set(rule.getValue())))
if matching_count == len(rule.getValue()) : return(True)
else : return(False)
# ======================================================
# Return the matching factor of the rule with respect to the target object
# ======================================================
def getMatchingFactor(obj, rule) :
matching_factor = len(list(set(obj) & set(rule.getValue())))
matching_factor = matching_factor / len(rule.getValue())
return(matching_factor)
# ======================================================
# Return the rule's support P
# ======================================================
def getSupportP(obj, rule) :
matching_factor = getMatchingFactor(obj, rule)
return(rule.getSupportD() * matching_factor)
# ======================================================
# Predict the class of the target object from the rules
# ======================================================
def estimateClass(obj, rules) :
list_judge = [isExplainRule(obj, r) for r in rules]
    # If at least one rule matches
if any(list_judge) :
consequents = [rules[i].getConsequent() for i, judge in enumerate(list_judge) if judge]
        # If the matched rules all infer exactly one class
if len(set(consequents)) == 1 :
return(consequents[0])
else :
rules_match = list(compress(rules,list_judge))
supportD = [r.getSupportD() for r in rules_match]
return(rules_match[supportD.index(max(supportD))].getConsequent())
    # If no rule fully matches obj, estimate from partially matching rules
else :
supportP = [getSupportP(obj, rule) for rule in rules]
return(rules[supportP.index(max(supportP))].getConsequent())
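# ======================================================
# Minimal illustrative sketch (never called): how estimateClass resolves a
# conflict between two fully matching rules -- the rule with the larger
# supportD = support * len(value) wins.
# ======================================================
def _example_estimate_class():
    r1 = Rule()
    r1.setValue(['a1', 'b1'])
    r1.setConsequent('D1')
    r1.setSupport(0.4)    # supportD = 0.4 * 2 = 0.8
    r2 = Rule()
    r2.setValue(['a1'])
    r2.setConsequent('D2')
    r2.setSupport(0.3)    # supportD = 0.3 * 1 = 0.3
    # Both rules fully match ['a1', 'b1', 'c1']; r1 has the larger supportD,
    # so 'D1' is returned.
    return estimateClass(['a1', 'b1', 'c1'], [r1, r2])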
# ======================================================
# Accuracy evaluation with LERS
# ======================================================
def predictByLERS(FILENAME, iter1, iter2, rules) :
# read test data
filepath = DIR_UCI+'/'+FILENAME+'/alpha/'+FILENAME+'-test'+str(iter1)+'-'+str(iter2)+'.txt'
decision_table_test = pd.read_csv(filepath, delimiter=' ', header=None)
decision_table_test = decision_table_test.dropna()
decision_class = decision_table_test[decision_table_test.columns[-1]].values.tolist()
decision_table_test = decision_table_test.drop(decision_table_test.columns[len(decision_table_test.columns)-1], axis=1)
decision_table_test = decision_table_test.values.tolist()
    # Predict with LERS
predictions = []
for obj in decision_table_test:
estimated_class = estimateClass(obj, rules)
predictions.append(estimated_class)
    # Compute the accuracy
accuracy = accuracy_score(decision_class, predictions)
print(accuracy)
return(accuracy)
# =====================================
# Main function
# =====================================
def getRulesByFPGrowth(FILENAME, classes, iter1, iter2, minsup, minconf) :
# read data
filepath = DIR_UCI+'/'+FILENAME+'/alpha/'+FILENAME+'-train'+str(iter1)+'-'+str(iter2)+'.txt'
data_pd = pd.read_csv(filepath, delimiter=' ')
pd.DataFrame.to_csv(data_pd, DIR_UCI+'/'+FILENAME+'/alpha/'+FILENAME+'-train'+str(iter1)+'-'+str(iter2)+'.basket', index=False, sep=',')
filepath = DIR_UCI+'/'+FILENAME+'/alpha/'+FILENAME+'-train'+str(iter1)+'-'+str(iter2)+'.basket'
data_table = Orange.data.Table(filepath)
#print len(data_table)
# set parameter
num_lines = sum(1 for line in open(filepath))
minsup = float(minsup) / float(num_lines)
#
#itemsets = frequent_itemsets(data_table, minsup)
#print(itemsets)
#print(list(itemsets))
X, mapping = OneHot.encode(data_table, include_class=True)
#print(X)
itemsets = dict(frequent_itemsets(X, minsup))
#print(itemsets)
#print(len(itemsets))
rules = [(P, Q, supp, conf) for P, Q, supp, conf in association_rules(itemsets, minconf) if len(Q) == 1]
#print(rules)
names = {item: '{}={}'.format(var.name, val) for item, var, val in OneHot.decode(mapping, data_table, mapping)}
for ante, cons, supp, conf in rules:
print(', '.join(names[i] for i in ante), '-->', names[next(iter(cons))], '(supp: {}, conf: {})'.format(supp, conf))
# induce rules
#rules_orange = Orange.associate.AssociationRulesSparseInducer(data_table, support=minsup, confidence=minconf)
#rules_orange = Orange.associate.AssociationRulesSparseInducer(data_table, support = minsup, max_item_sets = 2000)
# convert Rule Class
#rules = []
#for rule_orange in rules_orange :
# consequent = rule_orange.right.get_metas(str).keys()
# if len(consequent) == 1 and consequent[0] in classes and rule_orange.confidence >= minconf :
# rule = Rule()
# rule.setValue(rule_orange.left.get_metas(str).keys())
# rule.setConsequent(consequent[0])
# rule.setSupport(rule_orange.support)
# rule.setConf(rule_orange.confidence)
# rules.append(rule)
# END
#return(rules)
# ======================================================
# Apriori_LERS
# ======================================================
def Apriori_LERS(FILENAME, classes, iter1, iter2, min_sup, min_conf):
    # Rule extraction
    # NOTE: getRulesByApriori is not defined in this file; it is assumed to be
    # provided by the Apriori variant of this script.
    rules = getRulesByApriori(FILENAME, classes, iter1, iter2, min_sup, min_conf)
# predict by LERS
accuracy = predictByLERS(FILENAME, iter1, iter2, rules)
# save
savepath = DIR_UCI+'/'+FILENAME+'/Apriori_LERS.csv'
with open(savepath, "a") as f :
f.writelines('Apriori_LERS,{min_sup},{FILENAME},{iter1},{iter2},{acc}'.format(FILENAME=FILENAME,iter1=iter1,iter2=iter2,acc=accuracy,min_sup=min_sup)+"\n")
# END
return(accuracy)
def wrapper_Apriori_LERS(multi_args):
multi_args[0](multi_args[1],multi_args[2],multi_args[3],multi_args[4],multi_args[5],multi_args[6])
# ========================================
# Compute the mean and standard deviation of a list
# ========================================
def getEvalMeanVar(result):
    ans = '{mean}±{std}'.format(mean=('%.3f' % round(np.mean(result),3)), std=('%.3f' % round(np.std(result),3)))
return(ans)
# ========================================
# Run evaluations in parallel (multiprocessing)
# ========================================
def multi_main(proc, FILENAME, FUN, **kargs):
pool = Pool(proc)
results = []
multiargs = []
classes = kargs['classes']
min_sup_range = kargs['min_sup'] if 'min_sup' in kargs else range(2,11)
min_conf = kargs['min_conf']
    # For Apriori_LERS
if FUN == Apriori_LERS :
WRAPPER_FUN = wrapper_Apriori_LERS
for iter1, iter2, min_sup in product(range(1,11), range(1,11), min_sup_range):
multiargs.append((FUN, FILENAME, classes, iter1, iter2, min_sup, min_conf))
#print(multiargs)
results = pool.map(WRAPPER_FUN, multiargs)
else :
print("I dont' know the function.")
return(results)
# ========================================
# main
# ========================================
if __name__ == "__main__":
FILENAME = 'hayes-roth'
FILENAME = 'german_credit_categorical'
# number of class
classes = ['D1', 'D2', 'D3']
classes = ['D1', 'D2',]
iter1 = 10
iter2 = 3
    # Thresholds for support and confidence
#min_sup_range = range(2,11,1)
#min_sup_range = range(2,20,2)
min_sup = 100
min_conf = 1.0
# rule induction
getRulesByFPGrowth(FILENAME, classes, iter1, iter2, min_sup, min_conf)
#print len(rules)
#for r in rules:
# print(r.output())
# predict by LERS
#print(predictByLERS(FILENAME, iter1, iter2, rules))
    # Run in parallel and evaluate on the full data set
#proc=32
#freeze_support()
#FUN = Apriori_LERS
#results = multi_main(proc, FILENAME, FUN, classes = classes, min_sup = min_sup_range, min_conf = min_conf)
| gingi99/research_dr | python/FPgrowth/orange_fpgrowth.py | Python | mit | 10,802 | 0.015115 |
"""
The ``LineEdit`` and ``MultiLineEdit`` widgets provide a way for the user
to input text.
.. UIExample:: 100
from flexx import app, event, ui
class Example(ui.Widget):
def init(self):
with ui.VBox():
self.line = ui.LineEdit(placeholder_text='type here')
self.l1 = ui.Label(html='<i>when user changes text</i>')
self.l2 = ui.Label(html='<i>when unfocusing or hitting enter </i>')
self.l3 = ui.Label(html='<i>when submitting (hitting enter)</i>')
ui.Widget(flex=1)
@event.reaction('line.user_text')
def when_user_changes_text(self, *events):
self.l1.set_text('user_text: ' + self.line.text)
@event.reaction('line.user_done')
def when_user_is_done_changing_text(self, *events):
self.l2.set_text('user_done: ' + self.line.text)
@event.reaction('line.submit')
def when_user_submits_text(self, *events):
self.l3.set_text('submit: ' + self.line.text)
"""
from ... import event
from . import Widget
class LineEdit(Widget):
""" An input widget to edit a line of text.
The ``node`` of this widget is a text
`<input> <https://developer.mozilla.org/docs/Web/HTML/Element/input>`_.
"""
DEFAULT_MIN_SIZE = 100, 28
CSS = """
.flx-LineEdit {
color: #333;
padding: 0.2em 0.4em;
border-radius: 3px;
border: 1px solid #aaa;
margin: 2px;
}
.flx-LineEdit:focus {
outline: none;
box-shadow: 0px 0px 3px 1px rgba(0, 100, 200, 0.7);
}
"""
## Properties
text = event.StringProp(settable=True, doc="""
The current text of the line edit. Settable. If this is an empty
string, the placeholder_text is displayed instead.
""")
password_mode = event.BoolProp(False, settable=True, doc="""
        Whether the entered text should be hidden.
""")
placeholder_text = event.StringProp(settable=True, doc="""
The placeholder text (shown when the text is an empty string).
""")
autocomp = event.TupleProp(settable=True, doc="""
A tuple/list of strings for autocompletion. Might not work in all browsers.
""")
disabled = event.BoolProp(False, settable=True, doc="""
Whether the line edit is disabled.
""")
## Methods, actions, emitters
def _create_dom(self):
global window
# Create node element
node = window.document.createElement('input')
node.setAttribute('type', 'input')
node.setAttribute('list', self.id)
self._autocomp = window.document.createElement('datalist')
self._autocomp.id = self.id
node.appendChild(self._autocomp)
f1 = lambda: self.user_text(self.node.value)
self._addEventListener(node, 'input', f1, False)
self._addEventListener(node, 'blur', self.user_done, False)
#if IE10:
# self._addEventListener(self.node, 'change', f1, False)
return node
@event.emitter
def user_text(self, text):
""" Event emitted when the user edits the text. Has ``old_value``
and ``new_value`` attributes.
"""
d = {'old_value': self.text, 'new_value': text}
self.set_text(text)
return d
@event.emitter
def user_done(self):
""" Event emitted when the user is done editing the text, either by
moving the focus elsewhere, or by hitting enter.
Has ``old_value`` and ``new_value`` attributes (which are the same).
"""
d = {'old_value': self.text, 'new_value': self.text}
return d
@event.emitter
def submit(self):
""" Event emitted when the user strikes the enter or return key
(but not when losing focus). Has ``old_value`` and ``new_value``
attributes (which are the same).
"""
self.user_done()
d = {'old_value': self.text, 'new_value': self.text}
return d
@event.emitter
def key_down(self, e):
        # Prevent propagating the key
ev = super().key_down(e)
pkeys = 'Escape', # keys to propagate
if (ev.modifiers and ev.modifiers != ('Shift', )) or ev.key in pkeys:
pass
else:
e.stopPropagation()
if ev.key in ('Enter', 'Return'):
self.submit()
# Nice to blur on mobile, since it hides keyboard, but less nice on desktop
# self.node.blur()
elif ev.key == 'Escape':
self.node.blur()
return ev
## Reactions
@event.reaction
def __text_changed(self):
self.node.value = self.text
@event.reaction
def __password_mode_changed(self):
self.node.type = ['text', 'password'][int(bool(self.password_mode))]
@event.reaction
def __placeholder_text_changed(self):
self.node.placeholder = self.placeholder_text
# note: this works in the browser but not in e.g. firefox-app
@event.reaction
def __autocomp_changed(self):
global window
autocomp = self.autocomp
# Clear
for op in self._autocomp:
self._autocomp.removeChild(op)
# Add new options
for option in autocomp:
op = window.document.createElement('option')
op.value = option
self._autocomp.appendChild(op)
@event.reaction
def __disabled_changed(self):
if self.disabled:
self.node.setAttribute("disabled", "disabled")
else:
self.node.removeAttribute("disabled")
class MultiLineEdit(Widget):
""" An input widget to edit multiple lines of text.
The ``node`` of this widget is a
`<textarea> <https://developer.mozilla.org/docs/Web/HTML/Element/textarea>`_.
"""
DEFAULT_MIN_SIZE = 100, 50
CSS = """
.flx-MultiLineEdit {
resize: none;
overflow-y: scroll;
color: #333;
padding: 0.2em 0.4em;
border-radius: 3px;
border: 1px solid #aaa;
margin: 2px;
}
.flx-MultiLineEdit:focus {
outline: none;
box-shadow: 0px 0px 3px 1px rgba(0, 100, 200, 0.7);
}
"""
text = event.StringProp(settable=True, doc="""
        The current text of the multi-line edit. Settable.
""")
    def _create_dom(self):
        global window
        node = window.document.createElement('textarea')
f1 = lambda: self.user_text(self.node.value)
self._addEventListener(node, 'input', f1, False)
self._addEventListener(node, 'blur', self.user_done, False)
return node
@event.reaction
def __text_changed(self):
self.node.value = self.text
@event.emitter
def user_text(self, text):
""" Event emitted when the user edits the text. Has ``old_value``
and ``new_value`` attributes.
"""
d = {'old_value': self.text, 'new_value': text}
self.set_text(text)
return d
@event.emitter
def user_done(self):
""" Event emitted when the user is done editing the text by
moving the focus elsewhere. Has ``old_value`` and ``new_value``
attributes (which are the same).
"""
d = {'old_value': self.text, 'new_value': self.text}
return d
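# A minimal usage sketch for MultiLineEdit, analogous to the LineEdit example
# in the module docstring (class name and layout are illustrative only):
#
#     class NotesExample(ui.Widget):
#         def init(self):
#             with ui.VBox():
#                 self.edit = ui.MultiLineEdit(flex=1, text='dear diary ...')
#         @event.reaction('edit.user_done')
#         def when_done(self, *events):
#             print(self.edit.text)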
| zoofIO/flexx | flexx/ui/widgets/_lineedit.py | Python | bsd-2-clause | 7,452 | 0.001476 |
from _pydevd_bundle.pydevd_comm import CMD_SET_BREAK, CMD_ADD_EXCEPTION_BREAK
import inspect
from _pydevd_bundle.pydevd_constants import STATE_SUSPEND, get_thread_id, dict_iter_items, DJANGO_SUSPEND, IS_PY2
from pydevd_file_utils import get_abs_path_real_path_and_base_from_file, normcase
from _pydevd_bundle.pydevd_breakpoints import LineBreakpoint, get_exception_name
from _pydevd_bundle import pydevd_vars
import traceback
from _pydev_bundle import pydev_log
from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame, FCode, just_raised, ignore_exception_trace
IS_DJANGO18 = False
IS_DJANGO19 = False
IS_DJANGO19_OR_HIGHER = False
try:
import django
version = django.VERSION
IS_DJANGO18 = version[0] == 1 and version[1] == 8
IS_DJANGO19 = version[0] == 1 and version[1] == 9
IS_DJANGO19_OR_HIGHER = ((version[0] == 1 and version[1] >= 9) or version[0] > 1)
except:
pass
class DjangoLineBreakpoint(LineBreakpoint):
def __init__(self, file, line, condition, func_name, expression, hit_condition=None, is_logpoint=False):
self.file = file
LineBreakpoint.__init__(self, line, condition, func_name, expression, hit_condition=hit_condition, is_logpoint=is_logpoint)
def is_triggered(self, template_frame_file, template_frame_line):
return self.file == template_frame_file and self.line == template_frame_line
def __str__(self):
return "DjangoLineBreakpoint: %s-%d" %(self.file, self.line)
def add_line_breakpoint(plugin, pydb, type, file, line, condition, expression, func_name, hit_condition=None, is_logpoint=False):
if type == 'django-line':
breakpoint = DjangoLineBreakpoint(file, line, condition, func_name, expression, hit_condition=hit_condition, is_logpoint=is_logpoint)
if not hasattr(pydb, 'django_breakpoints'):
_init_plugin_breaks(pydb)
return breakpoint, pydb.django_breakpoints
return None
def add_exception_breakpoint(plugin, pydb, type, exception):
if type == 'django':
if not hasattr(pydb, 'django_exception_break'):
_init_plugin_breaks(pydb)
pydb.django_exception_break[exception] = True
pydb.set_tracing_for_untraced_contexts_if_not_frame_eval()
return True
return False
def _init_plugin_breaks(pydb):
pydb.django_exception_break = {}
pydb.django_breakpoints = {}
def remove_exception_breakpoint(plugin, pydb, type, exception):
if type == 'django':
try:
del pydb.django_exception_break[exception]
return True
except:
pass
return False
def get_breakpoints(plugin, pydb, type):
if type == 'django-line':
return pydb.django_breakpoints
return None
def _inherits(cls, *names):
if cls.__name__ in names:
return True
inherits_node = False
for base in inspect.getmro(cls):
if base.__name__ in names:
inherits_node = True
break
return inherits_node
def _is_django_render_call(frame):
try:
name = frame.f_code.co_name
if name != 'render':
return False
if 'self' not in frame.f_locals:
return False
cls = frame.f_locals['self'].__class__
inherits_node = _inherits(cls, 'Node')
if not inherits_node:
return False
clsname = cls.__name__
if IS_DJANGO19:
            # in Django 1.9 we need to record that an included template is being rendered
if clsname == 'IncludeNode':
if 'context' in frame.f_locals:
context = frame.f_locals['context']
context._has_included_template = True
return clsname != 'TextNode' and clsname != 'NodeList'
except:
traceback.print_exc()
return False
def _is_django_context_get_call(frame):
try:
if 'self' not in frame.f_locals:
return False
cls = frame.f_locals['self'].__class__
return _inherits(cls, 'BaseContext')
except:
traceback.print_exc()
return False
def _is_django_resolve_call(frame):
try:
name = frame.f_code.co_name
if name != '_resolve_lookup':
return False
if 'self' not in frame.f_locals:
return False
cls = frame.f_locals['self'].__class__
clsname = cls.__name__
return clsname == 'Variable'
except:
traceback.print_exc()
return False
def _is_django_suspended(thread):
return thread.additional_info.suspend_type == DJANGO_SUSPEND
def suspend_django(main_debugger, thread, frame, cmd=CMD_SET_BREAK):
frame = DjangoTemplateFrame(frame)
if frame.f_lineno is None:
return None
pydevd_vars.add_additional_frame_by_id(get_thread_id(thread), {id(frame): frame})
main_debugger.set_suspend(thread, cmd)
thread.additional_info.suspend_type = DJANGO_SUSPEND
return frame
def _find_django_render_frame(frame):
while frame is not None and not _is_django_render_call(frame):
frame = frame.f_back
return frame
#=======================================================================================================================
# Django Frame
#=======================================================================================================================
def _read_file(filename):
# type: (str) -> str
if IS_PY2:
f = open(filename, 'r')
else:
f = open(filename, 'r', encoding='utf-8', errors='replace')
s = f.read()
f.close()
return s
def _offset_to_line_number(text, offset):
curLine = 1
curOffset = 0
while curOffset < offset:
if curOffset == len(text):
return -1
c = text[curOffset]
if c == '\n':
curLine += 1
elif c == '\r':
curLine += 1
            if curOffset + 1 < len(text) and text[curOffset + 1] == '\n':
curOffset += 1
curOffset += 1
return curLine
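# e.g. _offset_to_line_number("ab\ncd", 3) == 2: offset 3 is the first
# character after the newline, i.e. the start of line 2.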
def _get_source_django_18_or_lower(frame):
    # This method is usable only for Django <= 1.8
try:
node = frame.f_locals['self']
if hasattr(node, 'source'):
return node.source
else:
if IS_DJANGO18:
# The debug setting was changed since Django 1.8
pydev_log.error_once("WARNING: Template path is not available. Set the 'debug' option in the OPTIONS of a DjangoTemplates "
"backend.")
else:
# The debug setting for Django < 1.8
pydev_log.error_once("WARNING: Template path is not available. Please set TEMPLATE_DEBUG=True in your settings.py to make "
"django template breakpoints working")
return None
except:
pydev_log.debug(traceback.format_exc())
return None
def _get_template_file_name(frame):
try:
if IS_DJANGO19:
            # The Node source was removed in Django 1.9
if 'context' in frame.f_locals:
context = frame.f_locals['context']
if hasattr(context, '_has_included_template'):
# if there was included template we need to inspect the previous frames and find its name
back = frame.f_back
while back is not None and frame.f_code.co_name in ('render', '_render'):
locals = back.f_locals
if 'self' in locals:
self = locals['self']
if self.__class__.__name__ == 'Template' and hasattr(self, 'origin') and \
hasattr(self.origin, 'name'):
return normcase(self.origin.name)
back = back.f_back
else:
if hasattr(context, 'template') and hasattr(context.template, 'origin') and \
hasattr(context.template.origin, 'name'):
return normcase(context.template.origin.name)
return None
elif IS_DJANGO19_OR_HIGHER:
            # For Django 1.10 and later there is a much simpler way to get the template name
if 'self' in frame.f_locals:
self = frame.f_locals['self']
if hasattr(self, 'origin') and hasattr(self.origin, 'name'):
return normcase(self.origin.name)
return None
source = _get_source_django_18_or_lower(frame)
if source is None:
pydev_log.debug("Source is None\n")
return None
fname = source[0].name
if fname == '<unknown source>':
pydev_log.debug("Source name is %s\n" % fname)
return None
else:
abs_path_real_path_and_base = get_abs_path_real_path_and_base_from_file(fname)
return abs_path_real_path_and_base[1]
except:
pydev_log.debug(traceback.format_exc())
return None
def _get_template_line(frame):
if IS_DJANGO19_OR_HIGHER:
        # The Node source was removed in Django 1.9
self = frame.f_locals['self']
if hasattr(self, 'token') and hasattr(self.token, 'lineno'):
return self.token.lineno
else:
return None
source = _get_source_django_18_or_lower(frame)
file_name = _get_template_file_name(frame)
try:
return _offset_to_line_number(_read_file(file_name), source[1][0])
except:
return None
class DjangoTemplateFrame:
def __init__(self, frame):
file_name = _get_template_file_name(frame)
self.back_context = frame.f_locals['context']
self.f_code = FCode('Django Template', file_name)
self.f_lineno = _get_template_line(frame)
self.f_back = frame
self.f_globals = {}
self.f_locals = self.collect_context(self.back_context)
self.f_trace = None
def collect_context(self, context):
res = {}
try:
for d in context.dicts:
for k, v in d.items():
res[k] = v
except AttributeError:
pass
return res
def _change_variable(self, name, value):
for d in self.back_context.dicts:
for k, v in d.items():
if k == name:
d[k] = value
def change_variable(plugin, frame, attr, expression):
if isinstance(frame, DjangoTemplateFrame):
result = eval(expression, frame.f_globals, frame.f_locals)
frame._change_variable(attr, result)
return result
return False
def _is_django_exception_break_context(frame):
try:
name = frame.f_code.co_name
except:
name = None
return name in ['_resolve_lookup', 'find_template']
#=======================================================================================================================
# Django Step Commands
#=======================================================================================================================
def can_not_skip(plugin, main_debugger, pydb_frame, frame):
return main_debugger.django_breakpoints and _is_django_render_call(frame)
def has_exception_breaks(plugin):
if len(plugin.main_debugger.django_exception_break) > 0:
return True
return False
def has_line_breaks(plugin):
for file, breakpoints in dict_iter_items(plugin.main_debugger.django_breakpoints):
if len(breakpoints) > 0:
return True
return False
def cmd_step_into(plugin, main_debugger, frame, event, args, stop_info, stop):
info = args[2]
thread = args[3]
plugin_stop = False
if _is_django_suspended(thread):
stop_info['django_stop'] = event == 'call' and _is_django_render_call(frame)
plugin_stop = stop_info['django_stop']
stop = stop and _is_django_resolve_call(frame.f_back) and not _is_django_context_get_call(frame)
if stop:
            info.pydev_django_resolve_frame = True  # remember that we've gone into Python code from the Django rendering frame
return stop, plugin_stop
def cmd_step_over(plugin, main_debugger, frame, event, args, stop_info, stop):
info = args[2]
thread = args[3]
plugin_stop = False
if _is_django_suspended(thread):
stop_info['django_stop'] = event == 'call' and _is_django_render_call(frame)
plugin_stop = stop_info['django_stop']
stop = False
return stop, plugin_stop
else:
if event == 'return' and info.pydev_django_resolve_frame and _is_django_resolve_call(frame.f_back):
            # we return to Django suspend mode and should not stop before the Django rendering frame
info.pydev_step_stop = frame.f_back
info.pydev_django_resolve_frame = False
thread.additional_info.suspend_type = DJANGO_SUSPEND
stop = info.pydev_step_stop is frame and event in ('line', 'return')
return stop, plugin_stop
def stop(plugin, main_debugger, frame, event, args, stop_info, arg, step_cmd):
main_debugger = args[0]
thread = args[3]
if 'django_stop' in stop_info and stop_info['django_stop']:
frame = suspend_django(main_debugger, thread, frame, step_cmd)
if frame:
main_debugger.do_wait_suspend(thread, frame, event, arg)
return True
return False
def get_breakpoint(plugin, main_debugger, pydb_frame, frame, event, args):
main_debugger = args[0]
filename = args[1]
info = args[2]
flag = False
django_breakpoint = None
new_frame = None
type = 'django'
if event == 'call' and info.pydev_state != STATE_SUSPEND and \
main_debugger.django_breakpoints and _is_django_render_call(frame):
filename = _get_template_file_name(frame)
pydev_log.debug("Django is rendering a template: %s\n" % filename)
django_breakpoints_for_file = main_debugger.django_breakpoints.get(filename)
if django_breakpoints_for_file:
pydev_log.debug("Breakpoints for that file: %s\n" % django_breakpoints_for_file)
template_line = _get_template_line(frame)
pydev_log.debug("Tracing template line: %s\n" % str(template_line))
if template_line in django_breakpoints_for_file:
django_breakpoint = django_breakpoints_for_file[template_line]
flag = True
new_frame = DjangoTemplateFrame(frame)
return flag, django_breakpoint, new_frame, type
def suspend(plugin, main_debugger, thread, frame, bp_type):
if bp_type == 'django':
return suspend_django(main_debugger, thread, frame)
return None
def exception_break(plugin, main_debugger, pydb_frame, frame, args, arg):
main_debugger = args[0]
thread = args[3]
exception, value, trace = arg
if main_debugger.django_exception_break and \
get_exception_name(exception) in ['VariableDoesNotExist', 'TemplateDoesNotExist', 'TemplateSyntaxError'] and \
just_raised(trace) and not ignore_exception_trace(trace) and _is_django_exception_break_context(frame):
render_frame = _find_django_render_frame(frame)
if render_frame:
suspend_frame = suspend_django(main_debugger, thread, render_frame, CMD_ADD_EXCEPTION_BREAK)
if suspend_frame:
add_exception_to_frame(suspend_frame, (exception, value, trace))
flag = True
thread.additional_info.pydev_message = 'VariableDoesNotExist'
suspend_frame.f_back = frame
frame = suspend_frame
return (flag, frame)
return None
| SlicerRt/SlicerDebuggingTools | PyDevRemoteDebug/ptvsd-4.1.3/ptvsd/_vendored/pydevd/pydevd_plugins/django_debug.py | Python | bsd-3-clause | 16,161 | 0.003589 |
from transformers import RobertaTokenizerFast
import scattertext as st
tokenizer_fast = RobertaTokenizerFast.from_pretrained(
"roberta-base", add_prefix_space=True)
tokenizer = st.RobertaTokenizerWrapper(tokenizer_fast)
df = st.SampleCorpora.ConventionData2012.get_data().assign(
parse = lambda df: df.text.apply(tokenizer.tokenize)
)
corpus = st.OffsetCorpusFactory(
df,
category_col='party',
parsed_col='parse',
feat_and_offset_getter=st.TokenFeatAndOffsetGetter()
).build()
# Remove words that occur fewer than 5 times
corpus = corpus.remove_infrequent_words(5, non_text=True)
plot_df = corpus.get_metadata_freq_df('').assign(
Y=lambda df: df.democrat,
X=lambda df: df.republican,
Ypos=lambda df: st.Scalers.dense_rank(df.Y),
Xpos=lambda df: st.Scalers.dense_rank(df.X),
SuppressDisplay=False,
ColorScore=lambda df: st.Scalers.scale_center_zero(df.Ypos - df.Xpos),
)
html = st.dataframe_scattertext(
corpus,
plot_df=plot_df,
category='democrat',
category_name='Democratic',
not_category_name='Republican',
width_in_pixels=1000,
suppress_text_column='Display',
metadata=corpus.get_df()['speaker'],
use_non_text_features=True,
ignore_categories=False,
use_offsets=True,
unified_context=False,
color_score_column='ColorScore',
left_list_column='ColorScore',
y_label='Democarats',
x_label='Republicans',
header_names={'upper': 'Top Democratic', 'lower': 'Top Republican', 'right': 'Most Frequent'},
subword_encoding='RoBERTa'
)
fn = 'roberta_sentence_piece.html'
with open(fn, 'w') as of:
of.write(html)
print("Open ./" + fn + ' in Chrome.')
| JasonKessler/scattertext | demo_tokenizer_roberta.py | Python | apache-2.0 | 1,670 | 0.003593 |
"""The tests for the MQTT component."""
from collections import namedtuple
import unittest
from unittest import mock
import socket
import homeassistant.components.mqtt as mqtt
from homeassistant.const import (
EVENT_CALL_SERVICE, ATTR_DOMAIN, ATTR_SERVICE, EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP)
from tests.common import (
get_test_home_assistant, mock_mqtt_component, fire_mqtt_message)
class TestMQTT(unittest.TestCase):
"""Test the MQTT component."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant(1)
mock_mqtt_component(self.hass)
self.calls = []
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def record_calls(self, *args):
"""Helper for recording calls."""
self.calls.append(args)
def test_client_starts_on_home_assistant_start(self):
""""Test if client start on HA launch."""
self.hass.bus.fire(EVENT_HOMEASSISTANT_START)
self.hass.pool.block_till_done()
self.assertTrue(mqtt.MQTT_CLIENT.start.called)
def test_client_stops_on_home_assistant_start(self):
"""Test if client stops on HA launch."""
self.hass.bus.fire(EVENT_HOMEASSISTANT_START)
self.hass.pool.block_till_done()
self.hass.bus.fire(EVENT_HOMEASSISTANT_STOP)
self.hass.pool.block_till_done()
self.assertTrue(mqtt.MQTT_CLIENT.stop.called)
def test_setup_fails_if_no_connect_broker(self):
"""Test for setup failure if connection to broker is missing."""
with mock.patch('homeassistant.components.mqtt.MQTT',
side_effect=socket.error()):
self.assertFalse(mqtt.setup(self.hass, {mqtt.DOMAIN: {
mqtt.CONF_BROKER: 'test-broker',
}}))
def test_publish_calls_service(self):
"""Test the publishing of call to services."""
self.hass.bus.listen_once(EVENT_CALL_SERVICE, self.record_calls)
mqtt.publish(self.hass, 'test-topic', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual(
'test-topic',
self.calls[0][0].data['service_data'][mqtt.ATTR_TOPIC])
self.assertEqual(
'test-payload',
self.calls[0][0].data['service_data'][mqtt.ATTR_PAYLOAD])
def test_service_call_without_topic_does_not_publish(self):
"""Test the service call if topic is missing."""
self.hass.bus.fire(EVENT_CALL_SERVICE, {
ATTR_DOMAIN: mqtt.DOMAIN,
ATTR_SERVICE: mqtt.SERVICE_PUBLISH
})
self.hass.pool.block_till_done()
self.assertTrue(not mqtt.MQTT_CLIENT.publish.called)
def test_service_call_with_template_payload_renders_template(self):
"""Test the service call with rendered template.
If 'payload_template' is provided and 'payload' is not, then render it.
"""
mqtt.publish_template(self.hass, "test/topic", "{{ 1+1 }}")
self.hass.pool.block_till_done()
self.assertTrue(mqtt.MQTT_CLIENT.publish.called)
self.assertEqual(mqtt.MQTT_CLIENT.publish.call_args[0][1], "2")
def test_service_call_with_payload_doesnt_render_template(self):
"""Test the service call with unrendered template.
If a 'payload' is provided then use that instead of 'payload_template'.
"""
payload = "not a template"
payload_template = "a template"
# Call the service directly because the helper functions don't allow
# you to provide payload AND payload_template.
self.hass.services.call(mqtt.DOMAIN, mqtt.SERVICE_PUBLISH, {
mqtt.ATTR_TOPIC: "test/topic",
mqtt.ATTR_PAYLOAD: payload,
mqtt.ATTR_PAYLOAD_TEMPLATE: payload_template
}, blocking=True)
self.assertTrue(mqtt.MQTT_CLIENT.publish.called)
self.assertEqual(mqtt.MQTT_CLIENT.publish.call_args[0][1], payload)
def test_service_call_without_payload_or_payload_template(self):
"""Test the service call without payload or payload template.
If neither 'payload' or 'payload_template' is provided then fail.
"""
# Call the service directly because the helper functions require you to
# provide a payload.
self.hass.services.call(mqtt.DOMAIN, mqtt.SERVICE_PUBLISH, {
mqtt.ATTR_TOPIC: "test/topic"
}, blocking=True)
self.assertFalse(mqtt.MQTT_CLIENT.publish.called)
def test_subscribe_topic(self):
"""Test the subscription of a topic."""
mqtt.subscribe(self.hass, 'test-topic', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual('test-topic', self.calls[0][0])
self.assertEqual('test-payload', self.calls[0][1])
def test_subscribe_topic_not_match(self):
"""Test if subscribed topic is not a match."""
mqtt.subscribe(self.hass, 'test-topic', self.record_calls)
fire_mqtt_message(self.hass, 'another-test-topic', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
def test_subscribe_topic_level_wildcard(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, 'test-topic/+/on', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic/bier/on', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual('test-topic/bier/on', self.calls[0][0])
self.assertEqual('test-payload', self.calls[0][1])
def test_subscribe_topic_level_wildcard_no_subtree_match(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, 'test-topic/+/on', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic/bier', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
def test_subscribe_topic_subtree_wildcard_subtree_topic(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, 'test-topic/#', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic/bier/on', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual('test-topic/bier/on', self.calls[0][0])
self.assertEqual('test-payload', self.calls[0][1])
def test_subscribe_topic_subtree_wildcard_root_topic(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, 'test-topic/#', self.record_calls)
fire_mqtt_message(self.hass, 'test-topic', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(1, len(self.calls))
self.assertEqual('test-topic', self.calls[0][0])
self.assertEqual('test-payload', self.calls[0][1])
def test_subscribe_topic_subtree_wildcard_no_match(self):
"""Test the subscription of wildcard topics."""
mqtt.subscribe(self.hass, 'test-topic/#', self.record_calls)
fire_mqtt_message(self.hass, 'another-test-topic', 'test-payload')
self.hass.pool.block_till_done()
self.assertEqual(0, len(self.calls))
class TestMQTTCallbacks(unittest.TestCase):
"""Test the MQTT callbacks."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant(1)
# mock_mqtt_component(self.hass)
with mock.patch('paho.mqtt.client.Client'):
mqtt.setup(self.hass, {
mqtt.DOMAIN: {
mqtt.CONF_BROKER: 'mock-broker',
}
})
self.hass.config.components.append(mqtt.DOMAIN)
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_receiving_mqtt_message_fires_hass_event(self):
"""Test if receiving triggers an event."""
calls = []
def record(event):
"""Helper to record calls."""
calls.append(event)
self.hass.bus.listen_once(mqtt.EVENT_MQTT_MESSAGE_RECEIVED, record)
MQTTMessage = namedtuple('MQTTMessage', ['topic', 'qos', 'payload'])
message = MQTTMessage('test_topic', 1, 'Hello World!'.encode('utf-8'))
mqtt.MQTT_CLIENT._mqtt_on_message(None, {'hass': self.hass}, message)
self.hass.pool.block_till_done()
self.assertEqual(1, len(calls))
last_event = calls[0]
self.assertEqual('Hello World!', last_event.data['payload'])
self.assertEqual(message.topic, last_event.data['topic'])
self.assertEqual(message.qos, last_event.data['qos'])
def test_mqtt_failed_connection_results_in_disconnect(self):
"""Test if connection failure leads to disconnect."""
for result_code in range(1, 6):
mqtt.MQTT_CLIENT._mqttc = mock.MagicMock()
mqtt.MQTT_CLIENT._mqtt_on_connect(None, {'topics': {}}, 0,
result_code)
self.assertTrue(mqtt.MQTT_CLIENT._mqttc.disconnect.called)
def test_mqtt_subscribes_topics_on_connect(self):
"""Test subscription to topic on connect."""
from collections import OrderedDict
prev_topics = OrderedDict()
prev_topics['topic/test'] = 1,
prev_topics['home/sensor'] = 2,
prev_topics['still/pending'] = None
mqtt.MQTT_CLIENT.topics = prev_topics
mqtt.MQTT_CLIENT.progress = {1: 'still/pending'}
# Return values for subscribe calls (rc, mid)
mqtt.MQTT_CLIENT._mqttc.subscribe.side_effect = ((0, 2), (0, 3))
mqtt.MQTT_CLIENT._mqtt_on_connect(None, None, 0, 0)
self.assertFalse(mqtt.MQTT_CLIENT._mqttc.disconnect.called)
expected = [(topic, qos) for topic, qos in prev_topics.items()
if qos is not None]
self.assertEqual(
expected,
[call[1] for call in mqtt.MQTT_CLIENT._mqttc.subscribe.mock_calls])
self.assertEqual({
1: 'still/pending',
2: 'topic/test',
3: 'home/sensor',
}, mqtt.MQTT_CLIENT.progress)
def test_mqtt_disconnect_tries_no_reconnect_on_stop(self):
"""Test the disconnect tries."""
mqtt.MQTT_CLIENT._mqtt_on_disconnect(None, None, 0)
self.assertFalse(mqtt.MQTT_CLIENT._mqttc.reconnect.called)
@mock.patch('homeassistant.components.mqtt.time.sleep')
def test_mqtt_disconnect_tries_reconnect(self, mock_sleep):
"""Test the re-connect tries."""
mqtt.MQTT_CLIENT.topics = {
'test/topic': 1,
'test/progress': None
}
mqtt.MQTT_CLIENT.progress = {
1: 'test/progress'
}
mqtt.MQTT_CLIENT._mqttc.reconnect.side_effect = [1, 1, 1, 0]
mqtt.MQTT_CLIENT._mqtt_on_disconnect(None, None, 1)
self.assertTrue(mqtt.MQTT_CLIENT._mqttc.reconnect.called)
self.assertEqual(4, len(mqtt.MQTT_CLIENT._mqttc.reconnect.mock_calls))
self.assertEqual([1, 2, 4],
[call[1][0] for call in mock_sleep.mock_calls])
self.assertEqual({'test/topic': 1}, mqtt.MQTT_CLIENT.topics)
self.assertEqual({}, mqtt.MQTT_CLIENT.progress)
| justyns/home-assistant | tests/components/mqtt/test_init.py | Python | mit | 11,668 | 0 |
import os.path
dir = "temp"
if os.path.isdir(dir):
print("Diretório temp existe")
elif os.path.isfile(dir):
print("Arquivo temp existe")
else:
print("Diretório temp não existe") | laenderoliveira/exerclivropy | cap09/exercicio-09-31.py | Python | mit | 192 | 0.005291 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
PHOTOMETRYDEMO - model.py
the data model
Created on Tue Sep 23 12:09:45 2014
@author: rafik
"""
import os
import math
import numpy as np
import scipy as sp
import scipy.ndimage.interpolation
import pyfits
class Model(object):
def __init__(self):
self.name = None
self.masks = []
self.roi = None
self.ellipse = None
self.shape = None
self.scale = -1
self.psf = None
def getRegionCoords(self):
return self.roi.getRegionCoords()
def getMaskFilename(self, pfx=''):
filename = pfx+'mask.fits'
sx, sy = self.shape
renderedmask = np.ones((sx, sy), dtype=np.int)
for mask in self.masks:
if mask.type == 'mask':
pixels = mask.getCoveredPixels()
for px, py in pixels:
renderedmask[px, py] = 0
try:
os.remove(filename)
except OSError:
pass
hdu = pyfits.PrimaryHDU(np.rot90(renderedmask))
hdu.writeto(filename)
return filename
def createPSF(self, pfx=''):
#TODO
self.psf = PSF(pfx)
def getPhotometricZeropoint(self):
#TODO
return 0.0
def getPlateScale(self):
#TODO
return (0.001, 0.001)
class Selection(object):
def __init__(self, canv, nr, color):
self.canv = canv
s = self.canv.scale
print 'scale', s
self.x1, self.y1, self.x2, self.y2 = np.array([30,60,100,120])
self.nr = nr
self.color = color
self.type = None
tg = 'rect_%i' % nr
tg2 = 'rc_%i_' % nr
self.rect = canv.create_rectangle(self.x1*s, self.y1*s, self.x2*s, self.y2*s, tags=tg, outline=self.color)
self.p_a = canv.create_circle(self.x1*s,self.y1*s,5, fill=self.color, tags=tg2+'ul')
self.p_b = canv.create_circle(self.x2*s,self.y2*s,5, fill=self.color, tags=tg2+'lr')
def transform(self, pnt, x, y):
s = self.canv.scale
x /= s
y /= s
if pnt=="cp":
xo = (self.x1+self.x2) // 2
yo = (self.y1+self.y2) // 2
dx = self.x2 - xo
dy = self.y2 - yo
self.x1 = x-dx
self.y1 = y-dy
self.x2 = x+dx
self.y2 = y+dy
elif pnt=="ul":
self.x1 = x
self.y1 = y
elif pnt=='lr':
self.x2 = x
self.y2 = y
self.update()
def _update(self):
r=5
s = self.canv.scale
self.canv.coords(self.p_a, self.x1*s-r, self.y1*s-r, self.x1*s+r, self.y1*s+r)
self.canv.coords(self.p_b, self.x2*s-r, self.y2*s-r, self.x2*s+r, self.y2*s+r)
self.canv.coords(self.rect, self.x1*s, self.y1*s, self.x2*s, self.y2*s)
# inherit and overwrite
def update(self):
self._update()
#
class Mask(Selection):
def __init__(self, canv, nr):
Selection.__init__(self, canv, nr, color='yellow')
self.type = "mask"
s = self.canv.scale
self.lines = []
dx = -self.x1+self.x2
dx *= s
nlines = 10
for i in range(1,nlines+1):
x = i*dx/(nlines+1)
l = self.canv.create_line(self.x1*s+x, self.y1*s, self.x1*s+x, self.y2*s, tags='rect_%i' % self.nr, fill=self.color)
#print self.x1+x, self.y1, self.x1+x, self.y2
self.lines.append(l)
def update(self):
self._update()
s = self.canv.scale
dx = -self.x1+self.x2
for i in range(len(self.lines)):
x = (i+1)*dx/(len(self.lines)+1)*s
self.canv.coords(self.lines[i], self.x1*s+x, self.y1*s, self.x1*s+x, self.y2*s)
def getCoveredPixels(self):
px = []
minx = int(np.floor(np.min([self.x1, self.x2])))
maxx = int(np.floor(np.max([self.x1, self.x2])))
miny = int(np.floor(np.min([self.y1, self.y2])))
maxy = int(np.floor(np.max([self.y1, self.y2])))
for x in range(minx, maxx+1):
for y in range(miny, maxy+1):
px.append((x,y))
return px
#
# used to select a region of interest (inside selected)
class ROI(Selection):
def __init__(self, canv, nr):
print "init roi"
Selection.__init__(self, canv, nr, color='green')
self.type = "roi"
s = self.canv.scale
self.lines = []
dd = 5
sx = int(self.canv.cget('width'))
sy = int(self.canv.cget('height'))
#print sx, sy
for xx in range(0,sx):
#print xx, xx%dd, xx%dd==True
if xx%dd == 0:
minx = np.min([self.x1, self.x2])*s
maxx = np.max([self.x1, self.x2])*s
miny = np.min([self.y1, self.y2])*s
maxy = np.max([self.y1, self.y2])*s
if xx<minx or xx>maxx:
l1 = self.canv.create_line(xx, 0, xx, sy//2, tags='roi_%i' % self.nr, fill=self.color)
l2 = self.canv.create_line(xx, sy//2, xx, sy, tags='roi_%i' % self.nr, fill=self.color)
else:
l1 = self.canv.create_line(xx, 0, xx, miny, tags='roi_%i' % self.nr, fill=self.color)
l2 = self.canv.create_line(xx, maxy, xx, sy, tags='roi_%i' % self.nr, fill=self.color)
self.lines.append((l1,l2))
def update(self):
self._update()
s = self.canv.scale
sx = int(self.canv.cget('width'))
sy = int(self.canv.cget('height'))
minx = np.min([self.x1, self.x2])*s
maxx = np.max([self.x1, self.x2])*s
miny = np.min([self.y1, self.y2])*s
maxy = np.max([self.y1, self.y2])*s
for l1, l2 in self.lines:
# x = (i+1)*dx/(len(self.lines)+1)
# self.canv.coords(self.lines[i], self.x1+x, self.y1, self.x1+x, self.y2)
ax1,ay1,ax2,ay2 = self.canv.coords(l1)
bx1,by1,bx2,by2 = self.canv.coords(l2)
xx = ax1
if xx<minx or xx>maxx:
self.canv.coords(l1, xx, 0, xx, sy//2)
self.canv.coords(l2, xx, sy//2, xx, sy)
else:
self.canv.coords(l1, xx, 0, xx, miny)
self.canv.coords(l2, xx, maxy, xx, sy)
def getRegionCoords(self):
minx = np.min([self.x1, self.x2])
maxx = np.max([self.x1, self.x2])
miny = np.min([self.y1, self.y2])
maxy = np.max([self.y1, self.y2])
return (minx, miny, maxx, maxy)
class Ellipse(object):
def __init__(self, canv):
self.canv = canv
self.xc = 300
self.yc = 300
self.a = 100
self.b = 50
self.r = np.pi / 4
s = self.canv.scale
pnts = self._poly_oval()
xa, ya, xb, yb = self._getHandlePoints()
self.poly = canv.create_polygon(pnts, fill='', outline='#fff', width=2, smooth=1, tags='poly')
self.p_a = canv.create_circle(xa,ya,5, fill='red', tags='pa')
self.p_b = canv.create_circle(xb,yb,5, fill='red', tags='pb')
self.p_c = canv.create_circle(self.xc*s,self.yc*s,5, fill='red', tags='pc')
def getCoords(self):
return (self.xc, self.yc)
# R_e (half light radius)
def getRe(self):
return np.sqrt(self.a * self.b)
def getAxisRatio(self):
return 1.0 * self.b / self.a
def getPositionAngle(self):
        # result is in degrees
        # measured from the upward y axis; stored internally from the horizontal x axis, hence the +90
return self.r / np.pi * 180 + 90
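        # e.g. with the default r = pi/4 set in __init__, this returns
        # 45 + 90 = 135 degrees.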
def _getHandlePoints(self):
s = self.canv.scale
xa = self.xc + self.a*np.cos(self.r)
ya = self.yc - self.a*np.sin(self.r)
xb = self.xc + self.b*np.cos(self.r+np.pi/2)
yb = self.yc - self.b*np.sin(self.r+np.pi/2)
return (xa*s,ya*s,xb*s,yb*s)
def update(self):
print (self.xc, self.yc, self.a, self.b, self.r)
s = self.canv.scale
pnts = self._poly_oval()
xa, ya, xb, yb = self._getHandlePoints()
xc, yc = (self.xc*s, self.yc*s)
r = 5
self.canv.delete(self.poly)
self.canv.coords(self.p_a, xa-r, ya-r, xa+r, ya+r)
self.canv.coords(self.p_b, xb-r, yb-r, xb+r, yb+r)
self.canv.coords(self.p_c, xc-r, yc-r, xc+r, yc+r)
self.poly = self.canv.create_polygon(pnts, fill='', outline='#fff', width=2, smooth=1, tags='poly')
def transform(self, pnt, xp=0, yp=0):
s = self.canv.scale
xp /= s
yp /= s
if pnt=='c':
self.xc=xp
self.yc=yp
self.update()
return
xc = self.xc
yc = self.yc
a = self.a
b = self.b
dx = xc - xp
dy = yc - yp
ab = np.sqrt(dx*dx+dy*dy)
wab = np.arctan2(dx,dy)
if pnt=='a':
a = ab
r = wab + np.pi/2
if a<b: return
else:
b = ab
r = wab
if b>a:return
self.a = a
self.b = b
self.r = r
self.update()
return
def move(self, xc=0, yc=0):
pass
# def poly_oval2(x0,y0, x1,y1, r=0, steps=20):
# """return an oval as coordinates suitable for create_polygon"""
#
# # x0,y0,x1,y1 are as create_oval
#
# # rotation is in degrees anti-clockwise, convert to radians
# #rotation = r * math.pi / 180.0
# rotation = r
#
# # major and minor axes
# a = (x1 - x0) / 2.0
# b = (y1 - y0) / 2.0
#
# # center
# xc = x0 + a
# yc = y0 + b
#
# point_list = []
#
# # create the oval as a list of points
# for i in range(steps):
#
# # Calculate the angle for this step
# # 360 degrees == 2 pi radians
# theta = (math.pi * 2) * (float(i) / steps)
#
# x1 = a * math.cos(theta)
# y1 = b * math.sin(theta)
#
# # rotate x, y
# x = (x1 * math.cos(rotation)) + (y1 * math.sin(rotation))
# y = (y1 * math.cos(rotation)) - (x1 * math.sin(rotation))
#
# point_list.append(round(x + xc))
# point_list.append(round(y + yc))
#
# return point_list
def _poly_oval(self, steps=16):
"""return an oval as coordinates suitable for create_polygon"""
s = self.canv.scale
xc = self.xc * s
yc = self.yc * s
a = self.a * s
b = self.b * s
r = self.r
point_list = []
# create the oval as a list of points
for i in range(steps):
# Calculate the angle for this step
# 360 degrees == 2 pi radians
theta = (math.pi * 2) * (float(i) / steps)
x1 = a * math.cos(theta)
y1 = b * math.sin(theta)
# rotate x, y
x = (x1 * math.cos(r)) + (y1 * math.sin(r))
y = (y1 * math.cos(r)) - (x1 * math.sin(r))
point_list.append(round(x + xc))
point_list.append(round(y + yc))
return point_list
class PSF(object):
def __init__(self, pfx=''):
self.filename = pfx + 'psf.fits'
#TODO
pass
def getBoxSize(self):
#TODO
return (100, 100)
def getFileName(self):
#TODO
return self.filename | RafiKueng/SteMM | model.py | Python | mit | 12,007 | 0.015658 |
"""
A module to compute cosmological distances, including:
comoving_distance (Dc)
angular_diameter_distance (Da)
luminosity_distance (Dl)
comoving_volume (volume)
"""
c = 299792458.
G = 4.3e-6
from math import pi
import warnings
warnings.warn("Default cosmology is Om=0.3,Ol=0.7,h=0.7,w=-1 and distance units are Mpc!",ImportWarning)
class Distance:
def __init__(self,cosmo=[0.3,0.7,0.7]):
self.OMEGA_M = cosmo[0]
self.OMEGA_L = cosmo[1]
self.h = cosmo[2]
self.w = -1.
self.wpars = None
self.w_analytic = False
self.Dc = self.comoving_distance
self.Dt = self.comoving_transverse_distance
self.Dm = self.comoving_transverse_distance
self.Da = self.angular_diameter_distance
self.Dl = self.luminosity_distance
self.dm = self.distance_modulus
self.volume = self.comoving_volume
def set(self,cosmo):
self.OMEGA_M = cosmo[0]
self.OMEGA_L = cosmo[1]
self.h = cosmo[2]
def reset(self):
self.OMEGA_M = 0.3
self.OMEGA_L = 0.7
self.h = 0.7
self.w = -1.
def age(self,z):
from scipy import integrate
f = lambda zp,m,l,k : (m/zp+k+l*zp**2)**-0.5
om = self.OMEGA_M
ol = self.OMEGA_L
ok = 1.-om-ol
return (9.778/self.h)*integrate.romberg(f,1e-300,1/(1.+z),(om,ol,ok))
def comoving_distance(self,z1,z2=0.):
from scipy import integrate
if z2<z1:
z1,z2 = z2,z1
def fa(z):
if self.w_analytic==True:
return self.w(z,self.wpars)
from math import exp
wa = lambda z : (1.+self.w(z,self.wpars))/(1.+z)
#return exp(3.*integrate.romberg(wa,0,z))
return exp(3.*integrate.quad(wa,0,z)[0])
if type(self.w)==type(self.comoving_distance) or type(self.w)==type(fa):
f = lambda z,m,l,k : (m*(1.+z)**3+k*(1.+z)**2+l*fa(z))**-0.5
elif self.w!=-1.:
f = lambda z,m,l,k : (m*(1.+z)**3+k*(1.+z)**2+l*(1.+z)**(3.*(1.+self.w)))**-0.5
else:
f = lambda z,m,l,k : (m*(1.+z)**3+k*(1.+z)**2+l)**-0.5
om = self.OMEGA_M
ol = self.OMEGA_L
ok = 1.-om-ol
# return (c/self.h)*integrate.romberg(f,z1,z2,(om,ol,ok))/1e5
return (c/self.h)*integrate.quad(f,z1,z2,(om,ol,ok))[0]/1e5
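    # Rough sanity check for the default cosmology (Om=0.3, Ol=0.7, h=0.7):
    # comoving_distance(0.5) is roughly 1.89e3 Mpc.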
def comoving_transverse_distance(self,z1,z2=0.):
dc = 1e5*self.comoving_distance(z1,z2)/(c/self.h)
ok = 1.-self.OMEGA_M-self.OMEGA_L
if ok>0:
from math import sinh,sqrt
dtc = sinh(sqrt(ok)*dc)/sqrt(ok)
elif ok<0:
from math import sin,sqrt
ok *= -1.
dtc = sin(sqrt(ok)*dc)/sqrt(ok)
else:
dtc = dc
return (c/self.h)*dtc/1e5
def angular_diameter_distance(self,z1,z2=0.):
if z2<z1:
z1,z2 = z2,z1
return self.comoving_transverse_distance(z1,z2)/(1.+z2)
def luminosity_distance(self,z):
return (1.+z)*self.comoving_transverse_distance(z)
def comoving_volume(self,z1,z2=0.):
from scipy import integrate
if z2<z1:
z1,z2 = z2,z1
f = lambda z,m,l,k: (self.comoving_distance(0.,z)**2)/((m*(1.+z)**3+k*(1.+z)**2+l)**0.5)
om = self.OMEGA_M
ol = self.OMEGA_L
ok = 1.-om-ol
return 4*pi*(c/self.h)*integrate.romberg(f,z1,z2,(om,ol,ok))/1e5
def rho_crit(self,z):
H2 = (self.OMEGA_M*(1+z)**3 + self.OMEGA_L)*(self.h/10.)**2
return 3*H2/(8.*pi*G)
def distance_modulus(self,z):
from math import log10
return 5*log10(self.luminosity_distance(z)*1e5)
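    # Rough sanity check for the default cosmology: distance_modulus(0.5) is
    # roughly 42.3, in line with standard flat-LCDM tables.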
if __name__ == '__main__':
import numpy as np
Mpc = 3.08567758E22
cosmo = Distance()
zl = 0.4457
zs = 2.379
Dol = cosmo.angular_diameter_distance(0., zl)*Mpc
Dos = cosmo.angular_diameter_distance(0., zs)*Mpc
Dls = cosmo.angular_diameter_distance(zl, zs)*Mpc
# In SI units
Gnewton = 6.67384E-11
Msun = 1.9891E30
# Arcseconds to radians (example of a 5 arcsec Einstein ring)
b = 5.0 / 3600.0 * np.pi/180.0
# Dimensionless mass within the ring (assuming SIE/SIS/point mass)
# When you get the masses of blobs from the code, they replace this m
m = b**2*np.pi
print(m*c**2*Dos*Dol/(4*np.pi*Gnewton*Dls)/Msun)
# Another way of getting the mass for a 5 arcsec Einstein ring
# Angular einstein radius (arcseconds) of a solar mass
theta_0 = np.sqrt(4.*Gnewton*Msun/c**2*Dls/Dol/Dos)*(180./np.pi)*3600.
print((5.0/theta_0)**2)
# print(1./theta_0**2./np.pi)
| eggplantbren/Lensing2 | src/distances.py | Python | gpl-3.0 | 4,646 | 0.020017 |
import os
import tempfile
from pkg_resources import Requirement
from infi.unittest import parameters
from .test_cases import ForgeTest
from pydeploy.environment import Environment
from pydeploy.environment_utils import EnvironmentUtils
from pydeploy.checkout_cache import CheckoutCache
from pydeploy.installer import Installer
from pydeploy import sources
from pydeploy.scm import git
from pydeploy import command
from pydeploy import exceptions
class SourceTest(ForgeTest):
def setUp(self):
super(SourceTest, self).setUp()
self.env = self.forge.create_mock(Environment)
self.env.installer = self.forge.create_mock(Installer)
self.env.utils = self.forge.create_mock(EnvironmentUtils)
class SourceFromStringTest(ForgeTest):
def setUp(self):
super(SourceFromStringTest, self).setUp()
self.S = sources.Source.from_anything
def test__git(self):
self.assertIsInstance(self.S("git://bla"), sources.Git)
def test__path(self):
filename = tempfile.mkdtemp()
self.assertIsInstance(self.S(filename), sources.Path)
def test__easy_install(self):
self.assertIsInstance(self.S("blablabla"), sources.EasyInstall)
def test__invalid_source(self):
for invalid_value in [2, 2.5, True]:
with self.assertRaises(ValueError):
self.S(invalid_value)
class PathSourceTest(SourceTest):
def setUp(self):
super(PathSourceTest, self).setUp()
self.path = tempfile.mkdtemp()
self.source = sources.Path(self.path)
def test__get_name(self):
self.assertEquals(self.source.get_name(), self.path)
def test__uses_expanduser(self):
source = sources.Path("~/a/b/c")
self.assertEquals(source._param, os.path.expanduser("~/a/b/c"))
def test__get_signature(self):
self.assertEquals(self.source.get_signature(), "Path({0})".format(self.path))
def test__checkout(self):
self.assertEquals(self.source.checkout(self.env), self.path)
with self.assertRaises(NotImplementedError):
self.source.checkout(self.env, '/another/path')
@parameters.toggle('reinstall')
def test__install(self, reinstall):
self.env.installer.install_unpacked_package(self.path, self.path, reinstall=reinstall)
self.forge.replay()
self.source.install(self.env, reinstall=reinstall)
class DelegateToPathInstallTest(SourceTest):
def setUp(self):
super(DelegateToPathInstallTest, self).setUp()
self.path_class = self.forge.create_class_mock(sources.Path)
self.orig_path_class = sources.Path
self.forge.replace_with(sources, "Path", self.path_class)
def expect_delegation_to_path_install(self, path, name, reinstall):
path_mock = self.forge.create_mock(self.orig_path_class)
self.path_class(path, name=name).and_return(path_mock)
return path_mock.install(self.env, reinstall=reinstall)
class GitSourceTest(DelegateToPathInstallTest):
def setUp(self):
super(GitSourceTest, self).setUp()
self.repo_url = "some/repo/url"
self.branch = 'some_branch'
self.source = sources.Git(self.repo_url, self.branch)
self.forge.replace_many(git, "clone_to_or_update", "reset_submodules")
def test__master_is_default_branch(self):
self.assertEquals(sources.Git('bla')._branch, 'master')
def test__get_name(self):
self.assertEquals(self.source.get_name(), self.repo_url + "@" + self.branch)
def test__repr(self):
self.assertEquals(repr(self.source), 'Git({})'.format(self.source.get_name()))
def test__get_signature(self):
self.assertEquals(self.source.get_signature(), repr(self.source))
@parameters.toggle('reinstall')
def test__git_source_install(self, reinstall):
self.forge.replace(self.source, "checkout")
checkout_path = "some/checkout/path"
self.source.checkout(self.env).and_return(checkout_path)
self.expect_delegation_to_path_install(checkout_path, name=self.repo_url, reinstall=reinstall)
with self.forge.verified_replay_context():
self.source.install(self.env, reinstall=reinstall)
def test__git_source_checkout_with_path_argument(self):
checkout_path = "/some/path/to/checkout"
git.clone_to_or_update(url=self.repo_url, path=checkout_path, branch=self.branch)
git.reset_submodules(checkout_path)
with self.forge.verified_replay_context():
result = self.source.checkout(self.env, checkout_path)
self.assertIs(result, checkout_path)
def test__git_source_checkout_no_path_argument(self):
checkout_path = "/some/path/to/checkout"
checkout_cache = self.forge.create_mock(CheckoutCache)
self.env.get_checkout_cache().and_return(checkout_cache)
checkout_cache.get_checkout_path(self.repo_url).and_return(checkout_path)
git.clone_to_or_update(url=self.repo_url, branch=self.branch, path=checkout_path)
git.reset_submodules(checkout_path)
with self.forge.verified_replay_context():
result = self.source.checkout(self.env)
self.assertIs(result, checkout_path)
def test__git_identifies_git_prefix(self):
url = "git://bla"
source = sources.Source.from_anything(url)
self.assertIsInstance(source, sources.Git)
class GitConstraintsTest(ForgeTest):
def setUp(self):
super(GitContraintsTest, self).setUp()
self.forge.replace(git, "get_remote_references_dict")
self.url = "some_url"
self.source = sources.Git(self.url)
def test__more_than_one_constraint_not_supported(self):
with self.assertRaises(NotImplementedError):
self.source.resolve_constraints([('>=', '2.0.0'), ('<=', '3.0.0')])
@parameters.iterate('tag', ['v2.0.0', '2.0.0'])
def test__exact_version_matches_tag(self, tag):
self._assert_chooses("x==2.0.0", {
git.Tag(tag) : "some_hash"
}, 'tags/{}'.format(tag))
def test__exact_version_with_no_match_raises_exception(self):
self._assert_no_match('x==2.0.0', {
git.Tag('bla') : 'h1',
git.Branch('bloop') : 'h2'
})
@parameters.iterate('branch_name', ['v2.0.0', '2.0.0'])
def test__minimum_version_inclusive_selects_exact(self, branch_name):
self._assert_chooses("x>=2.0.0", {
git.Branch(branch_name) : "h1"
}, branch_name)
@parameters.toggle('inclusive')
@parameters.iterate('branch_name', ['3.0.0', 'v3.0.0', '2.3.2', 'v2.3'])
def test__minimum_version_with_matches(self, inclusive, branch_name):
self._assert_chooses("x{0}2.0.0".format(">=" if inclusive else ">"), {
            git.Branch(branch_name): "h1"
}, branch_name)
@parameters.toggle('inclusive')
@parameters.iterate('branch_name', ['2.0.0-a1', 'v2.0.0-b1', 'v1.9'])
def test__minimum_version_without_matches(self, inclusive, branch_name):
self._assert_no_match("x{0}2.0.0".format(">=" if inclusive else ">"), {
            git.Branch(branch_name): "h1"
})
@parameters.toggle('inclusive')
def test__unbound_version_takes_from_master(self, inclusive):
self._assert_chooses("x{0}2.0.0".format(">=" if inclusive else ">"), {
            git.Branch('master'): "h1"
}, 'master')
def _assert_chooses(self, requirement, options, chosen):
requirement = Requirement.parse(requirement)
git.get_remote_references_dict(self.url).and_return(options)
self.forge.replay()
new_source = self.source.resolve_constraints(requirement.specs)
self.assertIsInstance(new_source, sources.Git)
self.assertEquals(new_source._url, self.url)
self.assertEquals(new_source._branch, chosen)
def _assert_no_match(self, requirement, options):
specs = Requirement.parse(requirement).specs
git.get_remote_references_dict(self.url).and_return(options)
self.forge.replay()
with self.assertRaises(exceptions.RequiredVersionNotFound):
self.source.resolve_constraints(specs)
class ExternalToolSourceTest(SourceTest):
def setUp(self):
super(ExternalToolSourceTest, self).setUp()
self.package_name = "some_package==1.0.0"
self.forge.replace(command, "execute_assert_success")
class PIPSourceTest(ExternalToolSourceTest):
@parameters.toggle('reinstall')
def test__install(self, reinstall):
source = sources.PIP(self.package_name)
self.env.execute_pip_install(self.package_name, reinstall=reinstall)
with self.forge.verified_replay_context():
source.install(self.env, reinstall=reinstall)
def test__checkout_not_implemented(self):
with self.assertRaises(NotImplementedError):
sources.PIP(self.package_name).checkout(self.env, '/some/path')
with self.assertRaises(NotImplementedError):
sources.PIP(self.package_name).checkout(self.env)
class EasyInstallSourceTest(ExternalToolSourceTest):
@parameters.toggle('reinstall')
def test__install(self, reinstall):
self.env.execute_easy_install(self.package_name, reinstall=reinstall)
source = sources.EasyInstall(self.package_name)
with self.forge.verified_replay_context():
source.install(self.env, reinstall=reinstall)
def test__checkout_not_implemented(self):
with self.assertRaises(NotImplementedError):
sources.EasyInstall(self.package_name).checkout(self.env, '/some/path')
with self.assertRaises(NotImplementedError):
sources.EasyInstall(self.package_name).checkout(self.env)
class SCMTest(SourceTest):
def test__git(self):
repo = "git://some_repo"
result = sources.SCM(repo)
self.assertIsInstance(result, sources.Git)
self.assertEquals(result._url, repo)
def test__git_with_branch(self):
result = sources.SCM("git://some_repo@branch_name")
self.assertIsInstance(result, sources.Git)
self.assertEquals(result._url, "git://some_repo")
self.assertEquals(result._branch, "branch_name")
def test__other(self):
with self.assertRaises(ValueError):
sources.SCM("bla")
| vmalloc/pydeploy | tests/test__sources.py | Python | bsd-3-clause | 10,265 | 0.005261 |
from TestBase import BaseClass
class ContractBase(BaseClass):
def add_contract(self):
wd = self.wd
wd.find_element_by_link_text("add new").click()
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys()
def add_full_name(self, first_name=None, middle_name=None, last_name=None, nickname=None):
wd = self.wd
if first_name:
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys("%s" % first_name)
if middle_name:
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys("%s" % middle_name)
if last_name:
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys("%s" % last_name)
if nickname:
wd.find_element_by_name("nickname").click()
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys("%s" % nickname)
def add_title(self, title):
wd = self.wd
wd.find_element_by_name("title").click()
wd.find_element_by_name("title").clear()
wd.find_element_by_name("title").send_keys("%s" % title)
def add_company(self, company_name):
wd = self.wd
wd.find_element_by_name("company").click()
wd.find_element_by_name("company").clear()
wd.find_element_by_name("company").send_keys("%s" % company_name)
def add_address(self, address_name):
wd = self.wd
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys("%s" % address_name)
def add_phone_number(self, home=None, mobile=None, work=None, fax=None):
wd = self.wd
if home:
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys("%s" % home)
if mobile:
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").clear()
wd.find_element_by_name("mobile").send_keys("%s" % mobile)
if work:
wd.find_element_by_name("work").click()
wd.find_element_by_name("work").clear()
wd.find_element_by_name("work").send_keys("%s" % work)
if fax:
wd.find_element_by_name("fax").click()
wd.find_element_by_name("fax").clear()
wd.find_element_by_name("fax").send_keys("%s" % fax)
def add_email(self, email1=None, email2=None, email3=None):
wd = self.wd
if email1:
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys("%s" % email1)
if email2:
wd.find_element_by_name("email2").click()
wd.find_element_by_name("email2").clear()
wd.find_element_by_name("email2").send_keys("%s" % email2)
if email3:
wd.find_element_by_name("email3").click()
wd.find_element_by_name("email3").clear()
wd.find_element_by_name("email3").send_keys("%s" % email3)
def add_homepage(self, homepage=None):
wd = self.wd
wd.find_element_by_name("homepage").click()
wd.find_element_by_name("homepage").clear()
wd.find_element_by_name("homepage").send_keys("%s" % homepage)
def add_year(self):
wd = self.wd
        # In the future we could add a function that takes a date and selects it the same way as below
if not wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[3]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[3]").click()
if not wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[2]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[2]").click()
wd.find_element_by_name("byear").click()
wd.find_element_by_name("byear").clear()
wd.find_element_by_name("byear").send_keys("1999")
if not wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[3]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[3]").click()
if not wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[2]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[2]").click()
wd.find_element_by_name("ayear").click()
wd.find_element_by_name("ayear").clear()
wd.find_element_by_name("ayear").send_keys("1999")
def add_secondary_adress(self, address):
wd = self.wd
wd.find_element_by_name("address2").click()
wd.find_element_by_name("address2").clear()
wd.find_element_by_name("address2").send_keys("%s" % address)
def add_secondary_home(self, phone):
wd = self.wd
wd.find_element_by_name("phone2").click()
wd.find_element_by_name("phone2").clear()
wd.find_element_by_name("phone2").send_keys("%s" % phone)
def add_secondary_notes(self, notes):
wd = self.wd
wd.find_element_by_name("notes").click()
wd.find_element_by_name("notes").clear()
wd.find_element_by_name("notes").send_keys("%s" % notes)
def restore_contract(self):
self.wd.find_element_by_link_text("Logout").click()
self.wd.quit() | werbk/task-2.1 | tests_contract/contract_lib.py | Python | apache-2.0 | 5,896 | 0.002035 |
"""ZFS based backup workflows."""
import datetime
import shlex
import gflags
import lvm
import workflow
FLAGS = gflags.FLAGS
gflags.DEFINE_string('rsync_options',
'--archive --acls --numeric-ids --delete --inplace',
'rsync command options')
gflags.DEFINE_string('rsync_path', '/usr/bin/rsync', 'path to rsync binary')
gflags.DEFINE_string('zfs_snapshot_prefix', 'ari-backup-',
'prefix for historical ZFS snapshots')
gflags.DEFINE_string('zfs_snapshot_timestamp_format', '%Y-%m-%d--%H%M',
'strftime() formatted timestamp used when naming new ZFS snapshots')
class ZFSLVMBackup(lvm.LVMSourceMixIn, workflow.BaseWorkflow):
"""Workflow for backing up a logical volume to a ZFS dataset.
  Data is copied from an LVM snapshot to a ZFS dataset using rsync, and then
ZFS commands are issued to create historical snapshots. The ZFS snapshot
lifecycle is also managed by this class. When a backup completes, snapshots
older than snapshot_expiration_days are destroyed.
This approach has some benefits over rdiff-backup in that all backup
datapoints are easily browseable and replication of the backup data using
ZFS streams is generally less resource intensive than using something like
rsync to mirror the files created by rdiff-backup.
One downside is that it's easier to store all file metadata using
rdiff-backup. Rsync can only store metadata for files that the destination
file system can also store. For example, if extended file system
attributes are used on the source file system, but aren't available on the
destination, rdiff-backup will still record those attributes in its own
files. If faced with that same scenario, rsync would lose those attributes.
Furthermore, rsync must have root privilege to write arbitrary file
metadata.
New post-job hooks are added for creating ZFS snapshots and trimming old
ones.
"""
def __init__(self, label, source_hostname, rsync_dst, zfs_hostname,
dataset_name, snapshot_expiration_days, **kwargs):
"""Configure a ZFSLVMBackup object.
Args:
label: str, label for the backup job (e.g. database-server1).
source_hostname: str, the name of the host with the source data to
backup.
rsync_dst: str, the destination argument for the rsync command line
(e.g. backupbox:/backup-store/database-server1).
zfs_hostname: str, the name of the backup destination host where we will
be managing the ZFS snapshots.
dataset_name: str, the full ZFS path (not file system path) to the
dataset holding the backups for this job
(e.g. tank/backup-store/database-server1).
      snapshot_expiration_days: int, the maximum age of a ZFS snapshot in days.
Pro tip: It's a good practice to reuse the label argument as the last
path component in the rsync_dst and dataset_name arguments.
"""
# Call our super class's constructor to enable LVM snapshot management
super(ZFSLVMBackup, self).__init__(label, **kwargs)
# Assign instance vars specific to this class.
self.source_hostname = source_hostname
self.rsync_dst = rsync_dst
self.zfs_hostname = zfs_hostname
self.dataset_name = dataset_name
# Assign flags to instance vars so they might be easily overridden in
# workflow configs.
self.rsync_options = FLAGS.rsync_options
self.rsync_path = FLAGS.rsync_path
self.zfs_snapshot_prefix = FLAGS.zfs_snapshot_prefix
self.zfs_snapshot_timestamp_format = FLAGS.zfs_snapshot_timestamp_format
self.add_post_hook(self._create_zfs_snapshot)
self.add_post_hook(self._destroy_expired_zfs_snapshots,
{'days': snapshot_expiration_days})
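  # Illustrative wiring, following the constructor docstring above (all
  # values are placeholders, not defaults):
  #
  #   ZFSLVMBackup(
  #       label='database-server1',
  #       source_hostname='database-server1',
  #       rsync_dst='backupbox:/backup-store/database-server1',
  #       zfs_hostname='backupbox',
  #       dataset_name='tank/backup-store/database-server1',
  #       snapshot_expiration_days=365)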
def _get_current_datetime(self):
"""Returns datetime object with the current date and time.
This method is mostly useful for testing purposes.
"""
return datetime.datetime.now()
def _run_custom_workflow(self):
"""Run rsync backup of LVM snapshot to ZFS dataset."""
# TODO(jpwoodbu) Consider throwing an exception if we see things in the
# include or exclude lists since we don't use them in this class.
self.logger.debug('ZFSLVMBackup._run_custom_workflow started.')
# Since we're dealing with ZFS datasets, let's always exclude the .zfs
# directory in our rsync options.
rsync_options = shlex.split(self.rsync_options) + ['--exclude', '/.zfs']
# We add a trailing slash to the src path otherwise rsync will make a
# subdirectory at the destination, even if the destination is already a
# directory.
rsync_src = self._snapshot_mount_point_base_path + '/'
command = [self.rsync_path] + rsync_options + [rsync_src, self.rsync_dst]
self.run_command(command, self.source_hostname)
self.logger.debug('ZFSLVMBackup._run_custom_workflow completed.')
def _create_zfs_snapshot(self, error_case):
"""Creates a new ZFS snapshot of our destination dataset.
The name of the snapshot will include the zfs_snapshot_prefix provided by
FLAGS and a timestamp. The zfs_snapshot_prefix is used by
    _find_snapshots_older_than() (called from _destroy_expired_zfs_snapshots())
    when deciding which snapshots to destroy. The timestamp encoded in a
    snapshot name is only for end-user
convenience. The creation metadata on the ZFS snapshot is what is used to
determine a snapshot's age.
This method does nothing if error_case is True.
Args:
error_case: bool, whether an error has occurred during the backup.
"""
if not error_case:
self.logger.info('Creating ZFS snapshot...')
timestamp = self._get_current_datetime().strftime(
self.zfs_snapshot_timestamp_format)
snapshot_name = self.zfs_snapshot_prefix + timestamp
snapshot_path = '{dataset_name}@{snapshot_name}'.format(
dataset_name=self.dataset_name, snapshot_name=snapshot_name)
command = ['zfs', 'snapshot', snapshot_path]
self.run_command(command, self.zfs_hostname)
def _find_snapshots_older_than(self, days):
"""Returns snapshots older than the given number of days.
Only snapshots that meet the following criteria are returned:
1. They were created at least "days" ago.
2. Their name is prefixed with FLAGS.zfs_snapshot_prefix.
Args:
days: int, the minimum age of the snapshots in days.
Returns:
A list of filtered snapshots.
"""
expiration = self._get_current_datetime() - datetime.timedelta(days=days)
# Let's find all the snapshots for this dataset.
command = ['zfs', 'get', '-rH', '-o', 'name,value', 'type',
self.dataset_name]
stdout, unused_stderr = self.run_command(command, self.zfs_hostname)
snapshots = list()
# Sometimes we get extra lines which are empty, so we'll strip the lines.
for line in stdout.strip().splitlines():
name, dataset_type = line.split('\t')
if dataset_type == 'snapshot':
# Let's try to only consider destroying snapshots made by us ;)
if name.split('@')[1].startswith(self.zfs_snapshot_prefix):
snapshots.append(name)
expired_snapshots = list()
for snapshot in snapshots:
creation_time = self._get_snapshot_creation_time(snapshot)
if creation_time <= expiration:
expired_snapshots.append(snapshot)
return expired_snapshots
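  # For reference, `zfs get -rH -o name,value type` emits tab-separated
  # name/value lines like the following (dataset names are illustrative):
  #
  #   tank/backup-store/db1                               filesystem
  #   tank/backup-store/db1@ari-backup-2015-03-07--0300   snapshot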
def _get_snapshot_creation_time(self, snapshot):
"""Gets the creation time of a snapshot as a Python datetime object
Args:
snapshot: str, the fule ZFS path to the snapshot.
Returns:
A datetime object representing the creation time of the snapshot.
"""
command = ['zfs', 'get', '-H', '-o', 'value', 'creation', snapshot]
stdout, unused_stderr = self.run_command(command, self.zfs_hostname)
return datetime.datetime.strptime(stdout.strip(), '%a %b %d %H:%M %Y')
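  # The ZFS `creation` property renders like "Thu Mar 5 12:32 2015", which
  # is what the '%a %b %d %H:%M %Y' format string above parses.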
def _destroy_expired_zfs_snapshots(self, days, error_case):
"""Destroy snapshots older than the given numnber of days.
Any snapshots in the target dataset with a name that starts with
FLAGS.zfs_snapshot_prefix and a creation date older than days will be
destroyed. Depending on the size of the snapshots and the performance of
the disk subsystem, this operation could take a while.
This method does nothing if error_case is True.
Args:
days: int, the max age of a snapshot in days.
error_case: bool, whether an error has occurred during the backup.
"""
if not error_case:
self.logger.info('Looking for expired ZFS snapshots...')
snapshots = self._find_snapshots_older_than(days)
# Sentinel value used to log if we destroyed no snapshots.
snapshots_destroyed = False
# Destroy expired snapshots.
for snapshot in snapshots:
command = ['zfs', 'destroy', snapshot]
self.run_command(command, self.zfs_hostname)
snapshots_destroyed = True
self.logger.info('{snapshot} destroyed.'.format(snapshot=snapshot))
if not snapshots_destroyed:
self.logger.info('Found no expired ZFS snapshots.')
| rbarlow/ari-backup | ari_backup/zfs.py | Python | bsd-3-clause | 9,110 | 0.003952 |
#!/bin/python3
import sys
x1, v1, x2, v2 = map(int, input().strip().split(' '))
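# The kangaroos meet when x1 + t*v1 == x2 + t*v2 for some hop count t >= 0,
# i.e. t = (x1 - x2) / (v2 - v1). The three checks below require that t
# exists (v1 != v2), is an integer, and is non-negative.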
willLand = (
v1 != v2
and (x1 - x2) % (v2 - v1) == 0
and (x1 - x2) // (v2 - v1) >= 0)
print(('NO', 'YES')[willLand])
| lilsweetcaligula/Online-Judges | hackerrank/algorithms/implementation/easy/kangaroo/py/solution.py | Python | mit | 215 | 0.009302 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_files\version_updater.ui'
#
# Created: Thu Nov 10 15:32:30 2016
# by: pyside2-uic running on PySide2 2.0.0~alpha0
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.setWindowModality(QtCore.Qt.ApplicationModal)
Dialog.resize(1304, 753)
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(Dialog)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.versions_treeView = QtWidgets.QTreeView(Dialog)
self.versions_treeView.setObjectName("versions_treeView")
self.verticalLayout.addWidget(self.versions_treeView)
self.horizontalWidget = QtWidgets.QWidget(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.horizontalWidget.sizePolicy().hasHeightForWidth())
self.horizontalWidget.setSizePolicy(sizePolicy)
self.horizontalWidget.setObjectName("horizontalWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalWidget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.selectNone_pushButton = QtWidgets.QPushButton(self.horizontalWidget)
self.selectNone_pushButton.setObjectName("selectNone_pushButton")
self.horizontalLayout.addWidget(self.selectNone_pushButton)
self.selectAll_pushButton = QtWidgets.QPushButton(self.horizontalWidget)
self.selectAll_pushButton.setObjectName("selectAll_pushButton")
self.horizontalLayout.addWidget(self.selectAll_pushButton)
self.update_pushButton = QtWidgets.QPushButton(self.horizontalWidget)
self.update_pushButton.setObjectName("update_pushButton")
self.horizontalLayout.addWidget(self.update_pushButton)
self.cancel_pushButton = QtWidgets.QPushButton(self.horizontalWidget)
self.cancel_pushButton.setObjectName("cancel_pushButton")
self.horizontalLayout.addWidget(self.cancel_pushButton)
self.verticalLayout.addWidget(self.horizontalWidget)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtWidgets.QApplication.translate("Dialog", "Version Updater", None, -1))
self.label.setText(QtWidgets.QApplication.translate("Dialog", "<html><head/><body><p><span style=\" color:#c00000;\">Red Versions need update,</span><span style=\" color:#00c000;\">Greens are OK</span>, check the Versions that you want to trigger an update.</p></body></html>", None, -1))
self.selectNone_pushButton.setText(QtWidgets.QApplication.translate("Dialog", "Select None", None, -1))
self.selectAll_pushButton.setText(QtWidgets.QApplication.translate("Dialog", "Select All", None, -1))
self.update_pushButton.setText(QtWidgets.QApplication.translate("Dialog", "Update", None, -1))
self.cancel_pushButton.setText(QtWidgets.QApplication.translate("Dialog", "Cancel", None, -1))
| sergeneren/anima | anima/ui/ui_compiled/version_updater_UI_pyside2.py | Python | bsd-2-clause | 3,651 | 0.003835 |
# ----------------------------------------------------------------------------
# Copyright (c) 2011-2015, The American Gut Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from psycopg2 import connect
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
import agr
# table definitions, these are of the form: [(table_name, table_definition)].
# the motivation for this structure is to allow for checking if tables exist
# easily (see schema_is_sane)
tables = [
('biom',
"""create table biom (
sample varchar,
biom json,
biomtxt text,
constraint pk_biom primary key(sample)
)"""),
('metadata',
"""create table metadata (
sample varchar,
category varchar,
value varchar,
constraint pk_metadata primary key (sample, category),
constraint fk_metadata foreign key (sample) references biom(sample)
)"""),
('fastq',
"""create table fastq (
sample varchar,
url varchar,
constraint pk_fastq primary key (sample),
constraint fk_fastq foreign key (sample) references biom(sample),
constraint uc_fastq unique (url)
)"""),
('state',
"""create table state (
biom_sha varchar)""")
]
def database_connectivity(user=agr.db_user, password=agr.db_password,
host=agr.db_host, dbname=agr.db_name):
"""Determine if we can connect to the database
    Parameters
    ----------
user : str
        The database username
password : str
The password for the user
host : str
        The database host
    dbname : str
        The name of the database to connect to
    Returns
-------
bool
True if a connection was made, False otherwise
"""
try:
c = connect(user=user, password=password, host=host, dbname=dbname)
    except Exception:
return False
else:
c.close()
return True
def database_exists(user=agr.db_user, password=agr.db_password,
host=agr.db_host, dbname=agr.db_name):
"""Determine if the database exists
    Parameters
    ----------
user : str
        The database username
password : str
The password for the user
host : str
The database host
dbname : str
The name of the database to connect to
Returns
-------
bool
True if the database exists, False otherwise
"""
try:
c = connect(user=user, password=password, host=host, dbname=dbname)
    except Exception:
return False
else:
c.close()
return True
def schema_is_sane():
"""Check to see if the expected tables exist
Notes
-----
Assumes we have connectivity and the database exists.
The structure of the tables is _not_ verified, only checks that the table
names exist.
Database credentials are sourced from the agr module (e.g., the environment
configuration.
Returns
-------
bool
The expected tables appear to exist
"""
c = connect(user=agr.db_user, password=agr.db_password,
host=agr.db_host, dbname=agr.db_name)
cur = c.cursor()
for table_name, _ in tables:
cur.execute("""select exists(select *
from information_schema.tables
where table_name=%s)""", [table_name])
if not cur.fetchone()[0]:
return False
return True
def schema_has_data():
"""Check to see if the schema appears to have data
Notes
-----
Assumes we have connectivity and the database exists.
The structure of the tables is _not_ verified, only checks that there
appears to be rows in the tables.
Database credentials are sourced from the agr module (e.g., the environment
configuration.
Returns
-------
bool
If all of the tables appear to have data.
"""
if not schema_is_sane():
return False
c = connect(user=agr.db_user, password=agr.db_password,
host=agr.db_host, dbname=agr.db_name)
cur = c.cursor()
for table_name, _ in tables:
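        # Safe interpolation: table_name comes from the static `tables`
        # list above, never from user input.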
cur.execute("select count(1) from %s" % table_name)
if cur.fetchone()[0] == 0:
return False
return True
def create_database():
"""Create the database and the schema
Notes
-----
Assumes we have connectivity.
Database credentials are sourced from the agr module (e.g., the environment
configuration.
"""
c = connect(user=agr.admin_db_user, password=agr.admin_db_password,
host=agr.db_host)
c.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cur = c.cursor()
    cur.execute('drop database if exists %s' % agr.db_name)
cur.execute('create database %s' % agr.db_name)
cur.close()
c.close()
create_tables()
def create_tables():
"""Create the tables"""
c = connect(user=agr.admin_db_user, password=agr.admin_db_password,
host=agr.db_host, dbname=agr.db_name)
c.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
cur = c.cursor()
for _, table in tables:
cur.execute(table)
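# Typical bootstrap sequence (a sketch of what the __main__ block below
# does for test environments):
#
#   if database_connectivity() and not (database_exists() and
#                                       schema_is_sane() and
#                                       schema_has_data()):
#       create_database()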
if __name__ == '__main__':
import sys
if not database_connectivity():
sys.stderr.write("Cannot connect to the database\n")
sys.exit(1)
if not agr.test_environment:
if sys.argv[1] == 'FORCE_CREATE_TABLES':
create_tables()
sys.exit(0)
else:
sys.stderr.write("This does not appear to be a test environment\n")
sys.exit(1)
if database_exists() and schema_is_sane() and schema_has_data():
sys.exit(0)
else:
create_database()
| biocore/american-gut-rest | agr/schema.py | Python | bsd-3-clause | 5,875 | 0.00034 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import struct
class BBType(object):
command = 1
command_return = 2
consolemsg = 3
ping = 4
pong = 5
getenv = 6
getenv_return = 7
fs = 8
fs_return = 9
class BBPacket(object):
def __init__(self, p_type=0, p_flags=0, payload="", raw=None):
self.p_type = p_type
self.p_flags = p_flags
if raw is not None:
self.unpack(raw)
else:
self.payload = payload
def __repr__(self):
return "BBPacket(%i, %i)" % (self.p_type, self.p_flags)
def _unpack_payload(self, data):
self.payload = data
def _pack_payload(self):
return self.payload
def unpack(self, data):
self.p_type, self.p_flags = struct.unpack("!HH", data[:4])
self._unpack_payload(data[4:])
def pack(self):
return struct.pack("!HH", self.p_type, self.p_flags) + \
self._pack_payload()
class BBPacketCommand(BBPacket):
def __init__(self, raw=None, cmd=None):
self.cmd = cmd
super(BBPacketCommand, self).__init__(BBType.command, raw=raw)
def __repr__(self):
return "BBPacketCommand(cmd=%r)" % self.cmd
def _unpack_payload(self, payload):
self.cmd = payload
def _pack_payload(self):
return self.cmd
class BBPacketCommandReturn(BBPacket):
def __init__(self, raw=None, exit_code=None):
self.exit_code = exit_code
super(BBPacketCommandReturn, self).__init__(BBType.command_return,
raw=raw)
def __repr__(self):
return "BBPacketCommandReturn(exit_code=%i)" % self.exit_code
def _unpack_payload(self, data):
self.exit_code, = struct.unpack("!L", data[:4])
def _pack_payload(self):
return struct.pack("!L", self.exit_code)
class BBPacketConsoleMsg(BBPacket):
def __init__(self, raw=None, text=None):
self.text = text
super(BBPacketConsoleMsg, self).__init__(BBType.consolemsg, raw=raw)
def __repr__(self):
return "BBPacketConsoleMsg(text=%r)" % self.text
def _unpack_payload(self, payload):
self.text = payload
def _pack_payload(self):
return self.text
class BBPacketPing(BBPacket):
def __init__(self, raw=None):
super(BBPacketPing, self).__init__(BBType.ping, raw=raw)
def __repr__(self):
return "BBPacketPing()"
class BBPacketPong(BBPacket):
def __init__(self, raw=None):
super(BBPacketPong, self).__init__(BBType.pong, raw=raw)
def __repr__(self):
return "BBPacketPong()"
class BBPacketGetenv(BBPacket):
def __init__(self, raw=None, varname=None):
self.varname = varname
super(BBPacketGetenv, self).__init__(BBType.getenv, raw=raw)
def __repr__(self):
return "BBPacketGetenv(varname=%r)" % self.varname
def _unpack_payload(self, payload):
self.varname = payload
def _pack_payload(self):
return self.varname
class BBPacketGetenvReturn(BBPacket):
def __init__(self, raw=None, text=None):
self.text = text
super(BBPacketGetenvReturn, self).__init__(BBType.getenv_return,
raw=raw)
def __repr__(self):
return "BBPacketGetenvReturn(varvalue=%s)" % self.text
def _unpack_payload(self, payload):
self.text = payload
def _pack_payload(self):
return self.text
class BBPacketFS(BBPacket):
def __init__(self, raw=None, payload=None):
super(BBPacketFS, self).__init__(BBType.fs, payload=payload, raw=raw)
def __repr__(self):
return "BBPacketFS(payload=%r)" % self.payload
class BBPacketFSReturn(BBPacket):
def __init__(self, raw=None, payload=None):
super(BBPacketFSReturn, self).__init__(BBType.fs_return, payload=payload, raw=raw)
def __repr__(self):
return "BBPacketFSReturn(payload=%r)" % self.payload
| raphui/barebox | scripts/remote/messages.py | Python | gpl-2.0 | 4,045 | 0.000247 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Wrapper for gdi32.dll in ctypes.
"""
__revision__ = "$Id: gdi32.py 1299 2013-12-20 09:30:55Z qvasimodo $"
from defines import *
from kernel32 import GetLastError, SetLastError
#==============================================================================
# This is used later on to calculate the list of exported symbols.
_all = None
_all = set(vars().keys())
#==============================================================================
#--- Helpers ------------------------------------------------------------------
#--- Types --------------------------------------------------------------------
#--- Constants ----------------------------------------------------------------
# GDI object types
OBJ_PEN = 1
OBJ_BRUSH = 2
OBJ_DC = 3
OBJ_METADC = 4
OBJ_PAL = 5
OBJ_FONT = 6
OBJ_BITMAP = 7
OBJ_REGION = 8
OBJ_METAFILE = 9
OBJ_MEMDC = 10
OBJ_EXTPEN = 11
OBJ_ENHMETADC = 12
OBJ_ENHMETAFILE = 13
OBJ_COLORSPACE = 14
GDI_OBJ_LAST = OBJ_COLORSPACE
# Ternary raster operations
SRCCOPY = 0x00CC0020 # dest = source
SRCPAINT = 0x00EE0086 # dest = source OR dest
SRCAND = 0x008800C6 # dest = source AND dest
SRCINVERT = 0x00660046 # dest = source XOR dest
SRCERASE = 0x00440328 # dest = source AND (NOT dest)
NOTSRCCOPY = 0x00330008 # dest = (NOT source)
NOTSRCERASE = 0x001100A6 # dest = (NOT src) AND (NOT dest)
MERGECOPY = 0x00C000CA # dest = (source AND pattern)
MERGEPAINT = 0x00BB0226 # dest = (NOT source) OR dest
PATCOPY = 0x00F00021 # dest = pattern
PATPAINT = 0x00FB0A09 # dest = DPSnoo
PATINVERT = 0x005A0049 # dest = pattern XOR dest
DSTINVERT = 0x00550009 # dest = (NOT dest)
BLACKNESS = 0x00000042 # dest = BLACK
WHITENESS = 0x00FF0062 # dest = WHITE
NOMIRRORBITMAP = 0x80000000 # Do not Mirror the bitmap in this call
CAPTUREBLT = 0x40000000 # Include layered windows
# Region flags
ERROR = 0
NULLREGION = 1
SIMPLEREGION = 2
COMPLEXREGION = 3
RGN_ERROR = ERROR
# CombineRgn() styles
RGN_AND = 1
RGN_OR = 2
RGN_XOR = 3
RGN_DIFF = 4
RGN_COPY = 5
RGN_MIN = RGN_AND
RGN_MAX = RGN_COPY
# StretchBlt() modes
BLACKONWHITE = 1
WHITEONBLACK = 2
COLORONCOLOR = 3
HALFTONE = 4
MAXSTRETCHBLTMODE = 4
STRETCH_ANDSCANS = BLACKONWHITE
STRETCH_ORSCANS = WHITEONBLACK
STRETCH_DELETESCANS = COLORONCOLOR
STRETCH_HALFTONE = HALFTONE
# PolyFill() modes
ALTERNATE = 1
WINDING = 2
POLYFILL_LAST = 2
# Layout orientation options
LAYOUT_RTL = 0x00000001 # Right to left
LAYOUT_BTT = 0x00000002 # Bottom to top
LAYOUT_VBH = 0x00000004 # Vertical before horizontal
LAYOUT_ORIENTATIONMASK = LAYOUT_RTL + LAYOUT_BTT + LAYOUT_VBH
LAYOUT_BITMAPORIENTATIONPRESERVED = 0x00000008
# Stock objects
WHITE_BRUSH = 0
LTGRAY_BRUSH = 1
GRAY_BRUSH = 2
DKGRAY_BRUSH = 3
BLACK_BRUSH = 4
NULL_BRUSH = 5
HOLLOW_BRUSH = NULL_BRUSH
WHITE_PEN = 6
BLACK_PEN = 7
NULL_PEN = 8
OEM_FIXED_FONT = 10
ANSI_FIXED_FONT = 11
ANSI_VAR_FONT = 12
SYSTEM_FONT = 13
DEVICE_DEFAULT_FONT = 14
DEFAULT_PALETTE = 15
SYSTEM_FIXED_FONT = 16
# Metafile functions
META_SETBKCOLOR = 0x0201
META_SETBKMODE = 0x0102
META_SETMAPMODE = 0x0103
META_SETROP2 = 0x0104
META_SETRELABS = 0x0105
META_SETPOLYFILLMODE = 0x0106
META_SETSTRETCHBLTMODE = 0x0107
META_SETTEXTCHAREXTRA = 0x0108
META_SETTEXTCOLOR = 0x0209
META_SETTEXTJUSTIFICATION = 0x020A
META_SETWINDOWORG = 0x020B
META_SETWINDOWEXT = 0x020C
META_SETVIEWPORTORG = 0x020D
META_SETVIEWPORTEXT = 0x020E
META_OFFSETWINDOWORG = 0x020F
META_SCALEWINDOWEXT = 0x0410
META_OFFSETVIEWPORTORG = 0x0211
META_SCALEVIEWPORTEXT = 0x0412
META_LINETO = 0x0213
META_MOVETO = 0x0214
META_EXCLUDECLIPRECT = 0x0415
META_INTERSECTCLIPRECT = 0x0416
META_ARC = 0x0817
META_ELLIPSE = 0x0418
META_FLOODFILL = 0x0419
META_PIE = 0x081A
META_RECTANGLE = 0x041B
META_ROUNDRECT = 0x061C
META_PATBLT = 0x061D
META_SAVEDC = 0x001E
META_SETPIXEL = 0x041F
META_OFFSETCLIPRGN = 0x0220
META_TEXTOUT = 0x0521
META_BITBLT = 0x0922
META_STRETCHBLT = 0x0B23
META_POLYGON = 0x0324
META_POLYLINE = 0x0325
META_ESCAPE = 0x0626
META_RESTOREDC = 0x0127
META_FILLREGION = 0x0228
META_FRAMEREGION = 0x0429
META_INVERTREGION = 0x012A
META_PAINTREGION = 0x012B
META_SELECTCLIPREGION = 0x012C
META_SELECTOBJECT = 0x012D
META_SETTEXTALIGN = 0x012E
META_CHORD = 0x0830
META_SETMAPPERFLAGS = 0x0231
META_EXTTEXTOUT = 0x0a32
META_SETDIBTODEV = 0x0d33
META_SELECTPALETTE = 0x0234
META_REALIZEPALETTE = 0x0035
META_ANIMATEPALETTE = 0x0436
META_SETPALENTRIES = 0x0037
META_POLYPOLYGON = 0x0538
META_RESIZEPALETTE = 0x0139
META_DIBBITBLT = 0x0940
META_DIBSTRETCHBLT = 0x0b41
META_DIBCREATEPATTERNBRUSH = 0x0142
META_STRETCHDIB = 0x0f43
META_EXTFLOODFILL = 0x0548
META_SETLAYOUT = 0x0149
META_DELETEOBJECT = 0x01f0
META_CREATEPALETTE = 0x00f7
META_CREATEPATTERNBRUSH = 0x01F9
META_CREATEPENINDIRECT = 0x02FA
META_CREATEFONTINDIRECT = 0x02FB
META_CREATEBRUSHINDIRECT = 0x02FC
META_CREATEREGION = 0x06FF
# Metafile escape codes
NEWFRAME = 1
ABORTDOC = 2
NEXTBAND = 3
SETCOLORTABLE = 4
GETCOLORTABLE = 5
FLUSHOUTPUT = 6
DRAFTMODE = 7
QUERYESCSUPPORT = 8
SETABORTPROC = 9
STARTDOC = 10
ENDDOC = 11
GETPHYSPAGESIZE = 12
GETPRINTINGOFFSET = 13
GETSCALINGFACTOR = 14
MFCOMMENT = 15
GETPENWIDTH = 16
SETCOPYCOUNT = 17
SELECTPAPERSOURCE = 18
DEVICEDATA = 19
PASSTHROUGH = 19
GETTECHNOLGY = 20
GETTECHNOLOGY = 20
SETLINECAP = 21
SETLINEJOIN = 22
SETMITERLIMIT = 23
BANDINFO = 24
DRAWPATTERNRECT = 25
GETVECTORPENSIZE = 26
GETVECTORBRUSHSIZE = 27
ENABLEDUPLEX = 28
GETSETPAPERBINS = 29
GETSETPRINTORIENT = 30
ENUMPAPERBINS = 31
SETDIBSCALING = 32
EPSPRINTING = 33
ENUMPAPERMETRICS = 34
GETSETPAPERMETRICS = 35
POSTSCRIPT_DATA = 37
POSTSCRIPT_IGNORE = 38
MOUSETRAILS = 39
GETDEVICEUNITS = 42
GETEXTENDEDTEXTMETRICS = 256
GETEXTENTTABLE = 257
GETPAIRKERNTABLE = 258
GETTRACKKERNTABLE = 259
EXTTEXTOUT = 512
GETFACENAME = 513
DOWNLOADFACE = 514
ENABLERELATIVEWIDTHS = 768
ENABLEPAIRKERNING = 769
SETKERNTRACK = 770
SETALLJUSTVALUES = 771
SETCHARSET = 772
STRETCHBLT = 2048
METAFILE_DRIVER = 2049
GETSETSCREENPARAMS = 3072
QUERYDIBSUPPORT = 3073
BEGIN_PATH = 4096
CLIP_TO_PATH = 4097
END_PATH = 4098
EXT_DEVICE_CAPS = 4099
RESTORE_CTM = 4100
SAVE_CTM = 4101
SET_ARC_DIRECTION = 4102
SET_BACKGROUND_COLOR = 4103
SET_POLY_MODE = 4104
SET_SCREEN_ANGLE = 4105
SET_SPREAD = 4106
TRANSFORM_CTM = 4107
SET_CLIP_BOX = 4108
SET_BOUNDS = 4109
SET_MIRROR_MODE = 4110
OPENCHANNEL = 4110
DOWNLOADHEADER = 4111
CLOSECHANNEL = 4112
POSTSCRIPT_PASSTHROUGH = 4115
ENCAPSULATED_POSTSCRIPT = 4116
POSTSCRIPT_IDENTIFY = 4117
POSTSCRIPT_INJECTION = 4118
CHECKJPEGFORMAT = 4119
CHECKPNGFORMAT = 4120
GET_PS_FEATURESETTING = 4121
GDIPLUS_TS_QUERYVER = 4122
GDIPLUS_TS_RECORD = 4123
SPCLPASSTHROUGH2 = 4568
#--- Structures ---------------------------------------------------------------
# typedef struct _RECT {
# LONG left;
# LONG top;
# LONG right;
# LONG bottom;
# }RECT, *PRECT;
class RECT(Structure):
_fields_ = [
('left', LONG),
('top', LONG),
('right', LONG),
('bottom', LONG),
]
PRECT = POINTER(RECT)
LPRECT = PRECT
# typedef struct tagPOINT {
# LONG x;
# LONG y;
# } POINT;
class POINT(Structure):
_fields_ = [
('x', LONG),
('y', LONG),
]
PPOINT = POINTER(POINT)
LPPOINT = PPOINT
# typedef struct tagBITMAP {
# LONG bmType;
# LONG bmWidth;
# LONG bmHeight;
# LONG bmWidthBytes;
# WORD bmPlanes;
# WORD bmBitsPixel;
# LPVOID bmBits;
# } BITMAP, *PBITMAP;
class BITMAP(Structure):
_fields_ = [
("bmType", LONG),
("bmWidth", LONG),
("bmHeight", LONG),
("bmWidthBytes", LONG),
("bmPlanes", WORD),
("bmBitsPixel", WORD),
("bmBits", LPVOID),
]
PBITMAP = POINTER(BITMAP)
LPBITMAP = PBITMAP
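# Usage sketch (illustrative only): these ctypes structures mirror the
# Win32 layouts above and are typically passed by reference to API calls.
#
#   rc = RECT(0, 0, 640, 480)
#   width  = rc.right - rc.left     # 640
#   height = rc.bottom - rc.top     # 480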
#==============================================================================
# This calculates the list of exported symbols.
_all = set(vars().keys()).difference(_all)
__all__ = [_x for _x in _all if not _x.startswith('_')]
__all__.sort()
#==============================================================================
| icchy/tracecorn | unitracer/lib/windows/amd64/gdi32.py | Python | mit | 12,573 | 0.021236 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib2
import time, os
import sys, fileinput
from bs4 import BeautifulSoup
class Grabber(object):
def use(self):
print ""
print "* This just Fucking whatever for grabbing."
print "* For license just fucking to change this. ^Summon Agus Created."
print "-------------------------------------------------------------------------------------"
print "[1] Add Note : ./notes.py addnote <file_name> <title> <content> <tag1, tag2>"
print "[2] List Note : ./notes.py listnote <file_name>"
print "[3] Delete Note : ./notes.py delnote <file_name> <numb_line>"
print "[4] Add Url to Grab : ./notes.py addurl <file_name> <url>"
print "-------------------------------------------------------------------------------------"
print ""
def addnote(self, args):
self.help = "./notes.py addnote <file_name> <title> <content> <tag1, tag2>"
        if len(sys.argv) < 6:
            sys.exit("[-] Fucking Damn!!\n[?] Use something similar to this: " + self.help)
f_note_out = sys.argv[2]
title = sys.argv[3]
content = sys.argv[4]
tags = sys.argv[5]
print "[+] Your args is: ./notes.py", args, f_note_out, title, content, tags
time.sleep(1)
print "[>] Waiting for save your note ..."
my_note = '"'+title+'": "'+content+'"'+ ' tag: '+ tags
""" [?] Trying if file was exists, so note will add in new line.
[?] But, if file is doesn't exists, this program will automatically write file with your first argument.
"""
try:
f_note = open(f_note_out, 'a')
my_note = my_note + '\n'
except IOError:
f_note = open(f_note_out, 'w')
my_note = '\n' + my_note
f_note.write(my_note)
f_note.close()
time.sleep(1)
print "[>] Your note was saved in <"+ f_note_out +">"
def listnote(self, args):
self.help = "./notes.py listnote <file_name>"
        if len(sys.argv) < 3:
            sys.exit("[-] Fucking Damn!!\n[?] Use something similar to this: " + self.help)
print "[+] Your args is: ./notes.py", args, sys.argv[2]
try:
with open(sys.argv[2], "r") as f:
print " -------------------------------------- "
for line in f:
print line.replace("\n", "")
time.sleep(0.3)
print " -------------------------------------- "
except IOError:
sys.exit("[-] File Doesn't exists!!"+\
"\n[?] This your path now: " +str(os.getcwd())+\
"\n[?] This files and folders in your path now: " + str(os.listdir('.')) )
def delnote(self, args):
self.help = "./notes.py delnote <file_name> <numb_line>"
        if len(sys.argv) < 4:
            sys.exit("[-] Fucking Damn!!\n[?] Use something similar to this: " + self.help)
f_note_out = str(sys.argv[2])
try:
for numb, line in enumerate(fileinput.input(f_note_out, inplace=True)): #start index from 0
if numb == int(sys.argv[3]):
continue
else:
sys.stdout.write(line)
sys.exit("[+] Success delete line <"+sys.argv[3]+"> in file of <"+ f_note_out +">")
        except (IOError, OSError):
sys.exit("[-] File Doesn't exists!!"+\
"\n[?] This your path now: " +str(os.getcwd())+\
"\n[?] This files and folders in your path now: " + str(os.listdir('.')) )
def addurl(self, args):
self.help = "./notes.py addurl <file_name> <url>"
        if len(sys.argv) < 4:
            sys.exit("[-] Fucking Damn!!\n[?] Use something similar to this: " + self.help)
url = str(sys.argv[3])
f_note_out = str(sys.argv[2])
print "[+] Your args is: ./notes.py", args, f_note_out, url
agent = {'User-Agent':'Mozilla/5.0'}
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
soup = BeautifulSoup(page)
title = soup.title.string.encode('utf-8')
descriptions = soup.findAll(attrs={"name":"description"})[0]['content'].encode('utf-8')
keywords = soup.findAll(attrs={"name":"keywords"})[0]['content'].encode('utf-8')
print "[>] Waiting for save your note ..."
time.sleep(1)
my_note = '"'+title+'": "'+descriptions+'"'+ ' tag: '+ keywords
try:
f_note = open(f_note_out, 'a')
my_note = my_note + '\n'
except IOError:
f_note = open(f_note_out, 'w')
my_note = '\n' + my_note
f_note.write(my_note)
f_note.close()
time.sleep(1)
print "[>] Your url note was saved in <"+ f_note_out +">"
if __name__ == "__main__":
mome = Grabber()
try:
args = str(sys.argv[1])
if args == 'addnote':
mome.addnote(args)
elif args == 'listnote':
mome.listnote(args)
elif args == 'delnote':
mome.delnote(args)
elif args == 'addurl':
mome.addurl(args)
else:
print "Funcking damn!, please checkout your input"
except IndexError:
mome.use()
| agusmakmun/Some-Examples-of-Simple-Python-Script | grabbing/notes.py | Python | agpl-3.0 | 5,474 | 0.011874 |
# The contents of this file are automatically written by
# tools/generate_schema_wrapper.py. Do not modify directly.
from . import core
import pandas as pd
from altair.utils.schemapi import Undefined
from altair.utils import parse_shorthand
class FieldChannelMixin(object):
def to_dict(self, validate=True, ignore=(), context=None):
context = context or {}
shorthand = self._get('shorthand')
field = self._get('field')
if shorthand is not Undefined and field is not Undefined:
raise ValueError("{} specifies both shorthand={} and field={}. "
"".format(self.__class__.__name__, shorthand, field))
if isinstance(shorthand, (tuple, list)):
# If given a list of shorthands, then transform it to a list of classes
kwds = self._kwds.copy()
kwds.pop('shorthand')
return [self.__class__(sh, **kwds).to_dict(validate=validate, ignore=ignore, context=context)
for sh in shorthand]
if shorthand is Undefined:
parsed = {}
elif isinstance(shorthand, str):
parsed = parse_shorthand(shorthand, data=context.get('data', None))
type_required = 'type' in self._kwds
type_in_shorthand = 'type' in parsed
type_defined_explicitly = self._get('type') is not Undefined
if not type_required:
# Secondary field names don't require a type argument in VegaLite 3+.
# We still parse it out of the shorthand, but drop it here.
parsed.pop('type', None)
elif not (type_in_shorthand or type_defined_explicitly):
if isinstance(context.get('data', None), pd.DataFrame):
raise ValueError("{} encoding field is specified without a type; "
"the type cannot be inferred because it does not "
"match any column in the data.".format(shorthand))
else:
raise ValueError("{} encoding field is specified without a type; "
"the type cannot be automatically inferred because "
"the data is not specified as a pandas.DataFrame."
"".format(shorthand))
else:
# Shorthand is not a string; we pass the definition to field,
# and do not do any parsing.
parsed = {'field': shorthand}
# Set shorthand to Undefined, because it's not part of the base schema.
self.shorthand = Undefined
self._kwds.update({k: v for k, v in parsed.items()
if self._get(k) is Undefined})
return super(FieldChannelMixin, self).to_dict(
validate=validate,
ignore=ignore,
context=context
)
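# Illustrative shorthand resolution (assuming the conventional
# `import altair as alt` and a DataFrame `df`): a string such as
# "mean(Horsepower):Q" passed to a field channel is split by
# parse_shorthand into aggregate="mean", field="Horsepower",
# type="quantitative" before serialization.
#
#   alt.Chart(df).mark_bar().encode(alt.Color("mean(Horsepower):Q"))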
class ValueChannelMixin(object):
def to_dict(self, validate=True, ignore=(), context=None):
context = context or {}
condition = getattr(self, 'condition', Undefined)
copy = self # don't copy unless we need to
if condition is not Undefined:
if isinstance(condition, core.SchemaBase):
pass
elif 'field' in condition and 'type' not in condition:
kwds = parse_shorthand(condition['field'], context.get('data', None))
copy = self.copy(deep=['condition'])
copy.condition.update(kwds)
return super(ValueChannelMixin, copy).to_dict(validate=validate,
ignore=ignore,
context=context)
class DatumChannelMixin(object):
def to_dict(self, validate=True, ignore=(), context=None):
context = context or {}
datum = getattr(self, 'datum', Undefined)
copy = self # don't copy unless we need to
if datum is not Undefined:
if isinstance(datum, core.SchemaBase):
pass
return super(DatumChannelMixin, copy).to_dict(validate=validate,
ignore=ignore,
context=context)
class Color(FieldChannelMixin, core.StringFieldDefWithCondition):
"""Color schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similar to
binning in Vega-lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : anyOf(:class:`ConditionalStringValueDef`,
List(:class:`ConditionalStringValueDef`))
One or more value definition(s) with `a selection or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
legend : anyOf(:class:`Legend`, None)
An object defining properties of the legend.
If ``null``, the legend for the encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
**See also:** `legend <https://vega.github.io/vega-lite/docs/legend.html>`__
documentation.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
Javascript.
* `A sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ for sorting
by another encoding channel. (This type of sort definition is not available for
``row`` and ``column`` channels.)
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field.
or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
* Data values for a temporal field can be either a date-time string (e.g.,
      ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``, ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "color"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, legend=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(Color, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, legend=legend, scale=scale,
sort=sort, timeUnit=timeUnit, title=title, type=type, **kwds)
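# A typical (assumed) construction, using the scale/legend properties
# documented above:
#
#   alt.Color("species:N", scale=alt.Scale(scheme="dark2"),
#             legend=alt.Legend(title="Species"))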
class ColorValue(ValueChannelMixin, core.StringValueDefWithCondition):
"""ColorValue schema wrapper
Mapping(required=[])
A ValueDef with Condition<ValueDef | FieldDef> where either the condition or the value are
optional.
Attributes
----------
condition : anyOf(:class:`ConditionalMarkPropFieldDef`, :class:`ConditionalStringValueDef`,
List(:class:`ConditionalStringValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : anyOf(string, None)
A constant value in visual domain (e.g., ``"red"`` / "#0099ff" for color, values
between ``0`` to ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "color"
def __init__(self, value, condition=Undefined, **kwds):
super(ColorValue, self).__init__(value=value, condition=condition, **kwds)
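# A conditional color value is usually composed with alt.condition
# (assumed-typical usage; `brush` is a hypothetical selection):
#
#   alt.condition(brush, alt.Color("species:N"), alt.ColorValue("lightgray"))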
class Column(FieldChannelMixin, core.FacetFieldDef):
"""Column schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similar to
binning in Vega-lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
header : :class:`Header`
An object defining properties of a facet's header.
sort : anyOf(:class:`SortArray`, :class:`SortOrder`, :class:`EncodingSortField`, None)
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
          JavaScript.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
          in their original order. For a discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
timeUnit : :class:`TimeUnit`
        Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
        field, or `a temporal field that gets cast as ordinal
        <https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
        **Notes:**
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
        * Data values for a temporal field can be either a date-time string (e.g.,
          ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``, ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "column"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
header=Undefined, sort=Undefined, timeUnit=Undefined, title=Undefined, type=Undefined,
**kwds):
super(Column, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
header=header, sort=sort, timeUnit=timeUnit, title=title,
type=type, **kwds)
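# A column-faceting sketch, assuming ``alt.Column`` and ``alt.Header`` are
# exposed publicly; the chart is split into one column per distinct value of a
# hypothetical 'year' field:
#
#     alt.Chart(df).mark_bar().encode(
#         x='month:O',
#         y='sales:Q',
#         column=alt.Column('year:O', header=alt.Header(title='Year')),
#     )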
class Detail(FieldChannelMixin, core.FieldDefWithoutScale):
"""Detail schema wrapper
Mapping(required=[shorthand])
Definition object for a data field, its type and transformation of an encoding channel.
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, enum('binned'), None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
        field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
        binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
        Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
        field, or `a temporal field that gets cast as ordinal
        <https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
        **Notes:**
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
        * Data values for a temporal field can be either a date-time string (e.g.,
          ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``, ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "detail"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, type=Undefined, **kwds):
super(Detail, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, type=type, **kwds)
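# A grouping sketch, assuming ``alt.Detail`` is exposed publicly; ``detail``
# partitions the marks (one line per value of a hypothetical 'symbol' field in
# a hypothetical ``stocks`` DataFrame) without mapping that field to any
# visual property:
#
#     alt.Chart(stocks).mark_line().encode(
#         x='date:T',
#         y='price:Q',
#         detail=alt.Detail('symbol', type='nominal'),
#     )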
class Facet(FieldChannelMixin, core.FacetFieldDef):
"""Facet schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
        field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
        binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
header : :class:`Header`
An object defining properties of a facet's header.
sort : anyOf(:class:`SortArray`, :class:`SortOrder`, :class:`EncodingSortField`, None)
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
          JavaScript.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
          in their original order. For a discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
timeUnit : :class:`TimeUnit`
        Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
        field, or `a temporal field that gets cast as ordinal
        <https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
        **Notes:**
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
        * Data values for a temporal field can be either a date-time string (e.g.,
          ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``, ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "facet"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
header=Undefined, sort=Undefined, timeUnit=Undefined, title=Undefined, type=Undefined,
**kwds):
super(Facet, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
header=header, sort=sort, timeUnit=timeUnit, title=title, type=type,
**kwds)
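# A facet-channel sketch, assuming ``alt.Facet`` is exposed publicly and that
# the ``facet`` encoding channel is accepted by ``encode`` in this schema
# version; 'region' is a hypothetical field, and ``sort`` controls the order
# of the facet panels:
#
#     alt.Chart(df).mark_point().encode(
#         x='x:Q',
#         y='y:Q',
#         facet=alt.Facet('region:N', sort='descending'),
#     )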
class Fill(FieldChannelMixin, core.StringFieldDefWithCondition):
"""Fill schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
        field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
        binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : anyOf(:class:`ConditionalStringValueDef`,
List(:class:`ConditionalStringValueDef`))
One or more value definition(s) with `a selection or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
legend : anyOf(:class:`Legend`, None)
An object defining properties of the legend.
If ``null``, the legend for the encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
**See also:** `legend <https://vega.github.io/vega-lite/docs/legend.html>`__
documentation.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
        transforms values in the data domain (numbers, dates, strings, etc.) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
          JavaScript.
* `A sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ for sorting
by another encoding channel. (This type of sort definition is not available for
``row`` and ``column`` channels.)
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
          in their original order. For a discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
timeUnit : :class:`TimeUnit`
        Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
        field, or `a temporal field that gets cast as ordinal
        <https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
        **Notes:**
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
        * Data values for a temporal field can be either a date-time string (e.g.,
          ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``, ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "fill"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, legend=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(Fill, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, legend=legend, scale=scale,
sort=sort, timeUnit=timeUnit, title=title, type=type, **kwds)
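# A fill-encoding sketch, assuming ``alt.Fill`` and ``alt.Scale`` are exposed
# publicly; a hypothetical quantitative 'rate' field drives the fill color
# through a named color scheme:
#
#     alt.Chart(df).mark_area().encode(
#         x='date:T',
#         y='value:Q',
#         fill=alt.Fill('rate:Q', scale=alt.Scale(scheme='blues')),
#     )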
class FillValue(ValueChannelMixin, core.StringValueDefWithCondition):
"""FillValue schema wrapper
Mapping(required=[])
    A ValueDef with Condition<ValueDef | FieldDef> where either the condition or the value is
    optional.
Attributes
----------
condition : anyOf(:class:`ConditionalMarkPropFieldDef`, :class:`ConditionalStringValueDef`,
List(:class:`ConditionalStringValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : anyOf(string, None)
        A constant value in the visual domain (e.g., ``"red"`` / ``"#0099ff"`` for color, or
        values between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "fill"
def __init__(self, value, condition=Undefined, **kwds):
super(FillValue, self).__init__(value=value, condition=condition, **kwds)
class FillOpacity(FieldChannelMixin, core.NumericFieldDefWithCondition):
"""FillOpacity schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
        field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
        binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : anyOf(:class:`ConditionalNumberValueDef`,
List(:class:`ConditionalNumberValueDef`))
One or more value definition(s) with `a selection or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
legend : anyOf(:class:`Legend`, None)
An object defining properties of the legend.
If ``null``, the legend for the encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
**See also:** `legend <https://vega.github.io/vega-lite/docs/legend.html>`__
documentation.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
        transforms values in the data domain (numbers, dates, strings, etc.) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
          JavaScript.
* `A sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ for sorting
by another encoding channel. (This type of sort definition is not available for
``row`` and ``column`` channels.)
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
          in their original order. For a discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
timeUnit : :class:`TimeUnit`
        Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
        field, or `a temporal field that gets cast as ordinal
        <https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
        **Notes:**
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
        * Data values for a temporal field can be either a date-time string (e.g.,
          ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``, ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "fillOpacity"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, legend=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(FillOpacity, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, legend=legend, scale=scale,
sort=sort, timeUnit=timeUnit, title=title, type=type, **kwds)
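# A fill-opacity sketch, assuming ``alt.FillOpacity`` is exposed publicly; the
# explicit scale range keeps the encoded opacities of a hypothetical 'share'
# field within a readable band:
#
#     alt.Chart(df).mark_bar().encode(
#         x='category:N',
#         y='count():Q',
#         fillOpacity=alt.FillOpacity('share:Q',
#                                     scale=alt.Scale(range=[0.3, 1.0])),
#     )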
class FillOpacityValue(ValueChannelMixin, core.NumericValueDefWithCondition):
"""FillOpacityValue schema wrapper
Mapping(required=[])
    A ValueDef with Condition<ValueDef | FieldDef> where either the condition or the value is
    optional.
Attributes
----------
condition : anyOf(:class:`ConditionalMarkPropFieldDef`, :class:`ConditionalNumberValueDef`,
List(:class:`ConditionalNumberValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : float
        A constant value in the visual domain (e.g., ``"red"`` / ``"#0099ff"`` for color, or
        values between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "fillOpacity"
def __init__(self, value, condition=Undefined, **kwds):
super(FillOpacityValue, self).__init__(value=value, condition=condition, **kwds)
class Href(FieldChannelMixin, core.TextFieldDefWithCondition):
"""Href schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, enum('binned'), None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
        field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
        binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : anyOf(:class:`ConditionalValueDef`, List(:class:`ConditionalValueDef`))
One or more value definition(s) with `a selection or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
format : string
The text formatting pattern for labels of guides (axes, legends, headers) and text
marks.
* If the format type is ``"number"`` (e.g., for quantitative fields), this is D3's
`number format pattern <https://github.com/d3/d3-format#locale_format>`__.
* If the format type is ``"time"`` (e.g., for temporal fields), this is D3's `time
format pattern <https://github.com/d3/d3-time-format#locale_format>`__.
See the `format documentation <https://vega.github.io/vega-lite/docs/format.html>`__
for more examples.
**Default value:** Derived from `numberFormat
<https://vega.github.io/vega-lite/docs/config.html#format>`__ config for number
format and from `timeFormat
<https://vega.github.io/vega-lite/docs/config.html#format>`__ config for time
format.
formatType : enum('number', 'time')
The format type for labels ( ``"number"`` or ``"time"`` ).
**Default value:**
        * ``"time"`` for temporal fields and ordinal and nominal fields with ``timeUnit``.
        * ``"number"`` for quantitative fields as well as ordinal and nominal fields without
``timeUnit``.
timeUnit : :class:`TimeUnit`
        Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
        field, or `a temporal field that gets cast as ordinal
        <https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
        **Notes:**
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
        * Data values for a temporal field can be either a date-time string (e.g.,
          ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``, ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "href"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, format=Undefined, formatType=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(Href, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, format=format,
formatType=formatType, timeUnit=timeUnit, title=title, type=type,
**kwds)
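# A hyperlink sketch, assuming ``alt.Href`` is exposed publicly and each row
# carries a hypothetical 'url' field; clicking a mark then opens that URL:
#
#     alt.Chart(df).mark_point().encode(
#         x='x:Q',
#         y='y:Q',
#         href=alt.Href('url:N'),
#         tooltip='url:N',
#     )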
class HrefValue(ValueChannelMixin, core.TextValueDefWithCondition):
"""HrefValue schema wrapper
Mapping(required=[])
    A ValueDef with Condition<ValueDef | FieldDef> where either the condition or the value is
    optional.
Attributes
----------
condition : anyOf(:class:`ConditionalTextFieldDef`, :class:`ConditionalValueDef`,
List(:class:`ConditionalValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : :class:`Value`
        A constant value in the visual domain (e.g., ``"red"`` / ``"#0099ff"`` for color, or
        values between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "href"
def __init__(self, value, condition=Undefined, **kwds):
super(HrefValue, self).__init__(value=value, condition=condition, **kwds)
class Key(FieldChannelMixin, core.FieldDefWithoutScale):
"""Key schema wrapper
Mapping(required=[shorthand])
Definition object for a data field, its type and transformation of an encoding channel.
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, enum('binned'), None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similar to
binning in Vega-lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
        Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
        field, or `a temporal field that gets cast as ordinal
        <https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
        **Notes:**
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
        * Data values for a temporal field can be either a date-time string (e.g.,
          ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``, ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "key"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, type=Undefined, **kwds):
super(Key, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, type=type, **kwds)
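# A key-channel sketch, assuming ``alt.Key`` is exposed publicly; ``key``
# gives each mark a stable identity (a hypothetical 'id' field here), which
# Vega uses to match data points across dynamic data updates:
#
#     alt.Chart(df).mark_point().encode(
#         x='x:Q',
#         y='y:Q',
#         key=alt.Key('id:N'),
#     )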
class Latitude(FieldChannelMixin, core.LatLongFieldDef):
"""Latitude schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similar to
binning in Vega-lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
        Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
        field, or `a temporal field that gets cast as ordinal
        <https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
        **Notes:**
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : enum('quantitative')
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
        * Data values for a temporal field can be either a date-time string (e.g.,
          ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``, ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When used with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When used with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When used with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "latitude"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, type=Undefined, **kwds):
super(Latitude, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, type=type, **kwds)
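# Illustrative usage sketch (not part of the generated schema): these channel
# wrappers are normally used through the public ``altair`` namespace, e.g. to
# position marks on a geographic projection. ``airports`` is a hypothetical
# DataFrame with quantitative ``lat``/``lon`` columns:
#
#     import altair as alt
#     chart = alt.Chart(airports).mark_circle(size=10).encode(
#         latitude=alt.Latitude('lat', type='quantitative'),
#         longitude=alt.Longitude('lon', type='quantitative'),
#     ).project(type='albersUsa')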
class LatitudeValue(ValueChannelMixin, core.NumberValueDef):
"""LatitudeValue schema wrapper
Mapping(required=[value])
Definition object for a constant value of an encoding channel.
Attributes
----------
value : float
A constant value in the visual domain (e.g., ``"red"`` or ``"#0099ff"`` for color, values
between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "latitude"
def __init__(self, value, **kwds):
super(LatitudeValue, self).__init__(value=value, **kwds)
class Latitude2(FieldChannelMixin, core.SecondaryFieldDef):
"""Latitude2 schema wrapper
Mapping(required=[shorthand])
A field definition of a secondary channel that shares a scale with another primary channel.
For example, ``x2``, ``xError`` and ``xError2`` share the same scale with ``x``.
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field, or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes:**
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both the field definition's ``title`` and the axis, header, or legend ``title``
are defined, the axis/header/legend title will be used.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "latitude2"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, **kwds):
super(Latitude2, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, **kwds)
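# Illustrative sketch: secondary channels such as Latitude2 pair with their
# primary channel to encode ranged marks. Assuming a hypothetical DataFrame
# ``segments`` with start/end coordinates, a rule mark could span the range:
#
#     import altair as alt
#     rules = alt.Chart(segments).mark_rule().encode(
#         latitude='lat_start:Q',
#         longitude='lon_start:Q',
#         latitude2='lat_end',
#         longitude2='lon_end',
#     )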
class Latitude2Value(ValueChannelMixin, core.NumberValueDef):
"""Latitude2Value schema wrapper
Mapping(required=[value])
Definition object for a constant value of an encoding channel.
Attributes
----------
value : float
A constant value in the visual domain (e.g., ``"red"`` or ``"#0099ff"`` for color, values
between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "latitude2"
def __init__(self, value, **kwds):
super(Latitude2Value, self).__init__(value=value, **kwds)
class Longitude(FieldChannelMixin, core.LatLongFieldDef):
"""Longitude schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field, or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes:**
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both the field definition's ``title`` and the axis, header, or legend ``title``
are defined, the axis/header/legend title will be used.
type : enum('quantitative')
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When used with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When used with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When used with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "longitude"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, type=Undefined, **kwds):
super(Longitude, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, type=type, **kwds)
class LongitudeValue(ValueChannelMixin, core.NumberValueDef):
"""LongitudeValue schema wrapper
Mapping(required=[value])
Definition object for a constant value of an encoding channel.
Attributes
----------
value : float
A constant value in the visual domain (e.g., ``"red"`` or ``"#0099ff"`` for color, values
between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "longitude"
def __init__(self, value, **kwds):
super(LongitudeValue, self).__init__(value=value, **kwds)
class Longitude2(FieldChannelMixin, core.SecondaryFieldDef):
"""Longitude2 schema wrapper
Mapping(required=[shorthand])
A field definition of a secondary channel that shares a scale with another primary channel.
For example, ``x2``, ``xError`` and ``xError2`` share the same scale with ``x``.
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field, or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes:**
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both the field definition's ``title`` and the axis, header, or legend ``title``
are defined, the axis/header/legend title will be used.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "longitude2"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, **kwds):
super(Longitude2, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, **kwds)
class Longitude2Value(ValueChannelMixin, core.NumberValueDef):
"""Longitude2Value schema wrapper
Mapping(required=[value])
Definition object for a constant value of an encoding channel.
Attributes
----------
value : float
A constant value in the visual domain (e.g., ``"red"`` or ``"#0099ff"`` for color, values
between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "longitude2"
def __init__(self, value, **kwds):
super(Longitude2Value, self).__init__(value=value, **kwds)
class Opacity(FieldChannelMixin, core.NumericFieldDefWithCondition):
"""Opacity schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : anyOf(:class:`ConditionalNumberValueDef`,
List(:class:`ConditionalNumberValueDef`))
One or more value definition(s) with `a selection or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
legend : anyOf(:class:`Legend`, None)
An object defining properties of the legend.
If ``null``, the legend for the encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
**See also:** `legend <https://vega.github.io/vega-lite/docs/legend.html>`__
documentation.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc.) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ for sorting
by another encoding channel. (This type of sort definition is not available for
``row`` and ``column`` channels.)
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For a discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field, or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes:**
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both the field definition's ``title`` and the axis, header, or legend ``title``
are defined, the axis/header/legend title will be used.
type : :class:`StandardType`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When used with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When used with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When used with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "opacity"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, legend=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(Opacity, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, legend=legend, scale=scale,
sort=sort, timeUnit=timeUnit, title=title, type=type, **kwds)
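# Illustrative sketch: as a field channel, Opacity maps data values to mark
# opacity through a scale. ``df`` and its numeric ``confidence`` column are
# hypothetical:
#
#     import altair as alt
#     chart = alt.Chart(df).mark_point().encode(
#         x='x:Q',
#         y='y:Q',
#         opacity=alt.Opacity('confidence', type='quantitative',
#                             scale=alt.Scale(range=[0.2, 1.0])),
#     )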
class OpacityValue(ValueChannelMixin, core.NumericValueDefWithCondition):
"""OpacityValue schema wrapper
Mapping(required=[])
A ValueDef with Condition<ValueDef | FieldDef> where either the condition or the value is
optional.
Attributes
----------
condition : anyOf(:class:`ConditionalMarkPropFieldDef`, :class:`ConditionalNumberValueDef`,
List(:class:`ConditionalNumberValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : float
A constant value in the visual domain (e.g., ``"red"`` or ``"#0099ff"`` for color, values
between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "opacity"
def __init__(self, value, condition=Undefined, **kwds):
super(OpacityValue, self).__init__(value=value, condition=condition, **kwds)
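# Illustrative sketch: a value channel sets a constant instead of a data
# mapping (e.g. ``opacity=alt.OpacityValue(0.3)``), and the ``condition``
# attribute documented above is what ``alt.condition`` populates. ``df`` is a
# hypothetical DataFrame:
#
#     import altair as alt
#     brush = alt.selection_interval()
#     chart = alt.Chart(df).mark_point().encode(
#         x='x:Q',
#         y='y:Q',
#         opacity=alt.condition(brush, alt.value(1.0), alt.value(0.2)),
#     ).add_selection(brush)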
class Order(FieldChannelMixin, core.OrderFieldDef):
"""Order schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, enum('binned'), None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
sort : :class:`SortOrder`
The sort order. One of ``"ascending"`` (default) or ``"descending"``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field, or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes:**
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both the field definition's ``title`` and the axis, header, or legend ``title``
are defined, the axis/header/legend title will be used.
type : :class:`StandardType`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When used with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When used with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When used with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "order"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
sort=Undefined, timeUnit=Undefined, title=Undefined, type=Undefined, **kwds):
super(Order, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
sort=sort, timeUnit=timeUnit, title=title, type=type, **kwds)
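# Illustrative sketch: the order channel sets the stacking/drawing order of
# marks, e.g. the point-connection order of a line. ``df`` and its ``step``
# column are hypothetical:
#
#     import altair as alt
#     path = alt.Chart(df).mark_line().encode(
#         x='x:Q',
#         y='y:Q',
#         order=alt.Order('step', type='ordinal', sort='ascending'),
#     )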
class OrderValue(ValueChannelMixin, core.NumberValueDef):
"""OrderValue schema wrapper
Mapping(required=[value])
Definition object for a constant value of an encoding channel.
Attributes
----------
value : float
A constant value in the visual domain (e.g., ``"red"`` or ``"#0099ff"`` for color, values
between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "order"
def __init__(self, value, **kwds):
super(OrderValue, self).__init__(value=value, **kwds)
class Row(FieldChannelMixin, core.FacetFieldDef):
"""Row schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
header : :class:`Header`
An object defining properties of a facet's header.
sort : anyOf(:class:`SortArray`, :class:`SortOrder`, :class:`EncodingSortField`, None)
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For a discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field, or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes:**
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both the field definition's ``title`` and the axis, header, or legend ``title``
are defined, the axis/header/legend title will be used.
type : :class:`StandardType`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When used with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When used with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When used with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "row"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
header=Undefined, sort=Undefined, timeUnit=Undefined, title=Undefined, type=Undefined,
**kwds):
super(Row, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
header=header, sort=sort, timeUnit=timeUnit, title=title, type=type,
**kwds)
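# Illustrative sketch: the row channel facets a chart into vertically stacked
# subplots, one per distinct value of the encoded field. ``df`` and its
# ``species`` column are hypothetical:
#
#     import altair as alt
#     facets = alt.Chart(df).mark_bar().encode(
#         x=alt.X('x', type='quantitative', bin=True),
#         y='count():Q',
#         row=alt.Row('species', type='nominal'),
#     )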
class Shape(FieldChannelMixin, core.ShapeFieldDefWithCondition):
"""Shape schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : anyOf(:class:`ConditionalStringValueDef`,
List(:class:`ConditionalStringValueDef`))
One or more value definition(s) with `a selection or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
legend : anyOf(:class:`Legend`, None)
An object defining properties of the legend.
If ``null``, the legend for the encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
**See also:** `legend <https://vega.github.io/vega-lite/docs/legend.html>`__
documentation.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc.) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ for sorting
by another encoding channel. (This type of sort definition is not available for
``row`` and ``column`` channels.)
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For a discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field, or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes:**
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both the field definition's ``title`` and the axis, header, or legend ``title``
are defined, the axis/header/legend title will be used.
type : :class:`TypeForShape`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When used with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When used with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When used with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "shape"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, legend=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(Shape, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, legend=legend, scale=scale,
sort=sort, timeUnit=timeUnit, title=title, type=type, **kwds)
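# Illustrative sketch: the shape channel maps a discrete field to point shapes
# and produces a legend. ``df`` and its ``category`` column are hypothetical:
#
#     import altair as alt
#     chart = alt.Chart(df).mark_point().encode(
#         x='x:Q',
#         y='y:Q',
#         shape=alt.Shape('category', type='nominal'),
#     )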
class ShapeValue(ValueChannelMixin, core.ShapeValueDefWithCondition):
"""ShapeValue schema wrapper
Mapping(required=[])
A ValueDef with Condition<ValueDef | FieldDef> where either the condition or the value is
optional.
Attributes
----------
condition : anyOf(:class:`ConditionalMarkPropFieldDefTypeForShape`,
:class:`ConditionalStringValueDef`, List(:class:`ConditionalStringValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : anyOf(string, None)
A constant value in the visual domain (e.g., ``"red"`` or ``"#0099ff"`` for color, values
between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "shape"
def __init__(self, value, condition=Undefined, **kwds):
super(ShapeValue, self).__init__(value=value, condition=condition, **kwds)
class Size(FieldChannelMixin, core.NumericFieldDefWithCondition):
"""Size schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : anyOf(:class:`ConditionalNumberValueDef`,
List(:class:`ConditionalNumberValueDef`))
One or more value definition(s) with `a selection or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
legend : anyOf(:class:`Legend`, None)
An object defining properties of the legend.
If ``null``, the legend for the encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
**See also:** `legend <https://vega.github.io/vega-lite/docs/legend.html>`__
documentation.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc.) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ for sorting
by another encoding channel. (This type of sort definition is not available for
``row`` and ``column`` channels.)
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For a discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field, or `a temporal field that gets cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
* Data values for a temporal field can be either a date-time string (e.g.,
          ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``, ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "size"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, legend=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(Size, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, legend=legend, scale=scale,
sort=sort, timeUnit=timeUnit, title=title, type=type, **kwds)
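# Example (a sketch, not part of the generated schema): using ``Size`` as a field
# channel. It assumes the top-level Altair API (``alt.Chart``, ``alt.Scale``) and a
# hypothetical pandas DataFrame ``cars`` with the named columns.
#
#     import altair as alt
#     chart = alt.Chart(cars).mark_point().encode(
#         x='Horsepower:Q',
#         y='Miles_per_Gallon:Q',
#         # map a quantitative field to point area, with an explicit scale range
#         size=alt.Size('Acceleration:Q', scale=alt.Scale(range=[10, 500])),
#     )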
class SizeValue(ValueChannelMixin, core.NumericValueDefWithCondition):
"""SizeValue schema wrapper
Mapping(required=[])
    A ValueDef with Condition<ValueDef | FieldDef> where either the condition or the value is
    optional.
Attributes
----------
condition : anyOf(:class:`ConditionalMarkPropFieldDef`, :class:`ConditionalNumberValueDef`,
List(:class:`ConditionalNumberValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : float
        A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` for color,
        values between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "size"
def __init__(self, value, condition=Undefined, **kwds):
super(SizeValue, self).__init__(value=value, condition=condition, **kwds)
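# Example (a sketch): ``SizeValue`` sets a constant size, optionally gated by a
# selection via ``alt.condition``. ``alt`` and the DataFrame ``cars`` are assumed
# as above; the selection itself is illustrative.
#
#     brush = alt.selection_interval()
#     chart = alt.Chart(cars).mark_point().encode(
#         x='Horsepower:Q',
#         y='Miles_per_Gallon:Q',
#         # points inside the brush get size 200, all others size 20
#         size=alt.condition(brush, alt.SizeValue(200), alt.SizeValue(20)),
#     ).add_selection(brush)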
class Stroke(FieldChannelMixin, core.StringFieldDefWithCondition):
"""Stroke schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
        field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
        binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : anyOf(:class:`ConditionalStringValueDef`,
List(:class:`ConditionalStringValueDef`))
One or more value definition(s) with `a selection or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
legend : anyOf(:class:`Legend`, None)
An object defining properties of the legend.
If ``null``, the legend for the encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
**See also:** `legend <https://vega.github.io/vega-lite/docs/legend.html>`__
documentation.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
        transforms values in the data domain (numbers, dates, strings, etc.) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
          JavaScript.
* `A sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ for sorting
by another encoding channel. (This type of sort definition is not available for
``row`` and ``column`` channels.)
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
          in their original order. For a discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
timeUnit : :class:`TimeUnit`
        Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
        field, or for `a temporal field that gets cast as ordinal
        <https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
* Data values for a temporal field can be either a date-time string (e.g.,
          ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``, ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "stroke"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, legend=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(Stroke, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, legend=legend, scale=scale,
sort=sort, timeUnit=timeUnit, title=title, type=type, **kwds)
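# Example (a sketch): ``Stroke`` drives the stroke color of marks from a data field.
# ``alt`` and a hypothetical DataFrame ``df`` with ``date``, ``value``, and a nominal
# ``category`` column are assumptions, not part of this module.
#
#     chart = alt.Chart(df).mark_line().encode(
#         x='date:T',
#         y='value:Q',
#         # one stroke color per category, with the legend suppressed
#         stroke=alt.Stroke('category:N', legend=None),
#     )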
class StrokeValue(ValueChannelMixin, core.StringValueDefWithCondition):
"""StrokeValue schema wrapper
Mapping(required=[])
    A ValueDef with Condition<ValueDef | FieldDef> where either the condition or the value is
    optional.
Attributes
----------
condition : anyOf(:class:`ConditionalMarkPropFieldDef`, :class:`ConditionalStringValueDef`,
List(:class:`ConditionalStringValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : anyOf(string, None)
        A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` for color,
        values between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "stroke"
def __init__(self, value, condition=Undefined, **kwds):
super(StrokeValue, self).__init__(value=value, condition=condition, **kwds)
class StrokeOpacity(FieldChannelMixin, core.NumericFieldDefWithCondition):
"""StrokeOpacity schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
        field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
        binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : anyOf(:class:`ConditionalNumberValueDef`,
List(:class:`ConditionalNumberValueDef`))
One or more value definition(s) with `a selection or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
legend : anyOf(:class:`Legend`, None)
An object defining properties of the legend.
If ``null``, the legend for the encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
**See also:** `legend <https://vega.github.io/vega-lite/docs/legend.html>`__
documentation.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
        transforms values in the data domain (numbers, dates, strings, etc.) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
          JavaScript.
* `A sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ for sorting
by another encoding channel. (This type of sort definition is not available for
``row`` and ``column`` channels.)
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
          in their original order. For a discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
timeUnit : :class:`TimeUnit`
        Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
        field, or for `a temporal field that gets cast as ordinal
        <https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
* Data values for a temporal field can be either a date-time string (e.g.,
          ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``, ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "strokeOpacity"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, legend=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(StrokeOpacity, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, legend=legend,
scale=scale, sort=sort, timeUnit=timeUnit, title=title,
type=type, **kwds)
class StrokeOpacityValue(ValueChannelMixin, core.NumericValueDefWithCondition):
"""StrokeOpacityValue schema wrapper
Mapping(required=[])
    A ValueDef with Condition<ValueDef | FieldDef> where either the condition or the value is
    optional.
Attributes
----------
condition : anyOf(:class:`ConditionalMarkPropFieldDef`, :class:`ConditionalNumberValueDef`,
List(:class:`ConditionalNumberValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : float
        A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` for color,
        values between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "strokeOpacity"
def __init__(self, value, condition=Undefined, **kwds):
super(StrokeOpacityValue, self).__init__(value=value, condition=condition, **kwds)
class StrokeWidth(FieldChannelMixin, core.NumericFieldDefWithCondition):
"""StrokeWidth schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
        field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
        binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : anyOf(:class:`ConditionalNumberValueDef`,
List(:class:`ConditionalNumberValueDef`))
One or more value definition(s) with `a selection or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
legend : anyOf(:class:`Legend`, None)
An object defining properties of the legend.
If ``null``, the legend for the encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
**See also:** `legend <https://vega.github.io/vega-lite/docs/legend.html>`__
documentation.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
        transforms values in the data domain (numbers, dates, strings, etc.) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
          JavaScript.
* `A sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ for sorting
by another encoding channel. (This type of sort definition is not available for
``row`` and ``column`` channels.)
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
          in their original order. For a discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
timeUnit : :class:`TimeUnit`
        Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
        field, or for `a temporal field that gets cast as ordinal
        <https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
* Data values for a temporal field can be either a date-time string (e.g.,
          ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``, ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "strokeWidth"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, legend=Undefined, scale=Undefined, sort=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(StrokeWidth, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, legend=legend, scale=scale,
sort=sort, timeUnit=timeUnit, title=title, type=type, **kwds)
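# Example (a sketch): ``StrokeWidth`` maps a quantitative field to line width.
# ``alt`` and a hypothetical DataFrame ``df`` with ``date``, ``value``, and a numeric
# ``weight`` column are assumptions.
#
#     chart = alt.Chart(df).mark_line().encode(
#         x='date:T',
#         y='value:Q',
#         # thicker lines for larger weights
#         strokeWidth=alt.StrokeWidth('weight:Q'),
#     )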
class StrokeWidthValue(ValueChannelMixin, core.NumericValueDefWithCondition):
"""StrokeWidthValue schema wrapper
Mapping(required=[])
    A ValueDef with Condition<ValueDef | FieldDef> where either the condition or the value is
    optional.
Attributes
----------
condition : anyOf(:class:`ConditionalMarkPropFieldDef`, :class:`ConditionalNumberValueDef`,
List(:class:`ConditionalNumberValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : float
        A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` for color,
        values between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "strokeWidth"
def __init__(self, value, condition=Undefined, **kwds):
super(StrokeWidthValue, self).__init__(value=value, condition=condition, **kwds)
class Text(FieldChannelMixin, core.TextFieldDefWithCondition):
"""Text schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, enum('binned'), None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
        field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
        binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : anyOf(:class:`ConditionalValueDef`, List(:class:`ConditionalValueDef`))
One or more value definition(s) with `a selection or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
format : string
The text formatting pattern for labels of guides (axes, legends, headers) and text
marks.
* If the format type is ``"number"`` (e.g., for quantitative fields), this is D3's
`number format pattern <https://github.com/d3/d3-format#locale_format>`__.
* If the format type is ``"time"`` (e.g., for temporal fields), this is D3's `time
format pattern <https://github.com/d3/d3-time-format#locale_format>`__.
See the `format documentation <https://vega.github.io/vega-lite/docs/format.html>`__
for more examples.
**Default value:** Derived from `numberFormat
<https://vega.github.io/vega-lite/docs/config.html#format>`__ config for number
format and from `timeFormat
<https://vega.github.io/vega-lite/docs/config.html#format>`__ config for time
format.
formatType : enum('number', 'time')
The format type for labels ( ``"number"`` or ``"time"`` ).
**Default value:**
        * ``"time"`` for temporal fields and ordinal and nominal fields with ``timeUnit``.
        * ``"number"`` for quantitative fields as well as ordinal and nominal fields without
``timeUnit``.
timeUnit : :class:`TimeUnit`
        Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
        field, or for `a temporal field that gets cast as ordinal
        <https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
* Data values for a temporal field can be either a date-time string (e.g.,
          ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``, ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "text"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, format=Undefined, formatType=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(Text, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, format=format,
formatType=formatType, timeUnit=timeUnit, title=title, type=type,
**kwds)
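# Example (a sketch): ``Text`` labels marks with a field value, here formatted with a
# D3 number-format pattern. ``alt`` and a hypothetical DataFrame ``df`` with ``item``
# and ``price`` columns are assumptions.
#
#     chart = alt.Chart(df).mark_text().encode(
#         x='item:N',
#         # '.2f' is a D3 format string: fixed-point notation with two decimals
#         text=alt.Text('price:Q', format='.2f'),
#     )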
class TextValue(ValueChannelMixin, core.TextValueDefWithCondition):
"""TextValue schema wrapper
Mapping(required=[])
    A ValueDef with Condition<ValueDef | FieldDef> where either the condition or the value is
    optional.
Attributes
----------
condition : anyOf(:class:`ConditionalTextFieldDef`, :class:`ConditionalValueDef`,
List(:class:`ConditionalValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : :class:`Value`
        A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` for color,
        values between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "text"
def __init__(self, value, condition=Undefined, **kwds):
super(TextValue, self).__init__(value=value, condition=condition, **kwds)
class Tooltip(FieldChannelMixin, core.TextFieldDefWithCondition):
"""Tooltip schema wrapper
Mapping(required=[shorthand])
A FieldDef with Condition :raw-html:`<ValueDef>`
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, enum('binned'), None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
        field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
        binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : anyOf(:class:`ConditionalValueDef`, List(:class:`ConditionalValueDef`))
One or more value definition(s) with `a selection or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
format : string
The text formatting pattern for labels of guides (axes, legends, headers) and text
marks.
* If the format type is ``"number"`` (e.g., for quantitative fields), this is D3's
`number format pattern <https://github.com/d3/d3-format#locale_format>`__.
* If the format type is ``"time"`` (e.g., for temporal fields), this is D3's `time
format pattern <https://github.com/d3/d3-time-format#locale_format>`__.
See the `format documentation <https://vega.github.io/vega-lite/docs/format.html>`__
for more examples.
**Default value:** Derived from `numberFormat
<https://vega.github.io/vega-lite/docs/config.html#format>`__ config for number
format and from `timeFormat
<https://vega.github.io/vega-lite/docs/config.html#format>`__ config for time
format.
formatType : enum('number', 'time')
The format type for labels ( ``"number"`` or ``"time"`` ).
**Default value:**
        * ``"time"`` for temporal fields and ordinal and nominal fields with ``timeUnit``.
        * ``"number"`` for quantitative fields as well as ordinal and nominal fields without
``timeUnit``.
timeUnit : :class:`TimeUnit`
        Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
        field, or for `a temporal field that gets cast as ordinal
        <https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
* Data values for a temporal field can be either a date-time string (e.g.,
          ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``, ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "tooltip"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, condition=Undefined,
field=Undefined, format=Undefined, formatType=Undefined, timeUnit=Undefined,
title=Undefined, type=Undefined, **kwds):
super(Tooltip, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin,
condition=condition, field=field, format=format,
formatType=formatType, timeUnit=timeUnit, title=title, type=type,
**kwds)
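# Example (a sketch): ``Tooltip`` with an explicit D3 time-format pattern.
# ``alt`` and a hypothetical DataFrame ``df`` with ``date`` and ``value`` columns
# are assumptions.
#
#     chart = alt.Chart(df).mark_point().encode(
#         x='date:T',
#         y='value:Q',
#         # renders e.g. "Mar 2015"; formatType='time' selects the time formatter
#         tooltip=alt.Tooltip('date:T', format='%b %Y', formatType='time'),
#     )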
class TooltipValue(ValueChannelMixin, core.TextValueDefWithCondition):
"""TooltipValue schema wrapper
Mapping(required=[])
    A ValueDef with Condition<ValueDef | FieldDef> where either the condition or the value is
    optional.
Attributes
----------
condition : anyOf(:class:`ConditionalTextFieldDef`, :class:`ConditionalValueDef`,
List(:class:`ConditionalValueDef`))
A field definition or one or more value definition(s) with a selection predicate.
value : :class:`Value`
        A constant value in visual domain (e.g., ``"red"`` / ``"#0099ff"`` for color,
        values between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "tooltip"
def __init__(self, value, condition=Undefined, **kwds):
super(TooltipValue, self).__init__(value=value, condition=condition, **kwds)
class X(FieldChannelMixin, core.PositionFieldDef):
"""X schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
axis : anyOf(:class:`Axis`, None)
An object defining properties of axis's gridlines, ticks and labels.
If ``null``, the axis for the encoding channel will be removed.
**Default value:** If undefined, default `axis properties
<https://vega.github.io/vega-lite/docs/axis.html>`__ are applied.
**See also:** `axis <https://vega.github.io/vega-lite/docs/axis.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, enum('binned'), None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
        field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
        binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
impute : :class:`ImputeParams`
        An object defining the properties of the Impute Operation to be applied.
        The field value of the other positional channel is taken as the ``key`` of the
        ``Impute`` Operation.
        The field of the ``color`` channel, if specified, is used as the ``groupby`` of the
        ``Impute`` Operation.
**See also:** `impute <https://vega.github.io/vega-lite/docs/impute.html>`__
documentation.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
        transforms values in the data domain (numbers, dates, strings, etc.) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
          JavaScript.
* `A sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ for sorting
by another encoding channel. (This type of sort definition is not available for
``row`` and ``column`` channels.)
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
          in their original order. For a discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
stack : anyOf(:class:`StackOffset`, None, boolean)
Type of stacking offset if the field should be stacked.
``stack`` is only applicable for ``x`` and ``y`` channels with continuous domains.
For example, ``stack`` of ``y`` can be used to customize stacking for a vertical bar
chart.
``stack`` can be one of the following values:
        * ``"zero"`` or ``true`` - stacking with baseline offset at zero value of the scale
          (for creating typical stacked `bar
          <https://vega.github.io/vega-lite/docs/stack.html#bar>`__ and `area
          <https://vega.github.io/vega-lite/docs/stack.html#area>`__ charts).
        * ``"normalize"`` - stacking with normalized domain (for creating `normalized
          stacked bar and area charts
          <https://vega.github.io/vega-lite/docs/stack.html#normalized>`__ ).
        * ``"center"`` - stacking with center baseline (for `streamgraph
          <https://vega.github.io/vega-lite/docs/stack.html#streamgraph>`__ ).
        * ``null`` or ``false`` - no stacking. This will produce a layered `bar
          <https://vega.github.io/vega-lite/docs/stack.html#layered-bar-chart>`__ or area
          chart.
        **Default value:** ``"zero"`` for plots where all of the following conditions are
        true:
        (1) the mark is ``bar`` or ``area`` ;
        (2) the stacked measure channel ( ``x`` or ``y`` ) has a linear scale;
        (3) at least one non-position channel is mapped to an unaggregated field that is
        different from ``x`` and ``y``. Otherwise, ``null`` by default.
**See also:** `stack <https://vega.github.io/vega-lite/docs/stack.html>`__
documentation.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field, or `a temporal field that is cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``, ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "x"
def __init__(self, shorthand=Undefined, aggregate=Undefined, axis=Undefined, bin=Undefined,
field=Undefined, impute=Undefined, scale=Undefined, sort=Undefined, stack=Undefined,
timeUnit=Undefined, title=Undefined, type=Undefined, **kwds):
super(X, self).__init__(shorthand=shorthand, aggregate=aggregate, axis=axis, bin=bin,
field=field, impute=impute, scale=scale, sort=sort, stack=stack,
timeUnit=timeUnit, title=title, type=type, **kwds)
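
# A minimal usage sketch for the ``X`` channel: in practice the class is reached
# through the ``alt.X`` alias exported by Altair. The ``cars`` DataFrame and its
# "miles"/"gallons" columns below are illustrative assumptions, not part of the
# schema.
#
#     import altair as alt
#     import pandas as pd
#
#     cars = pd.DataFrame({"miles": [210.0, 180.5, 305.2],
#                          "gallons": [10.2, 9.1, 12.8]})
#     chart = alt.Chart(cars).mark_point().encode(
#         x=alt.X("miles", type="quantitative", title="Miles driven",
#                 scale=alt.Scale(zero=False)),
#         y=alt.Y("gallons", type="quantitative"),
#     )
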
class XValue(ValueChannelMixin, core.XValueDef):
"""XValue schema wrapper
Mapping(required=[value])
Definition object for a constant value of an encoding channel.
Attributes
----------
value : anyOf(float, enum('width'))
A constant value in the visual domain (e.g., ``"red"`` or ``"#0099ff"`` for color,
values between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "x"
def __init__(self, value, **kwds):
super(XValue, self).__init__(value=value, **kwds)
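
# A minimal sketch of a constant-value channel (``source`` is assumed to be a
# DataFrame defined elsewhere; the pixel value is illustrative): ``XValue`` pins
# the channel to a point in the visual domain instead of mapping a data field.
#
#     import altair as alt
#
#     rule = alt.Chart(source).mark_rule(color="red").encode(
#         x=alt.XValue(150)  # vertical rule 150 pixels from the left edge
#     )
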
class X2(FieldChannelMixin, core.SecondaryFieldDef):
"""X2 schema wrapper
Mapping(required=[shorthand])
A field definition of a secondary channel that shares a scale with another primary channel.
For example, ``x2``, ``xError`` and ``xError2`` share the same scale with ``x``.
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field, or `a temporal field that is cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "x2"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, **kwds):
super(X2, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, **kwds)
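
# A minimal sketch for pre-binned data (``df`` and its ``bin_start`` /
# ``bin_end`` / ``count`` columns are assumptions): the bin-start field maps to
# ``x`` with ``bin="binned"`` and the bin-end field maps to ``x2``, which shares
# the same scale.
#
#     import altair as alt
#
#     hist = alt.Chart(df).mark_bar().encode(
#         x=alt.X("bin_start:Q", bin="binned"),
#         x2="bin_end:Q",
#         y=alt.Y("count:Q"),
#     )
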
class X2Value(ValueChannelMixin, core.XValueDef):
"""X2Value schema wrapper
Mapping(required=[value])
Definition object for a constant value of an encoding channel.
Attributes
----------
value : anyOf(float, enum('width'))
A constant value in the visual domain (e.g., ``"red"`` or ``"#0099ff"`` for color,
values between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "x2"
def __init__(self, value, **kwds):
super(X2Value, self).__init__(value=value, **kwds)
class XError(FieldChannelMixin, core.SecondaryFieldDef):
"""XError schema wrapper
Mapping(required=[shorthand])
A field definition of a secondary channel that shares a scale with another primary channel.
For example, ``x2``, ``xError`` and ``xError2`` share the same scale with ``x``.
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field, or `a temporal field that is cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "xError"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, **kwds):
super(XError, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, **kwds)
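
# A minimal error-bar sketch (``df`` with ``measure`` / ``sd`` / ``group``
# columns is an assumption): ``xError`` extends ``x`` symmetrically and shares
# its scale rather than defining one of its own.
#
#     import altair as alt
#
#     bars = alt.Chart(df).mark_errorbar().encode(
#         x=alt.X("measure:Q"),
#         xError=alt.XError("sd"),
#         y=alt.Y("group:N"),
#     )
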
class XErrorValue(ValueChannelMixin, core.NumberValueDef):
"""XErrorValue schema wrapper
Mapping(required=[value])
Definition object for a constant value of an encoding channel.
Attributes
----------
value : float
A constant value in the visual domain (e.g., ``"red"`` or ``"#0099ff"`` for color,
values between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "xError"
def __init__(self, value, **kwds):
super(XErrorValue, self).__init__(value=value, **kwds)
class XError2(FieldChannelMixin, core.SecondaryFieldDef):
"""XError2 schema wrapper
Mapping(required=[shorthand])
A field definition of a secondary channel that shares a scale with another primary channel.
For example, ``x2``, ``xError`` and ``xError2`` share the same scale with ``x``.
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field, or `a temporal field that is cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "xError2"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, **kwds):
super(XError2, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, **kwds)
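
# A sketch of asymmetric error bars (column names are assumptions): ``xError``
# supplies the upper offset around ``x`` and ``xError2`` the (typically
# negative) lower offset.
#
#     import altair as alt
#
#     asym = alt.Chart(df).mark_errorbar().encode(
#         x=alt.X("measure:Q"),
#         xError=alt.XError("upper_offset"),
#         xError2=alt.XError2("lower_offset"),
#         y=alt.Y("group:N"),
#     )
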
class XError2Value(ValueChannelMixin, core.NumberValueDef):
"""XError2Value schema wrapper
Mapping(required=[value])
Definition object for a constant value of an encoding channel.
Attributes
----------
value : float
A constant value in the visual domain (e.g., ``"red"`` or ``"#0099ff"`` for color,
values between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "xError2"
def __init__(self, value, **kwds):
super(XError2Value, self).__init__(value=value, **kwds)
class Y(FieldChannelMixin, core.PositionFieldDef):
"""Y schema wrapper
Mapping(required=[shorthand])
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
axis : anyOf(:class:`Axis`, None)
An object defining properties of axis's gridlines, ticks and labels.
If ``null``, the axis for the encoding channel will be removed.
**Default value:** If undefined, default `axis properties
<https://vega.github.io/vega-lite/docs/axis.html>`__ are applied.
**See also:** `axis <https://vega.github.io/vega-lite/docs/axis.html>`__
documentation.
bin : anyOf(boolean, :class:`BinParams`, enum('binned'), None)
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
impute : :class:`ImputeParams`
An object defining the properties of the Impute Operation to be applied.
The field value of the other positional channel is taken as ``key`` of the
``Impute`` Operation.
The field of the ``color`` channel if specified is used as ``groupby`` of the
``Impute`` Operation.
**See also:** `impute <https://vega.github.io/vega-lite/docs/impute.html>`__
documentation.
scale : anyOf(:class:`Scale`, None)
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc.) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : :class:`Sort`
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ for sorting
by another encoding channel. (This type of sort definition is not available for
``row`` and ``column`` channels.)
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For a discrete time field, values in the sort array can be
`date-time definition objects <types#datetime>`__. In addition, for time units
``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"`` ).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` is not supported for ``row`` and ``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
stack : anyOf(:class:`StackOffset`, None, boolean)
Type of stacking offset if the field should be stacked.
``stack`` is only applicable for ``x`` and ``y`` channels with continuous domains.
For example, ``stack`` of ``y`` can be used to customize stacking for a vertical bar
chart.
``stack`` can be one of the following values:
* ``"zero"`` or ``true`` - stacking with baseline offset at zero value of the scale
(for creating typical stacked `bar
<https://vega.github.io/vega-lite/docs/stack.html#bar>`__ and `area
<https://vega.github.io/vega-lite/docs/stack.html#area>`__ charts).
* ``"normalize"`` - stacking with normalized domain (for creating `normalized
stacked bar and area charts
<https://vega.github.io/vega-lite/docs/stack.html#normalized>`__ ).
* ``"center"`` - stacking with center baseline (for `streamgraph
<https://vega.github.io/vega-lite/docs/stack.html#streamgraph>`__ ).
* ``null`` or ``false`` - no stacking. This will produce layered `bar
<https://vega.github.io/vega-lite/docs/stack.html#layered-bar-chart>`__ and area
charts.
**Default value:** ``zero`` for plots where all of the following conditions are true:
(1) the mark is ``bar`` or ``area`` ;
(2) the stacked measure channel (x or y) has a linear scale;
(3) at least one non-position channel is mapped to an unaggregated field that is
different from x and y. Otherwise, ``null`` by default.
**See also:** `stack <https://vega.github.io/vega-lite/docs/stack.html>`__
documentation.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field, or `a temporal field that is cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`
The encoded field's type of measurement ( ``"quantitative"``, ``"temporal"``,
``"ordinal"``, or ``"nominal"`` ).
It can also be a ``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
**Note:**
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``, ``"2015"`` ) or a
timestamp number (e.g., ``1552199579097`` ).
* Data ``type`` describes the semantics of the data rather than the primitive data
types ( ``number``, ``string``, etc.). The same primitive data type can have
different types of measurement. For example, numeric data can represent
quantitative, ordinal, or nominal data.
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (for using a temporal scale) or `"ordinal" (for using
an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat", "type": "quantitative"}``. The ``"type"`` of the aggregate output
is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError`` ) do not have
``type`` as they have exactly the same type as their primary channels (e.g.,
``x``, ``y`` ).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "y"
def __init__(self, shorthand=Undefined, aggregate=Undefined, axis=Undefined, bin=Undefined,
field=Undefined, impute=Undefined, scale=Undefined, sort=Undefined, stack=Undefined,
timeUnit=Undefined, title=Undefined, type=Undefined, **kwds):
super(Y, self).__init__(shorthand=shorthand, aggregate=aggregate, axis=axis, bin=bin,
field=field, impute=impute, scale=scale, sort=sort, stack=stack,
timeUnit=timeUnit, title=title, type=type, **kwds)
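
# A minimal stacking sketch tying the ``stack`` property above to the
# normalized stacked bar pattern (``df`` and its columns are assumptions):
#
#     import altair as alt
#
#     stacked = alt.Chart(df).mark_bar().encode(
#         x=alt.X("year:O"),
#         y=alt.Y("revenue:Q", stack="normalize"),
#         color=alt.Color("region:N"),
#     )
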
class YValue(ValueChannelMixin, core.YValueDef):
"""YValue schema wrapper
Mapping(required=[value])
Definition object for a constant value of an encoding channel.
Attributes
----------
value : anyOf(float, enum('height'))
A constant value in the visual domain (e.g., ``"red"`` or ``"#0099ff"`` for color,
values between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "y"
def __init__(self, value, **kwds):
super(YValue, self).__init__(value=value, **kwds)
class Y2(FieldChannelMixin, core.SecondaryFieldDef):
"""Y2 schema wrapper
Mapping(required=[shorthand])
A field definition of a secondary channel that shares a scale with another primary channel.
For example, ``x2``, ``xError`` and ``xError2`` share the same scale with ``x``.
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field, or `a temporal field that is cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "y2"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, **kwds):
super(Y2, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, **kwds)
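
# A ranged-area sketch (``df`` with ``date`` / ``low`` / ``high`` columns is an
# assumption): ``y`` marks the lower bound and ``y2`` the upper bound on the
# shared y scale.
#
#     import altair as alt
#
#     band = alt.Chart(df).mark_area(opacity=0.4).encode(
#         x=alt.X("date:T"),
#         y=alt.Y("low:Q"),
#         y2="high:Q",
#     )
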
class Y2Value(ValueChannelMixin, core.YValueDef):
"""Y2Value schema wrapper
Mapping(required=[value])
Definition object for a constant value of an encoding channel.
Attributes
----------
value : anyOf(float, enum('height'))
A constant value in the visual domain (e.g., ``"red"`` or ``"#0099ff"`` for color,
values between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "y2"
def __init__(self, value, **kwds):
super(Y2Value, self).__init__(value=value, **kwds)
class YError(FieldChannelMixin, core.SecondaryFieldDef):
"""YError schema wrapper
Mapping(required=[shorthand])
A field definition of a secondary channel that shares a scale with another primary channel.
For example, ``x2``, ``xError`` and ``xError2`` share the same scale with ``x``.
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field, or `a temporal field that is cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "yError"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, **kwds):
super(YError, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, **kwds)
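
# A vertical error-bar sketch using aggregate shorthand (the ``df`` columns are
# assumptions): the mean maps to ``y`` and the standard deviation to ``yError``.
#
#     import altair as alt
#
#     errs = alt.Chart(df).mark_errorbar().encode(
#         x=alt.X("group:N"),
#         y=alt.Y("mean(value):Q"),
#         yError=alt.YError("stdev(value)"),
#     )
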
class YErrorValue(ValueChannelMixin, core.NumberValueDef):
"""YErrorValue schema wrapper
Mapping(required=[value])
Definition object for a constant value of an encoding channel.
Attributes
----------
value : float
A constant value in the visual domain (e.g., ``"red"`` or ``"#0099ff"`` for color,
values between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "yError"
def __init__(self, value, **kwds):
super(YErrorValue, self).__init__(value=value, **kwds)
class YError2(FieldChannelMixin, core.SecondaryFieldDef):
"""YError2 schema wrapper
Mapping(required=[shorthand])
A field definition of a secondary channel that shares a scale with another primary channel.
For example, ``x2``, ``xError`` and ``xError2`` share the same scale with ``x``.
Attributes
----------
shorthand : string
shorthand for field, aggregate, and type
aggregate : :class:`Aggregate`
Aggregation function for the field
(e.g., ``mean``, ``sum``, ``median``, ``min``, ``max``, ``count`` ).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bin : None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#params>`__, or indicating that the
data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (
``"binned"`` ).
If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html>`__ will be applied.
If ``"binned"``, this indicates that the data for the ``x`` (or ``y`` ) channel are
already binned. You can map the bin-start field to ``x`` (or ``y`` ) and the bin-end
field to ``x2`` (or ``y2`` ). The scale and axis will be formatted similarly to
binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also
set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : :class:`Field`
**Required.** A string defining the name of the field from which to pull a data
value
or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:**
1) Dots ( ``.`` ) and brackets ( ``[`` and ``]`` ) can be used to access nested
objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"`` ).
If field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"`` ).
See more details about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__.
2) ``field`` is not required if ``aggregate`` is ``count``.
timeUnit : :class:`TimeUnit`
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours`` ) for a temporal
field, or `a temporal field that is cast as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : anyOf(string, None)
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function (
``aggregate``, ``bin`` and ``timeUnit`` ). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"`` ). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"`` ).
Otherwise, the title is simply the field name.
**Notes** :
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/docs/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "yError2"
def __init__(self, shorthand=Undefined, aggregate=Undefined, bin=Undefined, field=Undefined,
timeUnit=Undefined, title=Undefined, **kwds):
super(YError2, self).__init__(shorthand=shorthand, aggregate=aggregate, bin=bin, field=field,
timeUnit=timeUnit, title=title, **kwds)
class YError2Value(ValueChannelMixin, core.NumberValueDef):
"""YError2Value schema wrapper
Mapping(required=[value])
Definition object for a constant value of an encoding channel.
Attributes
----------
value : float
A constant value in the visual domain (e.g., ``"red"`` or ``"#0099ff"`` for color,
values between ``0`` and ``1`` for opacity).
"""
_class_is_valid_at_instantiation = False
_encoding_name = "yError2"
def __init__(self, value, **kwds):
super(YError2Value, self).__init__(value=value, **kwds)