text (string, 6-947k chars) | repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34)
---|---|---|---|---|---|---|
# Copyright (C) 2013, Stefan Schwarzer
# See the file LICENSE for licensing terms.
"""
tool.py - helper code
"""
from __future__ import unicode_literals
import compat as compat
__all__ = ["same_string_type_as", "as_bytes", "as_unicode",
"as_default_string"]
# Encoding used to convert between byte strings and unicode strings. Note
# that the `ftplib` module under Python 3 uses the "latin1" encoding
# internally, and "latin1" is the truly lossless choice (any byte string can
# be decoded and re-encoded without information loss or encoding-related
# errors); this helper uses UTF-8 instead, so decoding is only safe for byte
# strings that are valid UTF-8.
LOSSLESS_ENCODING = "utf-8"
def same_string_type_as(type_source, content_source):
"""
Return a string of the same type as `type_source` with the content
from `content_source`.
If the `type_source` and `content_source` don't have the same
type, use `LOSSLESS_ENCODING` above to encode or decode, whatever
operation is needed.
"""
if (
isinstance(type_source, compat.bytes_type) and
isinstance(content_source, compat.unicode_type)):
return content_source.encode(LOSSLESS_ENCODING)
elif (
isinstance(type_source, compat.unicode_type) and
isinstance(content_source, compat.bytes_type)):
return content_source.decode(LOSSLESS_ENCODING)
else:
return content_source
def as_bytes(string):
"""
Return the argument `string` converted to a byte string if it's a
unicode string. Otherwise just return the string.
"""
return same_string_type_as(b"", string)
def as_unicode(string):
"""
Return the argument `string` converted to a unicode string if it's
a byte string. Otherwise just return the string.
"""
return same_string_type_as("", string)
def as_default_string(string):
"""
    Return the argument `string` converted to the default string
type for the Python version. For unicode strings,
`LOSSLESS_ENCODING` is used for encoding or decoding.
"""
return same_string_type_as(compat.default_string_type(), string)
def encode_if_unicode(string, encoding):
"""
Return the string `string`, encoded with `encoding` if `string` is
a unicode string. Otherwise return `string` unchanged.
"""
if isinstance(string, compat.unicode_type):
return string.encode(encoding)
else:
return string
def recursive_str_to_unicode(target):
"""
    Recursively convert every string inside dicts, tuples and lists to unicode.
"""
pack_result = []
if isinstance(target, dict):
level = {}
for key, val in target.iteritems():
ukey = recursive_str_to_unicode(key)
uval = recursive_str_to_unicode(val)
level[ukey] = uval
pack_result.append(level)
elif isinstance(target, list):
level = []
for leaf in target:
uleaf = recursive_str_to_unicode(leaf)
level.append(uleaf)
pack_result.append(level)
elif isinstance(target, tuple):
level = []
for leaf in target:
uleaf = recursive_str_to_unicode(leaf)
level.append(uleaf)
pack_result.append(tuple(level))
elif isinstance(target, str):
return as_unicode(target)
else:
return target
result = pack_result.pop()
return result
################################################################################
# Testing
if __name__ == '__main__':
test_obj = {str('myList'): [str('inList1'), str('inList2')],
str('myTuple'): (str('inTuple1'), str('inTuple2')),
str('mystr'): str('text'),
str('myint'): 99}
print repr(test_obj)
    print repr(recursive_str_to_unicode(test_obj))
| pavel-odintsov/ru_open_statistics | helpers/helperUnicode.py | Python | gpl-2.0 | 3,890 | 0.001028 |
from inc import *
modFunc.addCommand('suggest', 'suggest', 'suggest')
modFunc.addCommand('sug', 'suggest', 'suggest')
modFunc.addCommand('issue', 'suggest', 'suggest')
modFunc.addCommand('sug-read', 'suggest', 'read')
modFunc.addCommand('sug-clear', 'suggest', 'clear')
def suggest(line, irc):
message, username, msgto = ircFunc.ircMessage(line)
combinedMsg = ' '.join(message[1:])
numArgs = len(message) - 1
if numArgs > 0 and combinedMsg.strip() != "":
f = open('suggestions.txt' , 'a')
        f.write(username + ': ' + combinedMsg + '\n')
f.close()
ircFunc.ircSay(username, '%s, thank you for your suggestion... It has been documented and will be reviewed. :)' % username, irc)
else:
        ircFunc.ircSay(username, "You didn't even suggest anything... :/ Command usage is: !suggest <suggestion goes here>", irc)
def read(line, irc):
message, username, msgto = ircFunc.ircMessage(line)
if username in configFunc.getBotConf('botadmins').split(" "):
if (ircFunc.isRegged(username, irc)):
with open('suggestions.txt') as sugfile:
print 'in with'
for sugline in sugfile:
ircFunc.ircSay(msgto, sugline, irc)
def clear(line, irc):
message, username, msgto = ircFunc.ircMessage(line)
if username.lower() in configFunc.getBotConf('botadmins').split(" "):
if (ircFunc.isRegged(username, irc)):
f = open('suggestions.txt', 'w')
f.write('Suggestions:' + '\n')
f.close()
ircFunc.ircSay(username, 'Suggestions Cleared.....', irc)
| chris2727/BeastBot | src/inc/modules/suggest.py | Python | gpl-3.0 | 1,611 | 0.003724 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('studentguide', '0005_add_studentguide_clubs'),
]
operations = [
migrations.AddField(
model_name='tag',
name='image',
field=models.FileField(null=True, upload_to=b'studentguide/tags/', blank=True),
),
]
| enjaz/enjaz | studentguide/migrations/0006_tag_image.py | Python | agpl-3.0 | 446 | 0.002242 |
# Copyright 2013-2015 Lenna X. Peterson. All rights reserved.
from .meta import classproperty
class AtomData(object):
# Maximum ASA for each residue
# from Miller et al. 1987, JMB 196: 641-656
total_asa = {
'A': 113.0,
'R': 241.0,
'N': 158.0,
'D': 151.0,
'C': 140.0,
'Q': 189.0,
'E': 183.0,
'G': 85.0,
'H': 194.0,
'I': 182.0,
'L': 180.0,
'K': 211.0,
'M': 204.0,
'F': 218.0,
'P': 143.0,
'S': 122.0,
'T': 146.0,
'W': 259.0,
'Y': 229.0,
'V': 160.0,
}
@classmethod
def is_surface(cls, resn, asa, total_asa=None, cutoff=0.1):
"""Return True if ratio of residue ASA to max ASA >= cutoff"""
if total_asa is None:
total_asa = cls.total_asa
resn = resn.upper()
if len(resn) == 3:
resn = cls.three_to_one[resn]
return float(asa) / total_asa[resn] >= cutoff
three_to_full = {
'Val': 'Valine', 'Ile': 'Isoleucine', 'Leu': 'Leucine',
'Glu': 'Glutamic acid', 'Gln': 'Glutamine',
'Asp': 'Aspartic acid', 'Asn': 'Asparagine', 'His': 'Histidine',
'Trp': 'Tryptophan', 'Phe': 'Phenylalanine', 'Tyr': 'Tyrosine',
'Arg': 'Arginine', 'Lys': 'Lysine',
'Ser': 'Serine', 'Thr': 'Threonine',
'Met': 'Methionine', 'Ala': 'Alanine',
'Gly': 'Glycine', 'Pro': 'Proline', 'Cys': 'Cysteine'}
three_to_one = {
'VAL': 'V', 'ILE': 'I', 'LEU': 'L', 'GLU': 'E', 'GLN': 'Q',
'ASP': 'D', 'ASN': 'N', 'HIS': 'H', 'TRP': 'W', 'PHE': 'F', 'TYR': 'Y',
'ARG': 'R', 'LYS': 'K', 'SER': 'S', 'THR': 'T', 'MET': 'M', 'ALA': 'A',
'GLY': 'G', 'PRO': 'P', 'CYS': 'C'}
one_to_three = {o: t for t, o in three_to_one.iteritems()}
@classproperty
def one_to_full(cls):
"""
This can't see three_to_full unless explicitly passed because
dict comprehensions create their own local scope
"""
return {o: cls.three_to_full[t.title()] for t, o in cls.three_to_one.iteritems()}
res_atom_list = dict(
ALA=['C', 'CA', 'CB', 'N', 'O'],
ARG=['C', 'CA', 'CB', 'CD', 'CG', 'CZ', 'N', 'NE', 'NH1', 'NH2', 'O'],
ASN=['C', 'CA', 'CB', 'CG', 'N', 'ND2', 'O', 'OD1'],
ASP=['C', 'CA', 'CB', 'CG', 'N', 'O', 'OD1', 'OD2'],
CYS=['C', 'CA', 'CB', 'N', 'O', 'SG'],
GLN=['C', 'CA', 'CB', 'CD', 'CG', 'N', 'NE2', 'O', 'OE1'],
GLU=['C', 'CA', 'CB', 'CD', 'CG', 'N', 'O', 'OE1', 'OE2'],
GLY=['C', 'CA', 'N', 'O'],
HIS=['C', 'CA', 'CB', 'CD2', 'CE1', 'CG', 'N', 'ND1', 'NE2', 'O'],
ILE=['C', 'CA', 'CB', 'CD1', 'CG1', 'CG2', 'N', 'O'],
LEU=['C', 'CA', 'CB', 'CD1', 'CD2', 'CG', 'N', 'O'],
LYS=['C', 'CA', 'CB', 'CD', 'CE', 'CG', 'N', 'NZ', 'O'],
MET=['C', 'CA', 'CB', 'CE', 'CG', 'N', 'O', 'SD'],
PHE=['C', 'CA', 'CB', 'CD1', 'CD2',
'CE1', 'CE2', 'CG', 'CZ', 'N', 'O'],
PRO=['C', 'CA', 'CB', 'CD', 'CG', 'N', 'O'],
SER=['C', 'CA', 'CB', 'N', 'O', 'OG'],
THR=['C', 'CA', 'CB', 'CG2', 'N', 'O', 'OG1'],
TRP=['C', 'CA', 'CB', 'CD1', 'CD2', 'CE2',
'CE3', 'CG', 'CH2', 'CZ2', 'CZ3', 'N', 'NE1', 'O'],
TYR=['C', 'CA', 'CB', 'CD1', 'CD2',
'CE1', 'CE2', 'CG', 'CZ', 'N', 'O', 'OH'],
VAL=['C', 'CA', 'CB', 'CG1', 'CG2', 'N', 'O'],
)
all_chi = dict(
chi1=dict(
ARG=['N', 'CA', 'CB', 'CG'],
ASN=['N', 'CA', 'CB', 'CG'],
ASP=['N', 'CA', 'CB', 'CG'],
CYS=['N', 'CA', 'CB', 'SG'],
GLN=['N', 'CA', 'CB', 'CG'],
GLU=['N', 'CA', 'CB', 'CG'],
HIS=['N', 'CA', 'CB', 'CG'],
ILE=['N', 'CA', 'CB', 'CG1'],
LEU=['N', 'CA', 'CB', 'CG'],
LYS=['N', 'CA', 'CB', 'CG'],
MET=['N', 'CA', 'CB', 'CG'],
PHE=['N', 'CA', 'CB', 'CG'],
PRO=['N', 'CA', 'CB', 'CG'],
SER=['N', 'CA', 'CB', 'OG'],
THR=['N', 'CA', 'CB', 'OG1'],
TRP=['N', 'CA', 'CB', 'CG'],
TYR=['N', 'CA', 'CB', 'CG'],
VAL=['N', 'CA', 'CB', 'CG1'],
),
chi2=dict(
ARG=['CA', 'CB', 'CG', 'CD'],
ASN=['CA', 'CB', 'CG', 'OD1'],
ASP=['CA', 'CB', 'CG', 'OD1'],
GLN=['CA', 'CB', 'CG', 'CD'],
GLU=['CA', 'CB', 'CG', 'CD'],
HIS=['CA', 'CB', 'CG', 'ND1'],
ILE=['CA', 'CB', 'CG1', 'CD1'],
LEU=['CA', 'CB', 'CG', 'CD1'],
LYS=['CA', 'CB', 'CG', 'CD'],
MET=['CA', 'CB', 'CG', 'SD'],
PHE=['CA', 'CB', 'CG', 'CD1'],
PRO=['CA', 'CB', 'CG', 'CD'],
TRP=['CA', 'CB', 'CG', 'CD1'],
TYR=['CA', 'CB', 'CG', 'CD1'],
),
chi3=dict(
ARG=['CB', 'CG', 'CD', 'NE'],
GLN=['CB', 'CG', 'CD', 'OE1'],
GLU=['CB', 'CG', 'CD', 'OE1'],
LYS=['CB', 'CG', 'CD', 'CE'],
MET=['CB', 'CG', 'SD', 'CE'],
),
chi4=dict(
ARG=['CG', 'CD', 'NE', 'CZ'],
LYS=['CG', 'CD', 'CE', 'NZ'],
),
chi5=dict(
ARG=['CD', 'NE', 'CZ', 'NH1'],
),
)
alt_chi = dict(
chi1=dict(
VAL=['N', 'CA', 'CB', 'CG2'],
),
chi2=dict(
ASP=['CA', 'CB', 'CG', 'OD2'],
LEU=['CA', 'CB', 'CG', 'CD2'],
PHE=['CA', 'CB', 'CG', 'CD2'],
TYR=['CA', 'CB', 'CG', 'CD2'],
),
)
chi_atoms = dict(
ARG=set(['CB', 'CA', 'CG', 'NE', 'N', 'CZ', 'NH1', 'CD']),
ASN=set(['CB', 'CA', 'N', 'CG', 'OD1']),
ASP=set(['CB', 'CA', 'N', 'CG', 'OD1', 'OD2']),
CYS=set(['CB', 'CA', 'SG', 'N']),
GLN=set(['CB', 'CA', 'CG', 'N', 'CD', 'OE1']),
GLU=set(['CB', 'CA', 'CG', 'N', 'CD', 'OE1']),
HIS=set(['ND1', 'CB', 'CA', 'CG', 'N']),
ILE=set(['CG1', 'CB', 'CA', 'CD1', 'N']),
LEU=set(['CB', 'CA', 'CG', 'CD1', 'CD2', 'N']),
LYS=set(['CB', 'CA', 'CG', 'CE', 'N', 'NZ', 'CD']),
MET=set(['CB', 'CA', 'CG', 'CE', 'N', 'SD']),
PHE=set(['CB', 'CA', 'CG', 'CD1', 'CD2', 'N']),
PRO=set(['CB', 'CA', 'N', 'CG', 'CD']),
SER=set(['OG', 'CB', 'CA', 'N']),
THR=set(['CB', 'CA', 'OG1', 'N']),
TRP=set(['CB', 'CA', 'CG', 'CD1', 'N']),
TYR=set(['CB', 'CA', 'CG', 'CD1', 'CD2', 'N']),
VAL=set(['CG1', 'CG2', 'CB', 'CA', 'N']),
)
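# Illustrative usage sketch (not part of the original module; the ASA value is
# an arbitrary example, not measured data): checking whether a residue counts
# as surface-exposed from its absolute accessible surface area.
def _example_is_surface():
    # 60 A^2 exposed on an alanine (max ASA 113.0) gives a ratio above 0.1.
    return AtomData.is_surface('ALA', 60.0)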
| lennax/util | util/atom_data.py | Python | gpl-3.0 | 6,616 | 0.000151 |
# -*- coding: utf-8 -*-
from openerp import models, fields, api
from datetime import datetime, timedelta, date
from dateutil.relativedelta import relativedelta
class Inspection(models.Model):
_name = 'property.inspection'
_order = 'date desc'
_inherit = ['mail.thread', 'ir.needaction_mixin']
property_id = fields.Many2one('property', string='Property ID', required=True, readonly=True)
### inspection ###
date = fields.Date(string='Date', required=True,)
inspector_id = fields.Many2one('res.users', string='Inspector')
    act_type = fields.Selection([
        ('inspect', 'Inspection'),
        ('routine_inspection', 'Routine Inspection'),
        ('change', 'Replacement'),
        ('repair', 'Repair'),
        ('coordinate', 'Adjustment'),
        ('others', 'Other'),],
        string='Act type')
inspection_note = fields.Text(string='Note')
    product_memo = fields.Text(string='product_memo', help='Describe the equipment that was replaced')
### request ###
request_id = fields.Many2one('property.inspection.request', string='Request')
request_date = fields.Date(string='request_date', related='request_id.date', readonly=True)
requester_name = fields.Char(string='requester_name', related='request_id.partner_id.name', readonly=True)
request_note = fields.Text(string='request_note', related='request_id.request_note', readonly=True)
responder_name = fields.Char(string='responder_name', related='request_id.user_id.name', readonly=True)
### ###
    state = fields.Selection([
        ('ongoing', 'In Progress'),
        ('arranging', 'Arranging'),
        ('finishing', 'Completed'),],
        string='state')
class InspectionRequest(models.Model):
_name = 'property.inspection.request'
_order = 'date desc'
date = fields.Date(string='Date', required=True, copy=False,)
partner_id = fields.Many2one('res.partner', string='partner_id',)
request_note = fields.Text(string='request_note',)
    user_id = fields.Many2one('res.users', string='user_id', required=True, help='The person who received the maintenance request')
@api.model
def create(self, vals):
if vals.get('name', 'New') == 'New':
vals['name'] = self.env['ir.sequence'].next_by_code('inspection.request') or 'New'
result = super(InspectionRequest, self).create(vals)
return result
| ichi23de5/ichi_Repo | property/models/inspection.py | Python | gpl-3.0 | 2,328 | 0.007732 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutStringManipulation(Koan):
def test_use_format_to_interpolate_variables(self):
value1 = 'one'
value2 = 2
string = "The values are {0} and {1}".format(value1, value2)
self.assertEqual("The values are one and 2", string)
def test_formatted_values_can_be_shown_in_any_order_or_be_repeated(self):
value1 = 'doh'
value2 = 'DOH'
string = "The values are {1}, {0}, {0} and {1}!".format(value1, value2)
self.assertEqual("The values are DOH, doh, doh and DOH!", string)
def test_any_python_expression_may_be_interpolated(self):
import math # import a standard python module with math functions
decimal_places = 4
string = "The square root of 5 is {0:.{1}f}".format(math.sqrt(5), \
decimal_places)
self.assertEqual("The square root of 5 is 2.2361", string)
def test_you_can_get_a_substring_from_a_string(self):
string = "Bacon, lettuce and tomato"
self.assertEqual("let", string[7:10])
def test_you_can_get_a_single_character_from_a_string(self):
string = "Bacon, lettuce and tomato"
self.assertEqual("a", string[1])
def test_single_characters_can_be_represented_by_integers(self):
self.assertEqual(97, ord('a'))
self.assertEqual(True, ord('b') == (ord('a') + 1))
def test_strings_can_be_split(self):
string = "Sausage Egg Cheese"
words = string.split()
self.assertEqual(["Sausage", "Egg", "Cheese"], words)
def test_strings_can_be_split_with_different_patterns(self):
import re # import python regular expression library
string = "the,rain;in,spain"
pattern = re.compile(',|;')
words = pattern.split(string)
self.assertEqual(["the", "rain", "in", "spain"], words)
# `pattern` is a Python regular expression pattern which matches
# ',' or ';'
def test_raw_strings_do_not_interpret_escape_characters(self):
string = r'\n'
self.assertNotEqual('\n', string)
self.assertEqual('\\n', string)
self.assertEqual(2, len(string))
# Useful in regular expressions, file paths, URLs, etc.
def test_strings_can_be_joined(self):
words = ["Now", "is", "the", "time"]
self.assertEqual("Now is the time", ' '.join(words))
def test_strings_can_change_case(self):
self.assertEqual('Guido', 'guido'.capitalize())
self.assertEqual('GUIDO', 'guido'.upper())
self.assertEqual('timbot', 'TimBot'.lower())
self.assertEqual('Guido Van Rossum', 'guido van rossum'.title())
self.assertEqual('tOtAlLy AwEsOmE', 'ToTaLlY aWeSoMe'.swapcase())
| fengren/python_koans | python2/koans/about_string_manipulation.py | Python | mit | 2,781 | 0.000719 |
import numpy
from PIL import Image
import sys
#if len(sys.argv) != 3:
# sys.exit('usage: dice.py path_to_segmented_image path_to_ground_truth_image')
pairs = [['/home/ognawala/data/PatientMS-R/20140120T143753/20140120T143753_annotated_rf.png', '/home/ognawala/data/Patient-Mask/20140120T143753-mask.png'], ['/home/ognawala/data/PatientMS-R/20140120T150515/20140120T150515_annotated_rf.png', '/home/ognawala/data/Patient-Mask/20140120T150515-mask.png']]
# intersection set
n_aib = 0
#individual markings
n_y = 0
n_truth = 0
for p in pairs:
y = Image.open(p[0])
y = numpy.array(y, dtype='uint8')
print p[0]
print y.shape
truth_im = Image.open(p[1])
truth_y = numpy.array(truth_im, dtype='uint8')
print p[1]
print truth_y.shape
# flatten arrays
truth_y = truth_y.flatten()
y = y.flatten()
print truth_y.shape
print y.shape
for i in range(len(y)):
# both marked?
if y[i]==200 and truth_y[i]==0:
n_aib += 1
# y marked
if y[i]==200:
n_y += 1
# truth marked
if truth_y[i]==0:
n_truth += 1
dice = float(2*n_aib)/float(n_y+n_truth)
print dice
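# Illustrative helper (a sketch, not used by the script above): the same Dice
# computation written with numpy boolean masks instead of an explicit loop.
# The label values (200 for the segmentation, 0 for the ground truth) are the
# ones used in the loop above.
def dice_coefficient(seg, truth, seg_label=200, truth_label=0):
    seg_mask = (seg == seg_label)
    truth_mask = (truth == truth_label)
    intersection = numpy.count_nonzero(seg_mask & truth_mask)
    return 2.0 * intersection / (
        numpy.count_nonzero(seg_mask) + numpy.count_nonzero(truth_mask))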
| saahil/MSSegmentation | dice.py | Python | gpl-2.0 | 1,199 | 0.010008 |
# -*- coding: utf-8 -*-
"""
.. _tut-artifact-overview:
Overview of artifact detection
==============================
This tutorial covers the basics of artifact detection, and introduces the
artifact detection tools available in MNE-Python.
We begin as always by importing the necessary Python modules and loading some
:ref:`example data <sample-dataset>`:
"""
import os
import numpy as np
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(0, 60).load_data() # just use a fraction of data for speed here
###############################################################################
# What are artifacts?
# ^^^^^^^^^^^^^^^^^^^
#
# Artifacts are parts of the recorded signal that arise from sources other than
# the source of interest (i.e., neuronal activity in the brain). As such,
# artifacts are a form of interference or noise relative to the signal of
# interest. There are many possible causes of such interference, for example:
#
# - Environmental artifacts
# - Persistent oscillations centered around the `AC power line frequency`_
# (typically 50 or 60 Hz)
# - Brief signal jumps due to building vibration (such as a door slamming)
# - Electromagnetic field noise from nearby elevators, cell phones, the
# geomagnetic field, etc.
#
# - Instrumentation artifacts
# - Electromagnetic interference from stimulus presentation (such as EEG
# sensors picking up the field generated by unshielded headphones)
# - Continuous oscillations at specific frequencies used by head position
# indicator (HPI) coils
# - Random high-amplitude fluctuations (or alternatively, constant zero
# signal) in a single channel due to sensor malfunction (e.g., in surface
# electrodes, poor scalp contact)
#
# - Biological artifacts
# - Periodic `QRS`_-like signal patterns (especially in magnetometer
# channels) due to electrical activity of the heart
# - Short step-like deflections (especially in frontal EEG channels) due to
# eye movements
# - Large transient deflections (especially in frontal EEG channels) due to
# blinking
# - Brief bursts of high frequency fluctuations across several channels due
# to the muscular activity during swallowing
#
# There are also some cases where signals from within the brain can be
# considered artifactual. For example, if a researcher is primarily interested
# in the sensory response to a stimulus, but the experimental paradigm involves
# a behavioral response (such as a button press), the neural activity
# associated with planning and executing the button press could be considered
# an artifact relative to the signal of interest (i.e., the evoked sensory
# response).
#
# .. note::
# Artifacts of the same genesis may appear different in recordings made by
# different EEG or MEG systems, due to differences in sensor design (e.g.,
# passive vs. active EEG electrodes; axial vs. planar gradiometers, etc).
#
#
# What to do about artifacts
# ^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# There are 3 basic options when faced with artifacts in your recordings:
#
# 1. *Ignore* the artifact and carry on with analysis
# 2. *Exclude* the corrupted portion of the data and analyze the remaining data
# 3. *Repair* the artifact by suppressing artifactual part of the recording
# while (hopefully) leaving the signal of interest intact
#
# There are many different approaches to repairing artifacts, and MNE-Python
# includes a variety of tools for artifact repair, including digital filtering,
# independent components analysis (ICA), Maxwell filtering / signal-space
# separation (SSS), and signal-space projection (SSP). Separate tutorials
# demonstrate each of these techniques for artifact repair. Many of the
# artifact repair techniques work on both continuous (raw) data and on data
# that has already been epoched (though not necessarily equally well); some can
# be applied to `memory-mapped`_ data while others require the data to be
# copied into RAM. Of course, before you can choose any of these strategies you
# must first *detect* the artifacts, which is the topic of the next section.
#
#
# Artifact detection
# ^^^^^^^^^^^^^^^^^^
#
# MNE-Python includes a few tools for automated detection of certain artifacts
# (such as heartbeats and blinks), but of course you can always visually
# inspect your data to identify and annotate artifacts as well.
#
# We saw in :ref:`the introductory tutorial <tut-overview>` that the example
# data includes :term:`SSP projectors <projector>`, so before we look at
# artifacts let's set aside the projectors in a separate variable and then
# remove them from the :class:`~mne.io.Raw` object using the
# :meth:`~mne.io.Raw.del_proj` method, so that we can inspect our data in its
# original, raw state:
ssp_projectors = raw.info['projs']
raw.del_proj()
###############################################################################
# Low-frequency drifts
# ~~~~~~~~~~~~~~~~~~~~
#
# Low-frequency drifts are most readily detected by visual inspection using the
# basic :meth:`~mne.io.Raw.plot` method, though it is helpful to plot a
# relatively long time span and to disable channel-wise DC shift correction.
# Here we plot 60 seconds and show all the magnetometer channels:
mag_channels = mne.pick_types(raw.info, meg='mag')
raw.plot(duration=60, order=mag_channels, n_channels=len(mag_channels),
remove_dc=False)
###############################################################################
# Low-frequency drifts are readily removed by high-pass filtering at a fairly
# low cutoff frequency (the wavelength of the drifts seen above is probably
# around 20 seconds, so in this case a cutoff of 0.1 Hz would probably suppress
# most of the drift).
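# For example, such a filter could be applied to a copy of the data
# (a minimal sketch; the 0.1 Hz cutoff is the value suggested above):
raw_highpass = raw.copy().filter(l_freq=0.1, h_freq=None)
###############################################################################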
#
#
# Power line noise
# ~~~~~~~~~~~~~~~~
#
# Power line artifacts are easiest to see on plots of the spectrum, so we'll
# use :meth:`~mne.io.Raw.plot_psd` to illustrate.
fig = raw.plot_psd(tmax=np.inf, fmax=250, average=True)
# add some arrows at 60 Hz and its harmonics:
for ax in fig.axes[1:]:
freqs = ax.lines[-1].get_xdata()
psds = ax.lines[-1].get_ydata()
for freq in (60, 120, 180, 240):
idx = np.searchsorted(freqs, freq)
ax.arrow(x=freqs[idx], y=psds[idx] + 18, dx=0, dy=-12, color='red',
width=0.1, head_width=3, length_includes_head=True)
###############################################################################
# Here we see narrow frequency peaks at 60, 120, 180, and 240 Hz — the power
# line frequency of the USA (where the sample data was recorded) and its 2nd,
# 3rd, and 4th harmonics. Other peaks (around 25 to 30 Hz, and the second
# harmonic of those) are probably related to the heartbeat, which is more
# easily seen in the time domain using a dedicated heartbeat detection function
# as described in the next section.
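# A notch filter is one common way to suppress the power line peaks seen above
# (a minimal sketch; the frequencies match the arrows added to the plot):
raw_notch = raw.copy().notch_filter(freqs=[60, 120, 180, 240])
###############################################################################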
#
#
# Heartbeat artifacts (ECG)
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# MNE-Python includes a dedicated function
# :func:`~mne.preprocessing.find_ecg_events` in the :mod:`mne.preprocessing`
# submodule, for detecting heartbeat artifacts from either dedicated ECG
# channels or from magnetometers (if no ECG channel is present). Additionally,
# the function :func:`~mne.preprocessing.create_ecg_epochs` will call
# :func:`~mne.preprocessing.find_ecg_events` under the hood, and use the
# resulting events array to extract epochs centered around the detected
# heartbeat artifacts. Here we create those epochs, then show an image plot of
# the detected ECG artifacts along with the average ERF across artifacts. We'll
# show all three channel types, even though EEG channels are less strongly
# affected by heartbeat artifacts:
# sphinx_gallery_thumbnail_number = 4
ecg_epochs = mne.preprocessing.create_ecg_epochs(raw)
ecg_epochs.plot_image(combine='mean')
###############################################################################
# The horizontal streaks in the magnetometer image plot reflect the fact that
# the heartbeat artifacts are superimposed on low-frequency drifts like the one
# we saw in an earlier section; to avoid this you could pass
# ``baseline=(-0.5, -0.2)`` in the call to
# :func:`~mne.preprocessing.create_ecg_epochs`.
# You can also get a quick look at the
# ECG-related field pattern across sensors by averaging the ECG epochs together
# via the :meth:`~mne.Epochs.average` method, and then using the
# :meth:`mne.Evoked.plot_topomap` method:
avg_ecg_epochs = ecg_epochs.average().apply_baseline((-0.5, -0.2))
###############################################################################
# Here again we can visualize the spatial pattern of the associated field at
# various times relative to the peak of the ECG response:
avg_ecg_epochs.plot_topomap(times=np.linspace(-0.05, 0.05, 11))
###############################################################################
# Or, we can get an ERP/F plot with :meth:`~mne.Evoked.plot` or a combined
# scalp field maps and ERP/F plot with :meth:`~mne.Evoked.plot_joint`. Here
# we've specified the times for scalp field maps manually, but if not provided
# they will be chosen automatically based on peaks in the signal:
avg_ecg_epochs.plot_joint(times=[-0.25, -0.025, 0, 0.025, 0.25])
###############################################################################
# Ocular artifacts (EOG)
# ~~~~~~~~~~~~~~~~~~~~~~
#
# Similar to the ECG detection and epoching methods described above, MNE-Python
# also includes functions for detecting and extracting ocular artifacts:
# :func:`~mne.preprocessing.find_eog_events` and
# :func:`~mne.preprocessing.create_eog_epochs`. Once again we'll use the
# higher-level convenience function that automatically finds the artifacts and
# extracts them in to an :class:`~mne.Epochs` object in one step. Unlike the
# heartbeat artifacts seen above, ocular artifacts are usually most prominent
# in the EEG channels, but we'll still show all three channel types. We'll use
# the ``baseline`` parameter this time too; note that there are many fewer
# blinks than heartbeats, which makes the image plots appear somewhat blocky:
eog_epochs = mne.preprocessing.create_eog_epochs(raw, baseline=(-0.5, -0.2))
eog_epochs.plot_image(combine='mean')
eog_epochs.average().plot_joint()
###############################################################################
# Summary
# ^^^^^^^
#
# Familiarizing yourself with typical artifact patterns and magnitudes is a
# crucial first step in assessing the efficacy of later attempts to repair
# those artifacts. A good rule of thumb is that the artifact amplitudes should
# be orders of magnitude larger than your signal of interest — and there should
# be several occurrences of such events — in order to find signal
# decompositions that effectively estimate and repair the artifacts.
#
# Several other tutorials in this section illustrate the various tools for
# artifact repair, and discuss the pros and cons of each technique, for
# example:
#
# - :ref:`tut-artifact-ssp`
# - :ref:`tut-artifact-ica`
# - :ref:`tut-artifact-sss`
#
# There are also tutorials on general-purpose preprocessing steps such as
# :ref:`filtering and resampling <tut-filter-resample>` and :ref:`excluding
# bad channels <tut-bad-channels>` or :ref:`spans of data
# <tut-reject-data-spans>`.
#
# .. LINKS
#
# .. _`AC power line frequency`:
# https://en.wikipedia.org/wiki/Mains_electricity
# .. _`QRS`: https://en.wikipedia.org/wiki/QRS_complex
# .. _`memory-mapped`: https://en.wikipedia.org/wiki/Memory-mapped_file
| kambysese/mne-python | tutorials/preprocessing/plot_10_preprocessing_overview.py | Python | bsd-3-clause | 11,702 | 0 |
#! /usr/bin/env python
"""
couchbasekit.fields
~~~~~~~~~~~~~~~~~~~
:website: http://github.com/kirpit/couchbasekit
:copyright: Copyright 2013, Roy Enjoy <kirpit *at* gmail.com>, see AUTHORS.txt.
:license: MIT, see LICENSE.txt for details.
* :class:`couchbasekit.fields.CustomField`
* :class:`couchbasekit.fields.ChoiceField`
* :class:`couchbasekit.fields.EmailField`
* :class:`couchbasekit.fields.PasswordField`
"""
import re
from abc import ABCMeta
class CustomField(object):
"""The abstract custom field to be extended by all other field classes.
.. note::
You can also create your own custom field types by implementing this
class. All you have to do is to assign your final (that is calculated
and ready to be saved) value to the :attr:`value` property. Please
note that it should also accept unicode raw values, which are fetched
and returned from couchbase server. See :class:`PasswordField` source
code as an example.
Please contribute back if you create a generic and useful custom field.
"""
__metaclass__ = ABCMeta
_value = None
def __init__(self):
raise NotImplementedError()
def __repr__(self):
return repr(self.value)
def __eq__(self, other):
if type(other) is type(self) and other.value==self.value:
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
@property
def value(self):
"""Property to be used when saving a custom field into
:class:`couchbasekit.document.Document` instance.
:returns: The value to be saved for the field within
:class:`couchbasekit.document.Document` instances.
:rtype: mixed
"""
if self._value is None:
raise ValueError("%s's 'value' is not set." % type(self).__name__)
return self._value
@value.setter
def value(self, value):
"""Propery setter that should be used to assign final (calculated)
value.
"""
self._value = value
class ChoiceField(CustomField):
"""The custom field to be used for multi choice options such as gender,
static category list etc. This class can't be used directly that has to be
extended by your choice list class. Thankfully, it's just easy::
class Gender(ChoiceField):
CHOICES = {
'M': 'Male',
'F': 'Female',
}
and all you have to do is to pass the current value to create your choice
object:
>>> choice = Gender('F')
>>> choice.value
'F'
>>> choice.text
'Female'
:param choice: The choice value.
:type choice: basestring
"""
__metaclass__ = ABCMeta
CHOICES = {}
def __eq__(self, other):
if super(ChoiceField, self).__eq__(other) and other.CHOICES==self.CHOICES:
return True
return False
def __init__(self, choice):
if not isinstance(self.CHOICES, dict) or not len(self.CHOICES):
raise AttributeError("ChoiceFields must have dictionary 'CHOICES' "
"attribute and cannot be empty.")
if choice not in self.CHOICES:
raise ValueError("Default choice for %s must be "
"within the 'CHOICES' attribute."
% type(self).__name__)
self.value = choice
@property
def text(self):
"""Returns the text of the current choice, object property.
:rtype: unicode
"""
return self.CHOICES.get(self.value)
def iteritems(self):
return self.CHOICES.iteritems()
# stolen from django email validator:
EMAIL_RE = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
# quoted-string, see also http://tools.ietf.org/html/rfc2822#section-3.2.5
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"'
r')@((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)$)' # domain
r'|\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$', re.IGNORECASE) # literal form, ipv4 address (SMTP 4.1.3)
class EmailField(CustomField):
"""The custom field to be used for email addresses and intended to validate
them as well.
:param email: Email address to be saved.
:type email: basestring
"""
def __init__(self, email):
if not self.is_valid(email):
raise ValueError("Email address is invalid.")
self.value = email
@staticmethod
def is_valid(email):
"""Email address validation method.
:param email: Email address to be saved.
:type email: basestring
:returns: True if email address is correct, False otherwise.
:rtype: bool
"""
if isinstance(email, basestring) and EMAIL_RE.match(email):
return True
return False
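# Illustrative usage sketch (not part of the original module; the address is
# a placeholder): validating an email before wrapping it in an EmailField.
def _example_email_field():
    assert EmailField.is_valid('user@example.com')
    return EmailField('user@example.com')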
class PasswordField(CustomField):
"""The custom field to be used for password types.
It encrypts the raw passwords on-the-fly and depends on
`py-bcrypt` library for such encryption.
:param password: Raw or encrypted password value.
:type password: unicode
:raises: :exc:`ImportError` if `py-bcrypt` was not found.
"""
LOG_ROUNDS = 12
def __init__(self, password):
if not isinstance(password, basestring):
raise ValueError("Password must be a string or unicode.")
# do the encryption if raw password provided
if not password.startswith(('$2a$', '$2y$')):
bcrypt = self.get_bcrypt()
password = bcrypt.hashpw(password, bcrypt.gensalt(self.LOG_ROUNDS))
self.value = password
@staticmethod
def get_bcrypt():
"""Returns the `py-bcrypt` library for internal usage.
:returns: `py-bcrypt` package.
:raises: :exc:`ImportError` if `py-bcrypt` was not found.
"""
try: import bcrypt
except ImportError:
raise ImportError("PasswordField requires 'py-bcrypt' "
"library to hash the passwords.")
else: return bcrypt
def check_password(self, raw_password):
"""Validates the given raw password against the intance's encrypted one.
:param raw_password: Raw password to be checked against.
:type raw_password: unicode
:returns: True if comparison was successful, False otherwise.
:rtype: bool
:raises: :exc:`ImportError` if `py-bcrypt` was not found.
"""
bcrypt = self.get_bcrypt()
        return bcrypt.hashpw(raw_password, self.value) == self.value
| kirpit/couchbasekit | couchbasekit/fields.py | Python | mit | 6,692 | 0.002241 |
# Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sahara.i18n import _
import sahara.plugins.mapr.versions.version_handler_factory as vhf
import sahara.plugins.provisioning as p
class MapRPlugin(p.ProvisioningPluginBase):
title = 'MapR Hadoop Distribution'
description = _('The MapR Distribution provides a full Hadoop stack that'
' includes the MapR File System (MapR-FS), MapReduce,'
' a complete Hadoop ecosystem, and the MapR Control System'
' user interface')
def _get_handler(self, hadoop_version):
return vhf.VersionHandlerFactory.get().get_handler(hadoop_version)
def get_title(self):
return MapRPlugin.title
def get_description(self):
return MapRPlugin.description
def get_versions(self):
return vhf.VersionHandlerFactory.get().get_versions()
def get_node_processes(self, hadoop_version):
return self._get_handler(hadoop_version).get_node_processes()
def get_configs(self, hadoop_version):
return self._get_handler(hadoop_version).get_configs()
def configure_cluster(self, cluster):
self._get_handler(cluster.hadoop_version).configure_cluster(cluster)
def start_cluster(self, cluster):
self._get_handler(cluster.hadoop_version).start_cluster(cluster)
def validate(self, cluster):
self._get_handler(cluster.hadoop_version).validate(cluster)
def validate_scaling(self, cluster, existing, additional):
v_handler = self._get_handler(cluster.hadoop_version)
v_handler.validate_scaling(cluster, existing, additional)
def scale_cluster(self, cluster, instances):
v_handler = self._get_handler(cluster.hadoop_version)
v_handler.scale_cluster(cluster, instances)
def decommission_nodes(self, cluster, instances):
v_handler = self._get_handler(cluster.hadoop_version)
v_handler.decommission_nodes(cluster, instances)
def get_edp_engine(self, cluster, job_type):
v_handler = self._get_handler(cluster.hadoop_version)
return v_handler.get_edp_engine(cluster, job_type)
def get_open_ports(self, node_group):
v_handler = self._get_handler(node_group.cluster.hadoop_version)
return v_handler.get_open_ports(node_group)
| esikachev/scenario | sahara/plugins/mapr/plugin.py | Python | apache-2.0 | 2,850 | 0 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
def openstack_argument_spec():
# DEPRECATED: This argument spec is only used for the deprecated old
# OpenStack modules. It turns out that modern OpenStack auth is WAY
# more complex than this.
# Consume standard OpenStack environment variables.
# This is mainly only useful for ad-hoc command line operation as
# in playbooks one would assume variables would be used appropriately
OS_AUTH_URL=os.environ.get('OS_AUTH_URL', 'http://127.0.0.1:35357/v2.0/')
OS_PASSWORD=os.environ.get('OS_PASSWORD', None)
OS_REGION_NAME=os.environ.get('OS_REGION_NAME', None)
OS_USERNAME=os.environ.get('OS_USERNAME', 'admin')
OS_TENANT_NAME=os.environ.get('OS_TENANT_NAME', OS_USERNAME)
spec = dict(
login_username = dict(default=OS_USERNAME),
auth_url = dict(default=OS_AUTH_URL),
region_name = dict(default=OS_REGION_NAME),
availability_zone = dict(default=None),
)
if OS_PASSWORD:
spec['login_password'] = dict(default=OS_PASSWORD)
else:
spec['login_password'] = dict(required=True)
if OS_TENANT_NAME:
spec['login_tenant_name'] = dict(default=OS_TENANT_NAME)
else:
spec['login_tenant_name'] = dict(required=True)
return spec
def openstack_find_nova_addresses(addresses, ext_tag, key_name=None):
ret = []
for (k, v) in addresses.iteritems():
if key_name and k == key_name:
ret.extend([addrs['addr'] for addrs in v])
else:
for interface_spec in v:
if 'OS-EXT-IPS:type' in interface_spec and interface_spec['OS-EXT-IPS:type'] == ext_tag:
ret.append(interface_spec['addr'])
return ret
def openstack_full_argument_spec(**kwargs):
spec = dict(
cloud=dict(default=None),
auth_type=dict(default=None),
auth=dict(default=None, no_log=True),
region_name=dict(default=None),
availability_zone=dict(default=None),
verify=dict(default=True, aliases=['validate_certs']),
cacert=dict(default=None),
cert=dict(default=None),
key=dict(default=None, no_log=True),
wait=dict(default=True, type='bool'),
timeout=dict(default=180, type='int'),
api_timeout=dict(default=None, type='int'),
endpoint_type=dict(
default='public', choices=['public', 'internal', 'admin']
)
)
spec.update(kwargs)
return spec
def openstack_module_kwargs(**kwargs):
ret = {}
for key in ('mutually_exclusive', 'required_together', 'required_one_of'):
if key in kwargs:
if key in ret:
ret[key].extend(kwargs[key])
else:
ret[key] = kwargs[key]
return ret
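# Illustrative usage sketch (not part of this file; 'name' and 'state' are
# placeholder options, not real module parameters): how a cloud module would
# typically combine the helpers above when building its argument spec.
def _example_argument_spec():
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    return argument_spec, module_kwargs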
| matburt/ansible | lib/ansible/module_utils/openstack.py | Python | gpl-3.0 | 4,528 | 0.006846 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Account Journal Always Check Date module for OpenERP
# Copyright (C) 2013-2014 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Account Journal Always Check Date',
'version': '0.1',
'category': 'Accounting & Finance',
'license': 'AGPL-3',
'summary': 'Option Check Date in Period always active on journals',
'description': """
Check Date in Period always active on Account Journals
======================================================
This module:
* activates the 'Check Date in Period' option on all existing account journals,
* enables the 'Check Date in Period' option on new account journals,
* prevents users from deactivating the 'Check Date in Period' option.
So this module is an additional safeguard for countries where, on an account
move, the date must be inside the period.
Please contact Alexis de Lattre from Akretion <alexis.delattre@akretion.com>
for any help or question about this module.
""",
'author': "Akretion,Odoo Community Association (OCA)",
'website': 'http://www.akretion.com',
'depends': ['account'],
'data': [],
'installable': True,
'active': False,
}
| raycarnes/account-financial-tools | account_journal_always_check_date/__openerp__.py | Python | agpl-3.0 | 2,081 | 0 |
# -*- coding: utf-8 -*-
# © 2016 Cristian Moncho <cristian.moncho@diagram.es>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import logging
_logger = logging.getLogger(__name__)
_LOADED_VCS = []
def load_vcs(vcs):
vcs = vcs.lower()
modname = 'vcs.%s' % (vcs,)
clsname = vcs.title().replace('_', '')
try:
mod = getattr(__import__(modname, globals(), locals(), [], -1), vcs)
return getattr(mod, clsname)
except AttributeError:
raise Exception(
'Wrapper not found: from %s import %s' % (modname, clsname))
# [TODO] Automatically detect *.py files in 'vcs' folder
for vcs in ('git', 'bzr', 'hg', 'svn'):
try:
_LOADED_VCS.append((vcs, load_vcs(vcs)))
except Exception as e:
_logger.warning('Unable to load "%s" module: %s', vcs, e)
_logger.debug('Enabled VCS: %s', ', '.join(t[0] for t in _LOADED_VCS))
class VcsWrapper(object):
""" Version Control System Wrapper. """
def __new__(cls, vcs, path, **kwargs):
if not vcs:
vcs = cls._guess_vcs(path)
try:
return dict(_LOADED_VCS)[vcs](path, **kwargs)
except KeyError:
raise Exception('Unknown repository structure in %s' % (path,))
@classmethod
def available_vcs(cls):
return zip(*_LOADED_VCS)[0] if _LOADED_VCS else ()
@classmethod
def from_source(cls, vcs, path, source, branch=None, **kwargs):
res = cls(vcs, path)
res.init(source, branch=branch, **kwargs)
res.load()
return res
@classmethod
def from_dir(cls, vcs, path, **kwargs):
res = cls(vcs, path)
res.load(**kwargs)
return res
@staticmethod
def _guess_vcs(path):
""" Inspect the given path and search which VCS wrapper needs. """
for vcs, cls in _LOADED_VCS:
if cls.is_repo(path):
return vcs
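# Illustrative usage sketch (not part of the original module; the VCS name,
# path, URL and branch are placeholders): fetching a repository from a remote
# source, or loading an existing checkout and letting the wrapper guess its VCS.
def _example_usage():
    fresh = VcsWrapper.from_source(
        'git', '/tmp/addons', 'https://example.com/repo.git', branch='master')
    existing = VcsWrapper.from_dir(None, '/tmp/addons')
    return fresh, existing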
| crimoniv/odoo-module-tools | repository_management/vcs_wrapper/vcs_wrapper.py | Python | agpl-3.0 | 1,918 | 0 |
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from datetime import timedelta
from sqlalchemy import DDL
from sqlalchemy.event import listens_for
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm.base import NEVER_SET, NO_VALUE
from indico.core.db import db
from indico.core.db.sqlalchemy import UTCDateTime, PyIntEnum
from indico.core.db.sqlalchemy.util.models import populate_one_to_one_backrefs
from indico.util.date_time import overlaps
from indico.util.locators import locator_property
from indico.util.string import format_repr, return_ascii
from indico.util.struct.enum import TitledIntEnum
from indico.util.i18n import _
class TimetableEntryType(TitledIntEnum):
__titles__ = [None, _("Session Block"), _("Contribution"), _("Break")]
# entries are uppercase since `break` is a keyword...
SESSION_BLOCK = 1
CONTRIBUTION = 2
BREAK = 3
def _make_check(type_, *cols):
all_cols = {'session_block_id', 'contribution_id', 'break_id'}
required_cols = all_cols & set(cols)
forbidden_cols = all_cols - required_cols
criteria = ['{} IS NULL'.format(col) for col in sorted(forbidden_cols)]
criteria += ['{} IS NOT NULL'.format(col) for col in sorted(required_cols)]
condition = 'type != {} OR ({})'.format(type_, ' AND '.join(criteria))
return db.CheckConstraint(condition, 'valid_{}'.format(type_.name.lower()))
class TimetableEntry(db.Model):
__tablename__ = 'timetable_entries'
@declared_attr
def __table_args__(cls):
return (db.Index('ix_timetable_entries_start_dt_desc', cls.start_dt.desc()),
_make_check(TimetableEntryType.SESSION_BLOCK, 'session_block_id'),
_make_check(TimetableEntryType.CONTRIBUTION, 'contribution_id'),
_make_check(TimetableEntryType.BREAK, 'break_id'),
db.CheckConstraint("type != {} OR parent_id IS NULL".format(TimetableEntryType.SESSION_BLOCK),
'valid_parent'),
{'schema': 'events'})
id = db.Column(
db.Integer,
primary_key=True
)
event_id = db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
index=True,
nullable=False
)
parent_id = db.Column(
db.Integer,
db.ForeignKey('events.timetable_entries.id'),
index=True,
nullable=True,
)
session_block_id = db.Column(
db.Integer,
db.ForeignKey('events.session_blocks.id'),
index=True,
unique=True,
nullable=True
)
contribution_id = db.Column(
db.Integer,
db.ForeignKey('events.contributions.id'),
index=True,
unique=True,
nullable=True
)
break_id = db.Column(
db.Integer,
db.ForeignKey('events.breaks.id'),
index=True,
unique=True,
nullable=True
)
type = db.Column(
PyIntEnum(TimetableEntryType),
nullable=False
)
start_dt = db.Column(
UTCDateTime,
nullable=False
)
event_new = db.relationship(
'Event',
lazy=True,
backref=db.backref(
'timetable_entries',
order_by=lambda: TimetableEntry.start_dt,
cascade='all, delete-orphan',
lazy='dynamic'
)
)
session_block = db.relationship(
'SessionBlock',
lazy=False,
backref=db.backref(
'timetable_entry',
cascade='all, delete-orphan',
uselist=False,
lazy=True
)
)
contribution = db.relationship(
'Contribution',
lazy=False,
backref=db.backref(
'timetable_entry',
cascade='all, delete-orphan',
uselist=False,
lazy=True
)
)
break_ = db.relationship(
'Break',
cascade='all, delete-orphan',
single_parent=True,
lazy=False,
backref=db.backref(
'timetable_entry',
cascade='all, delete-orphan',
uselist=False,
lazy=True
)
)
children = db.relationship(
'TimetableEntry',
order_by='TimetableEntry.start_dt',
lazy=True,
backref=db.backref(
'parent',
remote_side=[id],
lazy=True
)
)
# relationship backrefs:
# - parent (TimetableEntry.children)
@property
def object(self):
if self.type == TimetableEntryType.SESSION_BLOCK:
return self.session_block
elif self.type == TimetableEntryType.CONTRIBUTION:
return self.contribution
elif self.type == TimetableEntryType.BREAK:
return self.break_
@object.setter
def object(self, value):
from indico.modules.events.contributions import Contribution
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.timetable.models.breaks import Break
self.session_block = self.contribution = self.break_ = None
if isinstance(value, SessionBlock):
self.session_block = value
elif isinstance(value, Contribution):
self.contribution = value
elif isinstance(value, Break):
self.break_ = value
elif value is not None:
raise TypeError('Unexpected object: {}'.format(value))
@hybrid_property
def duration(self):
return self.object.duration if self.object is not None else None
@duration.setter
def duration(self, value):
self.object.duration = value
@duration.expression
def duration(cls):
from indico.modules.events.contributions import Contribution
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.timetable.models.breaks import Break
return db.case({
TimetableEntryType.SESSION_BLOCK.value:
db.select([SessionBlock.duration])
.where(SessionBlock.id == cls.session_block_id)
.correlate_except(SessionBlock)
.as_scalar(),
TimetableEntryType.CONTRIBUTION.value:
db.select([Contribution.duration])
.where(Contribution.id == cls.contribution_id)
.correlate_except(Contribution)
.as_scalar(),
TimetableEntryType.BREAK.value:
db.select([Break.duration])
.where(Break.id == cls.break_id)
.correlate_except(Break)
.as_scalar(),
}, value=cls.type)
@hybrid_property
def end_dt(self):
if self.start_dt is None or self.duration is None:
return None
return self.start_dt + self.duration
@end_dt.expression
def end_dt(cls):
return cls.start_dt + cls.duration
@property
def session_siblings(self):
if self.type == TimetableEntryType.SESSION_BLOCK:
return [x for x in self.siblings
if x.session_block and x.session_block.session == self.session_block.session]
elif self.parent:
return self.siblings
else:
return []
@property
def siblings(self):
from indico.modules.events.timetable.util import get_top_level_entries, get_nested_entries
tzinfo = self.event_new.tzinfo
day = self.start_dt.astimezone(tzinfo).date()
siblings = (get_nested_entries(self.event_new)[self.parent_id]
if self.parent_id else
get_top_level_entries(self.event_new))
return [x for x in siblings if x.start_dt.astimezone(tzinfo).date() == day and x.id != self.id]
@property
def siblings_query(self):
tzinfo = self.event_new.tzinfo
day = self.start_dt.astimezone(tzinfo).date()
criteria = (TimetableEntry.id != self.id,
TimetableEntry.parent == self.parent,
db.cast(TimetableEntry.start_dt.astimezone(tzinfo), db.Date) == day)
return TimetableEntry.query.with_parent(self.event_new).filter(*criteria)
@locator_property
def locator(self):
return dict(self.event_new.locator, entry_id=self.id)
@return_ascii
def __repr__(self):
return format_repr(self, 'id', 'type', 'start_dt', 'end_dt', _repr=self.object)
def can_view(self, user):
"""Checks whether the user will see this entry in the timetable."""
if self.type in (TimetableEntryType.CONTRIBUTION, TimetableEntryType.BREAK):
return self.object.can_access(user)
elif self.type == TimetableEntryType.SESSION_BLOCK:
if self.object.can_access(user):
return True
return any(x.can_access(user) for x in self.object.contributions if not x.is_inheriting)
def extend_start_dt(self, start_dt):
assert start_dt < self.start_dt
extension = self.start_dt - start_dt
self.start_dt = start_dt
self.duration = self.duration + extension
def extend_end_dt(self, end_dt):
diff = end_dt - self.end_dt
if diff < timedelta(0):
raise ValueError("New end_dt is before current end_dt.")
self.duration += diff
def extend_parent(self, by_start=True, by_end=True):
"""Extend start/end of parent objects if needed.
        No extension is performed for entries crossing a day boundary in the
event timezone.
:param by_start: Extend parent by start datetime.
:param by_end: Extend parent by end datetime.
"""
tzinfo = self.event_new.tzinfo
if self.start_dt.astimezone(tzinfo).date() != self.end_dt.astimezone(tzinfo).date():
return
if self.parent is None:
if by_start and self.start_dt < self.event_new.start_dt:
self.event_new.start_dt = self.start_dt
if by_end and self.end_dt > self.event_new.end_dt:
self.event_new.end_dt = self.end_dt
else:
extended = False
if by_start and self.start_dt < self.parent.start_dt:
self.parent.extend_start_dt(self.start_dt)
extended = True
if by_end and self.end_dt > self.parent.end_dt:
self.parent.extend_end_dt(self.end_dt)
extended = True
if extended:
self.parent.extend_parent(by_start=by_start, by_end=by_end)
def is_parallel(self, in_session=False):
siblings = self.siblings if not in_session else self.session_siblings
for sibling in siblings:
if overlaps((self.start_dt, self.end_dt), (sibling.start_dt, sibling.end_dt)):
return True
return False
def move(self, start_dt):
"""Move the entry to start at a different time.
This method automatically moves children of the entry to
preserve their start time relative to the parent's start time.
"""
if self.type == TimetableEntryType.SESSION_BLOCK:
diff = start_dt - self.start_dt
for child in self.children:
child.start_dt += diff
self.start_dt = start_dt
def move_next_to(self, sibling, position='before'):
if sibling not in self.siblings:
raise ValueError("Not a sibling")
if position not in ('before', 'after'):
raise ValueError("Invalid position")
if position == 'before':
start_dt = sibling.start_dt - self.duration
else:
start_dt = sibling.end_dt
self.move(start_dt)
@listens_for(TimetableEntry.__table__, 'after_create')
def _add_timetable_consistency_trigger(target, conn, **kw):
sql = """
CREATE CONSTRAINT TRIGGER consistent_timetable
AFTER INSERT OR UPDATE
ON {}
DEFERRABLE INITIALLY DEFERRED
FOR EACH ROW
EXECUTE PROCEDURE events.check_timetable_consistency('timetable_entry');
""".format(target.fullname)
DDL(sql).execute(conn)
@listens_for(TimetableEntry.session_block, 'set')
def _set_session_block(target, value, *unused):
target.type = TimetableEntryType.SESSION_BLOCK
@listens_for(TimetableEntry.contribution, 'set')
def _set_contribution(target, value, *unused):
target.type = TimetableEntryType.CONTRIBUTION
@listens_for(TimetableEntry.break_, 'set')
def _set_break(target, value, *unused):
target.type = TimetableEntryType.BREAK
@listens_for(TimetableEntry.start_dt, 'set')
def _set_start_dt(target, value, oldvalue, *unused):
from indico.modules.events.util import register_time_change
if oldvalue in (NEVER_SET, NO_VALUE):
return
if value != oldvalue and target.object is not None:
register_time_change(target)
populate_one_to_one_backrefs(TimetableEntry, 'session_block', 'contribution', 'break_')
| DavidAndreev/indico | indico/modules/events/timetable/models/entries.py | Python | gpl-3.0 | 13,696 | 0.001168 |
from __future__ import absolute_import, unicode_literals
import unittest
from mopidy.internal import path
from mopidy.models import Album, Artist, Playlist, TlTrack, Track
from mopidy.mpd import translator
class TrackMpdFormatTest(unittest.TestCase):
track = Track(
uri='a uri',
artists=[Artist(name='an artist')],
name='a name',
album=Album(
name='an album', num_tracks=13,
artists=[Artist(name='an other artist')]),
track_no=7,
composers=[Artist(name='a composer')],
performers=[Artist(name='a performer')],
genre='a genre',
date='1977-01-01',
disc_no=1,
comment='a comment',
length=137000,
)
def setUp(self): # noqa: N802
self.media_dir = '/dir/subdir'
path.mtime.set_fake_time(1234567)
def tearDown(self): # noqa: N802
path.mtime.undo_fake()
def test_track_to_mpd_format_for_empty_track(self):
# TODO: this is likely wrong, see:
# https://github.com/mopidy/mopidy/issues/923#issuecomment-79584110
result = translator.track_to_mpd_format(Track())
self.assertIn(('file', ''), result)
self.assertIn(('Time', 0), result)
self.assertIn(('Artist', ''), result)
self.assertIn(('Title', ''), result)
self.assertIn(('Album', ''), result)
self.assertIn(('Track', 0), result)
self.assertNotIn(('Date', ''), result)
self.assertEqual(len(result), 6)
def test_track_to_mpd_format_with_position(self):
result = translator.track_to_mpd_format(Track(), position=1)
self.assertNotIn(('Pos', 1), result)
def test_track_to_mpd_format_with_tlid(self):
result = translator.track_to_mpd_format(TlTrack(1, Track()))
self.assertNotIn(('Id', 1), result)
def test_track_to_mpd_format_with_position_and_tlid(self):
result = translator.track_to_mpd_format(
TlTrack(2, Track()), position=1)
self.assertIn(('Pos', 1), result)
self.assertIn(('Id', 2), result)
def test_track_to_mpd_format_for_nonempty_track(self):
result = translator.track_to_mpd_format(
TlTrack(122, self.track), position=9)
self.assertIn(('file', 'a uri'), result)
self.assertIn(('Time', 137), result)
self.assertIn(('Artist', 'an artist'), result)
self.assertIn(('Title', 'a name'), result)
self.assertIn(('Album', 'an album'), result)
self.assertIn(('AlbumArtist', 'an other artist'), result)
self.assertIn(('Composer', 'a composer'), result)
self.assertIn(('Performer', 'a performer'), result)
self.assertIn(('Genre', 'a genre'), result)
self.assertIn(('Track', '7/13'), result)
self.assertIn(('Date', '1977-01-01'), result)
self.assertIn(('Disc', 1), result)
self.assertIn(('Pos', 9), result)
self.assertIn(('Id', 122), result)
self.assertNotIn(('Comment', 'a comment'), result)
self.assertEqual(len(result), 14)
def test_track_to_mpd_format_with_last_modified(self):
track = self.track.replace(last_modified=995303899000)
result = translator.track_to_mpd_format(track)
self.assertIn(('Last-Modified', '2001-07-16T17:18:19Z'), result)
def test_track_to_mpd_format_with_last_modified_of_zero(self):
track = self.track.replace(last_modified=0)
result = translator.track_to_mpd_format(track)
keys = [k for k, v in result]
self.assertNotIn('Last-Modified', keys)
def test_track_to_mpd_format_musicbrainz_trackid(self):
track = self.track.replace(musicbrainz_id='foo')
result = translator.track_to_mpd_format(track)
self.assertIn(('MUSICBRAINZ_TRACKID', 'foo'), result)
def test_track_to_mpd_format_musicbrainz_albumid(self):
album = self.track.album.replace(musicbrainz_id='foo')
track = self.track.replace(album=album)
result = translator.track_to_mpd_format(track)
self.assertIn(('MUSICBRAINZ_ALBUMID', 'foo'), result)
def test_track_to_mpd_format_musicbrainz_albumartistid(self):
artist = list(self.track.artists)[0].replace(musicbrainz_id='foo')
album = self.track.album.replace(artists=[artist])
track = self.track.replace(album=album)
result = translator.track_to_mpd_format(track)
self.assertIn(('MUSICBRAINZ_ALBUMARTISTID', 'foo'), result)
def test_track_to_mpd_format_musicbrainz_artistid(self):
artist = list(self.track.artists)[0].replace(musicbrainz_id='foo')
track = self.track.replace(artists=[artist])
result = translator.track_to_mpd_format(track)
self.assertIn(('MUSICBRAINZ_ARTISTID', 'foo'), result)
def test_concat_multi_values(self):
artists = [Artist(name='ABBA'), Artist(name='Beatles')]
translated = translator.concat_multi_values(artists, 'name')
self.assertEqual(translated, 'ABBA;Beatles')
def test_concat_multi_values_artist_with_no_name(self):
artists = [Artist(name=None)]
translated = translator.concat_multi_values(artists, 'name')
self.assertEqual(translated, '')
def test_concat_multi_values_artist_with_no_musicbrainz_id(self):
artists = [Artist(name='Jah Wobble')]
translated = translator.concat_multi_values(artists, 'musicbrainz_id')
self.assertEqual(translated, '')
def test_track_to_mpd_format_with_stream_title(self):
result = translator.track_to_mpd_format(self.track, stream_title='foo')
self.assertIn(('Name', 'a name'), result)
self.assertIn(('Title', 'foo'), result)
def test_track_to_mpd_format_with_empty_stream_title(self):
result = translator.track_to_mpd_format(self.track, stream_title='')
self.assertIn(('Name', 'a name'), result)
self.assertIn(('Title', ''), result)
def test_track_to_mpd_format_with_stream_and_no_track_name(self):
track = self.track.replace(name=None)
result = translator.track_to_mpd_format(track, stream_title='foo')
self.assertNotIn(('Name', ''), result)
self.assertIn(('Title', 'foo'), result)
class PlaylistMpdFormatTest(unittest.TestCase):
def test_mpd_format(self):
playlist = Playlist(tracks=[
Track(track_no=1), Track(track_no=2), Track(track_no=3)])
result = translator.playlist_to_mpd_format(playlist)
self.assertEqual(len(result), 3)
def test_mpd_format_with_range(self):
playlist = Playlist(tracks=[
Track(track_no=1), Track(track_no=2), Track(track_no=3)])
result = translator.playlist_to_mpd_format(playlist, 1, 2)
self.assertEqual(len(result), 1)
self.assertEqual(dict(result[0])['Track'], 2)
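# Hypothetical invocation to run just this module from the repository root:
#   python -m pytest tests/mpd/test_translator.py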
| dbrgn/mopidy | tests/mpd/test_translator.py | Python | apache-2.0 | 6,809 | 0 |
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2013 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import math
import pylab
import numpy
import moose
runtime = 50.0
def makeModel():
# create container for model
model = moose.Neutral( 'model' )
harmonic = moose.CubeMesh( '/model/harmonic' )
harmonic.volume = 1e-15
lotka = moose.CubeMesh( '/model/lotka' )
lotka.volume = 1e-15
# create molecules and reactions
x = moose.Pool( '/model/lotka/x' )
y = moose.Pool( '/model/lotka/y' )
z = moose.BufPool( '/model/lotka/z' ) # Dummy molecule.
xreac = moose.Reac( '/model/lotka/xreac' )
yreac = moose.Reac( '/model/lotka/yreac' )
xrate = moose.Function( '/model/lotka/xreac/func' )
yrate = moose.Function( '/model/lotka/yreac/func' )
# Parameters
alpha = 1.0
beta = 1.0
gamma = 1.0
delta = 1.0
k = 1.0
x.nInit = 2.0
y.nInit = 1.0
z.nInit = 0.0
xrate.x.num = 1
yrate.x.num = 1
xrate.expr = "x0 * " + str( beta ) + " - " + str( alpha )
yrate.expr = str( gamma ) + " - x0 * " + str( delta )
xreac.Kf = k
yreac.Kf = k
xreac.Kb = 0
yreac.Kb = 0
# connect them up for reactions
moose.connect( y, 'nOut', xrate.x[0], 'input' )
moose.connect( x, 'nOut', yrate.x[0], 'input' )
moose.connect( xrate, 'valueOut', xreac, 'setNumKf' )
moose.connect( yrate, 'valueOut', yreac, 'setNumKf' )
moose.connect( xreac, 'sub', x, 'reac' )
moose.connect( xreac, 'prd', z, 'reac' )
moose.connect( yreac, 'sub', y, 'reac' )
moose.connect( yreac, 'prd', z, 'reac' )
# Create the output tables
graphs = moose.Neutral( '/model/graphs' )
xplot = moose.Table2 ( '/model/graphs/x' )
yplot = moose.Table2 ( '/model/graphs/y' )
# connect up the tables
moose.connect( xplot, 'requestOut', x, 'getN' );
moose.connect( yplot, 'requestOut', y, 'getN' );
def main():
"""
The funcReacLotkaVolterra example shows how to use function objects
as part of differential equation systems in the framework of the MOOSE
    kinetic solvers. Here the system is set up explicitly through scripting;
    in normal use one would expect to use SBML.
In this example we set up a Lotka-Volterra system. The equations
are readily expressed as a pair of reactions each of whose rate is
governed by a function::
x' = x( alpha - beta.y )
y' = -y( gamma - delta.x )
This translates into two reactions::
x ---> z Kf = beta.y - alpha
y ---> z Kf = gamma - delta.x
Here z is a dummy molecule whose concentration is buffered to zero.
The model first runs using default Exponential Euler integration.
This is not particularly accurate even with a small timestep.
The model is then converted to use the deterministic Kinetic solver
Ksolve. This is accurate and faster.
    Note that we cannot use the stochastic GSSA solver for this system, since it
    cannot handle a reaction term whose rate keeps changing.
"""
makeModel()
for i in range( 11, 18 ):
moose.setClock( i, 0.001 )
moose.setClock( 18, 0.1 )
moose.reinit()
moose.start( runtime ) # Run the model
# Iterate through all plots, dump their contents to data.plot.
for x in moose.wildcardFind( '/model/graphs/#' ):
#x.xplot( 'scriptKineticModel.plot', x.name )
t = numpy.arange( 0, x.vector.size, 1 ) * x.dt # sec
pylab.plot( t, x.vector, label=x.name )
pylab.ylim( 0, 2.5 )
pylab.title( "Exponential Euler solution. Note slight error buildup" )
pylab.legend()
pylab.figure()
compt = moose.element( '/model/lotka' )
ksolve = moose.Ksolve( '/model/lotka/ksolve' )
stoich = moose.Stoich( '/model/lotka/stoich' )
stoich.compartment = compt
stoich.ksolve = ksolve
stoich.path = '/model/lotka/##'
moose.reinit()
moose.start( runtime ) # Run the model
for i in range( 11, 18 ):
moose.setClock( i, 0.1 )
for x in moose.wildcardFind( '/model/graphs/#' ):
t = numpy.arange( 0, x.vector.size, 1 ) * x.dt # sec
pylab.plot( t, x.vector, label=x.name )
pylab.ylim( 0, 2.5 )
pylab.title( "Runge-Kutta solution." )
pylab.legend()
pylab.show()
quit()
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
| dilawar/moose-full | moose-examples/snippets/funcReacLotkaVolterra.py | Python | gpl-2.0 | 5,043 | 0.029942 |
"""
Key-value store that holds XBlock field data read out of Blockstore
"""
from collections import namedtuple
from weakref import WeakKeyDictionary
import logging
from xblock.exceptions import InvalidScopeError, NoSuchDefinition
from xblock.fields import Field, BlockScope, Scope, UserScope, Sentinel
from xblock.field_data import FieldData
from openedx.core.djangoapps.xblock.learning_context.manager import get_learning_context_impl
from openedx.core.djangolib.blockstore_cache import (
get_bundle_version_files_cached,
get_bundle_draft_files_cached,
)
log = logging.getLogger(__name__)
ActiveBlock = namedtuple('ActiveBlock', ['olx_hash', 'changed_fields'])
DELETED = Sentinel('DELETED') # Special value indicating a field was reset to its default value
CHILDREN_INCLUDES = Sentinel('CHILDREN_INCLUDES') # Key for a pseudo-field that stores the XBlock's children info
MAX_DEFINITIONS_LOADED = 100 # How many of the most recently used XBlocks' field data to keep in memory at max.
class BlockInstanceUniqueKey(object):
"""
An empty object used as a unique key for each XBlock instance, see
get_weak_key_for_block() and BlockstoreFieldData._get_active_block(). Every
XBlock instance will get a unique one of these keys, even if they are
otherwise identical. Its purpose is similar to `id(block)`.
"""
def get_weak_key_for_block(block):
"""
Given an XBlock instance, return an object with the same lifetime as the
block, suitable as a key to hold block-specific data in a WeakKeyDictionary.
"""
# We would like to make the XBlock instance 'block' itself the key of
# BlockstoreFieldData.active_blocks, so that we have exactly one entry per
# XBlock instance in memory, and they'll each be automatically freed by the
# WeakKeyDictionary as needed. But because XModules implement
# __eq__() in a way that reads all field values, just attempting to use
# the block as a dict key here will trigger infinite recursion. So
# instead we key the dict on an arbitrary object,
# block key = BlockInstanceUniqueKey() which we create here. That way
# the weak reference will still cause the entry in the WeakKeyDictionary to
# be freed automatically when the block is no longer needed, and we
# still get one entry per XBlock instance.
if not hasattr(block, '_field_data_key_obj'):
block._field_data_key_obj = BlockInstanceUniqueKey() # pylint: disable=protected-access
return block._field_data_key_obj # pylint: disable=protected-access
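# Minimal illustration of the pattern above (not used by this module): keying a
# WeakKeyDictionary on the per-instance sentinel avoids ever calling the block's
# own __eq__/__hash__, while the entry is still freed together with the block.
#
#     cache = WeakKeyDictionary()
#     cache[get_weak_key_for_block(block)] = {"changed_fields": {}}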
def get_olx_hash_for_definition_key(def_key):
"""
Given a BundleDefinitionLocator, which identifies a specific version of an
OLX file, return the hash of the OLX file as given by the Blockstore API.
"""
if def_key.bundle_version:
# This is referring to an immutable file (BundleVersions are immutable so this can be aggressively cached)
files_list = get_bundle_version_files_cached(def_key.bundle_uuid, def_key.bundle_version)
else:
# This is referring to a draft OLX file which may be recently updated:
files_list = get_bundle_draft_files_cached(def_key.bundle_uuid, def_key.draft_name)
for entry in files_list:
if entry.path == def_key.olx_path:
return entry.hash_digest
raise NoSuchDefinition("Could not load OLX file for key {}".format(def_key))
class BlockstoreFieldData(FieldData):
"""
An XBlock FieldData implementation that reads XBlock field data directly out
of Blockstore.
It requires that every XBlock have a BundleDefinitionLocator as its
"definition key", since the BundleDefinitionLocator is what specifies the
OLX file path and version to use.
Within Blockstore there is no mechanism for setting different field values
at the usage level compared to the definition level, so we treat
usage-scoped fields identically to definition-scoped fields.
"""
def __init__(self):
"""
Initialize this BlockstoreFieldData instance.
"""
# loaded definitions: a dict where the key is the hash of the XBlock's
# olx file (as stated by the Blockstore API), and the values is the
# dict of field data as loaded from that OLX file. The field data dicts
# in this should be considered immutable, and never modified.
self.loaded_definitions = {}
# Active blocks: this holds the field data *changes* for all the XBlocks
# that are currently in memory being used for something. We only keep a
# weak reference so that the memory will be freed when the XBlock is no
# longer needed (e.g. at the end of a request)
        # The key of this dictionary is an ID object owned by the XBlock itself
# (see _get_active_block()) and the value is an ActiveBlock object
# (which holds olx_hash and changed_fields)
self.active_blocks = WeakKeyDictionary()
super(BlockstoreFieldData, self).__init__() # lint-amnesty, pylint: disable=super-with-arguments
def _getfield(self, block, name):
"""
Return the field with the given `name` from `block`.
If the XBlock doesn't have such a field, raises a KeyError.
"""
# First, get the field from the class, if defined
block_field = getattr(block.__class__, name, None)
if block_field is not None and isinstance(block_field, Field):
return block_field
# Not in the class, so name really doesn't name a field
raise KeyError(name)
def _check_field(self, block, name):
"""
Given a block and the name of one of its fields, check that we will be
able to read/write it.
"""
if name == CHILDREN_INCLUDES:
return # This is a pseudo-field used in conjunction with BlockstoreChildrenData
field = self._getfield(block, name)
if field.scope in (Scope.children, Scope.parent): # lint-amnesty, pylint: disable=no-else-raise
# This field data store is focused on definition-level field data, and children/parent is mostly
# relevant at the usage level. Scope.parent doesn't even seem to be used?
raise NotImplementedError("Setting Scope.children/parent is not supported by BlockstoreFieldData.")
else:
if field.scope.user != UserScope.NONE:
raise InvalidScopeError("BlockstoreFieldData only supports UserScope.NONE fields")
if field.scope.block not in (BlockScope.DEFINITION, BlockScope.USAGE):
raise InvalidScopeError(
"BlockstoreFieldData does not support BlockScope.{} fields".format(field.scope.block)
)
# There is also BlockScope.TYPE but we don't need to support that;
# it's mostly relevant as Scope.preferences(UserScope.ONE, BlockScope.TYPE)
# Which would be handled by a user-aware FieldData implementation
def _get_active_block(self, block):
"""
Get the ActiveBlock entry for the specified block, creating it if
necessary.
"""
key = get_weak_key_for_block(block)
if key not in self.active_blocks:
self.active_blocks[key] = ActiveBlock(
olx_hash=get_olx_hash_for_definition_key(block.scope_ids.def_id),
changed_fields={},
)
return self.active_blocks[key]
def get(self, block, name):
"""
Get the given field value from Blockstore
If the XBlock has been making changes to its fields, the value will be
in self._get_active_block(block).changed_fields[name]
Otherwise, the value comes from self.loaded_definitions which is a dict
of OLX file field data, keyed by the hash of the OLX file.
"""
self._check_field(block, name)
entry = self._get_active_block(block)
if name in entry.changed_fields:
value = entry.changed_fields[name]
if value == DELETED:
raise KeyError # KeyError means use the default value, since this field was deliberately set to default
return value
try:
saved_fields = self.loaded_definitions[entry.olx_hash]
except KeyError:
if name == CHILDREN_INCLUDES:
# Special case: parse_xml calls add_node_as_child which calls 'block.children.append()'
# BEFORE parse_xml is done, and .append() needs to read the value of children. So
return [] # start with an empty list, it will get filled in.
# Otherwise, this is an anomalous get() before the XML was fully loaded:
# This could happen if an XBlock's parse_xml() method tried to read a field before setting it,
# if an XBlock read field data in its constructor (forbidden), or if an XBlock was loaded via
# some means other than runtime.get_block(). One way this can happen is if you log/print an XBlock during
# XML parsing, because ScopedStorageMixin.__repr__ will try to print all field values, and any fields which
# aren't mentioned in the XML (which are left at their default) will be "not loaded yet."
log.exception(
"XBlock %s tried to read from field data (%s) that wasn't loaded from Blockstore!",
block.scope_ids.usage_id, name,
)
raise # Just use the default value for now; any exception raised here is caught anyways
return saved_fields[name]
# If 'name' is not found, this will raise KeyError, which means to use the default value
def set(self, block, name, value):
"""
Set the value of the field named `name`
"""
entry = self._get_active_block(block)
entry.changed_fields[name] = value
def delete(self, block, name):
"""
Reset the value of the field named `name` to the default
"""
self.set(block, name, DELETED)
def default(self, block, name):
"""
Get the default value for block's field 'name'.
The XBlock class will provide the default if KeyError is raised; this is
mostly for the purpose of context-specific overrides.
"""
raise KeyError(name)
def cache_fields(self, block):
"""
Cache field data:
This is called by the runtime after a block has parsed its OLX via its
parse_xml() methods and written all of its field values into this field
data store. The values will be stored in
self._get_active_block(block).changed_fields
so we know at this point that that isn't really "changed" field data,
it's the result of parsing the OLX. Save a copy into loaded_definitions.
"""
entry = self._get_active_block(block)
self.loaded_definitions[entry.olx_hash] = entry.changed_fields.copy()
# Reset changed_fields to indicate this block hasn't actually made any field data changes, just loaded from XML:
entry.changed_fields.clear()
if len(self.loaded_definitions) > MAX_DEFINITIONS_LOADED:
self.free_unused_definitions()
def has_changes(self, block):
"""
Does the specified block have any unsaved changes?
"""
entry = self._get_active_block(block)
return bool(entry.changed_fields)
def has_cached_definition(self, definition_key):
"""
Has the specified OLX file been loaded into memory?
"""
olx_hash = get_olx_hash_for_definition_key(definition_key)
return olx_hash in self.loaded_definitions
def free_unused_definitions(self):
"""
Free unused field data cache entries from self.loaded_definitions
as long as they're not in use.
"""
olx_hashes = set(self.loaded_definitions.keys())
olx_hashes_needed = set(entry.olx_hash for entry in self.active_blocks.values())
olx_hashes_safe_to_delete = olx_hashes - olx_hashes_needed
# To avoid doing this too often, randomly cull unused entries until
# we have only half as many as MAX_DEFINITIONS_LOADED in memory, if possible.
while olx_hashes_safe_to_delete and (len(self.loaded_definitions) > MAX_DEFINITIONS_LOADED / 2):
del self.loaded_definitions[olx_hashes_safe_to_delete.pop()]
class BlockstoreChildrenData(FieldData):
"""
An XBlock FieldData implementation that reads 'children' data out of
the definition fields in BlockstoreFieldData.
The children field contains usage keys and so is usage-specific; the
BlockstoreFieldData can only store field data that is not usage-specific. So
we store data about the <xblock-include /> elements that define the children
in BlockstoreFieldData (since that is not usage-specific), and this field
data implementation loads that <xblock-include /> data and transforms it
into the usage keys that comprise the standard .children field.
"""
def __init__(self, blockstore_field_data):
"""
Initialize this BlockstoreChildrenData instance.
"""
# The data store that holds Scope.usage and Scope.definition data:
self.authored_data_store = blockstore_field_data
super(BlockstoreChildrenData, self).__init__() # lint-amnesty, pylint: disable=super-with-arguments
def _check_field(self, block, name): # pylint: disable=unused-argument
"""
Given a block and the name of one of its fields, check that we will be
able to read/write it.
"""
if name != 'children':
raise InvalidScopeError("BlockstoreChildrenData can only read/write from a field named 'children'")
def get(self, block, name):
"""
Get the "children' field value.
We do this by reading the parsed <xblock-include /> values from
the regular authored data store and then transforming them to usage IDs.
"""
self._check_field(block, name)
children_includes = self.get_includes(block)
if not children_includes:
return []
# Now the .children field is required to be a list of usage IDs:
learning_context = get_learning_context_impl(block.scope_ids.usage_id)
child_usages = []
for parsed_include in children_includes:
child_usages.append(
learning_context.usage_for_child_include(
block.scope_ids.usage_id, block.scope_ids.def_id, parsed_include,
)
)
return child_usages
def set(self, block, name, value):
"""
Set the value of the field; requires name='children'
"""
self._check_field(block, name)
children_includes = self.authored_data_store.get(block, CHILDREN_INCLUDES)
if len(value) != len(children_includes):
raise RuntimeError(
"This runtime does not allow changing .children directly - use runtime.add_child_include instead."
)
# This is a no-op; the value of 'children' is derived from CHILDREN_INCLUDES
# so we never write to the children field directly. All we do is make sure it
# looks like it's still in sync with CHILDREN_INCLUDES
def get_includes(self, block):
"""
Get the list of <xblock-include /> elements representing this XBlock's
children.
"""
try:
return self.authored_data_store.get(block, CHILDREN_INCLUDES)
except KeyError:
# KeyError raised by an XBlock field data store means "use the
# default value", and the default value for the children field is an
# empty list.
return []
def append_include(self, block, parsed_include):
"""
Append an <xblock-include /> element to this XBlock's list of children
"""
self.authored_data_store.set(block, CHILDREN_INCLUDES, self.get_includes(block) + [parsed_include])
def delete(self, block, name):
"""
Reset the value of the field named `name` to the default
"""
self._check_field(block, name)
self.authored_data_store.set(block, CHILDREN_INCLUDES, [])
self.set(block, name, [])
| stvstnfrd/edx-platform | openedx/core/djangoapps/xblock/runtime/blockstore_field_data.py | Python | agpl-3.0 | 16,343 | 0.003243 |
#!/usr/bin/python3.4
#############################################################################
#
# Dictionary DB managing script. Add/Del/Search definitions
# Copyright (C) 2014 bertrand
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#############################################################################
###############
### Imports ###
import sys
import psycopg2 as PSQL
import textwrap as txtwrp
#####################
### Configuration ###
config = {
'VERSION_MAJOR' : '0',
'VERSION_MINOR' : '1',
'dbname' : 'bertrand',
'user' : 'bertrand'
}
#############
### USAGE ###
def usage():
print("Tool to insert/remove entries in the dicotionnnary.")
print("Version: " + config['VERSION_MAJOR'] + "." + config['VERSION_MINOR'])
print("Usage: " + sys.argv[0] + " <command> <options>")
print("")
print("Commands:")
print(" add Add definition to dictionnary.")
print(" del Remove definition from dictionnary.")
print(" help Print general help or command specific help.")
print(" search Search definition in dictionnary.")
print("")
###########
### ADD ###
def add():
argc = len(sys.argv)
if argc < 3:
__help_cmd(sys.argv[1])
return
req = {
'fields' : '',
'name' : '',
'def' : '',
'url' : ''
}
i=2
while i < argc:
if sys.argv[i] == "-d":
i += 1
req['def'] = sys.argv[i]
elif sys.argv[i] == "-f":
i += 1
req['fields'] = sys.argv[i]
elif sys.argv[i] == '-n':
i += 1
req['name'] = sys.argv[i]
elif sys.argv[i] == "-u":
i += 1
req['url'] = sys.argv[i]
else:
print("Unknown option '" + sys.argv[i] + "'")
__help_cmd(sys.argv[1])
return
i += 1
if req['fields'] == '':
print("Please specify fields with option '-f'.")
__help_cmd(sys.argv[1])
return
    elif req['name'] == '':
        print("Please specify a name with option '-n'.")
__help_cmd(sys.argv[1])
return
elif req['def'] == '':
print("Please specify definition with option '-d'.")
__help_cmd(sys.argv[1])
return
conn = PSQL.connect("dbname=" + config['dbname'] + " user=" + config['user'])
cur = conn.cursor()
req = cur.mogrify("INSERT INTO dico (fields,name,def,url) VALUES (%s, %s, %s, %s)",
("{" + req['fields'] + "}", req['name'], req['def'], req['url']))
print(req)
cur.execute(req)
conn.commit()
cur.close()
conn.close()
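# Example invocation (hypothetical values):
#   ./db.py add -n "ACID" -f "databases,transactions" \
#       -d "Atomicity, Consistency, Isolation, Durability." \
#       -u "https://en.wikipedia.org/wiki/ACID"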
###########
### DEL ###
def delete():
try:
defid = sys.argv[2]
except IndexError:
print("Missing argument.")
__help_cmd(sys.argv[1])
return
conn = PSQL.connect("dbname=" + config['dbname'] + " user=" + config['user'])
cur = conn.cursor()
req = cur.mogrify("DELETE FROM dico WHERE id=%s", (defid,))
print(req)
cur.execute(req)
conn.commit()
cur.close()
conn.close()
#####################
### HELP COMMANDS ###
def help_cmd():
try:
cmd = sys.argv[2]
except:
cmd = ''
__help_cmd(cmd)
def __help_cmd(cmd):
if cmd == '' :
usage()
elif cmd == "add" :
print("Command '" + cmd + "': Add definition to dictionnary.")
print("Usage: " + sys.argv[0] + " " + cmd + " <options>")
print("")
print("Options:")
print(" -d <str> Definition.")
print(" -f <str,str,..> List of fields.")
print(" -n <str> Name of the entry")
print(" -u <url> One url to a more complete definition.")
print("")
elif cmd == "del" :
print("Command '" + cmd + "': Delete definition from dictionnary.")
print("Usage: " + sys.argv[0] + " " + cmd + " <id>")
print("")
print("Param:")
print(" id ID of the definition to delete.")
print("")
elif cmd == "help" :
print("Command '" + cmd + "': Print help.")
print("Usage: " + sys.argv[0] + " " + cmd + " [command]")
print("")
print("Giving NO 'command' this will print the general help.")
print("Giving 'command' this will print the command specific help. ")
print("")
elif cmd == "search" :
print("Command '" + cmd + "': Search definition in dictionnary.")
print("Usage: " + sys.argv[0] + " " + cmd + " <options>")
print("")
print("Options:")
print(" -a Print all definitions in the table.")
print(" -f <str,str,...> Print definitions matching the set of given fields.")
print(" -i <id> Print definition matching the given ID.")
print(" -n <str> Print definition mathing the given entry name.")
print("")
else:
print("Unknown command: '" + cmd + "'")
usage()
##############
### SEARCH ###
def search():
try:
opt = sys.argv[2]
except IndexError:
__help_cmd(sys.argv[1])
return
else:
if not opt in ('-a', '-f', '-i', '-n'):
print("Unknown option '" + sys.argv[2] + "'")
__help_cmd(sys.argv[1])
return
conn = PSQL.connect("dbname=" + config['dbname'] + " user=" + config['user'])
cur = conn.cursor()
try:
if opt == "-a":
req = cur.mogrify("SELECT id,fields,name,def,url FROM dico")
elif opt == "-f":
optarg = sys.argv[3]
req = __search_build_req_fields(optarg.split(','))
elif opt == '-i':
optarg = sys.argv[3]
req = cur.mogrify("SELECT id,fields,name,def,url FROM dico WHERE id=%s", (optarg,))
elif opt == "-n":
optarg = sys.argv[3]
req = cur.mogrify("SELECT id,fields,name,def,url FROM dico WHERE name=%s", (optarg,))
except IndexError:
print("Missing argument.")
__help_cmd(sys.argv[1])
else:
print(req)
cur.execute(req)
print_rows(cur.fetchall())
conn.commit()
finally:
cur.close()
conn.close()
def __search_build_req_fields(fields):
# How do you like your SQL injection?
# I like mine crispy and with a python '+' ;)
# http://initd.org/psycopg/docs/usage.html
# http://xkcd.com/327/
# That will do for now ...
req = "SELECT id,fields,name,def,url FROM dico WHERE "
req += "'" + fields[0] + "'=ANY(fields)"
for f in fields[1:]:
req += " OR '" + f + "'=ANY(fields)"
return req
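# A hedged alternative sketch follows; it is not called anywhere in this script.
def __search_build_req_fields_safe(cur, fields):
    """
    Hypothetical parameterized variant of the query builder above (not wired into
    the 'search' command): it builds the same OR-chain of '=ANY(fields)' clauses
    but lets psycopg2 quote the values, avoiding the SQL injection noted above.
    """
    req = "SELECT id,fields,name,def,url FROM dico WHERE "
    req += " OR ".join(["%s=ANY(fields)"] * len(fields))
    return cur.mogrify(req, tuple(fields))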
###################################
### PRINT PSQL REQUESTS RESULTS ###
def print_rows(rows):
for row in rows:
print("---------------------")
print("ID : ", row[0])
__print_row_wrapped("FIELDS : ", row[1])
__print_row_wrapped("NAME : ", row[2])
__print_row_wrapped("DEF : ", row[3])
__print_row_wrapped("URL : ", row[4])
print("")
def __print_row_wrapped(label, value):
labellen = len(label)
wrapped = txtwrp.wrap(value)
print(label, wrapped[0])
for i in range(1, len(wrapped)):
print(' ' * labellen, wrapped[i])
############
### MAIN ###
commands = {
'add' : add,
'del' : delete,
'help' : help_cmd,
'search' : search
}
try:
    cmd = sys.argv[1]
except IndexError:
    usage()
    sys.exit()
if cmd not in commands:
    print("Unknown command: " + cmd)
    usage()
    sys.exit()
commands[cmd]()
| bertrandF/DictionaryDB | db.py | Python | gpl-2.0 | 8,296 | 0.008799 |
import os
import sys
import json
from optional_django import staticfiles
from optional_django.serializers import JSONEncoder
from optional_django.safestring import mark_safe
from optional_django import six
from js_host.function import Function
from js_host.exceptions import FunctionError
from react.render import RenderedComponent
from react.exceptions import ComponentSourceFileNotFound
from react.exceptions import ReactRenderingError
from react_router.conf import settings
from react_router.templates import MOUNT_JS
from react_router.bundle import bundle_component
from webpack.compiler import WebpackBundle
class RouteRenderedComponent(RenderedComponent):
def get_client_asset(self):
client_asset = None
bundled_component = self.get_bundle()
assets = bundled_component.get_assets()
for asset in assets:
if asset['path'] == self.path_to_source:
client_asset = asset
break
return client_asset
def get_var(self):
client_asset = self.get_client_asset()
if client_asset:
return 'client'
raise Exception("Client asset not found.")
def render_js(self):
client_asset = self.get_client_asset()
if client_asset:
client_bundle = mark_safe(WebpackBundle.render_tag(client_asset['url']))
return mark_safe(
'\n{bundle}\n<script>\n{mount_js}\n</script>\n'.format(
bundle=client_bundle,
mount_js=self.render_mount_js(),
)
)
def render_mount_js(self):
return mark_safe(
MOUNT_JS.format(
var=self.get_var(),
props=self.serialized_props or 'null',
container_id=self.get_container_id()
)
)
class RouteRedirect(object):
def __init__(self, pathname, query = None, state = None, *args, **kwargs):
self.path = pathname
self.query = query
if state and 'nextPathname' in state:
self.nextPath = state['nextPathname']
else:
self.nextPath = None
if self.path is None:
raise ReactRenderingError("No path returned for redirection.")
super(RouteRedirect, self).__init__(*args, **kwargs)
@property
def url(self):
if self.query:
return "%s?next=%s&%s" % (self.path, self.nextPath, self.query)
else:
return "%s?next=%s" % (self.path, self.nextPath)
class RouteNotFound(object):
def __init__(self, *args, **kwargs):
super(RouteNotFound, self).__init__(*args, **kwargs)
js_host_function = Function(settings.JS_HOST_FUNCTION)
def render_route(
# Rendering options
path, # path to routes file
client_path, # path to client routes file
request, # pass in request object
props=None,
to_static_markup=None,
# Bundling options
bundle=None,
translate=None,
# Prop handling
json_encoder=None
):
if not os.path.isabs(path):
abs_path = staticfiles.find(path)
if not abs_path:
raise ComponentSourceFileNotFound(path)
path = abs_path
if not os.path.exists(path):
raise ComponentSourceFileNotFound(path)
if not os.path.isabs(client_path):
abs_client_path = staticfiles.find(client_path)
if not abs_client_path:
raise ComponentSourceFileNotFound(client_path)
client_path = abs_client_path
if not os.path.exists(client_path):
raise ComponentSourceFileNotFound(client_path)
bundled_component = None
import re
client_re = re.compile(r"client-(?:\w*\d*).js",re.IGNORECASE)
server_re = re.compile(r"server-(?:\w*\d*).js",re.IGNORECASE)
if bundle or translate:
bundled_component = bundle_component(path, client_path, translate=translate)
assets = bundled_component.get_assets()
for asset in assets:
m = client_re.search(asset['name'])
if m:
client_path = asset['path']
m = server_re.search(asset['name'])
if m:
path = asset['path']
if json_encoder is None:
json_encoder = JSONEncoder
if props is not None:
serialized_props = json.dumps(props, cls=json_encoder)
else:
serialized_props = None
try:
location = {
'pathname': request.path,
'query': request.GET.dict()
}
cbData = json.loads(js_host_function.call(
path=path,
location=location,
serializedProps=serialized_props,
toStaticMarkup=to_static_markup
))
except FunctionError as e:
raise six.reraise(ReactRenderingError, ReactRenderingError(*e.args), sys.exc_info()[2])
if cbData['match']:
return RouteRenderedComponent(cbData['markup'], client_path, props, serialized_props, bundled_component, to_static_markup)
else:
if cbData['redirectInfo']:
return RouteRedirect(**cbData['redirectInfo'])
else:
return RouteNotFound()
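# Hypothetical usage sketch from a Django view (names and paths are illustrative,
# not part of this module):
#
#     component = render_route('js/routes.js', 'js/client-routes.js', request,
#                              props={'title': 'Home'}, bundle=True)
#     if isinstance(component, RouteRedirect):
#         return redirect(component.url)
#     if isinstance(component, RouteNotFound):
#         raise Http404()
#     return render(request, 'index.html', {'component': component})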
| HorizonXP/python-react-router | react_router/render.py | Python | mit | 5,105 | 0.003918 |
import nltk
import re
import pprint
def main():
IN = re.compile(r'.*\bin\b(?!\b.+ing)')
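    # The pattern keeps strings containing the word "in" but, via the negative
    # lookahead, discards phrases where "in" is followed by a gerund (e.g.
    # "success in supervising the transition of"), as in the NLTK book example.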
for doc in nltk.corpus.ieer.parsed_docs('NYT_19980315'):
for rel in nltk.sem.extract_rels('ORG', 'LOC', doc, corpus='ieer', pattern=IN):
print nltk.sem.relextract.rtuple(rel)
if __name__ == "__main__":
main()
| attibalazs/nltk-examples | 7.6_Relation_Extraction.py | Python | mit | 332 | 0.006024 |
__all__ = ["pval_task", "annotation_task"] | eeyorkey/ipac | tasks/__init__.py | Python | gpl-2.0 | 42 | 0.02381 |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Pavel Korshunov <Pavel.Korshunov@idiap.ch>
# Tue 22 Sep 17:21:35 CEST 2015
#
# Copyright (C) 2012-2015 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import math
import numpy
logger = logging.getLogger("bob.bio.spear")
def zeromean_unitvar_norm(data, mean, std):
""" Normalized the data with zero mean and unit variance. Mean and variance are in numpy.ndarray format"""
return numpy.divide(data - mean, std)
def calc_mean(c0, c1=[]):
""" Calculates the mean of the data."""
if c1 != []:
return (numpy.mean(c0, 0) + numpy.mean(c1, 0)) / 2.0
else:
return numpy.mean(c0, 0)
def calc_std(c0, c1=[]):
""" Calculates the variance of the data."""
if c1 == []:
return numpy.std(c0, 0)
prop = float(len(c0)) / float(len(c1))
if prop < 1:
p0 = int(math.ceil(1 / prop))
p1 = 1
else:
p0 = 1
p1 = int(math.ceil(prop))
return numpy.std(numpy.vstack(p0 * [c0] + p1 * [c1]), 0)
"""
@param c0
@param c1
@param nonStdZero if the std was zero, convert to one. This will avoid a zero division
"""
def calc_mean_std(c0, c1=[], nonStdZero=False):
""" Calculates both the mean of the data. """
mi = calc_mean(c0, c1)
std = calc_std(c0, c1)
if nonStdZero:
std[std == 0] = 1
return mi, std
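# Illustrative usage of the helpers above (not part of the original module):
#
#     mean, std = calc_mean_std(train_features, nonStdZero=True)
#     normalized = zeromean_unitvar_norm(test_features, mean, std)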
def vad_filter_features(vad_labels, features, filter_frames="trim_silence"):
"""Trim the spectrogram to remove silent head/tails from the speech sample.
Keep all remaining frames or either speech or non-speech only
    @param: filter_frames: the value is either 'silence_only' (keep only the silent parts, remove the speech),
    'speech_only' (keep only the speech frames), 'trim_silence' (trim silent heads and tails),
    or 'no_filter' (no filter is applied)
"""
if not features.size:
raise ValueError(
"vad_filter_features(): data sample is empty, no features extraction is possible"
)
vad_labels = numpy.asarray(vad_labels, dtype=numpy.int8)
features = numpy.asarray(features, dtype=numpy.float64)
features = numpy.reshape(features, (vad_labels.shape[0], -1))
# logger.info("RatioVectorExtractor, vad_labels shape: %s", str(vad_labels.shape))
# print ("RatioVectorExtractor, features max: %f and min: %f" %(numpy.max(features), numpy.min(features)))
# first, take the whole thing, in case there are problems later
filtered_features = features
# if VAD detection worked on this sample
if vad_labels is not None and filter_frames != "no_filter":
        # make sure the size of the VAD labels and the spectrogram length match
if len(vad_labels) == len(features):
# take only speech frames, as in VAD speech frames are 1 and silence are 0
(speech,) = numpy.nonzero(vad_labels)
silences = None
if filter_frames == "silence_only":
# take only silent frames - those for which VAD gave zeros
(silences,) = numpy.nonzero(vad_labels == 0)
if len(speech):
nzstart = speech[0] # index of the first non-zero
nzend = speech[-1] # index of the last non-zero
if filter_frames == "silence_only": # extract only silent frames
# take only silent frames in-between the speech
silences = silences[silences > nzstart]
silences = silences[silences < nzend]
filtered_features = features[silences, :]
elif filter_frames == "speech_only":
filtered_features = features[speech, :]
else: # when we take all
filtered_features = features[
nzstart : nzend + 1, :
] # numpy slicing is a non-closed interval [)
else:
logger.error(
"vad_filter_features(): VAD labels should be the same length as energy bands"
)
logger.info(
"vad_filter_features(): filtered_features shape: %s",
str(filtered_features.shape),
)
return filtered_features
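if __name__ == "__main__":
    # Minimal self-check sketch (not part of the original module). It assumes a toy
    # sample of six 2-dimensional frames in which frames 1..4 were labelled speech.
    toy_labels = [0, 1, 1, 1, 1, 0]
    toy_feats = numpy.arange(12, dtype=numpy.float64).reshape(6, 2)
    # 'trim_silence' drops the silent head and tail, leaving the 4 speech frames.
    print(vad_filter_features(toy_labels, toy_feats, "trim_silence").shape)  # (4, 2)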
| bioidiap/bob.bio.spear | bob/bio/spear/utils/extraction.py | Python | gpl-3.0 | 4,798 | 0.002293 |
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Nodes for coroutine objects and their creations.
Coroutines are turned into normal functions that create generator objects,
whose implementation lives here. The creation itself also lives here.
"""
from .ExpressionBases import ExpressionChildHavingBase
from .FunctionNodes import ExpressionFunctionEntryPointBase
class ExpressionMakeCoroutineObject(ExpressionChildHavingBase):
kind = "EXPRESSION_MAKE_COROUTINE_OBJECT"
named_child = "coroutine_ref"
__slots__ = ("variable_closure_traces",)
def __init__(self, coroutine_ref, source_ref):
assert coroutine_ref.getFunctionBody().isExpressionCoroutineObjectBody()
ExpressionChildHavingBase.__init__(
self, value=coroutine_ref, source_ref=source_ref
)
self.variable_closure_traces = None
def getDetailsForDisplay(self):
return {"coroutine": self.subnode_coroutine_ref.getFunctionBody().getCodeName()}
def computeExpression(self, trace_collection):
self.variable_closure_traces = []
for (
closure_variable
) in self.subnode_coroutine_ref.getFunctionBody().getClosureVariables():
trace = trace_collection.getVariableCurrentTrace(closure_variable)
trace.addNameUsage()
self.variable_closure_traces.append((closure_variable, trace))
# TODO: Coroutine body may know something too.
return self, None, None
@staticmethod
def mayRaiseException(exception_type):
return False
@staticmethod
def mayHaveSideEffects():
return False
def getClosureVariableVersions(self):
return self.variable_closure_traces
class ExpressionCoroutineObjectBody(ExpressionFunctionEntryPointBase):
kind = "EXPRESSION_COROUTINE_OBJECT_BODY"
__slots__ = ("qualname_setup", "needs_generator_return_exit")
def __init__(self, provider, name, code_object, flags, auto_release, source_ref):
ExpressionFunctionEntryPointBase.__init__(
self,
provider=provider,
name=name,
code_object=code_object,
code_prefix="coroutine",
flags=flags,
auto_release=auto_release,
source_ref=source_ref,
)
self.needs_generator_return_exit = False
self.qualname_setup = None
def getFunctionName(self):
return self.name
def markAsNeedsGeneratorReturnHandling(self, value):
self.needs_generator_return_exit = max(self.needs_generator_return_exit, value)
def needsGeneratorReturnHandling(self):
return self.needs_generator_return_exit == 2
def needsGeneratorReturnExit(self):
return bool(self.needs_generator_return_exit)
@staticmethod
def needsCreation():
return False
@staticmethod
def isUnoptimized():
return False
class ExpressionAsyncWait(ExpressionChildHavingBase):
kind = "EXPRESSION_ASYNC_WAIT"
named_child = "expression"
__slots__ = ("exception_preserving",)
def __init__(self, expression, source_ref):
ExpressionChildHavingBase.__init__(
self, value=expression, source_ref=source_ref
)
self.exception_preserving = False
@staticmethod
def isExpressionAsyncWait():
return True
def computeExpression(self, trace_collection):
# TODO: Might be predictable based awaitable analysis or for constants.
trace_collection.onExceptionRaiseExit(BaseException)
return self, None, None
class ExpressionAsyncWaitEnter(ExpressionAsyncWait):
kind = "EXPRESSION_ASYNC_WAIT_ENTER"
class ExpressionAsyncWaitExit(ExpressionAsyncWait):
kind = "EXPRESSION_ASYNC_WAIT_EXIT"
| kayhayen/Nuitka | nuitka/nodes/CoroutineNodes.py | Python | apache-2.0 | 4,498 | 0.001112 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
class URLMappings(object):
def __init__(self, src_root, build_dir):
self.mappings = {
'dart:mojo.internal': os.path.join(src_root, 'mojo/public/dart/sdk_ext/internal.dart'),
'dart:sky': os.path.join(build_dir, 'gen/sky/bindings/dart_sky.dart'),
'dart:sky.internals': os.path.join(src_root, 'sky/engine/bindings/sky_internals.dart'),
'dart:sky_builtin_natives': os.path.join(src_root, 'sky/engine/bindings/builtin_natives.dart'),
}
self.packages_root = os.path.join(build_dir, 'gen/dart-pkg/packages')
@property
def as_args(self):
return map(lambda item: '--url-mapping=%s,%s' % item, self.mappings.items())
| xunmengfeng/engine | sky/tools/skypy/url_mappings.py | Python | bsd-3-clause | 878 | 0.009112 |
import urllib
from flask import url_for
from flask_script import Manager, Server, Shell, Command
from config.app import create_app
from config.db import db
from config.initializers.newrelic_monitoring import NewrelicMonitoring
from federation_api.people.model import Person
manager = Manager(create_app)
server = Server(host='0.0.0.0', port=1786)
NewrelicMonitoring(manager.app())
manager.add_command('runserver', server)
def _make_context():
models = [Person]
models = {model.__name__: model for model in models}
return dict(app=create_app(), db=db, **models)
manager.add_command('shell', Shell(make_context=_make_context))
class Routes(Command):
def run(self):
output = []
app = manager.app
for rule in app.url_map.iter_rules():
options = {}
for arg in rule.arguments:
options[arg] = "[{0}]".format(arg)
methods = ','.join(rule.methods)
            # FIXME: Results in http://<host_name>:<host_port>/<blueprint_mount>/<endpoint>
url = url_for(rule.endpoint, **options)
line = urllib.unquote("{:50s} {:20s} {}".format(rule.endpoint, methods,
url))
output.append(line)
for line in sorted(output):
print(line)
manager.add_command('routes', Routes())
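# Hypothetical invocation: `python manage.py routes` prints one line per URL rule
# (endpoint name, allowed methods, and the unquoted URL), sorted by endpoint.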
if __name__ == '__main__':
manager.run()
| practo/federation | manage.py | Python | mit | 1,415 | 0.002827 |
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from direct.particles import Particles
from direct.particles import ForceGroup
from pandac.PandaModules import *
import random
from FireworkGlobals import *
colors = {WHITE: Vec4(1, 1, 1, 1),
RED: Vec4(1, 0.2, 0.2, 1),
BLUE: Vec4(0.2, 0.2, 1, 1),
YELLOW: Vec4(1, 1, 0.2, 1),
GREEN: Vec4(0.2, 1, 0.2, 1),
PINK: Vec4(1, 0.5, 0.5, 1),
PEACH: Vec4(0.9, 0.6, 0.4, 1),
PURPLE: Vec4(1, 0.1, 1, 1),
CYAN: Vec4(0.2, 1, 1, 1)}
textures = {SNOWFLAKE: 'phase_8/models/props/snowflake_treasure',
MUSICNOTE: 'phase_6/models/props/music_treasure',
FLOWER: 'phase_8/models/props/flower_treasure',
ICECREAM: 'phase_4/models/props/icecream',
STARFISH: 'phase_6/models/props/starfish_treasure',
ZZZ: 'phase_8/models/props/zzz_treasure'}
fireworkId = 0
def getNextSequenceName(name):
global fireworkId
fireworkId += 1
return '%s-%s' % (name, fireworkId)
def getColor(colorIndex):
return colors.get(colorIndex)
def getTexture(textureIndex):
return loader.loadModel(textures.get(textureIndex))
def shootFirework(style, x = 0, y = 0, z = 0, colorIndex1 = 0, colorIndex2 = 0, amp = 10):
func = style2shootFunc.get(style)
color1 = getColor(colorIndex1)
if style is CIRCLESPRITE:
color2 = getTexture(colorIndex2)
else:
color2 = getColor(colorIndex2)
if func and color1 and color2:
return func(x, y, z, color1, color2, amp)
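# Illustrative call (values are hypothetical): a large cyan burst that switches to
# white partway through, 30 units above the origin, using the style and color
# constants imported from FireworkGlobals.
#
#     shootFirework(CIRCLELARGE, x=0, y=0, z=30, colorIndex1=CYAN, colorIndex2=WHITE, amp=10)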
def shootFireworkRing(x, y, z, color1, color2, amp):
f = ParticleEffect.ParticleEffect()
p0 = Particles.Particles('particles-1')
p0.setFactory('PointParticleFactory')
p0.setRenderer('SparkleParticleRenderer')
p0.setEmitter('RingEmitter')
p0.setPoolSize(100)
p0.setBirthRate(0.01)
p0.setLitterSize(100)
p0.setLitterSpread(0)
p0.factory.setLifespanBase(1.5)
p0.factory.setLifespanSpread(0.5)
p0.factory.setMassBase(1.0)
p0.factory.setMassSpread(0.0)
p0.factory.setTerminalVelocityBase(20.0)
p0.factory.setTerminalVelocitySpread(2.0)
p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
p0.renderer.setUserAlpha(1.0)
p0.renderer.setCenterColor(color1)
p0.renderer.setEdgeColor(color2)
p0.renderer.setBirthRadius(0.3)
p0.renderer.setDeathRadius(0.3)
p0.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
p0.emitter.setAmplitude(0)
p0.emitter.setAmplitudeSpread(0)
f0 = ForceGroup.ForceGroup('gravity')
force0 = LinearSourceForce(Point3(x, y, z), LinearDistanceForce.FTONEOVERR, 0.1, 1.1 * amp, 1)
force0.setActive(1)
f0.addForce(force0)
force1 = LinearSinkForce(Point3(x, y, z), LinearDistanceForce.FTONEOVERR, 0.5, 2.0 * amp, 1)
force1.setActive(1)
f0.addForce(force1)
f.addForceGroup(f0)
p0.emitter.setRadius(4.0)
f.addParticles(p0)
f.setPos(x, y, z)
f.setHpr(0, random.random() * 180, random.random() * 180)
sfx = loader.loadSfx('phase_4/audio/sfx/firework_distance_03.ogg')
sfx.setVolume(0.7)
t = Sequence(Func(f.start, render, render), Func(sfx.play), Wait(0.5), Func(p0.setBirthRate, 3), Wait(1.5), Func(f.cleanup), name=getNextSequenceName('shootFireworkRing'))
t.start()
def shootFireworkRocket(x, y, z, color1, color2, amp):
f = ParticleEffect.ParticleEffect()
p0 = Particles.Particles('particles-1')
p0.setFactory('PointParticleFactory')
p0.setRenderer('SparkleParticleRenderer')
p0.setEmitter('SphereVolumeEmitter')
p0.setPoolSize(110)
p0.setBirthRate(0.01)
p0.setLitterSize(2)
p0.setLitterSpread(0)
p0.factory.setLifespanBase(0.4)
p0.factory.setLifespanSpread(0.1)
p0.factory.setMassBase(1.0)
p0.factory.setMassSpread(0.0)
p0.factory.setTerminalVelocityBase(400.0)
p0.factory.setTerminalVelocitySpread(0.0)
p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
p0.renderer.setUserAlpha(1.0)
p0.renderer.setCenterColor(color1)
p0.renderer.setEdgeColor(color2)
p0.renderer.setBirthRadius(0.6)
p0.renderer.setDeathRadius(0.6)
p0.renderer.setLifeScale(SparkleParticleRenderer.SPNOSCALE)
p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
p0.emitter.setAmplitude(amp)
p0.emitter.setAmplitudeSpread(0.0)
p0.emitter.setRadius(0.3)
f.addParticles(p0)
gravityForceGroup = ForceGroup.ForceGroup('gravity')
force0 = LinearVectorForce(Vec3(0.0, 0.0, -10.0), 1.0, 0)
force0.setActive(1)
gravityForceGroup.addForce(force0)
f.addForceGroup(gravityForceGroup)
f.setPos(x, y, z)
sfxName = random.choice(('phase_4/audio/sfx/firework_whistle_01.ogg', 'phase_4/audio/sfx/firework_whistle_02.ogg'))
sfx = loader.loadSfx(sfxName)
sfx.setVolume(0.4)
t = Sequence(Func(f.start, render, render), Func(sfx.play), LerpPosInterval(f, 2.0, Vec3(x, y, z + 20 * amp), blendType='easeInOut'), Func(p0.setBirthRate, 3), Wait(0.5), Func(f.cleanup), name=getNextSequenceName('shootFirework'))
t.start()
def shootPop(x, y, z, color1, color2, amp):
sfxName = random.choice(('phase_4/audio/sfx/firework_distance_01.ogg', 'phase_4/audio/sfx/firework_distance_02.ogg', 'phase_4/audio/sfx/firework_distance_03.ogg'))
sfx = loader.loadSfx(sfxName)
t = Sequence(Func(sfx.play), Wait(3), name=getNextSequenceName('shootFireworkRocket'))
t.start()
def shootFireworkCircle(x, y, z, color1, color2, amp):
return shootFireworkCircleGeneric(x, y, z, color1, color2, amp, 100)
def shootFireworkCircleLarge(x, y, z, color1, color2, amp):
return shootFireworkCircleGeneric(x, y, z, color1, color2, amp * 1.5, 200)
def shootFireworkCircleSmall(x, y, z, color1, color2, amp):
return shootFireworkCircleGeneric(x, y, z, color1, color2, amp * 0.5, 50)
def shootFireworkCircleGeneric(x, y, z, color1, color2, amp, poolSize):
f = ParticleEffect.ParticleEffect()
p0 = Particles.Particles('particles-1')
p0.setFactory('PointParticleFactory')
p0.setRenderer('SparkleParticleRenderer')
p0.setEmitter('SphereVolumeEmitter')
p0.setPoolSize(poolSize)
p0.setBirthRate(0.01)
p0.setLitterSize(poolSize)
p0.factory.setLifespanBase(2.0)
p0.factory.setLifespanSpread(0.5)
p0.factory.setTerminalVelocityBase(400.0)
p0.factory.setTerminalVelocitySpread(40.0)
p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
p0.renderer.setUserAlpha(1.0)
p0.renderer.setCenterColor(color1)
p0.renderer.setEdgeColor(color1)
p0.renderer.setBirthRadius(0.4)
p0.renderer.setDeathRadius(0.6)
p0.renderer.setLifeScale(SparkleParticleRenderer.SPSCALE)
p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
p0.emitter.setAmplitudeSpread(0.1)
p0.emitter.setAmplitude(amp)
p0.emitter.setRadius(0.1)
f.addParticles(p0)
circleForceGroup = ForceGroup.ForceGroup('gravity')
force1 = LinearSinkForce(Point3(x, y, z - 100), LinearDistanceForce.FTONEOVERRSQUARED, 2.0, 0.3 * amp * 0.1, 1)
force1.setActive(1)
circleForceGroup.addForce(force1)
f.addForceGroup(circleForceGroup)
f.setPos(x, y, z)
sfxName = random.choice(('phase_4/audio/sfx/firework_explosion_01.ogg', 'phase_4/audio/sfx/firework_explosion_02.ogg', 'phase_4/audio/sfx/firework_explosion_03.ogg'))
sfx = loader.loadSfx(sfxName)
sfx.setVolume(0.7)
t = Sequence(Func(f.start, render, render), Func(sfx.play), Wait(0.5), Func(p0.setBirthRate, 3), Wait(0.5), Func(p0.renderer.setCenterColor, color2), Func(p0.renderer.setEdgeColor, color2), Wait(1.5), Func(f.cleanup), name=getNextSequenceName('shootFireworkCircle'))
t.start()
def shootFireworkCircleSprite(x, y, z, color, texture, amp):
f = ParticleEffect.ParticleEffect()
p0 = Particles.Particles('particles-1')
p0.setFactory('PointParticleFactory')
p0.setRenderer('SpriteParticleRenderer')
p0.setEmitter('SphereVolumeEmitter')
p0.setPoolSize(100)
p0.setBirthRate(0.01)
p0.setLitterSize(100)
p0.factory.setLifespanBase(2.0)
p0.factory.setLifespanSpread(0.5)
p0.factory.setTerminalVelocityBase(400.0)
p0.factory.setTerminalVelocitySpread(40.0)
p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAUSER)
p0.renderer.setUserAlpha(1.0)
p0.renderer.setFromNode(texture)
p0.renderer.setColor(color)
p0.renderer.setXScaleFlag(1)
p0.renderer.setYScaleFlag(1)
p0.renderer.setInitialXScale(0.12)
p0.renderer.setFinalXScale(0.48)
p0.renderer.setInitialYScale(0.12)
p0.renderer.setFinalYScale(0.48)
p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
p0.emitter.setAmplitudeSpread(0.1)
p0.emitter.setAmplitude(amp)
p0.emitter.setRadius(0.1)
f.addParticles(p0)
circleForceGroup = ForceGroup.ForceGroup('gravity')
force1 = LinearSinkForce(Point3(x, y, z - 100), LinearDistanceForce.FTONEOVERRSQUARED, 2.0, 0.3 * amp * 0.1, 1)
force1.setActive(1)
circleForceGroup.addForce(force1)
f.addForceGroup(circleForceGroup)
f.setPos(x, y, z)
sfxName = random.choice(('phase_4/audio/sfx/firework_explosion_01.ogg', 'phase_4/audio/sfx/firework_explosion_02.ogg', 'phase_4/audio/sfx/firework_explosion_03.ogg'))
sfx = loader.loadSfx(sfxName)
sfx.setVolume(0.7)
t = Sequence(Func(f.start, render, render), Func(sfx.play), Wait(0.5), Func(p0.setBirthRate, 3), Wait(2.0), Func(f.cleanup), name=getNextSequenceName('shootFireworkSprite'))
t.start()
style2shootFunc = {CIRCLE: shootFireworkCircle,
CIRCLELARGE: shootFireworkCircleLarge,
CIRCLESMALL: shootFireworkCircleSmall,
CIRCLESPRITE: shootFireworkCircleSprite,
ROCKET: shootFireworkRocket,
RING: shootFireworkRing,
POP: shootPop}
| Spiderlover/Toontown | toontown/effects/Fireworks.py | Python | mit | 9,697 | 0.004744 |
#
# Copyright 2016 The Charles Stark Draper Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from twisted.web.server import Site
from twisted.web.static import File
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.application import service, internet
from twisted.application.app import startApplication
import os
import sys
import logging
from logging import config
import logging.handlers
import argparse
import simplejson
parser = argparse.ArgumentParser(description='Export incoming JSON logs to a specified file.')
parser.add_argument('-c', '--config', type=str, help='Configuration file path.')
parser.add_argument('-p', '--port', type=int, default=80, help='Port for the TCP server to run on.')
parser.add_argument('-l', '--log-directory', type=str, help='Directory in which to output log files.')
parser.add_argument('-f', '--filename', type=str, default="xdata", help='Specify filename to store logs.')
parser.add_argument('--allow-origin', type=str,\
help='List of string URLs to allow Cross-Origin requests from.', nargs='*')
arguments = parser.parse_known_args()[0]
valid_keys = set(['port', 'log_directory', 'filename', 'allow_origin'])
if arguments.config is not None:
with open(arguments.config, 'r') as config_file:
settings = simplejson.loads(config_file.read())
else:
settings = vars(arguments)
settings = { key: settings[key] for key in settings if key in valid_keys }
if 'port' not in settings:
settings['port'] = 80
if 'log_directory' not in settings or settings['log_directory'] is None:
print 'Missing required config parameter log_directory.'
sys.exit(1)
if os.path.exists(settings['log_directory']):
if not os.access(settings['log_directory'], os.W_OK):
print 'Insufficient permissions to write to log directory %s' % settings['log_directory']
sys.exit(1)
else:
try:
os.makedirs(settings['log_directory'])
except:
print 'Unable to create log directory %s' % settings['log_directory']
sys.exit(1)
# logging configuration
LOG_SETTINGS = {
'version': 1,
'handlers': {
settings['filename'] + '-js': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'INFO',
'formatter': 'xdata',
'filename': os.path.join(settings['log_directory'], settings['filename'] + '-js.log'),
'mode': 'a',
'maxBytes': 100e6,
'backupCount': 10,
},
# Deprecated
settings['filename'] + '-v2': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'INFO',
'formatter': 'xdata',
'filename': os.path.join(settings['log_directory'], settings['filename'] + '-v2.log'),
'mode': 'a',
'maxBytes': 100e6,
'backupCount': 10,
},
settings['filename'] + '-v3': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'INFO',
'formatter': 'xdata',
'filename': os.path.join(settings['log_directory'], settings['filename'] + '-v3.log'),
'mode': 'a',
'maxBytes': 100e6,
'backupCount': 10,
},
settings['filename'] + '-error': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'INFO',
'formatter': 'detailed',
'filename': os.path.join(settings['log_directory'], settings['filename'] + '-error.log'),
'mode': 'a',
'maxBytes': 100e6,
'backupCount': 10,
},
},
'formatters': {
'xdata': {
'format': '%(message)s',
},
'detailed': {
'format': '%(asctime)s %(module)-17s line:%(lineno)-4d ' \
'%(levelname)-8s %(message)s',
},
'email': {
'format': 'Timestamp: %(asctime)s\nModule: %(module)s\n' \
'Line: %(lineno)d\nMessage: %(message)s',
},
},
'loggers': {
settings['filename'] + '-js': {
'level':'DEBUG',
'handlers': [settings['filename'] + '-js',]
},
settings['filename'] + '-v2': {
'level':'DEBUG',
'handlers': [settings['filename'] + '-v2',]
},
settings['filename'] + '-v3': {
'level':'DEBUG',
'handlers': [settings['filename'] + '-v3',]
},
settings['filename'] + '-error': {
'level':'DEBUG',
'handlers': [settings['filename'] + '-error',]
},
}
}
config.dictConfig(LOG_SETTINGS)
logger_js = logging.getLogger(settings['filename'] + '-js')
logger = logging.getLogger(settings['filename'] + '-v2')
loggerv3 = logging.getLogger(settings['filename'] + '-v3')
logger_err = logging.getLogger(settings['filename'] + '-error')
wf_dict = {
0: 'WF_OTHER',
1: 'WF_DEFINE',
2: 'WF_GETDATA',
3: 'WF_EXPLORE',
4: 'WF_CREATE',
5: 'WF_ENRICH',
6: 'WF_TRANSFORM'
}
def get_allow_origin(request):
if 'allow_origin' not in settings or settings['allow_origin'] is None:
return '*'
elif isinstance(settings['allow_origin'], list):
origin = request.getHeader('Origin')
return 'null' if origin not in settings['allow_origin'] else origin
else:
return settings['allow_origin']
def log_json(data):
    if ('useraleVersion' in data) and (data['useraleVersion'].split('.')[0] == '4'):
logger_js.info(simplejson.dumps(data))
elif ('useraleVersion' in data) and (data['useraleVersion'].split('.')[0] == '3'):
loggerv3.info(simplejson.dumps(data))
elif ('parms' in data) and ('wf_state' in data['parms']):
data['wf_state_longname'] = wf_dict[data['parms']['wf_state']]
logger.info(simplejson.dumps(data))
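# Routing summary for log_json() above (illustrative payload fragments; the
# values are assumptions):
#   {"useraleVersion": "4.0.0", ...}   -> <filename>-js.log
#   {"useraleVersion": "3.0.0", ...}   -> <filename>-v3.log
#   {"parms": {"wf_state": 3}, ...}    -> <filename>-v2.log, with
#                                         wf_state_longname set to "WF_EXPLORE"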
class Logger(Resource):
def render_OPTIONS(self, request):
request.setHeader('Access-Control-Allow-Origin', get_allow_origin(request))
request.setHeader('Access-Control-Allow-Methods', 'POST')
request.setHeader('Access-Control-Allow-Headers', 'x-prototype-version,x-requested-with,Content-Type')
        request.setHeader('Access-Control-Max-Age', 2520) # 42 minutes
return ''
def render_POST(self, request):
request.setHeader('Access-Control-Allow-Origin', get_allow_origin(request))
request.setHeader('Access-Control-Allow-Methods', 'POST')
request.setHeader('Access-Control-Allow-Headers', 'x-prototype-version,x-requested-with,Content-Type')
        request.setHeader('Access-Control-Max-Age', 2520) # 42 minutes
data = simplejson.loads(request.content.getvalue())
try:
if isinstance(data, list):
for datum in data:
log_json(datum)
else:
log_json(data)
except Exception as e:
logger_err.error(e)
return ''
root = Resource()
root.putChild('send_log', Logger())
# create a resource to serve static files
tmp_service = internet.TCPServer(settings['port'], Site(root))
application = service.Application('User-ALE')
# attach the service to its parent application
tmp_service.setServiceParent(application)
startApplication(application, 0)
reactor.run()
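# Illustrative client sketch (not part of the original service; the host, port
# and payload below are assumptions):
#
#   import requests
#   requests.post('http://localhost:80/send_log',
#                 json={'useraleVersion': '3.0.0', 'activity': 'click'})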
| draperlaboratory/user-ale | demo/dashboard/files/twisted_app.py | Python | apache-2.0 | 7,791 | 0.005391 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
import json
import yaml
from kubernetes_py.utils import is_valid_string
class DeleteOptions(object):
"""
http://kubernetes.io/docs/api-reference/v1/definitions/#_v1_deleteoptions
"""
def __init__(self):
super(DeleteOptions, self).__init__()
# TODO(froch): add support for the below.
# self._preconditions = None
self._kind = "DeleteOptions"
self._api_version = "v1"
self._grace_period_seconds = 0
self._orphan_dependents = False
# ------------------------------------------------------------------------------------- kind
@property
def kind(self):
return self._kind
@kind.setter
def kind(self, k=0):
if not is_valid_string(k):
raise SyntaxError("DeleteOptions: kind: [ {0} ] is invalid.".format(k))
self._kind = k
# ------------------------------------------------------------------------------------- apiVersion
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, v=0):
if not is_valid_string(v):
raise SyntaxError("DeleteOptions: api_version: [ {0} ] is invalid.".format(v))
        self._api_version = v
# ------------------------------------------------------------------------------------- grace period seconds
@property
def grace_period_seconds(self):
return self._grace_period_seconds
@grace_period_seconds.setter
def grace_period_seconds(self, secs=0):
if not isinstance(secs, int):
raise SyntaxError("DeleteOptions: grace_period_seconds: [ {0} ] is invalid.".format(secs))
self._grace_period_seconds = secs
# ------------------------------------------------------------------------------------- orphanDependents
@property
def orphan_dependents(self):
return self._orphan_dependents
@orphan_dependents.setter
def orphan_dependents(self, orphan=False):
if not isinstance(orphan, bool):
raise SyntaxError("DeleteOptions: orphan_dependents: [ {0} ] is invalid.".format(orphan))
self._orphan_dependents = orphan
# ------------------------------------------------------------------------------------- serialize
def serialize(self):
data = {}
if self.kind is not None:
data["kind"] = self.kind
if self.api_version is not None:
data["apiVersion"] = self.api_version
if self.grace_period_seconds is not None:
data["gracePeriodSeconds"] = self.grace_period_seconds
if self.orphan_dependents is not None:
data["orphanDependents"] = self.orphan_dependents
return data
def as_json(self):
data = self.serialize()
j = json.dumps(data, indent=4)
return j
def as_yaml(self):
data = self.serialize()
y = yaml.dump(data, default_flow_style=False)
return y
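# Illustrative usage sketch (not part of the original module; the values below
# are assumptions for demonstration only):
if __name__ == "__main__":
    options = DeleteOptions()
    options.grace_period_seconds = 30
    options.orphan_dependents = True
    print(options.as_json())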
| mnubo/kubernetes-py | kubernetes_py/models/v1/DeleteOptions.py | Python | apache-2.0 | 3,140 | 0.002866 |
from reportlab.lib import styles
from reportlab.lib import colors
from reportlab.lib.units import cm
from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_RIGHT, TA_JUSTIFY
from reportlab.platypus import Preformatted, Paragraph, Frame, \
Image, Table, TableStyle, Spacer
def getParagraphStyles():
"""Returns a dictionary of styles to get you started.
We will provide a way to specify a module of these. Note that
this just includes TableStyles as well as ParagraphStyles for any
tables you wish to use.
"""
stylesheet = {}
ParagraphStyle = styles.ParagraphStyle
para = ParagraphStyle('Normal', None) #the ancestor of all
para.fontName = 'Times-Roman'
para.fontSize = 24
para.leading = 28
stylesheet['Normal'] = para
#This one is spaced out a bit...
para = ParagraphStyle('BodyText', stylesheet['Normal'])
para.spaceBefore = 12
stylesheet['BodyText'] = para
#Indented, for lists
para = ParagraphStyle('Indent', stylesheet['Normal'])
para.leftIndent = 36
para.firstLineIndent = 0
stylesheet['Indent'] = para
para = ParagraphStyle('Centered', stylesheet['Normal'])
para.alignment = TA_CENTER
stylesheet['Centered'] = para
para = ParagraphStyle('BigCentered', stylesheet['Normal'])
para.spaceBefore = 12
para.alignment = TA_CENTER
stylesheet['BigCentered'] = para
para = ParagraphStyle('Italic', stylesheet['BodyText'])
para.fontName = 'Times-Italic'
stylesheet['Italic'] = para
para = ParagraphStyle('Title', stylesheet['Normal'])
para.fontName = 'Times-Roman'
para.fontSize = 48
para.leading = 58
para.alignment = TA_CENTER
stylesheet['Title'] = para
para = ParagraphStyle('Heading1', stylesheet['Normal'])
para.fontName = 'Times-Bold'
para.fontSize = 36
para.leading = 44
para.alignment = TA_CENTER
stylesheet['Heading1'] = para
para = ParagraphStyle('Heading2', stylesheet['Normal'])
para.fontName = 'Times-Bold'
para.fontSize = 28
para.leading = 34
para.spaceBefore = 24
stylesheet['Heading2'] = para
para = ParagraphStyle('Heading3', stylesheet['Normal'])
para.fontName = 'Times-BoldItalic'
para.spaceBefore = 24
stylesheet['Heading3'] = para
para = ParagraphStyle('Heading4', stylesheet['Normal'])
para.fontName = 'Times-BoldItalic'
para.spaceBefore = 6
stylesheet['Heading4'] = para
para = ParagraphStyle('Bullet', stylesheet['Normal'])
para.firstLineIndent = 0
para.leftIndent = 56
para.spaceBefore = 6
para.bulletFontName = 'Symbol'
para.bulletFontSize = 24
para.bulletIndent = 20
stylesheet['Bullet'] = para
para = ParagraphStyle('Definition', stylesheet['Normal'])
#use this for definition lists
para.firstLineIndent = 0
para.leftIndent = 72
para.bulletIndent = 0
para.spaceBefore = 12
para.bulletFontName = 'Helvetica-BoldOblique'
para.bulletFontSize = 24
stylesheet['Definition'] = para
para = ParagraphStyle('Code', stylesheet['Normal'])
para.fontName = 'Courier'
para.fontSize = 16
para.leading = 18
para.leftIndent = 36
stylesheet['Code'] = para
para = ParagraphStyle('PythonCode', stylesheet['Normal'])
para.fontName = 'Courier'
para.fontSize = 16
para.leading = 18
para.leftIndent = 36
stylesheet['PythonCode'] = para
para = ParagraphStyle('Small', stylesheet['Normal'])
para.fontSize = 12
para.leading = 14
stylesheet['Small'] = para
#now for a table
ts = TableStyle([
('FONT', (0,0), (-1,-1), 'Times-Roman', 24),
('LINEABOVE', (0,0), (-1,0), 2, colors.green),
('LINEABOVE', (0,1), (-1,-1), 0.25, colors.black),
('LINEBELOW', (0,-1), (-1,-1), 2, colors.green),
('LINEBEFORE', (-1,0), (-1,-1), 2, colors.black),
('ALIGN', (1,1), (-1,-1), 'RIGHT'), #all numeric cells right aligned
('TEXTCOLOR', (0,1), (0,-1), colors.red),
('BACKGROUND', (0,0), (-1,0), colors.Color(0,0.7,0.7))
])
stylesheet['table1'] = ts
return stylesheet
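# Illustrative usage sketch (not part of the original module): build the default
# stylesheet and list the registered paragraph and table style names.
if __name__ == '__main__':
    sheet = getParagraphStyles()
    for name in sorted(sheet.keys()):
        print(name)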
| nickpack/reportlab | tools/pythonpoint/styles/standard.py | Python | bsd-3-clause | 4,262 | 0.005631 |
# This file is part of the MapProxy project.
# Copyright (C) 2011-2013 Omniscale <http://omniscale.de>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
import os
import re
import sqlite3
import threading
from mapproxy.cache.base import TileCacheBase, tile_buffer, REMOVE_ON_UNLOCK
from mapproxy.compat import BytesIO, PY2, itertools
from mapproxy.image import ImageSource
from mapproxy.srs import get_epsg_num
from mapproxy.util.fs import ensure_directory
from mapproxy.util.lock import FileLock
log = logging.getLogger(__name__)
class GeopackageCache(TileCacheBase):
supports_timestamp = False
def __init__(self, geopackage_file, tile_grid, table_name, with_timestamps=False, timeout=30, wal=False):
self.tile_grid = tile_grid
self.table_name = self._check_table_name(table_name)
self.lock_cache_id = 'gpkg' + hashlib.md5(geopackage_file.encode('utf-8')).hexdigest()
self.geopackage_file = geopackage_file
# XXX timestamps not implemented
self.supports_timestamp = with_timestamps
self.timeout = timeout
self.wal = wal
self.ensure_gpkg()
self._db_conn_cache = threading.local()
@property
def db(self):
if not getattr(self._db_conn_cache, 'db', None):
self.ensure_gpkg()
self._db_conn_cache.db = sqlite3.connect(self.geopackage_file, timeout=self.timeout)
return self._db_conn_cache.db
def cleanup(self):
"""
Close all open connection and remove them from cache.
"""
if getattr(self._db_conn_cache, 'db', None):
self._db_conn_cache.db.close()
self._db_conn_cache.db = None
@staticmethod
def _check_table_name(table_name):
"""
>>> GeopackageCache._check_table_name("test")
'test'
>>> GeopackageCache._check_table_name("test_2")
'test_2'
>>> GeopackageCache._check_table_name("test-2")
'test-2'
>>> GeopackageCache._check_table_name("test3;")
Traceback (most recent call last):
...
ValueError: The table_name test3; contains unsupported characters.
>>> GeopackageCache._check_table_name("table name")
Traceback (most recent call last):
...
ValueError: The table_name table name contains unsupported characters.
        @param table_name: A desired name for a geopackage table.
@return: The name of the table if it is good, otherwise an exception.
"""
# Regex string indicating table names which will be accepted.
regex_str = '^[a-zA-Z0-9_-]+$'
if re.match(regex_str, table_name):
return table_name
else:
msg = ("The table name may only contain alphanumeric characters, an underscore, "
"or a dash: {}".format(regex_str))
log.info(msg)
raise ValueError("The table_name {0} contains unsupported characters.".format(table_name))
def ensure_gpkg(self):
if not os.path.isfile(self.geopackage_file):
with FileLock(self.geopackage_file + '.init.lck',
remove_on_unlock=REMOVE_ON_UNLOCK):
ensure_directory(self.geopackage_file)
self._initialize_gpkg()
else:
if not self.check_gpkg():
ensure_directory(self.geopackage_file)
self._initialize_gpkg()
def check_gpkg(self):
if not self._verify_table():
return False
if not self._verify_gpkg_contents():
return False
if not self._verify_tile_size():
return False
return True
def _verify_table(self):
with sqlite3.connect(self.geopackage_file) as db:
cur = db.execute("""SELECT name FROM sqlite_master WHERE type='table' AND name=?""",
(self.table_name,))
content = cur.fetchone()
if not content:
                # Table doesn't exist; _initialize_gpkg will create a new one.
return False
return True
def _verify_gpkg_contents(self):
with sqlite3.connect(self.geopackage_file) as db:
cur = db.execute("""SELECT * FROM gpkg_contents WHERE table_name = ?"""
, (self.table_name,))
results = cur.fetchone()
if not results:
                # Table doesn't exist in gpkg_contents; _initialize_gpkg will add it.
return False
gpkg_data_type = results[1]
gpkg_srs_id = results[9]
cur = db.execute("""SELECT * FROM gpkg_spatial_ref_sys WHERE srs_id = ?"""
, (gpkg_srs_id,))
gpkg_coordsys_id = cur.fetchone()[3]
if gpkg_data_type.lower() != "tiles":
log.info("The geopackage table name already exists for a data type other than tiles.")
raise ValueError("table_name is improperly configured.")
if gpkg_coordsys_id != get_epsg_num(self.tile_grid.srs.srs_code):
log.info(
"The geopackage {0} table name {1} already exists and has an SRS of {2}, which does not match the configured" \
" Mapproxy SRS of {3}.".format(self.geopackage_file, self.table_name, gpkg_coordsys_id,
get_epsg_num(self.tile_grid.srs.srs_code)))
raise ValueError("srs is improperly configured.")
return True
def _verify_tile_size(self):
with sqlite3.connect(self.geopackage_file) as db:
cur = db.execute(
"""SELECT * FROM gpkg_tile_matrix WHERE table_name = ?""",
(self.table_name,))
results = cur.fetchall()
results = results[0]
tile_size = self.tile_grid.tile_size
if not results:
# There is no tile conflict. Return to allow the creation of new tiles.
return True
gpkg_table_name, gpkg_zoom_level, gpkg_matrix_width, gpkg_matrix_height, gpkg_tile_width, gpkg_tile_height, \
gpkg_pixel_x_size, gpkg_pixel_y_size = results
resolution = self.tile_grid.resolution(gpkg_zoom_level)
if gpkg_tile_width != tile_size[0] or gpkg_tile_height != tile_size[1]:
            log.info(
                "The geopackage {0} table name {1} already exists and has tile sizes of ({2},{3})"
                " which is different from the configured tile sizes of ({4},{5}).".format(self.geopackage_file,
self.table_name,
gpkg_tile_width,
gpkg_tile_height,
tile_size[0],
tile_size[1]))
log.info("The current mapproxy configuration is invalid for this geopackage.")
raise ValueError("tile_size is improperly configured.")
if not is_close(gpkg_pixel_x_size, resolution) or not is_close(gpkg_pixel_y_size, resolution):
            log.info(
                "The geopackage {0} table name {1} already exists and level {2} has a resolution of ({3:.13f},{4:.13f})"
" which is different than the configured resolution of ({5:.13f},{6:.13f}).".format(self.geopackage_file,
self.table_name,
gpkg_zoom_level,
gpkg_pixel_x_size,
gpkg_pixel_y_size,
resolution,
resolution))
log.info("The current mapproxy configuration is invalid for this geopackage.")
raise ValueError("res is improperly configured.")
return True
def _initialize_gpkg(self):
log.info('initializing Geopackage file %s', self.geopackage_file)
db = sqlite3.connect(self.geopackage_file)
if self.wal:
db.execute('PRAGMA journal_mode=wal')
proj = get_epsg_num(self.tile_grid.srs.srs_code)
stmts = ["""
CREATE TABLE IF NOT EXISTS gpkg_contents
(table_name TEXT NOT NULL PRIMARY KEY, -- The name of the tiles, or feature table
data_type TEXT NOT NULL, -- Type of data stored in the table: "features" per clause Features (http://www.geopackage.org/spec/#features), "tiles" per clause Tiles (http://www.geopackage.org/spec/#tiles), or an implementer-defined value for other data tables per clause in an Extended GeoPackage
identifier TEXT UNIQUE, -- A human-readable identifier (e.g. short name) for the table_name content
description TEXT DEFAULT '', -- A human-readable description for the table_name content
last_change DATETIME NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ','now')), -- Timestamp value in ISO 8601 format as defined by the strftime function %Y-%m-%dT%H:%M:%fZ format string applied to the current time
min_x DOUBLE, -- Bounding box minimum easting or longitude for all content in table_name
min_y DOUBLE, -- Bounding box minimum northing or latitude for all content in table_name
max_x DOUBLE, -- Bounding box maximum easting or longitude for all content in table_name
max_y DOUBLE, -- Bounding box maximum northing or latitude for all content in table_name
srs_id INTEGER, -- Spatial Reference System ID: gpkg_spatial_ref_sys.srs_id; when data_type is features, SHALL also match gpkg_geometry_columns.srs_id; When data_type is tiles, SHALL also match gpkg_tile_matrix_set.srs.id
CONSTRAINT fk_gc_r_srs_id FOREIGN KEY (srs_id) REFERENCES gpkg_spatial_ref_sys(srs_id))
""",
"""
CREATE TABLE IF NOT EXISTS gpkg_spatial_ref_sys
(srs_name TEXT NOT NULL, -- Human readable name of this SRS (Spatial Reference System)
srs_id INTEGER NOT NULL PRIMARY KEY, -- Unique identifier for each Spatial Reference System within a GeoPackage
organization TEXT NOT NULL, -- Case-insensitive name of the defining organization e.g. EPSG or epsg
organization_coordsys_id INTEGER NOT NULL, -- Numeric ID of the Spatial Reference System assigned by the organization
definition TEXT NOT NULL, -- Well-known Text representation of the Spatial Reference System
description TEXT)
""",
"""
CREATE TABLE IF NOT EXISTS gpkg_tile_matrix
(table_name TEXT NOT NULL, -- Tile Pyramid User Data Table Name
zoom_level INTEGER NOT NULL, -- 0 <= zoom_level <= max_level for table_name
matrix_width INTEGER NOT NULL, -- Number of columns (>= 1) in tile matrix at this zoom level
matrix_height INTEGER NOT NULL, -- Number of rows (>= 1) in tile matrix at this zoom level
tile_width INTEGER NOT NULL, -- Tile width in pixels (>= 1) for this zoom level
tile_height INTEGER NOT NULL, -- Tile height in pixels (>= 1) for this zoom level
pixel_x_size DOUBLE NOT NULL, -- In t_table_name srid units or default meters for srid 0 (>0)
pixel_y_size DOUBLE NOT NULL, -- In t_table_name srid units or default meters for srid 0 (>0)
CONSTRAINT pk_ttm PRIMARY KEY (table_name, zoom_level), CONSTRAINT fk_tmm_table_name FOREIGN KEY (table_name) REFERENCES gpkg_contents(table_name))
""",
"""
CREATE TABLE IF NOT EXISTS gpkg_tile_matrix_set
(table_name TEXT NOT NULL PRIMARY KEY, -- Tile Pyramid User Data Table Name
srs_id INTEGER NOT NULL, -- Spatial Reference System ID: gpkg_spatial_ref_sys.srs_id
min_x DOUBLE NOT NULL, -- Bounding box minimum easting or longitude for all content in table_name
min_y DOUBLE NOT NULL, -- Bounding box minimum northing or latitude for all content in table_name
max_x DOUBLE NOT NULL, -- Bounding box maximum easting or longitude for all content in table_name
max_y DOUBLE NOT NULL, -- Bounding box maximum northing or latitude for all content in table_name
CONSTRAINT fk_gtms_table_name FOREIGN KEY (table_name) REFERENCES gpkg_contents(table_name), CONSTRAINT fk_gtms_srs FOREIGN KEY (srs_id) REFERENCES gpkg_spatial_ref_sys (srs_id))
""",
"""
CREATE TABLE IF NOT EXISTS [{0}]
(id INTEGER PRIMARY KEY AUTOINCREMENT, -- Autoincrement primary key
zoom_level INTEGER NOT NULL, -- min(zoom_level) <= zoom_level <= max(zoom_level) for t_table_name
tile_column INTEGER NOT NULL, -- 0 to tile_matrix matrix_width - 1
tile_row INTEGER NOT NULL, -- 0 to tile_matrix matrix_height - 1
tile_data BLOB NOT NULL, -- Of an image MIME type specified in clauses Tile Encoding PNG, Tile Encoding JPEG, Tile Encoding WEBP
UNIQUE (zoom_level, tile_column, tile_row))
""".format(self.table_name)
]
for stmt in stmts:
db.execute(stmt)
db.execute("PRAGMA foreign_keys = 1;")
        # List of WKT execute statements and data.
wkt_statement = """
INSERT OR REPLACE INTO gpkg_spatial_ref_sys (
srs_id,
organization,
organization_coordsys_id,
srs_name,
definition)
VALUES (?, ?, ?, ?, ?)
"""
wkt_entries = [(3857, 'epsg', 3857, 'WGS 84 / Pseudo-Mercator',
"""
PROJCS["WGS 84 / Pseudo-Mercator",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,\
AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],\
UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","9122"]]AUTHORITY["EPSG","4326"]],\
PROJECTION["Mercator_1SP"],PARAMETER["central_meridian",0],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],\
PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH]\
"""
),
(4326, 'epsg', 4326, 'WGS 84',
"""
GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],\
AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,\
AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]\
"""
),
(-1, 'NONE', -1, ' ', 'undefined'),
(0, 'NONE', 0, ' ', 'undefined')
]
if get_epsg_num(self.tile_grid.srs.srs_code) not in [4326, 3857]:
wkt_entries.append((proj, 'epsg', proj, 'Not provided', "Added via Mapproxy."))
db.commit()
# Add geopackage version to the header (1.0)
db.execute("PRAGMA application_id = 1196437808;")
db.commit()
for wkt_entry in wkt_entries:
try:
db.execute(wkt_statement, (wkt_entry[0], wkt_entry[1], wkt_entry[2], wkt_entry[3], wkt_entry[4]))
except sqlite3.IntegrityError:
                log.info("srs_id {0} already exists.".format(wkt_entry[0]))
db.commit()
# Ensure that tile table exists here, don't overwrite a valid entry.
try:
db.execute("""
INSERT INTO gpkg_contents (
table_name,
data_type,
identifier,
description,
min_x,
max_x,
min_y,
max_y,
srs_id)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);
""", (self.table_name,
"tiles",
self.table_name,
"Created with Mapproxy.",
self.tile_grid.bbox[0],
self.tile_grid.bbox[2],
self.tile_grid.bbox[1],
self.tile_grid.bbox[3],
proj))
except sqlite3.IntegrityError:
pass
db.commit()
# Ensure that tile set exists here, don't overwrite a valid entry.
try:
db.execute("""
INSERT INTO gpkg_tile_matrix_set (table_name, srs_id, min_x, max_x, min_y, max_y)
VALUES (?, ?, ?, ?, ?, ?);
""", (
self.table_name, proj, self.tile_grid.bbox[0], self.tile_grid.bbox[2], self.tile_grid.bbox[1],
self.tile_grid.bbox[3]))
except sqlite3.IntegrityError:
pass
db.commit()
tile_size = self.tile_grid.tile_size
for grid, resolution, level in zip(self.tile_grid.grid_sizes,
self.tile_grid.resolutions, range(20)):
db.execute("""INSERT OR REPLACE INTO gpkg_tile_matrix
(table_name, zoom_level, matrix_width, matrix_height, tile_width, tile_height, pixel_x_size, pixel_y_size)
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
""",
(self.table_name, level, grid[0], grid[1], tile_size[0], tile_size[1], resolution, resolution))
db.commit()
db.close()
def is_cached(self, tile):
if tile.coord is None:
return True
if tile.source:
return True
return self.load_tile(tile)
def store_tile(self, tile):
if tile.stored:
return True
return self._store_bulk([tile])
def store_tiles(self, tiles):
tiles = [t for t in tiles if not t.stored]
return self._store_bulk(tiles)
def _store_bulk(self, tiles):
records = []
        # tile_buffer (as_buffer) will encode the tile to the target format.
        # We collect all tiles first, to avoid keeping the db transaction
        # open during this slow encoding.
for tile in tiles:
with tile_buffer(tile) as buf:
if PY2:
content = buffer(buf.read())
else:
content = buf.read()
x, y, level = tile.coord
records.append((level, x, y, content))
cursor = self.db.cursor()
try:
stmt = "INSERT OR REPLACE INTO [{0}] (zoom_level, tile_column, tile_row, tile_data) VALUES (?,?,?,?)".format(
self.table_name)
cursor.executemany(stmt, records)
self.db.commit()
except sqlite3.OperationalError as ex:
log.warn('unable to store tile: %s', ex)
return False
return True
def load_tile(self, tile, with_metadata=False):
if tile.source or tile.coord is None:
return True
cur = self.db.cursor()
cur.execute("""SELECT tile_data FROM [{0}]
WHERE tile_column = ? AND
tile_row = ? AND
zoom_level = ?""".format(self.table_name), tile.coord)
content = cur.fetchone()
if content:
tile.source = ImageSource(BytesIO(content[0]))
return True
else:
return False
def load_tiles(self, tiles, with_metadata=False):
# associate the right tiles with the cursor
tile_dict = {}
coords = []
for tile in tiles:
if tile.source or tile.coord is None:
continue
x, y, level = tile.coord
coords.append(x)
coords.append(y)
coords.append(level)
tile_dict[(x, y)] = tile
if not tile_dict:
# all tiles loaded or coords are None
return True
stmt_base = "SELECT tile_column, tile_row, tile_data FROM [{0}] WHERE ".format(self.table_name)
loaded_tiles = 0
# SQLite is limited to 1000 args -> split into multiple requests if more arguments are needed
while coords:
cur_coords = coords[:999]
stmt = stmt_base + ' OR '.join(
['(tile_column = ? AND tile_row = ? AND zoom_level = ?)'] * (len(cur_coords) // 3))
cursor = self.db.cursor()
cursor.execute(stmt, cur_coords)
for row in cursor:
loaded_tiles += 1
tile = tile_dict[(row[0], row[1])]
data = row[2]
tile.size = len(data)
tile.source = ImageSource(BytesIO(data))
cursor.close()
coords = coords[999:]
return loaded_tiles == len(tile_dict)
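    # For example (illustrative): 400 requested tiles produce 1200 bound
    # parameters, so the loop above issues two statements: one covering 333
    # coordinate triples (999 parameters) and one covering the remaining 67.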
def remove_tile(self, tile):
cursor = self.db.cursor()
cursor.execute(
"DELETE FROM [{0}] WHERE (tile_column = ? AND tile_row = ? AND zoom_level = ?)".format(self.table_name),
tile.coord)
self.db.commit()
if cursor.rowcount:
return True
return False
def remove_level_tiles_before(self, level, timestamp):
if timestamp == 0:
cursor = self.db.cursor()
cursor.execute(
"DELETE FROM [{0}] WHERE (zoom_level = ?)".format(self.table_name), (level,))
self.db.commit()
log.info("Cursor rowcount = {0}".format(cursor.rowcount))
if cursor.rowcount:
return True
return False
def load_tile_metadata(self, tile):
self.load_tile(tile)
class GeopackageLevelCache(TileCacheBase):
def __init__(self, geopackage_dir, tile_grid, table_name, timeout=30, wal=False):
self.lock_cache_id = 'gpkg-' + hashlib.md5(geopackage_dir.encode('utf-8')).hexdigest()
self.cache_dir = geopackage_dir
self.tile_grid = tile_grid
self.table_name = table_name
self.timeout = timeout
self.wal = wal
self._geopackage = {}
self._geopackage_lock = threading.Lock()
def _get_level(self, level):
if level in self._geopackage:
return self._geopackage[level]
with self._geopackage_lock:
if level not in self._geopackage:
geopackage_filename = os.path.join(self.cache_dir, '%s.gpkg' % level)
self._geopackage[level] = GeopackageCache(
geopackage_filename,
self.tile_grid,
self.table_name,
with_timestamps=True,
timeout=self.timeout,
wal=self.wal,
)
return self._geopackage[level]
def cleanup(self):
"""
Close all open connection and remove them from cache.
"""
with self._geopackage_lock:
for gp in self._geopackage.values():
gp.cleanup()
def is_cached(self, tile):
if tile.coord is None:
return True
if tile.source:
return True
return self._get_level(tile.coord[2]).is_cached(tile)
def store_tile(self, tile):
if tile.stored:
return True
return self._get_level(tile.coord[2]).store_tile(tile)
def store_tiles(self, tiles):
failed = False
for level, tiles in itertools.groupby(tiles, key=lambda t: t.coord[2]):
tiles = [t for t in tiles if not t.stored]
res = self._get_level(level).store_tiles(tiles)
if not res: failed = True
        return not failed
def load_tile(self, tile, with_metadata=False):
if tile.source or tile.coord is None:
return True
return self._get_level(tile.coord[2]).load_tile(tile, with_metadata=with_metadata)
def load_tiles(self, tiles, with_metadata=False):
level = None
for tile in tiles:
if tile.source or tile.coord is None:
continue
level = tile.coord[2]
break
if not level:
return True
return self._get_level(level).load_tiles(tiles, with_metadata=with_metadata)
def remove_tile(self, tile):
if tile.coord is None:
return True
return self._get_level(tile.coord[2]).remove_tile(tile)
def remove_level_tiles_before(self, level, timestamp):
level_cache = self._get_level(level)
if timestamp == 0:
level_cache.cleanup()
os.unlink(level_cache.geopackage_file)
return True
else:
return level_cache.remove_level_tiles_before(level, timestamp)
def is_close(a, b, rel_tol=1e-09, abs_tol=0.0):
"""
See PEP 485, added here for legacy versions.
>>> is_close(0.0, 0.0)
True
>>> is_close(1, 1.0)
True
>>> is_close(0.01, 0.001)
False
>>> is_close(0.0001001, 0.0001, rel_tol=1e-02)
True
>>> is_close(0.0001001, 0.0001)
False
@param a: An int or float.
@param b: An int or float.
    @param rel_tol: Relative tolerance - maximum allowed relative difference between the two values.
    @param abs_tol: Absolute tolerance - differences up to this value are considered close regardless of magnitude.
@return: True if the values a and b are close.
"""
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
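# Illustrative sketch (not part of the original module): the doctests embedded
# in _check_table_name() and is_close() above can be exercised directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()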
| olt/mapproxy | mapproxy/cache/geopackage.py | Python | apache-2.0 | 27,948 | 0.003936 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Starting point for routing EC2 requests.
"""
import hashlib
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import netutils
from oslo_utils import timeutils
import requests
import six
import webob
import webob.dec
import webob.exc
from nova.api.ec2 import apirequest
from nova.api.ec2 import ec2utils
from nova.api.ec2 import faults
from nova.api import validator
from nova import context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova.openstack.common import context as common_context
from nova.openstack.common import log as logging
from nova.openstack.common import memorycache
from nova import wsgi
LOG = logging.getLogger(__name__)
ec2_opts = [
cfg.IntOpt('lockout_attempts',
default=5,
help='Number of failed auths before lockout.'),
cfg.IntOpt('lockout_minutes',
default=15,
help='Number of minutes to lockout if triggered.'),
cfg.IntOpt('lockout_window',
default=15,
help='Number of minutes for lockout window.'),
cfg.StrOpt('keystone_ec2_url',
default='http://localhost:5000/v2.0/ec2tokens',
help='URL to get token from ec2 request.'),
cfg.BoolOpt('ec2_private_dns_show_ip',
default=False,
help='Return the IP address as private dns hostname in '
'describe instances'),
cfg.BoolOpt('ec2_strict_validation',
default=True,
help='Validate security group names'
' according to EC2 specification'),
cfg.IntOpt('ec2_timestamp_expiry',
default=300,
help='Time in seconds before ec2 timestamp expires'),
cfg.BoolOpt('keystone_ec2_insecure', default=False, help='Disable SSL '
'certificate verification.'),
]
CONF = cfg.CONF
CONF.register_opts(ec2_opts)
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
CONF.import_group('ssl', 'nova.openstack.common.sslutils')
# Fault Wrapper around all EC2 requests
class FaultWrapper(wsgi.Middleware):
"""Calls the middleware stack, captures any exceptions into faults."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
try:
return req.get_response(self.application)
except Exception as ex:
LOG.exception(_LE("FaultWrapper: %s"), ex)
return faults.Fault(webob.exc.HTTPInternalServerError())
class RequestLogging(wsgi.Middleware):
    """Access-log style logging for all EC2 API requests."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
start = timeutils.utcnow()
rv = req.get_response(self.application)
self.log_request_completion(rv, req, start)
return rv
def log_request_completion(self, response, request, start):
apireq = request.environ.get('ec2.request', None)
if apireq:
controller = apireq.controller
action = apireq.action
else:
controller = None
action = None
ctxt = request.environ.get('nova.context', None)
delta = timeutils.utcnow() - start
seconds = delta.seconds
microseconds = delta.microseconds
LOG.info(
"%s.%ss %s %s %s %s:%s %s [%s] %s %s",
seconds,
microseconds,
request.remote_addr,
request.method,
"%s%s" % (request.script_name, request.path_info),
controller,
action,
response.status_int,
request.user_agent,
request.content_type,
response.content_type,
context=ctxt) # noqa
class Lockout(wsgi.Middleware):
"""Lockout for x minutes on y failed auths in a z minute period.
    x = lockout_minutes flag
    y = lockout_attempts flag
    z = lockout_window flag
    Uses memcached if the memcached_servers option is set, otherwise it
    uses a very simple in-process cache. Due to the simplicity of
    the implementation, the timeout window is started with the first
    failed request, so it will block if there are y failed logins within
that period.
There is a possible race condition where simultaneous requests could
sneak in before the lockout hits, but this is extremely rare and would
only result in a couple of extra failed attempts.
"""
def __init__(self, application):
"""middleware can use fake for testing."""
self.mc = memorycache.get_client()
super(Lockout, self).__init__(application)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
access_key = str(req.params['AWSAccessKeyId'])
failures_key = "authfailures-%s" % access_key
failures = int(self.mc.get(failures_key) or 0)
if failures >= CONF.lockout_attempts:
detail = _("Too many failed authentications.")
raise webob.exc.HTTPForbidden(explanation=detail)
res = req.get_response(self.application)
if res.status_int == 403:
failures = self.mc.incr(failures_key)
if failures is None:
# NOTE(vish): To use incr, failures has to be a string.
self.mc.set(failures_key, '1', time=CONF.lockout_window * 60)
elif failures >= CONF.lockout_attempts:
LOG.warning(_LW('Access key %(access_key)s has had '
'%(failures)d failed authentications and '
'will be locked out for %(lock_mins)d '
'minutes.'),
{'access_key': access_key,
'failures': failures,
'lock_mins': CONF.lockout_minutes})
self.mc.set(failures_key, str(failures),
time=CONF.lockout_minutes * 60)
return res
class EC2KeystoneAuth(wsgi.Middleware):
"""Authenticate an EC2 request with keystone and convert to context."""
def _get_signature(self, req):
"""Extract the signature from the request.
This can be a get/post variable or for version 4 also in a header
called 'Authorization'.
- params['Signature'] == version 0,1,2,3
- params['X-Amz-Signature'] == version 4
- header 'Authorization' == version 4
"""
sig = req.params.get('Signature') or req.params.get('X-Amz-Signature')
if sig is None and 'Authorization' in req.headers:
auth_str = req.headers['Authorization']
sig = auth_str.partition("Signature=")[2].split(',')[0]
return sig
def _get_access(self, req):
"""Extract the access key identifier.
For version 0/1/2/3 this is passed as the AccessKeyId parameter, for
version 4 it is either an X-Amz-Credential parameter or a Credential=
field in the 'Authorization' header string.
"""
access = req.params.get('AWSAccessKeyId')
if access is None:
cred_param = req.params.get('X-Amz-Credential')
if cred_param:
access = cred_param.split("/")[0]
if access is None and 'Authorization' in req.headers:
auth_str = req.headers['Authorization']
cred_str = auth_str.partition("Credential=")[2].split(',')[0]
access = cred_str.split("/")[0]
return access
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
# NOTE(alevine) We need to calculate the hash here because
# subsequent access to request modifies the req.body so the hash
# calculation will yield invalid results.
body_hash = hashlib.sha256(req.body).hexdigest()
request_id = common_context.generate_request_id()
signature = self._get_signature(req)
if not signature:
msg = _("Signature not provided")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=400)
access = self._get_access(req)
if not access:
msg = _("Access key not provided")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=400)
if 'X-Amz-Signature' in req.params or 'Authorization' in req.headers:
auth_params = {}
else:
# Make a copy of args for authentication and signature verification
auth_params = dict(req.params)
# Not part of authentication args
auth_params.pop('Signature', None)
cred_dict = {
'access': access,
'signature': signature,
'host': req.host,
'verb': req.method,
'path': req.path,
'params': auth_params,
'headers': req.headers,
'body_hash': body_hash
}
if "ec2" in CONF.keystone_ec2_url:
creds = {'ec2Credentials': cred_dict}
else:
creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}}
creds_json = jsonutils.dumps(creds)
headers = {'Content-Type': 'application/json'}
verify = not CONF.keystone_ec2_insecure
if verify and CONF.ssl.ca_file:
verify = CONF.ssl.ca_file
cert = None
if CONF.ssl.cert_file and CONF.ssl.key_file:
cert = (CONF.ssl.cert_file, CONF.ssl.key_file)
elif CONF.ssl.cert_file:
cert = CONF.ssl.cert_file
response = requests.request('POST', CONF.keystone_ec2_url,
data=creds_json, headers=headers,
verify=verify, cert=cert)
status_code = response.status_code
if status_code != 200:
msg = response.reason
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=status_code)
result = response.json()
try:
token_id = result['access']['token']['id']
user_id = result['access']['user']['id']
project_id = result['access']['token']['tenant']['id']
user_name = result['access']['user'].get('name')
project_name = result['access']['token']['tenant'].get('name')
roles = [role['name'] for role
in result['access']['user']['roles']]
except (AttributeError, KeyError) as e:
LOG.error(_LE("Keystone failure: %s"), e)
msg = _("Failure parsing response from keystone: %s") % e
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=400)
remote_address = req.remote_addr
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For',
remote_address)
catalog = result['access']['serviceCatalog']
ctxt = context.RequestContext(user_id,
project_id,
user_name=user_name,
project_name=project_name,
roles=roles,
auth_token=token_id,
remote_address=remote_address,
service_catalog=catalog)
req.environ['nova.context'] = ctxt
return self.application
class NoAuth(wsgi.Middleware):
"""Add user:project as 'nova.context' to WSGI environ."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if 'AWSAccessKeyId' not in req.params:
raise webob.exc.HTTPBadRequest()
user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':')
project_id = project_id or user_id
remote_address = req.remote_addr
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
ctx = context.RequestContext(user_id,
project_id,
is_admin=True,
remote_address=remote_address)
req.environ['nova.context'] = ctx
return self.application
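# Illustrative note on NoAuth above (values are assumptions): a request carrying
# AWSAccessKeyId=alice:demo yields user_id='alice' and project_id='demo', while
# AWSAccessKeyId=alice alone maps both user_id and project_id to 'alice'.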
class Requestify(wsgi.Middleware):
def __init__(self, app, controller):
super(Requestify, self).__init__(app)
self.controller = importutils.import_object(controller)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
# Not all arguments are mandatory with v4 signatures, as some data is
# passed in the header, not query arguments.
required_args = ['Action', 'Version']
non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Version', 'Timestamp']
args = dict(req.params)
try:
expired = ec2utils.is_ec2_timestamp_expired(req.params,
expires=CONF.ec2_timestamp_expiry)
if expired:
msg = _("Timestamp failed validation.")
LOG.debug("Timestamp failed validation")
raise webob.exc.HTTPForbidden(explanation=msg)
# Raise KeyError if omitted
action = req.params['Action']
# Fix bug lp:720157 for older (version 1) clients
# If not present assume v4
version = req.params.get('SignatureVersion', 4)
if int(version) == 1:
non_args.remove('SignatureMethod')
if 'SignatureMethod' in args:
args.pop('SignatureMethod')
for non_arg in non_args:
if non_arg in required_args:
# Remove, but raise KeyError if omitted
args.pop(non_arg)
else:
args.pop(non_arg, None)
except KeyError:
raise webob.exc.HTTPBadRequest()
except exception.InvalidRequest as err:
raise webob.exc.HTTPBadRequest(explanation=six.text_type(err))
LOG.debug('action: %s', action)
for key, value in args.items():
LOG.debug('arg: %(key)s\t\tval: %(value)s',
{'key': key, 'value': value})
# Success!
api_request = apirequest.APIRequest(self.controller, action,
req.params['Version'], args)
req.environ['ec2.request'] = api_request
return self.application
class Authorizer(wsgi.Middleware):
"""Authorize an EC2 API request.
Return a 401 if ec2.controller and ec2.action in WSGI environ may not be
executed in nova.context.
"""
def __init__(self, application):
super(Authorizer, self).__init__(application)
self.action_roles = {
'CloudController': {
'DescribeAvailabilityZones': ['all'],
'DescribeRegions': ['all'],
'DescribeSnapshots': ['all'],
'DescribeKeyPairs': ['all'],
'CreateKeyPair': ['all'],
'DeleteKeyPair': ['all'],
'DescribeSecurityGroups': ['all'],
'ImportKeyPair': ['all'],
'AuthorizeSecurityGroupIngress': ['netadmin'],
'RevokeSecurityGroupIngress': ['netadmin'],
'CreateSecurityGroup': ['netadmin'],
'DeleteSecurityGroup': ['netadmin'],
'GetConsoleOutput': ['projectmanager', 'sysadmin'],
'DescribeVolumes': ['projectmanager', 'sysadmin'],
'CreateVolume': ['projectmanager', 'sysadmin'],
'AttachVolume': ['projectmanager', 'sysadmin'],
'DetachVolume': ['projectmanager', 'sysadmin'],
'DescribeInstances': ['all'],
'DescribeAddresses': ['all'],
'AllocateAddress': ['netadmin'],
'ReleaseAddress': ['netadmin'],
'AssociateAddress': ['netadmin'],
'DisassociateAddress': ['netadmin'],
'RunInstances': ['projectmanager', 'sysadmin'],
'TerminateInstances': ['projectmanager', 'sysadmin'],
'RebootInstances': ['projectmanager', 'sysadmin'],
'UpdateInstance': ['projectmanager', 'sysadmin'],
'StartInstances': ['projectmanager', 'sysadmin'],
'StopInstances': ['projectmanager', 'sysadmin'],
'DeleteVolume': ['projectmanager', 'sysadmin'],
'DescribeImages': ['all'],
'DeregisterImage': ['projectmanager', 'sysadmin'],
'RegisterImage': ['projectmanager', 'sysadmin'],
'DescribeImageAttribute': ['all'],
'ModifyImageAttribute': ['projectmanager', 'sysadmin'],
'UpdateImage': ['projectmanager', 'sysadmin'],
'CreateImage': ['projectmanager', 'sysadmin'],
},
'AdminController': {
                # All actions have the same permission: ['none'] (the default).
                # Superusers will be allowed to run them; all others will get
                # HTTPUnauthorized.
},
}
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
context = req.environ['nova.context']
controller = req.environ['ec2.request'].controller.__class__.__name__
action = req.environ['ec2.request'].action
allowed_roles = self.action_roles[controller].get(action, ['none'])
if self._matches_any_role(context, allowed_roles):
return self.application
else:
LOG.audit(_('Unauthorized request for controller=%(controller)s '
'and action=%(action)s'),
{'controller': controller, 'action': action},
context=context)
raise webob.exc.HTTPUnauthorized()
def _matches_any_role(self, context, roles):
"""Return True if any role in roles is allowed in context."""
if context.is_admin:
return True
if 'all' in roles:
return True
if 'none' in roles:
return False
return any(role in context.roles for role in roles)
class Validator(wsgi.Middleware):
def validate_ec2_id(val):
if not validator.validate_str()(val):
return False
try:
ec2utils.ec2_id_to_id(val)
except exception.InvalidEc2Id:
return False
return True
validator.validate_ec2_id = validate_ec2_id
validator.DEFAULT_VALIDATOR = {
'instance_id': validator.validate_ec2_id,
'volume_id': validator.validate_ec2_id,
'image_id': validator.validate_ec2_id,
'attribute': validator.validate_str(),
'image_location': validator.validate_image_path,
'public_ip': netutils.is_valid_ipv4,
'region_name': validator.validate_str(),
'group_name': validator.validate_str(max_length=255),
'group_description': validator.validate_str(max_length=255),
'size': validator.validate_int(),
'user_data': validator.validate_user_data
}
def __init__(self, application):
super(Validator, self).__init__(application)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if validator.validate(req.environ['ec2.request'].args,
validator.DEFAULT_VALIDATOR):
return self.application
else:
raise webob.exc.HTTPBadRequest()
def exception_to_ec2code(ex):
"""Helper to extract EC2 error code from exception.
For other than EC2 exceptions (those without ec2_code attribute),
use exception name.
"""
if hasattr(ex, 'ec2_code'):
code = ex.ec2_code
else:
code = type(ex).__name__
return code
def ec2_error_ex(ex, req, code=None, message=None, unexpected=False):
"""Return an EC2 error response based on passed exception and log
the exception on an appropriate log level:
* DEBUG: expected errors
* ERROR: unexpected errors
All expected errors are treated as client errors and 4xx HTTP
status codes are always returned for them.
Unexpected 5xx errors may contain sensitive information,
suppress their messages for security.
"""
if not code:
code = exception_to_ec2code(ex)
status = getattr(ex, 'code', None)
if not status:
status = 500
if unexpected:
log_fun = LOG.error
log_msg = _LE("Unexpected %(ex_name)s raised: %(ex_str)s")
else:
log_fun = LOG.debug
log_msg = "%(ex_name)s raised: %(ex_str)s"
# NOTE(jruzicka): For compatibility with EC2 API, treat expected
# exceptions as client (4xx) errors. The exception error code is 500
# by default and most exceptions inherit this from NovaException even
# though they are actually client errors in most cases.
if status >= 500:
status = 400
context = req.environ['nova.context']
request_id = context.request_id
log_msg_args = {
'ex_name': type(ex).__name__,
'ex_str': ex
}
log_fun(log_msg, log_msg_args, context=context)
if ex.args and not message and (not unexpected or status < 500):
message = unicode(ex.args[0])
if unexpected:
# Log filtered environment for unexpected errors.
env = req.environ.copy()
for k in env.keys():
if not isinstance(env[k], six.string_types):
env.pop(k)
log_fun(_LE('Environment: %s'), jsonutils.dumps(env))
if not message:
message = _('Unknown error occurred.')
return faults.ec2_error_response(request_id, code, message, status=status)
class Executor(wsgi.Application):
"""Execute an EC2 API request.
Executes 'ec2.action' upon 'ec2.controller', passing 'nova.context' and
'ec2.action_args' (all variables in WSGI environ.) Returns an XML
response, or a 400 upon failure.
"""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
context = req.environ['nova.context']
api_request = req.environ['ec2.request']
try:
result = api_request.invoke(context)
except exception.InstanceNotFound as ex:
ec2_id = ec2utils.id_to_ec2_inst_id(ex.kwargs['instance_id'])
message = ex.msg_fmt % {'instance_id': ec2_id}
return ec2_error_ex(ex, req, message=message)
except exception.VolumeNotFound as ex:
ec2_id = ec2utils.id_to_ec2_vol_id(ex.kwargs['volume_id'])
message = ex.msg_fmt % {'volume_id': ec2_id}
return ec2_error_ex(ex, req, message=message)
except exception.SnapshotNotFound as ex:
ec2_id = ec2utils.id_to_ec2_snap_id(ex.kwargs['snapshot_id'])
message = ex.msg_fmt % {'snapshot_id': ec2_id}
return ec2_error_ex(ex, req, message=message)
except (exception.CannotDisassociateAutoAssignedFloatingIP,
exception.FloatingIpAssociated,
exception.FloatingIpNotFound,
exception.ImageNotActive,
exception.InvalidInstanceIDMalformed,
exception.InvalidVolumeIDMalformed,
exception.InvalidKeypair,
exception.InvalidParameterValue,
exception.InvalidPortRange,
exception.InvalidVolume,
exception.KeyPairExists,
exception.KeypairNotFound,
exception.MissingParameter,
exception.NoFloatingIpInterface,
exception.NoMoreFixedIps,
exception.Forbidden,
exception.QuotaError,
exception.SecurityGroupExists,
exception.SecurityGroupLimitExceeded,
exception.SecurityGroupRuleExists,
exception.VolumeUnattached,
# Following aren't translated to valid EC2 errors.
exception.ImageNotFound,
exception.ImageNotFoundEC2,
exception.InvalidAttribute,
exception.InvalidRequest,
exception.NotFound) as ex:
return ec2_error_ex(ex, req)
except Exception as ex:
return ec2_error_ex(ex, req, unexpected=True)
else:
resp = webob.Response()
resp.status = 200
resp.headers['Content-Type'] = 'text/xml'
resp.body = str(result)
return resp
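# Illustrative note (not part of the original module): in a typical deployment
# these pieces are chained by the paste configuration roughly as
# FaultWrapper -> RequestLogging -> Lockout -> EC2KeystoneAuth (or NoAuth)
# -> Requestify -> Authorizer -> Validator -> Executor; the exact order is an
# assumption here and is defined by the deployment's api-paste.ini.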
| orbitfp7/nova | nova/api/ec2/__init__.py | Python | apache-2.0 | 25,652 | 0.000039 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.platform import test
def GenerateNumpyRandomRGB(shape):
# Only generate floating points that are fractions like n / 256, since they
# are RGB pixels. Some low-precision floating point types in this test can't
# handle arbitrary precision floating points well.
return np.random.randint(0, 256, shape) / 256.
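# For example (illustrative): GenerateNumpyRandomRGB((2, 3)) returns a 2x3 array
# whose entries are drawn from {0/256, 1/256, ..., 255/256}.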
class RGBToHSVTest(xla_test.XLATestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in self.float_types:
inp = GenerateNumpyRandomRGB(shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
with self.cached_session() as sess:
batch0 = array_ops.placeholder(nptype, shape=shape)
with self.test_scope():
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops.unstack(batch0)
with self.test_scope():
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops.stack(split1)
join2 = array_ops.stack(split2)
batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2],
{batch0: inp})
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1)
self.assertAllClose(batch2, join2)
self.assertAllCloseAccordingToType(
batch2, inp, bfloat16_atol=0.03, half_rtol=0.02)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for nptype in self.float_types:
rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
with self.cached_session():
placeholder = array_ops.placeholder(nptype)
with self.test_scope():
hsv = image_ops.rgb_to_hsv(placeholder)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = rgb.eval(feed_dict={placeholder: rgb_np})
self.assertAllCloseAccordingToType(rgb_tf, rgb_np, bfloat16_atol=0.03)
def testRGBToHSVNumpy(self):
"""Tests the RGB to HSV conversion matches a reference implementation."""
for nptype in self.float_types:
rgb_flat = GenerateNumpyRandomRGB((64, 3)).astype(nptype)
rgb_np = rgb_flat.reshape(4, 4, 4, 3)
hsv_np = np.array([
colorsys.rgb_to_hsv(
r.astype(np.float64), g.astype(np.float64), b.astype(np.float64))
for r, g, b in rgb_flat
])
hsv_np = hsv_np.reshape(4, 4, 4, 3)
with self.cached_session():
placeholder = array_ops.placeholder(nptype)
with self.test_scope():
hsv_op = image_ops.rgb_to_hsv(placeholder)
hsv_tf = hsv_op.eval(feed_dict={placeholder: rgb_np})
self.assertAllCloseAccordingToType(hsv_tf, hsv_np)
class AdjustContrastTest(xla_test.XLATestCase):
def _testContrast(self, x_np, y_np, contrast_factor):
with self.cached_session():
x = array_ops.placeholder(x_np.dtype, shape=x_np.shape)
flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
with self.test_scope():
y = image_ops.adjust_contrast(flt_x, contrast_factor)
y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
y_tf = y.eval({x: x_np})
self.assertAllClose(y_tf, y_np, 1e-6)
def testFloatContrast(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float32).reshape(x_shape) / 255.
y_data = [
-45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
134.75, 409.25, -116.5
]
y_np = np.array(y_data, dtype=np.float32).reshape(x_shape) / 255.
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testBatchContrast(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def _adjustContrastNp(self, x_np, contrast_factor):
mean = np.mean(x_np, (1, 2), keepdims=True)
y_np = mean + contrast_factor * (x_np - mean)
return y_np
def _adjustContrastTf(self, x_np, contrast_factor):
with self.cached_session():
x = array_ops.placeholder(np.float32)
with self.test_scope():
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = y.eval({x: x_np})
return y_tf
def testRandomContrast(self):
x_shapes = [
[1, 2, 2, 3],
[2, 1, 2, 3],
[1, 2, 2, 3],
[2, 5, 5, 3],
[2, 1, 1, 3],
]
for x_shape in x_shapes:
x_np = np.random.rand(*x_shape) * 255.
contrast_factor = np.random.rand() * 2.0 + 0.1
y_np = self._adjustContrastNp(x_np, contrast_factor)
y_tf = self._adjustContrastTf(x_np, contrast_factor)
self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
class AdjustHueTest(xla_test.XLATestCase):
def testAdjustNegativeHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = -0.25
y_data = [0, 13, 1, 54, 226, 59, 8, 234, 150, 255, 39, 1]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
with self.test_scope():
y = gen_image_ops.adjust_hue(flt_x, delta)
y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def testAdjustPositiveHue(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
with self.test_scope():
y = gen_image_ops.adjust_hue(flt_x, delta)
y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def testBatchAdjustHue(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
delta = 0.25
y_data = [13, 0, 11, 226, 54, 221, 234, 8, 92, 1, 217, 255]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
flt_x = image_ops.convert_image_dtype(x, dtypes.float32)
with self.test_scope():
y = gen_image_ops.adjust_hue(flt_x, delta)
y = image_ops.convert_image_dtype(y, x.dtype, saturate=True)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def _adjustHueNp(self, x_np, delta_h):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
h += delta_h
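      # Adding 10.0 before the fmod keeps the adjusted hue in [0, 1) even for
      # negative deltas, because math.fmod keeps the sign of its first
      # argument.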
h = math.fmod(h + 10.0, 1.0)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def _adjustHueTf(self, x_np, delta_h):
with self.cached_session():
x = array_ops.placeholder(dtypes.float32)
with self.test_scope():
y = gen_image_ops.adjust_hue(x, delta_h)
y_tf = y.eval({x: x_np})
return y_tf
def testAdjustRandomHue(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_np = self._adjustHueNp(x_np, delta_h)
y_tf = self._adjustHueTf(x_np, delta_h)
self.assertAllClose(y_tf, y_np, rtol=2e-5, atol=1e-4)
def testInvalidShapes(self):
fused = False
if not fused:
# The tests are known to pass with the fused adjust_hue. We will enable
# them when the fused implementation is the default.
return
x_np = np.random.rand(2, 3) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
fused = False
with self.assertRaisesRegexp(ValueError, "Shape must be at least rank 3"):
self._adjustHueTf(x_np, delta_h)
x_np = np.random.rand(4, 2, 4) * 255.
delta_h = np.random.rand() * 2.0 - 1.0
with self.assertRaisesOpError("input must have 3 channels"):
self._adjustHueTf(x_np, delta_h)
class AdjustSaturationTest(xla_test.XLATestCase):
def _adjust_saturation(self, image, saturation_factor):
image = ops.convert_to_tensor(image, name="image")
orig_dtype = image.dtype
flt_image = image_ops.convert_image_dtype(image, dtypes.float32)
with self.test_scope():
saturation_adjusted_image = gen_image_ops.adjust_saturation(
flt_image, saturation_factor)
return image_ops.convert_image_dtype(saturation_adjusted_image, orig_dtype)
def testHalfSaturation(self):
x_shape = [2, 2, 3]
x_rgb_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_rgb_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 0.5
y_rgb_data = [6, 9, 13, 140, 180, 226, 135, 121, 234, 172, 255, 128]
y_np = np.array(y_rgb_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
y = self._adjust_saturation(x, saturation_factor)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def testTwiceSaturation(self):
x_shape = [2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
saturation_factor = 2.0
y_data = [0, 5, 13, 0, 106, 226, 30, 0, 234, 89, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
with self.cached_session():
x = array_ops.placeholder(x_np.dtype, shape=x_shape)
y = self._adjust_saturation(x, saturation_factor)
y_tf = y.eval({x: x_np})
self.assertAllEqual(y_tf, y_np)
def _adjustSaturationNp(self, x_np, scale):
self.assertEqual(x_np.shape[-1], 3)
x_v = x_np.reshape([-1, 3])
y_v = np.ndarray(x_v.shape, dtype=x_v.dtype)
channel_count = x_v.shape[0]
for i in xrange(channel_count):
r = x_v[i][0]
g = x_v[i][1]
b = x_v[i][2]
h, s, v = colorsys.rgb_to_hsv(r, g, b)
s *= scale
s = min(1.0, max(0.0, s))
r, g, b = colorsys.hsv_to_rgb(h, s, v)
y_v[i][0] = r
y_v[i][1] = g
y_v[i][2] = b
return y_v.reshape(x_np.shape)
def testAdjustRandomSaturation(self):
x_shapes = [
[2, 2, 3],
[4, 2, 3],
[2, 4, 3],
[2, 5, 3],
[1000, 1, 3],
]
test_styles = [
"all_random",
"rg_same",
"rb_same",
"gb_same",
"rgb_same",
]
with self.cached_session():
for x_shape in x_shapes:
for test_style in test_styles:
x_np = np.random.rand(*x_shape) * 255.
scale = np.random.rand()
if test_style == "all_random":
pass
elif test_style == "rg_same":
x_np[..., 1] = x_np[..., 0]
elif test_style == "rb_same":
x_np[..., 2] = x_np[..., 0]
elif test_style == "gb_same":
x_np[..., 2] = x_np[..., 1]
elif test_style == "rgb_same":
x_np[..., 1] = x_np[..., 0]
x_np[..., 2] = x_np[..., 0]
else:
raise AssertionError("Invalid test style: %s" % (test_style))
y_baseline = self._adjustSaturationNp(x_np, scale)
x = array_ops.placeholder(dtypes.float32, shape=x_shape)
with self.test_scope():
y_fused = self._adjust_saturation(x,
scale).eval(feed_dict={x: x_np})
self.assertAllClose(y_fused, y_baseline, rtol=2e-5, atol=1e-5)
class ResizeNearestNeighborTest(xla_test.XLATestCase):
# TODO(ilch): Wrap each test with `for dtype in self.float_types:`
# Some work to understand how that should be done was presented here:
# cl/227850213
def _assertForwardOpMatchesExpected(self,
image_np,
target_shape,
expected=None,
large_tolerance=False,
align_corners=True):
if expected is None:
self.fail("expected must be specified")
with self.cached_session() as sess, self.test_scope():
image = array_ops.placeholder(image_np.dtype)
resized = gen_image_ops.resize_nearest_neighbor(
image, target_shape, align_corners=align_corners)
out = sess.run(resized, {image: image_np[np.newaxis, :, :, np.newaxis]})
if large_tolerance:
self.assertAllClose(
expected[np.newaxis, :, :, np.newaxis], out, rtol=0.03, atol=0.1)
else:
self.assertAllClose(expected[np.newaxis, :, :, np.newaxis], out)
def testAlignCorners2x2To1x1(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2], [3, 4]], dtype=np.float32), [1, 1],
expected=np.array([[1]], dtype=np.float32))
def testAlignCorners1x1To2x2(self):
self._assertForwardOpMatchesExpected(
np.array([[1]], dtype=np.float32), [2, 2],
expected=np.array([[1, 1], [1, 1]], dtype=np.float32))
def testAlignCorners1x1To3x3(self):
self._assertForwardOpMatchesExpected(
np.array([[1]], dtype=np.float32), [3, 3],
expected=np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=np.float32))
def testAlignCorners2x2To3x3(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2], [3, 4]], dtype=np.float32), [3, 3],
expected=np.array([[1, 2, 2], [3, 4, 4], [3, 4, 4]], dtype=np.float32))
def testAlignCorners2x2To4x4(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2], [3, 4]], dtype=np.float32), [4, 4],
expected=np.array(
[[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4]],
dtype=np.float32))
def testAlignCorners3x3To2x2(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [2, 2],
expected=np.array([[1, 3], [7, 9]], dtype=np.float32))
def testAlignCorners4x4To3x3(self):
self._assertForwardOpMatchesExpected(
np.array(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]],
dtype=np.float32), [3, 3],
expected=np.array([[1, 3, 4], [9, 11, 12], [13, 15, 16]],
dtype=np.float32))
def testAlignCorners3x3To4x4(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [4, 4],
expected=np.array(
[[1, 2, 2, 3], [4, 5, 5, 6], [4, 5, 5, 6], [7, 8, 8, 9]],
dtype=np.float32))
def testAlignCorners3x3To6x6(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [6, 6],
expected=np.array(
[[1, 1, 2, 2, 3, 3], [1, 1, 2, 2, 3, 3], [4, 4, 5, 5, 6, 6],
[4, 4, 5, 5, 6, 6], [7, 7, 8, 8, 9, 9], [7, 7, 8, 8, 9, 9]],
dtype=np.float32))
def testAlignCorners3x3To9x9(self):
# The expected matrix might look uneven in terms of how many of each number
# there is, but this is an artifact of doing the dilation and convolution
# iteratively. The behavior is less esoteric in the 3x3To12x12 case below.
self._assertForwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [9, 9],
expected=np.array(
[[1, 2, 2, 2, 2, 3, 3, 3, 3], [4, 5, 5, 5, 5, 6, 6, 6, 6],
[4, 5, 5, 5, 5, 6, 6, 6, 6], [4, 5, 5, 5, 5, 6, 6, 6, 6],
[4, 5, 5, 5, 5, 6, 6, 6, 6], [7, 8, 8, 8, 8, 9, 9, 9, 9],
[7, 8, 8, 8, 8, 9, 9, 9, 9], [7, 8, 8, 8, 8, 9, 9, 9, 9],
[7, 8, 8, 8, 8, 9, 9, 9, 9]],
dtype=np.float32))
def testAlignCorners3x3To12x12(self):
self._assertForwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32), [12, 12],
expected=np.array([[1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3],
[1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3],
[1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6],
[7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9],
[7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9],
[7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9]],
dtype=np.float32))
class ResizeBilinearTest(xla_test.XLATestCase):
def _assertForwardOpMatchesExpected(self,
image_np,
target_shape,
expected=None,
large_tolerance=False,
align_corners=True):
if expected is None:
self.fail("expected must be specified")
with self.cached_session() as sess, self.test_scope():
image = array_ops.placeholder(image_np.dtype)
resized = gen_image_ops.resize_bilinear(
image, target_shape, align_corners=align_corners)
out = sess.run(resized, {image: image_np[np.newaxis, :, :, np.newaxis]})
if large_tolerance:
self.assertAllClose(
expected[np.newaxis, :, :, np.newaxis], out, rtol=0.03, atol=0.1)
else:
self.assertAllClose(expected[np.newaxis, :, :, np.newaxis], out)
def _assertBackwardOpMatchesExpected(self,
grads_np,
input_shape=None,
dtype=None,
expected=None):
if input_shape is None:
self.fail("input_shape must be specified")
if expected is None:
self.fail("expected must be specified")
with self.cached_session() as sess, self.test_scope():
dtype = dtype or np.float32
grads = array_ops.placeholder(np.float32)
resized = gen_image_ops.resize_bilinear_grad(
grads,
np.zeros([1, input_shape[0], input_shape[1], 1], dtype=dtype),
align_corners=True)
out = sess.run(resized, {grads: grads_np[np.newaxis, :, :, np.newaxis]})
self.assertAllCloseAccordingToType(expected[np.newaxis, :, :, np.newaxis],
out)
def testAlignCorners1x2To3x3(self):
for dtype in self.float_types:
self._assertForwardOpMatchesExpected(
np.array([[1, 2]], dtype=dtype), [3, 3],
expected=np.array([[1, 1.5, 2], [1, 1.5, 2], [1, 1.5, 2]],
dtype=np.float32))
def testAlignCorners1x2To3x3Grad(self):
for dtype in self.float_types:
self._assertBackwardOpMatchesExpected(
np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32),
input_shape=[1, 2],
dtype=dtype,
expected=np.array([[9, 12]], dtype=np.float32))
def testAlignCorners2x2To1x1(self):
for dtype in self.float_types:
self._assertForwardOpMatchesExpected(
np.array([[1, 2], [3, 4]], dtype=dtype), [1, 1],
expected=np.array([[1]], dtype=np.float32))
def testAlignCorners2x2To1x1Grad(self):
for dtype in self.float_types:
self._assertBackwardOpMatchesExpected(
np.array([[7]], dtype=np.float32),
input_shape=[2, 2],
dtype=dtype,
expected=np.array([[7, 0], [0, 0]], dtype=np.float32))
def testAlignCorners2x2To3x3(self):
for dtype in self.float_types:
self._assertForwardOpMatchesExpected(
np.array([[1, 2], [3, 4]], dtype=dtype), [3, 3],
expected=np.array([[1, 1.5, 2], [2, 2.5, 3], [3, 3.5, 4]],
dtype=np.float32))
def testAlignCorners2x2To3x3Grad(self):
self._assertBackwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32),
input_shape=[2, 2],
expected=np.array([[5.25, 8.25], [14.25, 17.25]], dtype=np.float32))
def testAlignCorners3x3To2x2(self):
for dtype in self.float_types:
self._assertForwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype), [2, 2],
expected=np.array([[1, 3], [7, 9]], dtype=np.float32))
def testAlignCorners3x3To2x2Grad(self):
for dtype in self.float_types:
self._assertBackwardOpMatchesExpected(
np.array([[7, 13], [22, 4]], dtype=np.float32),
input_shape=[3, 3],
dtype=dtype,
expected=np.array([[7, 0, 13], [0, 0, 0], [22, 0, 4]],
dtype=np.float32))
def testAlignCorners4x4To3x3(self):
for dtype in self.float_types:
self._assertForwardOpMatchesExpected(
np.array(
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]],
dtype=dtype), [3, 3],
expected=np.array([[1, 2.5, 4], [7, 8.5, 10], [13, 14.5, 16]],
dtype=np.float32))
def testAlignCorners4x4To3x3Grad(self):
for dtype in self.float_types:
self._assertBackwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32),
input_shape=[4, 4],
dtype=dtype,
expected=np.array([[1, 1, 1, 3], [2, 1.25, 1.25, 3],
[2, 1.25, 1.25, 3], [7, 4, 4, 9]],
dtype=np.float32))
def testAlignCorners3x3To9x9(self):
for dtype in self.float_types:
self._assertForwardOpMatchesExpected(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=dtype), [9, 9],
expected=np.array(
[[1.0, 1.25, 1.50, 1.75, 2.00, 2.25, 2.50, 2.75, 3.00],
[1.75, 2.00, 2.25, 2.50, 2.75, 3.00, 3.25, 3.50, 3.75],
[2.50, 2.75, 3.00, 3.25, 3.50, 3.75, 4.00, 4.25, 4.50],
[3.25, 3.50, 3.75, 4.00, 4.25, 4.50, 4.75, 5.00, 5.25],
[4.00, 4.25, 4.50, 4.75, 5.00, 5.25, 5.50, 5.75, 6.00],
[4.75, 5.00, 5.25, 5.50, 5.75, 6.00, 6.25, 6.50, 6.75],
[5.50, 5.75, 6.00, 6.25, 6.50, 6.75, 7.00, 7.25, 7.50],
[6.25, 6.50, 6.75, 7.00, 7.25, 7.50, 7.75, 8.00, 8.25],
[7.00, 7.25, 7.50, 7.75, 8.00, 8.25, 8.50, 8.75, 9.00]],
dtype=np.float32))
def testAlignCorners3x3To9x9Grad(self):
for dtype in self.float_types:
self._assertBackwardOpMatchesExpected(
np.array([[1.00, 1.25, 1.50, 1.75, 2.00, 2.25, 2.50, 2.75, 3.00],
[1.75, 2.00, 2.25, 2.50, 2.75, 3.00, 3.25, 3.50, 3.75],
[2.50, 2.75, 3.00, 3.25, 3.50, 3.75, 4.00, 4.25, 4.50],
[3.25, 3.50, 3.75, 4.00, 4.25, 4.50, 4.75, 5.00, 5.25],
[4.00, 4.25, 4.50, 4.75, 5.00, 5.25, 5.50, 5.75, 6.00],
[4.75, 5.00, 5.25, 5.50, 5.75, 6.00, 6.25, 6.50, 6.75],
[5.50, 5.75, 6.00, 6.25, 6.50, 6.75, 7.00, 7.25, 7.50],
[6.25, 6.50, 6.75, 7.00, 7.25, 7.50, 7.75, 8.00, 8.25],
[7.00, 7.25, 7.50, 7.75, 8.00, 8.25, 8.50, 8.75, 9.00]],
dtype=np.float32),
input_shape=[3, 3],
dtype=dtype,
expected=np.array(
[[12.5, 27.5, 21.875], [42.5, 80.0, 57.5], [40.625, 72.5, 50]],
dtype=np.float32))
def testAlignCorners4x4To8x8(self):
self._assertForwardOpMatchesExpected(
(np.array([[0, 1, 2, 3]], dtype=np.float32) + np.array(
[[0], [1], [2], [3]], dtype=np.float32)) * 7.0, [8, 8],
expected=3 *
(np.array([[0, 1, 2, 3, 4, 5, 6, 7]], dtype=np.float32) + np.array(
[[0], [1], [2], [3], [4], [5], [6], [7]], dtype=np.float32)),
large_tolerance=True)
def testAlignCorners8x8To16x16(self):
self._assertForwardOpMatchesExpected(
(np.array([[0, 1, 2, 3, 4, 5, 6, 7]], dtype=np.float32) + np.array(
[[0], [1], [2], [3], [4], [5], [6], [7]], dtype=np.float32)) * 15.0,
[16, 16],
expected=7 *
(np.array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]],
dtype=np.float32) +
np.array([[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11],
[12], [13], [14], [15]],
dtype=np.float32)),
large_tolerance=True)
def testNonAlignCorners3x2To6x4(self):
input_data = [[64, 32], [32, 64], [50, 100]]
expected_data = [[64.0, 48.0, 32.0, 32.0], [48.0, 48.0, 48.0, 48.0],
[32.0, 48.0, 64.0, 64.0], [41.0, 61.5, 82.0, 82.0],
[50.0, 75.0, 100.0, 100.0], [50.0, 75.0, 100.0, 100.0]]
for dtype in self.float_types:
self._assertForwardOpMatchesExpected(
np.array(input_data, dtype=dtype), [6, 4],
expected=np.array(expected_data, dtype=np.float32),
align_corners=False)
def testNonAlignCorners6x4To3x2(self):
input_data = [[127, 127, 64, 64], [127, 127, 64, 64], [64, 64, 127, 127],
[64, 64, 127, 127], [50, 50, 100, 100], [50, 50, 100, 100]]
expected_data = [[127, 64], [64, 127], [50, 100]]
for dtype in self.float_types:
self._assertForwardOpMatchesExpected(
np.array(input_data, dtype=dtype), [3, 2],
expected=np.array(expected_data, dtype=dtype),
align_corners=False)
def testNonAlignCorners3x2To6x4Batch2(self):
input_data = [[[64, 32], [32, 64], [50, 100]], [[32, 16], [16, 32],
[25, 50]]]
expected_data = [[[64.0, 48.0, 32.0, 32.0], [48.0, 48.0, 48.0, 48.0],
[32.0, 48.0, 64.0, 64.0], [41.0, 61.5, 82.0, 82.0],
[50.0, 75.0, 100.0, 100.0], [50.0, 75.0, 100.0, 100.0]],
[[32.0, 24.0, 16.0, 16.0], [24.0, 24.0, 24.0, 24.0],
[16.0, 24.0, 32.0, 32.0], [20.5, 30.75, 41.0, 41.0],
[25.0, 37.5, 50.0, 50.0], [25.0, 37.5, 50.0, 50.0]]]
for dtype in self.float_types:
input_image = np.array(input_data, dtype=dtype)
expected = np.array(expected_data, dtype=dtype)
with self.cached_session() as sess, self.test_scope():
image = array_ops.placeholder(input_image.dtype)
resized = gen_image_ops.resize_bilinear(
image, [6, 4], align_corners=False)
out = sess.run(resized, {image: input_image[:, :, :, np.newaxis]})
self.assertAllClose(expected[:, :, :, np.newaxis], out)
class NonMaxSuppressionTest(xla_test.XLATestCase):
def testNMS128From1024(self):
num_boxes = 1024
boxes_np = np.random.normal(50, 10, (num_boxes, 4)).astype("f4")
scores_np = np.random.normal(0.5, 0.1, (num_boxes,)).astype("f4")
max_output_size = 128
iou_threshold_np = np.array(0.5, dtype=np.float32)
score_threshold_np = np.array(0.0, dtype=np.float32)
with self.cached_session() as sess:
boxes = array_ops.placeholder(boxes_np.dtype, shape=boxes_np.shape)
scores = array_ops.placeholder(scores_np.dtype, shape=scores_np.shape)
iou_threshold = array_ops.placeholder(iou_threshold_np.dtype,
iou_threshold_np.shape)
score_threshold = array_ops.placeholder(score_threshold_np.dtype,
score_threshold_np.shape)
with self.test_scope():
selected_indices = image_ops.non_max_suppression_padded(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pad_to_max_output_size=True)
inputs_feed = {
boxes: boxes_np,
scores: scores_np,
score_threshold: score_threshold_np,
iou_threshold: iou_threshold_np
}
(indices_tf, _) = sess.run(selected_indices, feed_dict=inputs_feed)
self.assertEqual(indices_tf.size, max_output_size)
def testNMS3From6Boxes(self):
# Three boxes are selected based on IOU.
boxes_data = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
boxes_np = np.array(boxes_data, dtype=np.float32)
scores_data = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
scores_np = np.array(scores_data, dtype=np.float32)
max_output_size = 3
iou_threshold_np = np.array(0.5, dtype=np.float32)
score_threshold_np = np.array(0.0, dtype=np.float32)
with self.cached_session() as sess:
boxes = array_ops.placeholder(boxes_np.dtype, shape=boxes_np.shape)
scores = array_ops.placeholder(scores_np.dtype, shape=scores_np.shape)
iou_threshold = array_ops.placeholder(iou_threshold_np.dtype,
iou_threshold_np.shape)
score_threshold = array_ops.placeholder(score_threshold_np.dtype,
score_threshold_np.shape)
with self.test_scope():
selected_indices = image_ops.non_max_suppression_padded(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pad_to_max_output_size=True)
inputs_feed = {
boxes: boxes_np,
scores: scores_np,
score_threshold: score_threshold_np,
iou_threshold: iou_threshold_np
}
(indices_tf, num_valid) = sess.run(
selected_indices, feed_dict=inputs_feed)
self.assertEqual(indices_tf.size, max_output_size)
self.assertEqual(num_valid, 3)
self.assertAllClose(indices_tf[:num_valid], [3, 0, 5])
def testNMS3Then2WithScoreThresh(self):
# Three boxes are selected based on IOU.
# One is filtered out by score threshold.
boxes_data = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
boxes_np = np.array(boxes_data, dtype=np.float32)
scores_data = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
scores_np = np.array(scores_data, dtype=np.float32)
max_output_size = 3
iou_threshold_np = np.array(0.5, dtype=np.float32)
score_threshold_np = np.array(0.4, dtype=np.float32)
with self.cached_session() as sess:
boxes = array_ops.placeholder(boxes_np.dtype, shape=boxes_np.shape)
scores = array_ops.placeholder(scores_np.dtype, shape=scores_np.shape)
iou_threshold = array_ops.placeholder(iou_threshold_np.dtype,
iou_threshold_np.shape)
score_threshold = array_ops.placeholder(score_threshold_np.dtype,
score_threshold_np.shape)
with self.test_scope():
selected_indices = image_ops.non_max_suppression_padded(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pad_to_max_output_size=True)
inputs_feed = {
boxes: boxes_np,
scores: scores_np,
iou_threshold: iou_threshold_np,
score_threshold: score_threshold_np
}
(indices_tf, num_valid) = sess.run(
selected_indices, feed_dict=inputs_feed)
self.assertEqual(indices_tf.size, max_output_size)
self.assertEqual(num_valid, 2)
self.assertAllClose(indices_tf[:num_valid], [3, 0])
def testNMS3Then1WithScoreMaxThresh(self):
# Three boxes are selected based on IOU.
# One is filtered out by score threshold.
# One is filtered out by max_output_size.
boxes_data = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
boxes_np = np.array(boxes_data, dtype=np.float32)
scores_data = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
scores_np = np.array(scores_data, dtype=np.float32)
max_output_size = 1
iou_threshold_np = np.array(0.5, dtype=np.float32)
score_threshold_np = np.array(0.4, dtype=np.float32)
with self.cached_session() as sess:
boxes = array_ops.placeholder(boxes_np.dtype, shape=boxes_np.shape)
scores = array_ops.placeholder(scores_np.dtype, shape=scores_np.shape)
iou_threshold = array_ops.placeholder(iou_threshold_np.dtype,
iou_threshold_np.shape)
score_threshold = array_ops.placeholder(score_threshold_np.dtype,
score_threshold_np.shape)
with self.test_scope():
selected_indices = image_ops.non_max_suppression_padded(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pad_to_max_output_size=True)
inputs_feed = {
boxes: boxes_np,
scores: scores_np,
iou_threshold: iou_threshold_np,
score_threshold: score_threshold_np
}
(indices_tf, num_valid) = sess.run(
selected_indices, feed_dict=inputs_feed)
self.assertEqual(indices_tf.size, max_output_size)
self.assertEqual(num_valid, 1)
self.assertAllClose(indices_tf[:num_valid], [3])
def testSelectFromContinuousOverLap(self):
# Tests that a suppressed box does not itself suppress other boxes.
boxes_data = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 3]]
boxes_np = np.array(boxes_data, dtype=np.float32)
scores_data = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
scores_np = np.array(scores_data, dtype=np.float32)
max_output_size = 3
iou_threshold_np = np.array(0.5, dtype=np.float32)
score_threshold_np = np.array(0.1, dtype=np.float32)
with self.cached_session() as sess:
boxes = array_ops.placeholder(boxes_np.dtype, shape=boxes_np.shape)
scores = array_ops.placeholder(scores_np.dtype, shape=scores_np.shape)
iou_threshold = array_ops.placeholder(iou_threshold_np.dtype,
iou_threshold_np.shape)
score_threshold = array_ops.placeholder(score_threshold_np.dtype,
score_threshold_np.shape)
with self.test_scope():
selected_indices = image_ops.non_max_suppression_padded(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
pad_to_max_output_size=True)
inputs_feed = {
boxes: boxes_np,
scores: scores_np,
iou_threshold: iou_threshold_np,
score_threshold: score_threshold_np
}
(indices_tf, num_valid) = sess.run(
selected_indices, feed_dict=inputs_feed)
self.assertEqual(indices_tf.size, max_output_size)
self.assertEqual(num_valid, 3)
self.assertAllClose(indices_tf[:num_valid], [0, 2, 4])
if __name__ == "__main__":
test.main()
| apark263/tensorflow | tensorflow/compiler/tests/image_ops_test.py | Python | apache-2.0 | 38,279 | 0.006923 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
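        # Converts ArtistMedia to multi-table inheritance from
        # front_material.FrontMedia: the duplicated media columns are dropped
        # and replaced by a frontmedia_ptr one-to-one key.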
# Deleting field 'ArtistMedia.is_default_image'
db.delete_column(u'artist_artistmedia', 'is_default_image')
# Deleting field 'ArtistMedia.name'
db.delete_column(u'artist_artistmedia', 'name')
# Deleting field 'ArtistMedia.video_link'
db.delete_column(u'artist_artistmedia', 'video_link')
# Deleting field 'ArtistMedia.full_res_image'
db.delete_column(u'artist_artistmedia', 'full_res_image')
# Deleting field 'ArtistMedia.image'
db.delete_column(u'artist_artistmedia', 'image')
# Deleting field 'ArtistMedia.id'
db.delete_column(u'artist_artistmedia', u'id')
# Deleting field 'ArtistMedia.thumbnail'
db.delete_column(u'artist_artistmedia', 'thumbnail')
# Adding field 'ArtistMedia.frontmedia_ptr'
db.add_column(u'artist_artistmedia', u'frontmedia_ptr',
self.gf('django.db.models.fields.related.OneToOneField')(default=-1, to=orm['front_material.FrontMedia'], unique=True, primary_key=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'ArtistMedia.is_default_image'
db.add_column(u'artist_artistmedia', 'is_default_image',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
# Adding field 'ArtistMedia.name'
db.add_column(u'artist_artistmedia', 'name',
self.gf('django.db.models.fields.CharField')(default='', max_length=100),
keep_default=False)
# Adding field 'ArtistMedia.video_link'
db.add_column(u'artist_artistmedia', 'video_link',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'ArtistMedia.full_res_image'
db.add_column(u'artist_artistmedia', 'full_res_image',
self.gf('django.db.models.fields.files.ImageField')(default='', max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'ArtistMedia.image'
db.add_column(u'artist_artistmedia', 'image',
self.gf('django.db.models.fields.files.ImageField')(default='', max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'ArtistMedia.id'
db.add_column(u'artist_artistmedia', u'id',
self.gf('django.db.models.fields.AutoField')(default=1, primary_key=True),
keep_default=False)
# Adding field 'ArtistMedia.thumbnail'
db.add_column(u'artist_artistmedia', 'thumbnail',
self.gf('django.db.models.fields.files.ImageField')(default='', max_length=100, null=True, blank=True),
keep_default=False)
# Deleting field 'ArtistMedia.frontmedia_ptr'
db.delete_column(u'artist_artistmedia', u'frontmedia_ptr_id')
models = {
u'artist.artist': {
'Meta': {'object_name': 'Artist'},
'artist_statement': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'slug': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'artist.artistmedia': {
'Meta': {'object_name': 'ArtistMedia', '_ormbases': [u'front_material.FrontMedia']},
'artist': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['artist.Artist']"}),
u'frontmedia_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['front_material.FrontMedia']", 'unique': 'True', 'primary_key': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'front_material.frontmedia': {
'Meta': {'object_name': 'FrontMedia'},
'full_res_image': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'is_default_image': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'video_link': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['artist']
| hcwiley/the-front | the_front/the_front/artist/migrations/0005_auto__del_field_artistmedia_is_default_image__del_field_artistmedia_na.py | Python | gpl-2.0 | 8,560 | 0.006075 |
import unittest
import os
import numpy
from rmgpy.tools.canteraModel import findIgnitionDelay, CanteraCondition, Cantera
from rmgpy.quantity import Quantity
import rmgpy
class CanteraTest(unittest.TestCase):
def testIgnitionDelay(self):
"""
Test that findIgnitionDelay() works.
"""
t = numpy.arange(0,5,0.5)
P = numpy.array([0,0.33,0.5,0.9,2,4,15,16,16.1,16.2])
OH = numpy.array([0,0.33,0.5,0.9,2,4,15,16,7,2])
CO = OH*0.9
t_ign = findIgnitionDelay(t,P)
self.assertEqual(t_ign,2.75)
t_ign = findIgnitionDelay(t,OH,'maxHalfConcentration')
self.assertEqual(t_ign,3)
t_ign = findIgnitionDelay(t,[OH,CO], 'maxSpeciesConcentrations')
self.assertEqual(t_ign,3.5)
def testRepr(self):
"""
Test that the repr function for a CanteraCondition object can reconstitute
the same object
"""
reactorType='IdealGasReactor'
molFrac={'CC': 0.05, '[Ar]': 0.95}
P=(3,'atm')
T=(1500,'K')
terminationTime=(5e-5,'s')
condition = CanteraCondition(reactorType,
terminationTime,
molFrac,
T0=T,
P0=P)
reprCondition=eval(condition.__repr__())
self.assertEqual(reprCondition.T0.value_si,Quantity(T).value_si)
self.assertEqual(reprCondition.P0.value_si,Quantity(P).value_si)
self.assertEqual(reprCondition.V0,None)
self.assertEqual(reprCondition.molFrac,molFrac)
class RMGToCanteraTest(unittest.TestCase):
"""
Contains unit tests for the conversion of RMG species and reaction objects to Cantera objects.
"""
def setUp(self):
"""
A function run before each unit test in this class.
"""
from rmgpy.chemkin import loadChemkinFile
folder = os.path.join(os.path.dirname(rmgpy.__file__),'tools/data/various_kinetics')
chemkinPath = os.path.join(folder, 'chem_annotated.inp')
dictionaryPath = os.path.join(folder, 'species_dictionary.txt')
transportPath = os.path.join(folder, 'tran.dat')
species, reactions = loadChemkinFile(chemkinPath, dictionaryPath,transportPath)
self.rmg_ctSpecies = [spec.toCantera() for spec in species]
self.rmg_ctReactions = []
for rxn in reactions:
convertedReactions = rxn.toCantera(species)
if isinstance(convertedReactions,list):
self.rmg_ctReactions.extend(convertedReactions)
else:
self.rmg_ctReactions.append(convertedReactions)
job = Cantera()
job.loadChemkinModel(chemkinPath, transportFile=transportPath,quiet=True)
self.ctSpecies = job.model.species()
self.ctReactions = job.model.reactions()
def testSpeciesConversion(self):
"""
Test that species objects convert properly
"""
from rmgpy.tools.canteraModel import checkEquivalentCanteraSpecies
for i in range(len(self.ctSpecies)):
self.assertTrue(checkEquivalentCanteraSpecies(self.ctSpecies[i],self.rmg_ctSpecies[i]))
def testReactionConversion(self):
"""
        Test that reaction objects convert properly
"""
from rmgpy.tools.canteraModel import checkEquivalentCanteraReaction
for i in range(len(self.ctReactions)):
self.assertTrue(checkEquivalentCanteraReaction(self.ctReactions[i],self.rmg_ctReactions[i]))
| chatelak/RMG-Py | rmgpy/tools/canteraTest.py | Python | mit | 3,597 | 0.020573 |
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from elasticsearch import Elasticsearch
@python_2_unicode_compatible
class ElasticCluster(models.Model):
class Meta:
db_table = 'cluster_elastic_cluster'
# cluster name
name = models.CharField(max_length=128)
host = models.CharField(max_length=256)
port = models.IntegerField()
def __str__(self):
return '{name} {host}:{port}'.format(name=self.name, host=self.host, port=self.port)
def address(self):
return '{host}:{port}'.format(host=self.host, port=self.port)
def client(self, timeout=30):
return Elasticsearch(self.address(), timeout=timeout)
def info(self):
info = self.client().info()
ret = {
'cluster_name': info['cluster_name'],
'elasticsearch_version': info['version']['number'],
'lucene_version': info['version']['lucene_version'],
}
return ret
def health(self):
es = self.client()
return es.cluster.health()
def pending_tasks(self):
es = self.client()
tasks = es.cluster.pending_tasks()
return len(tasks), tasks
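# Illustrative usage sketch (not part of the original module; assumes a
# matching row already exists in the database):
#
#     cluster = ElasticCluster.objects.get(name='logging')
#     es = cluster.client(timeout=10)
#     print(cluster.info()['elasticsearch_version'])
#     pending_count, tasks = cluster.pending_tasks()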
| InterestingLab/elasticmanager | cluster/models.py | Python | mit | 1,207 | 0.000829 |
#!/usr/bin/env python
import sys
from twisted.protocols import amp
from twisted.internet import reactor
from twisted.internet.protocol import ServerFactory
from twisted.python import usage
from twisted.cred.checkers import FilePasswordDB
from twisted.cred.portal import Portal
from twisted.cred import credentials
from Realm import Realm, IAvatar
import commands
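# `commands` here is the project-local module that defines the AMP Command
# classes used below (Login, LoginError, Send, SendToUsers, SendToAll,
# AddUser, DelUser), not the Python 2 standard-library `commands` module.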
default_port = 65432
class Options(usage.Options):
optParameters = [
["port", "p", default_port, "server port"],
]
class ChatProtocol(amp.AMP):
@commands.Login.responder
def login(self, username, password):
"""Attempt to login."""
if username in self.factory.username_to_protocol:
raise commands.LoginError("User '%s' already logged in" % username)
creds = credentials.UsernamePassword(username, password)
deferred = self.factory.portal.login(creds, None, IAvatar)
deferred.addCallback(self.login_succeeded)
deferred.addErrback(self.login_failed)
return deferred
def login_succeeded(self, (avatar_interface, avatar, logout)):
name = avatar.name
self.username = name
self.factory.username_to_protocol[name] = self
# Tell all users about this user
for protocol in self.factory.username_to_protocol.itervalues():
protocol.callRemote(commands.AddUser, user=name)
# Tell this user about all other users
for username in self.factory.username_to_protocol:
if username != name:
self.callRemote(commands.AddUser, user=username)
return {}
def login_failed(self, failure):
raise commands.LoginError("Incorrect username or password")
@commands.SendToUsers.responder
def send_to_users(self, message, usernames):
for username in usernames:
protocol = self.factory.username_to_protocol.get(username)
if protocol:
protocol.callRemote(commands.Send, message=message,
sender=self.username)
# Also show it to the sender
if self.username not in usernames:
self.callRemote(commands.Send, message=message,
sender=self.username)
return {}
@commands.SendToAll.responder
def send_to_all(self, message):
for protocol in self.factory.username_to_protocol.itervalues():
protocol.callRemote(commands.Send, message=message,
sender=self.username)
return {}
def connectionLost(self, unused):
try:
del self.factory.username_to_protocol[self.username]
except KeyError:
pass
for protocol in self.factory.username_to_protocol.itervalues():
protocol.callRemote(commands.DelUser, user=self.username)
class ChatFactory(ServerFactory):
protocol = ChatProtocol
def __init__(self, portal):
self.portal = portal
self.username_to_protocol = {}
def main():
options = Options()
try:
options.parseOptions()
except usage.UsageError, err:
print "%s: %s" % (sys.argv[0], err)
print "%s: Try --help for usage details" % sys.argv[0]
sys.exit(1)
port = int(options["port"])
realm = Realm()
checker = FilePasswordDB("passwd.txt")
portal = Portal(realm, [checker])
factory = ChatFactory(portal)
reactor.listenTCP(port, factory)
reactor.run()
if __name__ == "__main__":
main()
| dripton/ampchat | chatserver.py | Python | mit | 3,458 | 0.001446 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Schedulers determine how worker's queues get filled. They control which
locations get scanned, in what order, at what time. This allows further
optimizations to be easily added, without having to modify the existing
overseer and worker thread code.
Schedulers will receive:
queues - A list of queues for the workers they control. For now, this is a
list containing a single queue.
status - A list of status dicts for the workers. Schedulers can use this
information to make more intelligent scheduling decisions.
Useful values include:
- last_scan_date: unix timestamp of when the last scan was
completed
- location: [lat,lng,alt] of the last scan
args - The configuration arguments. This may not include all of the arguments,
just ones that are relevant to this scheduler instance (eg. if
multiple locations become supported, the args passed to the
scheduler will only contain the parameters for the location
it handles)
Schedulers must fill the queues with items to search.
Queue items are a list containing:
[step, (latitude, longitude, altitude),
    appears_seconds, disappears_seconds]
Where:
- step is the step number. Used only for display purposes.
- (latitude, longitude, altitude) is the location to be scanned.
- appears_seconds is the unix timestamp of when the pokemon next appears
- disappears_seconds is the unix timestamp of when the
pokemon next disappears
appears_seconds and disappears_seconds are used to skip scans that are too
late, and wait for scans the worker is early for. If a scheduler doesn't
have a specific time a location needs to be scanned, it should set
both to 0.
If implementing a new scheduler, place it before SchedulerFactory, and
add it to __scheduler_classes
'''
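# Example of a queue item in the format described above (hypothetical
# numbers): step 12 at (40.7580, -73.9855, 10.0) for a pokemon that appears
# at unix time 1487167400 and disappears 900 seconds later would be queued as
# [12, (40.7580, -73.9855, 10.0), 1487167400, 1487168300].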
import itertools
import logging
import math
import geopy
import json
import time
import sys
from timeit import default_timer
from threading import Lock
from copy import deepcopy
import traceback
from collections import Counter
from queue import Empty
from operator import itemgetter
from datetime import datetime, timedelta
from .transform import get_new_coords
from .models import (hex_bounds, Pokemon, SpawnPoint, ScannedLocation,
ScanSpawnPoint)
from .utils import now, cur_sec, cellid, date_secs, equi_rect_distance
from .altitude import get_altitude
log = logging.getLogger(__name__)
# Simple base class that all other schedulers inherit from.
# Most of these functions should be overridden in the actual scheduler classes.
# Not all scheduler methods will need to use all of the functions.
class BaseScheduler(object):
def __init__(self, queues, status, args):
self.queues = queues
self.status = status
self.args = args
self.scan_location = False
self.size = None
self.ready = False
# Schedule function fills the queues with data.
def schedule(self):
log.warning('BaseScheduler does not schedule any items')
# location_changed function is called whenever the location being
# scanned changes.
# scan_location = (lat, lng, alt)
def location_changed(self, scan_location, dbq):
self.scan_location = scan_location
self.empty_queues()
    # scanning_paused function is called when scanning is paused from the UI.
# The default function will empty all the queues.
# Note: This function is called repeatedly while scanning is paused!
def scanning_paused(self):
self.empty_queues()
def getsize(self):
return self.size
def get_overseer_message(self):
nextitem = self.queues[0].queue[0]
message = 'Processing search queue, next item is {:6f},{:6f}'.format(
nextitem[1][0], nextitem[1][1])
# If times are specified, print the time of the next queue item, and
# how many seconds ahead/behind realtime
if nextitem[2]:
message += ' @ {}'.format(
time.strftime('%H:%M:%S', time.localtime(nextitem[2])))
if nextitem[2] > now():
message += ' ({}s ahead)'.format(nextitem[2] - now())
else:
message += ' ({}s behind)'.format(now() - nextitem[2])
return message
# check if time to refresh queue
def time_to_refresh_queue(self):
return self.queues[0].empty()
def task_done(self, *args):
return self.queues[0].task_done()
# Return the next item in the queue
def next_item(self, search_items_queue):
step, step_location, appears, leaves = self.queues[0].get()
remain = appears - now() + 10
messages = {
'wait': 'Waiting for item from queue.',
'early': 'Early for {:6f},{:6f}; waiting {}s...'.format(
step_location[0], step_location[1], remain),
'late': 'Too late for location {:6f},{:6f}; skipping.'.format(
step_location[0], step_location[1]),
'search': 'Searching at {:6f},{:6f},{:6f}.'.format(
step_location[0], step_location[1], step_location[2]),
'invalid': ('Invalid response at {:6f},{:6f}, ' +
'abandoning location.').format(step_location[0],
step_location[1])
}
return step, step_location, appears, leaves, messages
# How long to delay since last action
def delay(self, *args):
return self.args.scan_delay # always scan delay time
# Function to empty all queues in the queues list
def empty_queues(self):
self.ready = False
for queue in self.queues:
if not queue.empty():
try:
while True:
queue.get_nowait()
except Empty:
pass
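# Illustrative sketch only (not part of the original scheduler set): a minimal
# scheduler that queues just the center location with no timing constraints.
# A real scheduler would also have to be registered in __scheduler_classes
# inside SchedulerFactory, as the module docstring notes.
class SingleLocationSketch(BaseScheduler):
    def schedule(self):
        if not self.scan_location:
            log.warning(
                'Cannot schedule work until scan location has been set')
            return
        # Queue items are (step, (lat, lng, alt), appears_sec, leaves_sec);
        # zeros mean "no timing constraint".
        self.queues[0].put((1, self.scan_location, 0, 0))
        self.size = 1
        self.ready = True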
# Hex Search is the classic search method, with the pokepath modification,
# searching in a hex grid around the center location.
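# For a step_limit of s this covers the full hexagon of rings 0..s-1, i.e.
# 1 + 3*s*(s-1) locations (for example, step_limit 5 yields 61 scan steps).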
class HexSearch(BaseScheduler):
# Call base initialization, set step_distance.
def __init__(self, queues, status, args):
BaseScheduler.__init__(self, queues, status, args)
# If we are only scanning for pokestops/gyms, the scan radius can be
# 450m. Otherwise 70m.
if self.args.no_pokemon:
self.step_distance = 0.450
else:
self.step_distance = 0.070
self.step_limit = args.step_limit
# This will hold the list of locations to scan so it can be reused,
# instead of recalculating on each loop.
self.locations = False
# On location change, empty the current queue and the locations list
def location_changed(self, scan_location, dbq):
self.scan_location = scan_location
self.empty_queues()
self.locations = False
# Generates the list of locations to scan.
def _generate_locations(self):
NORTH = 0
EAST = 90
SOUTH = 180
WEST = 270
# Dist between column centers.
xdist = math.sqrt(3) * self.step_distance
ydist = 3 * (self.step_distance / 2) # Dist between row centers.
results = []
results.append((self.scan_location[0], self.scan_location[1], 0))
if self.step_limit > 1:
loc = self.scan_location
# Upper part.
ring = 1
while ring < self.step_limit:
loc = get_new_coords(
loc, xdist, WEST if ring % 2 == 1 else EAST)
results.append((loc[0], loc[1], 0))
for i in range(ring):
loc = get_new_coords(loc, ydist, NORTH)
loc = get_new_coords(
loc, xdist / 2, EAST if ring % 2 == 1 else WEST)
results.append((loc[0], loc[1], 0))
for i in range(ring):
loc = get_new_coords(
loc, xdist, EAST if ring % 2 == 1 else WEST)
results.append((loc[0], loc[1], 0))
for i in range(ring):
loc = get_new_coords(loc, ydist, SOUTH)
loc = get_new_coords(
loc, xdist / 2, EAST if ring % 2 == 1 else WEST)
results.append((loc[0], loc[1], 0))
ring += 1
# Lower part.
ring = self.step_limit - 1
loc = get_new_coords(loc, ydist, SOUTH)
loc = get_new_coords(
loc, xdist / 2, WEST if ring % 2 == 1 else EAST)
results.append((loc[0], loc[1], 0))
while ring > 0:
if ring == 1:
loc = get_new_coords(loc, xdist, WEST)
results.append((loc[0], loc[1], 0))
else:
for i in range(ring - 1):
loc = get_new_coords(loc, ydist, SOUTH)
loc = get_new_coords(
loc, xdist / 2, WEST if ring % 2 == 1 else EAST)
results.append((loc[0], loc[1], 0))
for i in range(ring):
loc = get_new_coords(
loc, xdist, WEST if ring % 2 == 1 else EAST)
results.append((loc[0], loc[1], 0))
for i in range(ring - 1):
loc = get_new_coords(loc, ydist, NORTH)
loc = get_new_coords(
loc, xdist / 2, WEST if ring % 2 == 1 else EAST)
results.append((loc[0], loc[1], 0))
loc = get_new_coords(
loc, xdist, EAST if ring % 2 == 1 else WEST)
results.append((loc[0], loc[1], 0))
ring -= 1
# This will pull the last few steps back to the front of the list,
# so you get a "center nugget" at the beginning of the scan, instead
        # of the entire northern area before the scan spots 70m to the south.
if self.step_limit >= 3:
if self.step_limit == 3:
results = results[-2:] + results[:-2]
else:
results = results[-7:] + results[:-7]
# Add the required appear and disappear times.
locationsZeroed = []
for step, location in enumerate(results, 1):
altitude = get_altitude(self.args, location)
locationsZeroed.append(
(step, (location[0], location[1], altitude), 0, 0))
return locationsZeroed
# Schedule the work to be done.
def schedule(self):
if not self.scan_location:
log.warning(
'Cannot schedule work until scan location has been set')
return
# Only generate the list of locations if we don't have it already
# calculated.
if not self.locations:
self.locations = self._generate_locations()
for location in self.locations:
# FUTURE IMPROVEMENT - For now, queues is assumed to have a single
# queue.
self.queues[0].put(location)
log.debug("Added location {}".format(location))
self.size = len(self.locations)
self.ready = True
# Spawn Only Hex Search works like Hex Search, but skips locations that
# have no known spawnpoints.
class HexSearchSpawnpoint(HexSearch):
def _any_spawnpoints_in_range(self, coords, spawnpoints):
return any(
geopy.distance.distance(coords, x).meters <= 70
for x in spawnpoints)
# Extend the generate_locations function to remove locations with no
# spawnpoints.
def _generate_locations(self):
n, e, s, w = hex_bounds(self.scan_location, self.step_limit)
spawnpoints = set((d['latitude'], d['longitude'])
for d in Pokemon.get_spawnpoints(s, w, n, e))
if len(spawnpoints) == 0:
log.warning('No spawnpoints found in the specified area! (Did ' +
'you forget to run a normal scan in this area first?)')
# Call the original _generate_locations.
locations = super(HexSearchSpawnpoint, self)._generate_locations()
# Remove items with no spawnpoints in range.
locations = [
coords for coords in locations
if self._any_spawnpoints_in_range(coords[1], spawnpoints)]
return locations
# Spawn Scan searches known spawnpoints at the specific time they spawn.
class SpawnScan(BaseScheduler):
def __init__(self, queues, status, args):
BaseScheduler.__init__(self, queues, status, args)
# On the first scan, we want to search the last 15 minutes worth of
# spawns to get existing pokemon onto the map.
self.firstscan = True
# If we are only scanning for pokestops/gyms, the scan radius can be
# 450m. Otherwise 70m.
if self.args.no_pokemon:
self.step_distance = 0.450
else:
self.step_distance = 0.070
self.step_limit = args.step_limit
self.locations = False
# Generate locations is called when the locations list is cleared - the
# first time it scans or after a location change.
def _generate_locations(self):
# Attempt to load spawns from file.
if self.args.spawnpoint_scanning != 'nofile':
log.debug('Loading spawn points from json file @ %s',
self.args.spawnpoint_scanning)
try:
with open(self.args.spawnpoint_scanning) as file:
self.locations = json.load(file)
except ValueError as e:
log.error('JSON error: %s; will fallback to database', repr(e))
except IOError as e:
log.error(
'Error opening json file: %s; will fallback to database',
repr(e))
# No locations yet? Try the database!
if not self.locations:
log.debug('Loading spawn points from database')
self.locations = Pokemon.get_spawnpoints_in_hex(
self.scan_location, self.args.step_limit)
# Well shit...
# if not self.locations:
        # raise Exception('No available spawn points!')
        # Each entry of locations[] looks like:
        # {"lat": 37.53079079414139, "lng": -122.28811690874117,
        #  "spawnpoint_id": "808f9f1601d", "time": 511}
log.info('Total of %d spawns to track', len(self.locations))
# locations.sort(key=itemgetter('time'))
if self.args.very_verbose:
for i in self.locations:
sec = i['time'] % 60
minute = (i['time'] / 60) % 60
m = 'Scan [{:02}:{:02}] ({}) @ {},{}'.format(
minute, sec, i['time'], i['lat'], i['lng'])
log.debug(m)
# 'time' from json and db alike has been munged to appearance time as
# seconds after the hour.
# Here we'll convert that to a real timestamp.
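        # Worked example using the sample spawn above ("time": 511, i.e.
        # 8m31s past the hour): with cur_sec() == 300 the spawn is 211 s in
        # the future, so appears = now() + 211; with cur_sec() == 600 it is
        # 89 s late, so appears = now() + 3600 - 89.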
for location in self.locations:
# For a scan which should cover all CURRENT pokemon, we can offset
# the comparison time by 15 minutes so that the "appears" time
# won't be rolled over to the next hour.
# TODO: Make it work. The original logic (commented out) was
# producing bogus results if your first scan was in the last
            #       15 minutes of the hour. Wrapping my head around this isn't
            #       worth it right now, so I'll just drop the feature for the time
# being. It does need to come back so that
# repositioning/pausing works more nicely, but we can live
# without it too.
# if sps_scan_current:
# cursec = (location['time'] + 900) % 3600
# else:
cursec = location['time']
if cursec > cur_sec():
# Hasn't spawn in the current hour.
from_now = location['time'] - cur_sec()
appears = now() + from_now
else:
# Won't spawn till next hour.
late_by = cur_sec() - location['time']
appears = now() + 3600 - late_by
location['appears'] = appears
location['leaves'] = appears + 900
# Put the spawn points in order of next appearance time.
self.locations.sort(key=itemgetter('appears'))
# Match expected structure:
# locations = [((lat, lng, alt), ts_appears, ts_leaves),...]
retset = []
for step, location in enumerate(self.locations, 1):
altitude = get_altitude(self.args, [location['lat'],
location['lng']])
retset.append((step, (location['lat'], location['lng'], altitude),
location['appears'], location['leaves']))
return retset
# Schedule the work to be done.
def schedule(self):
if not self.scan_location:
log.warning(
'Cannot schedule work until scan location has been set')
return
# SpawnScan needs to calculate the list every time, since the times
# will change.
self.locations = self._generate_locations()
for location in self.locations:
# FUTURE IMPROVEMENT - For now, queues is assumed to have a single
# queue.
self.queues[0].put(location)
log.debug("Added location {}".format(location))
# Clear the locations list so it gets regenerated next cycle.
self.size = len(self.locations)
self.locations = None
self.ready = True
# SpeedScan is a complete search method that initially does a spawnpoint
# search in each scan location by scanning five two-minute bands within
# an hour, with ten-minute intervals between bands.
# After finishing the spawnpoint search, or if the timing isn't right for
# any of the remaining search bands, workers will search the nearest scan
# location that has a new spawn.
class SpeedScan(HexSearch):
# Call base initialization, set step_distance
def __init__(self, queues, status, args):
super(SpeedScan, self).__init__(queues, status, args)
self.refresh_date = datetime.utcnow() - timedelta(days=1)
self.next_band_date = self.refresh_date
self.queues = [[]]
self.ready = False
self.spawns_found = 0
self.spawns_missed_delay = {}
self.scans_done = 0
self.scans_missed = 0
self.scans_missed_list = []
# Minutes between queue refreshes. Should be less than 10 to allow for
        # new bands during the initial scan.
self.minutes = 5
self.found_percent = []
self.scan_percent = []
self.spawn_percent = []
self.status_message = []
self.tth_found = 0
# Initiate special types.
self._stat_init()
self._locks_init()
def _stat_init(self):
self.spawns_found = 0
self.spawns_missed_delay = {}
self.scans_done = 0
self.scans_missed = 0
self.scans_missed_list = []
def _locks_init(self):
self.lock_next_item = Lock()
# On location change, empty the current queue and the locations list
def location_changed(self, scan_location, db_update_queue):
super(SpeedScan, self).location_changed(scan_location, db_update_queue)
self.locations = self._generate_locations()
scans = {}
initial = {}
all_scans = {}
for sl in ScannedLocation.select_in_hex(self.scan_location,
self.args.step_limit):
all_scans[cellid((sl['latitude'], sl['longitude']))] = sl
for i, e in enumerate(self.locations):
cell = cellid(e[1])
            scans[cell] = {'loc': e[1],  # (lat, lng, alt) location
'step': e[0]}
initial[cell] = all_scans[cell] if cell in all_scans.keys(
) else ScannedLocation.new_loc(e[1])
self.scans = scans
db_update_queue.put((ScannedLocation, initial))
log.info('%d steps created', len(scans))
self.band_spacing = int(10 * 60 / len(scans))
self.band_status()
spawnpoints = SpawnPoint.select_in_hex(
self.scan_location, self.args.step_limit)
if not spawnpoints:
log.info('No spawnpoints in hex found in SpawnPoint table. ' +
'Doing initial scan.')
log.info('Found %d spawn points within hex', len(spawnpoints))
log.info('Doing %s distance calcs to assign spawn points to scans',
"{:,}".format(len(spawnpoints) * len(scans)))
scan_spawn_point = {}
ScannedLocation.link_spawn_points(scans, initial, spawnpoints,
self.step_distance, scan_spawn_point,
force=True)
if len(scan_spawn_point):
log.info('%d relations found between the spawn points and steps',
len(scan_spawn_point))
db_update_queue.put((ScanSpawnPoint, scan_spawn_point))
else:
log.info('Spawn points assigned')
# Generates the list of locations to scan
# Created a new function, because speed scan requires fixed locations,
# even when increasing -st. With HexSearch locations, the location of
    # inner rings would change if -st was increased, requiring rescanning
    # since the locations would no longer be recognized in the
    # ScannedLocation table.
def _generate_locations(self):
NORTH = 0
EAST = 90
SOUTH = 180
WEST = 270
# dist between column centers
xdist = math.sqrt(3) * self.step_distance
ydist = 3 * (self.step_distance / 2) # dist between row centers
results = []
loc = self.scan_location
results.append((loc[0], loc[1], 0))
# upper part
for ring in range(1, self.step_limit):
for i in range(max(ring - 1, 1)):
if ring > 1:
loc = get_new_coords(loc, ydist, NORTH)
loc = get_new_coords(loc, xdist / (1 + (ring > 1)), WEST)
results.append((loc[0], loc[1], 0))
for i in range(ring):
loc = get_new_coords(loc, ydist, NORTH)
loc = get_new_coords(loc, xdist / 2, EAST)
results.append((loc[0], loc[1], 0))
for i in range(ring):
loc = get_new_coords(loc, xdist, EAST)
results.append((loc[0], loc[1], 0))
for i in range(ring):
loc = get_new_coords(loc, ydist, SOUTH)
loc = get_new_coords(loc, xdist / 2, EAST)
results.append((loc[0], loc[1], 0))
for i in range(ring):
loc = get_new_coords(loc, ydist, SOUTH)
loc = get_new_coords(loc, xdist / 2, WEST)
results.append((loc[0], loc[1], 0))
for i in range(ring + (ring + 1 < self.step_limit)):
loc = get_new_coords(loc, xdist, WEST)
results.append((loc[0], loc[1], 0))
generated_locations = []
for step, location in enumerate(results):
altitude = get_altitude(self.args, location)
generated_locations.append(
(step, (location[0], location[1], altitude), 0, 0))
return generated_locations
def getsize(self):
return len(self.queues[0])
def get_overseer_message(self):
n = 0
ms = (datetime.utcnow() - self.refresh_date).total_seconds() + \
self.refresh_ms
counter = {
'TTH': 0,
'spawn': 0,
'band': 0,
}
for item in self.queues[0]:
if item.get('done', False):
continue
if ms > item['end']:
continue
if ms < item['start']:
break
n += 1
counter[item['kind']] += 1
message = ('Scanning status: {} total waiting, {} initial bands, ' +
'{} TTH searches, and {} new spawns').format(
n, counter['band'], counter['TTH'], counter['spawn'])
if self.status_message:
message += '\n' + self.status_message
return message
    # Refresh the queue every `minutes` minutes, or immediately if it has not
    # been built yet.
def time_to_refresh_queue(self):
return ((datetime.utcnow() - self.refresh_date).total_seconds() >
self.minutes * 60 or self.queues == [[]])
# Function to empty all queues in the queues list
def empty_queues(self):
self.queues = [[]]
# How long to delay since last action
def delay(self, last_scan_date):
return max(
((last_scan_date - datetime.utcnow()).total_seconds() +
self.args.scan_delay),
2)
def band_status(self):
try:
bands_total = len(self.locations) * 5
bands_filled = ScannedLocation.get_bands_filled_by_cellids(
self.scans.keys())
percent = bands_filled * 100.0 / bands_total
if bands_total == bands_filled:
log.info('Initial spawnpoint scan is complete')
else:
log.info('Initial spawnpoint scan, %d of %d bands are done ' +
'or %.1f%% complete', bands_filled, bands_total,
percent)
return percent
except Exception as e:
log.error(
'Exception in band_status: Exception message: {}'.format(
repr(e)))
    # Update the queue, and provide a report on performance over the last few
    # minutes.
def schedule(self):
log.info('Refreshing queue')
self.ready = False
now_date = datetime.utcnow()
self.refresh_date = now_date
self.refresh_ms = now_date.minute * 60 + now_date.second
old_q = deepcopy(self.queues[0])
queue = []
# Measure the time it takes to refresh the queue
start = time.time()
# prefetch all scanned locations
scanned_locations = ScannedLocation.get_by_cellids(self.scans.keys())
# extract all spawnpoints into a dict with spawnpoint
# id -> spawnpoint for easy access later
cell_to_linked_spawn_points = (
ScannedLocation.get_cell_to_linked_spawn_points(self.scans.keys()))
sp_by_id = {}
for sps in cell_to_linked_spawn_points.itervalues():
for sp in sps:
sp_by_id[sp['id']] = sp
for cell, scan in self.scans.iteritems():
queue += ScannedLocation.get_times(scan, now_date,
scanned_locations)
queue += SpawnPoint.get_times(cell, scan, now_date,
self.args.spawn_delay,
cell_to_linked_spawn_points,
sp_by_id)
end = time.time()
queue.sort(key=itemgetter('start'))
self.queues[0] = queue
self.ready = True
log.info('New queue created with %d entries in %f seconds', len(queue),
(end - start))
if old_q:
# Enclosing in try: to avoid divide by zero exceptions from
# killing overseer
try:
# Possible 'done' values are 'Missed', 'Scanned', None, or
# number
Not_none_list = filter(lambda e: e.get(
'done', None) is not None, old_q)
Missed_list = filter(lambda e: e.get(
'done', None) == 'Missed', Not_none_list)
Scanned_list = filter(lambda e: e.get(
'done', None) == 'Scanned', Not_none_list)
Timed_list = filter(lambda e: type(
e['done']) is not str, Not_none_list)
spawns_timed_list = filter(
lambda e: e['kind'] == 'spawn', Timed_list)
spawns_timed = len(spawns_timed_list)
bands_timed = len(
filter(lambda e: e['kind'] == 'band', Timed_list))
spawns_all = spawns_timed + \
len(filter(lambda e: e['kind'] == 'spawn', Scanned_list))
spawns_missed = len(
filter(lambda e: e['kind'] == 'spawn', Missed_list))
band_percent = self.band_status()
kinds = {}
tth_ranges = {}
self.tth_found = 0
self.active_sp = 0
found_percent = 100.0
good_percent = 100.0
spawns_reached = 100.0
spawnpoints = SpawnPoint.select_in_hex(
self.scan_location, self.args.step_limit)
for sp in spawnpoints:
if sp['missed_count'] > 5:
continue
self.active_sp += 1
self.tth_found += (sp['earliest_unseen'] ==
sp['latest_seen'])
kind = sp['kind']
kinds[kind] = kinds.get(kind, 0) + 1
tth_range = str(int(round(
((sp['earliest_unseen'] - sp['latest_seen']) % 3600) /
60.0)))
tth_ranges[tth_range] = tth_ranges.get(tth_range, 0) + 1
tth_ranges['0'] = tth_ranges.get('0', 0) - self.tth_found
len_spawnpoints = len(spawnpoints) + (not len(spawnpoints))
log.info('Total Spawn Points found in hex: %d',
len(spawnpoints))
log.info('Inactive Spawn Points found in hex: %d or %.1f%%',
len(spawnpoints) - self.active_sp,
(len(spawnpoints) -
self.active_sp) * 100.0 / len_spawnpoints)
log.info('Active Spawn Points found in hex: %d or %.1f%%',
self.active_sp,
self.active_sp * 100.0 / len_spawnpoints)
self.active_sp += self.active_sp == 0
for k in sorted(kinds.keys()):
log.info('%s kind spawns: %d or %.1f%%', k,
kinds[k], kinds[k] * 100.0 / self.active_sp)
log.info('Spawns with found TTH: %d or %.1f%% [%d missing]',
self.tth_found,
self.tth_found * 100.0 / self.active_sp,
self.active_sp - self.tth_found)
for k in sorted(tth_ranges.keys(), key=int):
log.info('Spawnpoints with a %sm range to find TTH: %d', k,
tth_ranges[k])
log.info('Over last %d minutes: %d new bands, %d Pokemon ' +
'found', self.minutes, bands_timed, spawns_all)
log.info('Of the %d total spawns, %d were targeted, and %d ' +
'found scanning for others', spawns_all, spawns_timed,
spawns_all - spawns_timed)
scan_total = spawns_timed + bands_timed
spm = scan_total / self.minutes
seconds_per_scan = self.minutes * 60 * \
self.args.workers / scan_total if scan_total else 0
log.info('%d scans over %d minutes, %d scans per minute, %d ' +
'secs per scan per worker', scan_total, self.minutes,
spm, seconds_per_scan)
sum = spawns_all + spawns_missed
if sum:
spawns_reached = spawns_all * 100.0 / \
(spawns_all + spawns_missed)
log.info('%d Pokemon found, and %d were not reached in ' +
'time for %.1f%% found', spawns_all,
spawns_missed, spawns_reached)
if spawns_timed:
average = reduce(
lambda x, y: x + y['done'],
spawns_timed_list,
0) / spawns_timed
log.info('%d Pokemon found, %d were targeted, with an ' +
'average delay of %d sec', spawns_all,
spawns_timed, average)
spawns_missed = reduce(
lambda x, y: x + len(y),
self.spawns_missed_delay.values(), 0)
sum = spawns_missed + self.spawns_found
found_percent = (
self.spawns_found * 100.0 / sum if sum else 0)
log.info('%d spawns scanned and %d spawns were not ' +
'there when expected for %.1f%%',
self.spawns_found, spawns_missed, found_percent)
self.spawn_percent.append(round(found_percent, 1))
if self.spawns_missed_delay:
log.warning('Missed spawn IDs with times after spawn:')
log.warning(self.spawns_missed_delay)
log.info('History: %s', str(
self.spawn_percent).strip('[]'))
sum = self.scans_done + len(self.scans_missed_list)
good_percent = self.scans_done * 100.0 / sum if sum else 0
log.info(
'%d scans successful and %d scans missed for %.1f%% found',
self.scans_done, len(self.scans_missed_list), good_percent)
self.scan_percent.append(round(good_percent, 1))
if self.scans_missed_list:
log.warning('Missed scans: %s', Counter(
self.scans_missed_list).most_common(3))
log.info('History: %s', str(self.scan_percent).strip('[]'))
self.status_message = ('Initial scan: {:.2f}%, TTH found: ' +
'{:.2f}% [{} missing], ').format(
band_percent, self.tth_found * 100.0 / self.active_sp,
self.active_sp - self.tth_found)
self.status_message += ('Spawns reached: {:.2f}%, Spawns ' +
'found: {:.2f}%, Good scans ' +
'{:.2f}%').format(spawns_reached,
found_percent,
good_percent)
self._stat_init()
except Exception as e:
log.error(
'Performance statistics had an Exception: {}'.format(
repr(e)))
traceback.print_exc(file=sys.stdout)
# Find the best item to scan next
def next_item(self, status):
# Thread safety: don't let multiple threads get the same "best item".
with self.lock_next_item:
            # Score each item in the queue by kind (initial bands first, then
            # TTH searches, then spawns) and by its distance from the worker.
while not self.ready:
time.sleep(1)
now_date = datetime.utcnow()
now_time = time.time()
n = 0 # count valid scans reviewed
q = self.queues[0]
ms = ((now_date - self.refresh_date).total_seconds() +
self.refresh_ms)
best = {'score': 0}
cant_reach = False
worker_loc = [status['latitude'], status['longitude']]
last_action = status['last_scan_date']
# Check all scan locations possible in the queue.
for i, item in enumerate(q):
# If already claimed by another worker or done, pass.
if item.get('done', False):
continue
# If the item is parked by a different thread (or by a
# different account, which should be on that one thread),
# pass.
our_parked_name = status['username']
if 'parked_name' in item:
# We use 'parked_last_update' to determine when the
# last time was since the thread passed the item with the
# same thread name & username. If it's been too long, unset
# the park so another worker can pick it up.
now = default_timer()
max_parking_idle_seconds = 3 * 60
if (now - item.get('parked_last_update', now)
> max_parking_idle_seconds):
# Unpark & don't skip it.
item.pop('parked_name', None)
item.pop('parked_last_update', None)
else:
# Still parked and not our item. Skip it.
if item.get('parked_name') != our_parked_name:
continue
# If already timed out, mark it as Missed and check next.
if ms > item['end']:
item['done'] = 'Missed' if not item.get(
'done', False) else item['done']
continue
# If we just did a fresh band recently, wait a few seconds to
# space out the band scans.
if now_date < self.next_band_date:
continue
# If the start time isn't yet, don't bother looking further,
# since queue sorted by start time.
if ms < item['start']:
break
loc = item['loc']
distance = equi_rect_distance(loc, worker_loc)
secs_to_arrival = distance / self.args.kph * 3600
# If we can't make it there before it disappears, don't bother
# trying.
if ms + secs_to_arrival > item['end']:
cant_reach = True
continue
n += 1
# Bands are top priority to find new spawns first
score = 1e12 if item['kind'] == 'band' else (
1e6 if item['kind'] == 'TTH' else 1)
# For spawns, score is purely based on how close they are to
# last worker position
score = score / (distance + .01)
if score > best['score']:
best = {'score': score, 'i': i}
best.update(item)
prefix = 'Calc %.2f for %d scans:' % (time.time() - now_time, n)
loc = best.get('loc', [])
step = best.get('step', 0)
i = best.get('i', 0)
messages = {
'wait': 'Nothing to scan.',
'early': 'Early for step {}; waiting a few seconds...'.format(
step),
'late': ('API response on step {} delayed by {} seconds. ' +
'Possible causes: slow proxies, internet, or ' +
'Niantic servers.').format(
step,
int((now_date - last_action).total_seconds())),
'search': 'Searching at step {}.'.format(step),
'invalid': ('Invalid response at step {}, abandoning ' +
'location.').format(step)
}
try:
item = q[i]
except IndexError:
messages['wait'] = ('Search aborting.'
+ ' Overseer refreshing queue.')
return -1, 0, 0, 0, messages
if best['score'] == 0:
if cant_reach:
messages['wait'] = ('Not able to reach any scan'
+ ' under the speed limit.')
return -1, 0, 0, 0, messages
distance = equi_rect_distance(loc, worker_loc)
if (distance >
(now_date - last_action).total_seconds() *
self.args.kph / 3600):
# Flag item as "parked" by a specific thread, because
# we're waiting for it. This will avoid all threads "walking"
# to the same item.
our_parked_name = status['username']
item['parked_name'] = our_parked_name
# CTRL+F 'parked_last_update' in this file for more info.
item['parked_last_update'] = default_timer()
messages['wait'] = 'Moving {}m to step {} for a {}.'.format(
int(distance * 1000), step,
best['kind'])
return -1, 0, 0, 0, messages
prefix += ' Step %d,' % (step)
# Check again if another worker heading there.
# TODO: Check if this is still necessary. I believe this was
# originally a failed attempt at thread safety, which still
# resulted in a race condition (multiple workers heading to the
# same spot). A thread Lock has since been added.
if item.get('done', False):
messages['wait'] = ('Skipping step {}. Other worker already ' +
'scanned.').format(step)
return -1, 0, 0, 0, messages
if not self.ready:
messages['wait'] = ('Search aborting.'
+ ' Overseer refreshing queue.')
return -1, 0, 0, 0, messages
# If a new band, set the date to wait until for the next band.
if best['kind'] == 'band' and best['end'] - best['start'] > 5 * 60:
self.next_band_date = datetime.utcnow() + timedelta(
seconds=self.band_spacing)
# Mark scanned
item['done'] = 'Scanned'
status['index_of_queue_item'] = i
messages['search'] = 'Scanning step {} for a {}.'.format(
best['step'], best['kind'])
return best['step'], best['loc'], 0, 0, messages
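    # Illustrative note (not in the original code): next_item() returns a
    # (step, location, 0, 0, messages) tuple. A step of -1 tells the calling
    # worker to wait, with the reason in messages['wait'].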
def task_done(self, status, parsed=False):
if parsed:
# Record delay between spawn time and scanning for statistics
now_secs = date_secs(datetime.utcnow())
item = self.queues[0][status['index_of_queue_item']]
seconds_within_band = (
int((datetime.utcnow() - self.refresh_date).total_seconds()) +
self.refresh_ms)
enforced_delay = (self.args.spawn_delay if item['kind'] == 'spawn'
else 0)
start_delay = seconds_within_band - item['start'] + enforced_delay
safety_buffer = item['end'] - seconds_within_band
if safety_buffer < 0:
log.warning('Too late by %d sec for a %s at step %d', -
safety_buffer, item['kind'], item['step'])
# If we had a 0/0/0 scan, then unmark as done so we can retry, and
# save for Statistics
elif parsed['bad_scan']:
self.scans_missed_list.append(cellid(item['loc']))
                # Only retry a bad scan a limited number of times
                # (args.bad_scan_retry).
if self.args.bad_scan_retry > 0 and (
self.scans_missed_list.count(cellid(item['loc'])) >
self.args.bad_scan_retry):
log.info('Step %d failed scan for %d times! Giving up...',
item['step'], self.args.bad_scan_retry + 1)
else:
item['done'] = None
log.info('Putting back step %d in queue', item['step'])
else:
# Scan returned data
self.scans_done += 1
item['done'] = start_delay
# Were we looking for spawn?
if item['kind'] == 'spawn':
sp_id = item['sp']
# Did we find the spawn?
if sp_id in parsed['sp_id_list']:
self.spawns_found += 1
elif start_delay > 0: # not sure why this could be
# negative, but sometimes it is
# if not, record ID and put back in queue
self.spawns_missed_delay[
sp_id] = self.spawns_missed_delay.get(sp_id, [])
self.spawns_missed_delay[sp_id].append(start_delay)
item['done'] = 'Scanned'
# For existing spawn points, if in any other queue items, mark
# 'scanned'
for sp_id in parsed['sp_id_list']:
for item in self.queues[0]:
if (sp_id == item.get('sp', None) and
item.get('done', None) is None and
now_secs > item['start'] and
now_secs < item['end']):
item['done'] = 'Scanned'
# The SchedulerFactory returns an instance of the correct type of scheduler.
class SchedulerFactory():
__schedule_classes = {
"hexsearch": HexSearch,
"hexsearchspawnpoint": HexSearchSpawnpoint,
"spawnscan": SpawnScan,
"speedscan": SpeedScan,
}
@staticmethod
def get_scheduler(name, *args, **kwargs):
scheduler_class = SchedulerFactory.__schedule_classes.get(
name.lower(), None)
if scheduler_class:
return scheduler_class(*args, **kwargs)
raise NotImplementedError(
"The requested scheduler has not been implemented")
# The KeyScheduler returns a scheduler that cycles through the given hash
# server keys.
class KeyScheduler(object):
def __init__(self, keys):
self.keys = {}
for key in keys:
self.keys[key] = {
'remaining': 0,
'maximum': 0,
'peak': 0
}
self.key_cycle = itertools.cycle(keys)
self.curr_key = ''
def keys(self):
return self.keys
def current(self):
return self.curr_key
def next(self):
self.curr_key = self.key_cycle.next()
return self.curr_key
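# Illustrative sketch (not part of the original module): how the factory and
# key scheduler are typically used by the overseer. The queue/status/args
# objects below are the ones built elsewhere in the application and are only
# assumptions here.
#
#   scheduler = SchedulerFactory.get_scheduler('speedscan', [queue], status, args)
#   key_scheduler = KeyScheduler(['hash-key-1', 'hash-key-2'])
#   key_scheduler.next()   # -> 'hash-key-1'
#   key_scheduler.next()   # -> 'hash-key-2'
#   key_scheduler.next()   # -> 'hash-key-1' (cycles endlessly)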
| slgphantom/RocketMap | pogom/schedulers.py | Python | agpl-3.0 | 46,757 | 0.000021 |
#! /usr/bin/python
# -*- coding: UTF-8 -*-
# Copyright 2018-2019 Luiko Czub, TestLink-API-Python-client developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------
import os.path
import pytest
from testlink import TestlinkAPIClient, TestlinkAPIGeneric, TestLinkHelper
# example text file attachment = this python file
# why not use os.path.realpath(__file__)?
# -> because __file__ could be a compiled python file *.pyc, if the test run
#    is repeated without changing the test code
ATTACHMENT_EXAMPLE_TEXT= os.path.join(os.path.dirname(__file__),
os.path.basename(__file__))
#attachemantFile = open(ATTACHMENT_EXAMPLE_TEXT, 'r')
@pytest.fixture()
def attachmentFile():
''' open readonly attachment sample before test and close it afterwards '''
aFile = open(ATTACHMENT_EXAMPLE_TEXT, 'r')
yield aFile
aFile.close()
@pytest.fixture(scope='session')
def api_helper_class():
return TestLinkHelper
@pytest.fixture(scope='session')
def api_generic_client(api_helper_class):
''' Init TestlinkAPIGeneric Client with connection parameters defined in
environment variables
    TESTLINK_API_PYTHON_SERVER_URL and TESTLINK_API_PYTHON_DEVKEY
'''
return api_helper_class().connect(TestlinkAPIGeneric)
@pytest.fixture(scope='session')
def api_general_client(api_helper_class):
''' Init TestlinkAPIClient Client with connection parameters defined in
environment variables
    TESTLINK_API_PYTHON_SERVER_URL and TESTLINK_API_PYTHON_DEVKEY
'''
return api_helper_class().connect(TestlinkAPIClient)
@pytest.fixture(scope='session', params=[TestlinkAPIGeneric, TestlinkAPIClient])
def api_client_class(request):
''' all variations of Testlink API Client classes '''
return request.param
@pytest.fixture(scope='session')
def api_client(api_client_class, api_helper_class):
''' Init Testlink API Client class defined in fixtures api_client_class with
connection parameters defined in environment variables
    TESTLINK_API_PYTHON_SERVER_URL and TESTLINK_API_PYTHON_DEVKEY
    Tests will be called for each Testlink API Client class, defined in
fixtures parameter list
'''
return api_helper_class().connect(api_client_class)
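# Illustrative sketch (not part of the original conftest): a test module in this
# directory consumes the fixtures simply by naming them as arguments. The test
# names and the API call below are hypothetical examples.
#
#   def test_say_hello(api_client):
#       assert api_client.sayHello() == 'Hello!'
#
#   def test_upload_attachment(api_general_client, attachmentFile):
#       ...  # attachmentFile is an open read-only handle to this source file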
| lczub/TestLink-API-Python-client | test/conftest.py | Python | apache-2.0 | 2,865 | 0.008028 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
import getpass
from os.path import expanduser
import stat
import shutil
import bit.git as git
structure="\n\
/file_system_a\n\
|\n\
'- data\n\
|\n\
'- projects\n\
|\n\
|- Company_A\n\
| |\n\
| |- CA_project_y\n\
| |\n\
| '- CA_project_x\n\
| |\n\
| |- results\n\
| |- models\n\
| |- scripts\n\
| |- tmp\n\
| |- slurm_logs\n\
| '- wiki\n\
|\n\
'- Company_B\n\
|\n\
'- CB_project_n\n\n\
absolute path to projects = /file_system_a/data/projects/"
requirements=["owncloud_address","owncloud_upload_folder",\
"owncloud_download_folder","owncloud_user",\
"owncloud_pass","github_address",\
"github_organization","github_user",\
"github_pass","local_path", "user_group" ]
special_reqs=["owncloud_user","owncloud_pass",\
"github_user","github_pass"]
start_reqs=["github_address","github_organization",\
"github_user","github_pass","local_path"]
def get_owncloud_address():
    owncloud_address=str(input("Please enter your ownCloud address (eg. http://domain.tld/owncloud): ")) or None
return owncloud_address
def get_owncloud_upload_folder():
    owncloud_upload_folder=str(input("Please enter the folder in your ownCloud that will be used to deliver data to users.\nYou can share this folder with your colleagues so that everybody delivers data through the same folder. (default: DELIVERY_SERVICE):")) or "DELIVERY_SERVICE"
return owncloud_upload_folder
def get_owncloud_download_folder():
    owncloud_download_folder=str(input("Please enter the folder in your ownCloud that will be used to retrieve data from users.\nYou can share this folder with your colleagues so that everybody retrieves data through the same folder. (default: DROPBOX):")) or "DROPBOX"
return owncloud_download_folder
def get_owncloud_user(config_file=None):
if config_file:
        owncloud_user=str(input("Please enter your ownCloud user name or press Enter if you do not want to save this information on the config file: ")) or None
else:
        owncloud_user=str(input("Please enter your ownCloud user name: ")) or None
return owncloud_user
def get_owncloud_pass(config_file=None):
if config_file:
        owncloud_pass=str(getpass.getpass(prompt="Please enter your ownCloud password or press Enter if you do not want to save this information on the config file: ")) or None
else:
        owncloud_pass=str(getpass.getpass(prompt="Please enter your ownCloud password: ")) or None
return owncloud_pass
def get_github_address():
github_address=str(input("Github server address (default: https://github.com): ") or "https://github.com")
return github_address
def get_github_organization():
github_organization=str(input("Your GitHub organization name (eg. mpg-age-bioinformatics for https://github.com/mpg-age-bioinformatics): ")) or None
return github_organization
def get_github_user(config_file=None,gitssh=None):
if not gitssh:
if config_file:
            github_user=str(input("Please enter your user name for your GitHub server or press Enter if you do not want to save this information on the config file: ")) or None
else:
            github_user=str(input("Please enter your user name for your GitHub server: ")) or None
else:
github_user=None
return github_user
def get_github_pass(config_file=None,gitssh=None):
if not gitssh:
if config_file:
            github_pass=str(getpass.getpass(prompt="Please enter your password or access token (info: https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/) for your GitHub server or press Enter if you do not want to save this information on the config file: ")) or None
else:
            github_pass=str(getpass.getpass(prompt="Please enter your password or access token for your GitHub server: ")) or None
else:
github_pass=None
return github_pass
def get_local_path(structure=structure):
    local_path=str(input("The bermuda information triangle works on the basis that all your projects are located under the same parent path on your local machine, i.e. %s\nPlease enter the absolute path to your projects folder: " %structure ) ) or None
return local_path
def get_user_group():
    user_group=str(input("If you are using ACLs to give your group members access to this project, please enter the users that will have read/write access to every project's top folders, e.g. userA,userB,userC -- DO NOT forget to include your own user name: ")) or None
if user_group:
user_group=user_group.split(",")
return user_group
def get_github_api(github_address):
if "github.com" in github_address:
github_api="https://api.github.com/orgs/"
else:
github_api=github_address+"/api/v3/orgs/"
return github_api
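# Illustrative example (not part of the original module) of the mapping above:
#   get_github_api("https://github.com")    -> "https://api.github.com/orgs/"
#   get_github_api("https://git.corp.tld")  -> "https://git.corp.tld/api/v3/orgs/"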
def make_bitconfig(require_func=requirements,special_reqs=special_reqs):
configdic={}
configdic=check_reqs(require_func,configdic,config_file=True, gitssh=None)
uhome=expanduser("~")+"/"
configfile=open(uhome+".bit_config","w+")
with open(uhome+".bit_config", 'w') as configfile:
json.dump(configdic, configfile)
os.chmod(uhome+".bit_config", stat.S_IRWXU )
print("Your bit config file as been generated:")
for c in configdic:
if "pass" not in c:
print( c, configdic.get(c) )
sys.stdout.flush()
elif configdic.get(c) == None:
print(c, configdic.get(c) )
sys.stdout.flush()
else:
print(c, "*")
sys.stdout.flush()
def read_bitconfig(showit=None,bit_config=".bit_config"):
uhome=expanduser("~")+"/"
with open(uhome+bit_config, 'r') as configfile:
configdic=json.load(configfile)
if showit:
for c in configdic:
if "pass" not in c:
print(c, configdic.get(c))
sys.stdout.flush()
elif configdic.get(c) == None:
print(c, configdic.get(c))
sys.stdout.flush()
else:
print(c, "*")
sys.stdout.flush()
return configdic
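# Illustrative sketch (not part of the original module): make_bitconfig() asks
# for all values interactively and writes them as JSON to ~/.bit_config with
# user-only permissions; read_bitconfig() loads that file back into a dict.
#
#   configdic = read_bitconfig(showit=True)
#   organization = configdic["github_organization"]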
def check_reqs(requirements,configdic,config_file=None, gitssh=None):
if "owncloud_address" in requirements:
configdic["owncloud_address"]=get_owncloud_address()
if "owncloud_upload_folder" in requirements:
configdic["owncloud_upload_folder"]=get_owncloud_upload_folder()
if "owncloud_download_folder" in requirements:
configdic["owncloud_download_folder"]=get_owncloud_download_folder()
if "owncloud_user" in requirements:
configdic["owncloud_user"]=get_owncloud_user(config_file=config_file)
if "owncloud_pass" in requirements:
configdic["owncloud_pass"]=get_owncloud_pass(config_file=config_file)
if "github_address" in requirements:
configdic["github_address"]=get_github_address()
if "github_organization" in requirements:
configdic["github_organization"]=get_github_organization()
if "github_user" in requirements:
configdic["github_user"]=get_github_user(config_file=config_file,gitssh=gitssh )
if "github_pass" in requirements:
configdic["github_pass"]=get_github_pass(config_file=config_file,gitssh=gitssh )
if "local_path" in requirements:
configdic["local_path"]=get_local_path()
if "user_group" in requirements:
configdic["user_group"]=get_user_group()
return configdic
def init_user(path_to_project,github_address,github_organization,github_repo,github_user=None,github_pass=None,gitssh=None):
user_name=getpass.getuser()
if not os.path.exists(path_to_project):
os.makedirs(path_to_project)
response=git.git_clone(path_to_project+"/scripts."+user_name , github_address, github_organization, github_repo, github_user=github_user, github_pass=github_pass, gitssh=gitssh)
response=git.git_clone(path_to_project+"/wiki."+user_name , github_address, github_organization, github_repo+".wiki", github_user=github_user, github_pass=github_pass, gitssh=gitssh)
if response == 1:
input("\n\n*************\n\nThe wiki for this project has not yet been created.\n\n Please go to %s/%s/%s/wiki and click on 'Create the first page' and then 'Save Page'.\n\nPress Enter once you have saved the first wiki page.\n\nOtherwise press enter to skip wiki creation.\n\n*************\n\n" %(github_address,github_organization,github_repo) )
response=git.git_clone(path_to_project+"/wiki."+user_name ,github_address,github_organization,github_repo+".wiki",github_user=github_user,github_pass=github_pass,gitssh=gitssh)
if response == 1:
shutil.rmtree(path_to_project+"/wiki."+user_name, ignore_errors=True)
print("Skipping wiki creation.")
sys.stdout.flush()
print("User initialized.")
    sys.stdout.flush()
 | mpg-age-bioinformatics/bit | bit/config.py | Python | mit | 9,252 | 0.016105 |
#!/usr/bin/env python3
"""
This module runs all the tests of the auxi package at once.
"""
import unittest
from auxi.core.objects_test import ObjectUnitTester
from auxi.core.objects_test import NamedObjectUnitTester
from auxi.core.time_test import ClockUnitTester
from auxi.tools.chemistry.stoichiometry_test import StoichFunctionTester
from auxi.tools.chemistry.thermochemistry_test import ThermoFunctionTester
from auxi.tools.materialphysicalproperties.core_test import DataSetTester
from auxi.tools.materialphysicalproperties.idealgas_test \
import BetaTTester, RhoTTester, RhoTPTester, RhoTPxTester
from auxi.tools.materialphysicalproperties.polynomial_test \
import PolynomialModelTTester
from auxi.tools.transportphenomena.heattransfer.naturalconvection_test \
import IsothermalFlatSurface_RegionTester, IsothermalFlatSurfaceTester
from auxi.tools.transportphenomena.dimensionlessquantities_test \
import DimensionlessQiantitiesTester
from auxi.modelling.process.materials.chem_test \
import ChemMaterialUnitTester, ChemMaterialPackageUnitTester
from auxi.modelling.process.materials.thermo_test \
import ThermoMaterialUnitTester
# from auxi.modelling.process.materials.thermo_test \
# import ThermoMaterialPackageUnitTester
from auxi.modelling.process.materials.psd_test \
import PsdMaterialUnitTester, PsdMaterialPackageUnitTester
from auxi.modelling.process.materials.slurry_test \
import SlurryMaterialUnitTester, SlurryMaterialPackageUnitTester
# MODELLING.FINANCIAL
from auxi.modelling.financial.des_test import GeneralLedgerAccountUnitTester
from auxi.modelling.financial.des_test import TransactionUnitTester
from auxi.modelling.financial.des_test import TransactionTemplateUnitTester
from auxi.modelling.financial.des_test import GeneralLedgerStructureUnitTester
from auxi.modelling.financial.des_test import GeneralLedgerUnitTester
from auxi.modelling.financial.reporting_test import GeneralLedgerStructureUnitTester
from auxi.modelling.financial.reporting_test import TransactionListUnitTester
# MODELLING.BUSINESS
from auxi.modelling.business.structure_test import ActivityUnitTester
from auxi.modelling.business.structure_test import ComponentUnitTester
from auxi.modelling.business.structure_test import EntityUnitTester
from auxi.modelling.business.basic_test import BasicActivityUnitTester
from auxi.modelling.business.basic_test import BasicLoanActivityUnitTester
from auxi.modelling.business.models_test import TimeBasedModelUnitTester
__version__ = '0.3.2'
__license__ = 'LGPL v3'
__copyright__ = 'Copyright 2016, Ex Mente Technologies (Pty) Ltd'
__author__ = 'Christoff Kok, Johan Zietsman'
__credits__ = ['Christoff Kok', 'Johan Zietsman']
__maintainer__ = 'Christoff Kok'
__email__ = 'christoff.kok@ex-mente.co.za'
__status__ = 'Planning'
if __name__ == '__main__':
unittest.main()
| christoffkok/auxi.0 | src/tests.py | Python | lgpl-3.0 | 2,865 | 0.000349 |
import itertools
import json
import re
import flask
from flask import request
from web.cache import cache
import rethinkdb as r
import web.api.api_util as api_util
import db
import util
api = flask.Blueprint("api", __name__, url_prefix="/api")
r_conn = db.util.r_conn
def _should_skip_get_plugins_cache():
"""Whether the current request to /api/plugins should not be cached."""
page = int(request.args.get('page', 1))
search = request.args.get('query', '')
# Only cache empty searches for now.
# TODO(david): Also cache simple category and tag searches. May also want
# to actually use a proper cache backend like Redis so we can
# arbitrarily cache (right now we use an in-memory cache).
should_cache = search == '' and (1 <= page <= 10)
return not should_cache
def _make_get_plugins_cache_key():
"""Get a cache key for the /api/plugins route.
By default this is just request.path which ignores query params.
"""
page = int(request.args.get('page', 1))
search = request.args.get('query', '')
return '%s_%s_%s' % (request.path, page, search)
# TODO(david): Consider saving categories just as special tags. Would make
# search implementation simpler but determining which category a plugin
# belongs to harder. See discussion on
# http://phabricator.benalpert.com/D171
def _apply_category_filters(results, tokens):
"""Consumes and applies category filters (e.g. "cat:other") to results.
Arguments:
results: List of search result plugins.
tokens: Remaining search text tokens that have not been consumed.
Returns:
(results, tokens): Results that match the given category, and tokens
that have not been consumed.
"""
category_filter = lambda t: t.startswith('cat:')
category_tokens = filter(category_filter, tokens)
tokens = list(itertools.ifilterfalse(category_filter, tokens))
if category_tokens:
category_ids = set(t[len('cat:'):] for t in category_tokens)
results = filter(lambda plugin:
plugin['category'] in category_ids, results)
return results, tokens
def _apply_tag_filters(results, tokens):
"""Consumes and applies tag filters (e.g. "tag:python") to search results.
Arguments:
results: List of search result plugins.
tokens: Remaining search text tokens that have not been consumed.
Returns:
(results, tokens): Results that match the given tag, and tokens
that have not been consumed.
"""
tag_filter = lambda t: t.startswith('tag:')
tag_tokens = filter(tag_filter, tokens)
tokens = list(itertools.ifilterfalse(tag_filter, tokens))
if tag_tokens:
required_tags = set(t[len('tag:'):] for t in tag_tokens)
results = filter(lambda plugin:
required_tags <= set(plugin['tags']), results)
return results, tokens
def _apply_keyword_filters(results, tokens):
"""Filters results that match the given keywords (tokens).
Arguments:
results: List of search result plugins.
tokens: Keywords to filter results on.
Returns:
List of plugins that match the given keywords.
"""
if tokens:
# Create a regex that matches a string S iff for each keyword K in
# `search` there is a corresponding word in S that begins with K.
tokens_regex = (r'\b%s' % re.escape(t) for t in tokens)
search_regex = re.compile('.*'.join(tokens_regex))
# Surprisingly, regex matching like this is slightly faster than
# prefix-matching two sorted lists of tokens.
results = filter(lambda plugin:
search_regex.search(plugin['keywords']), results)
return results
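# Illustrative example (not part of the original module) of the prefix matching
# above: for tokens ['py', 'syn'] the compiled pattern is roughly r'\bpy.*\bsyn',
# so a plugin whose keywords contain "python syntax" matches, while one whose
# keywords are only "numpy" does not (no word there starts with "py").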
@api.route('/plugins', methods=['GET'])
@cache.cached(timeout=60 * 60 * 25, key_prefix=_make_get_plugins_cache_key,
unless=_should_skip_get_plugins_cache)
def get_plugins():
RESULTS_PER_PAGE = 20
page = int(request.args.get('page', 1))
search = request.args.get('query', '')
results = get_search_index_cached()
if search:
tokens = [t.lower() for t in sorted(search.split())]
results, tokens = _apply_category_filters(results, tokens)
results, tokens = _apply_tag_filters(results, tokens)
results = _apply_keyword_filters(results, tokens)
count = len(results)
total_pages = (count + RESULTS_PER_PAGE - 1) / RESULTS_PER_PAGE # ceil
results = results[((page - 1) * RESULTS_PER_PAGE):
(page * RESULTS_PER_PAGE)]
return api_util.jsonify({
'plugins': results,
'total_pages': total_pages,
'total_results': count,
'results_per_page': RESULTS_PER_PAGE,
})
@api.route('/plugins/<slug>', methods=['GET'])
def get_plugin(slug):
plugin = r.table('plugins').get(slug).run(r_conn())
if plugin:
return api_util.jsonify(db.plugins.to_json(plugin))
else:
return api_util.api_not_found('No plugin with slug %s' % slug)
# TODO(david): Make it not so easy for an attacker to completely obliterate all
# of our tags, or at least be able to recover from it.
@api.route('/plugins/<slug>/tags', methods=['POST', 'PUT'])
def update_plugin_tags(slug):
data = json.loads(flask.request.data)
plugin = r.table('plugins').get(slug).run(r_conn())
if not plugin:
return api_util.api_not_found('No plugin with slug %s' % slug)
db.plugins.update_tags(plugin, data['tags'])
r.table('plugins').update(plugin).run(r_conn())
return api_util.jsonify({
'tags': plugin['tags']
})
@api.route('/tags', methods=['GET'])
@cache.cached(timeout=60 * 60)
def get_tags():
tags = r.table('tags').filter({}).run(r_conn())
return api_util.jsonify(list(tags))
@api.route('/categories', methods=['GET'])
@cache.cached(timeout=60 * 60)
def get_categories():
return api_util.jsonify(get_all_categories_cached())
@api.route('/plugins/<slug>/category/<category>', methods=['PUT'])
def update_plugin_category(slug, category):
plugin = r.table('plugins').get(slug).run(r_conn())
if not plugin:
return api_util.api_not_found('No plugin with slug %s' % slug)
if category not in (c['id'] for c in get_all_categories_cached()):
return api_util.api_bad_request('No such category %s' % category)
# TODO(david): Also update search index (stale cache)
plugin['category'] = category
r.table('plugins').update(plugin).run(r_conn())
return api_util.jsonify({
'category': plugin['category']
})
@api.route('/submit', methods=['POST'])
def submit_plugin():
plugin_data = flask.request.form.to_dict()
plugin_data['tags'] = json.loads(plugin_data['tags'])
db.submitted_plugins.insert(plugin_data)
plugin_markdown = "```\n%s\n```" % json.dumps(plugin_data, indent=4)
util.log_to_gitter("Someone just submitted a plugin!\n%s" % plugin_markdown)
return flask.redirect('/thanks-for-submitting')
@cache.cached(timeout=60 * 60 * 26, key_prefix='search_index')
def get_search_index_cached():
return db.plugins.get_search_index()
@cache.cached(timeout=60 * 60 * 27, key_prefix='all_categories')
def get_all_categories_cached():
return db.categories.get_all()
| vim-awesome/vim-awesome | web/api/api.py | Python | mit | 7,261 | 0.001102 |
#!/usr/bin/env python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that files include headers from allowed directories.
Checks DEPS files in the source tree for rules, and applies those rules to
"#include" commands in source files. Any source file including something not
permitted by the DEPS files will fail.
The format of the deps file:
First you have the normal module-level deps. These are the ones used by
gclient. An example would be:
deps = {
"base":"http://foo.bar/trunk/base"
}
DEPS files not in the top-level of a module won't need this. Then you have
any additional include rules. You can add (using "+") or subtract (using "-")
from the previously specified rules (including module-level deps).
  include_rules = [
# Code should be able to use base (it's specified in the module-level
# deps above), but nothing in "base/evil" because it's evil.
"-base/evil",
# But this one subdirectory of evil is OK.
"+base/evil/not",
# And it can include files from this other directory even though there is
# no deps rule for it.
"+tools/crime_fighter"
  ]
DEPS files may be placed anywhere in the tree. Each one applies to all
subdirectories, where there may be more DEPS files that provide additions or
subtractions for their own sub-trees.
There is an implicit rule for the current directory (where the DEPS file lives)
and all of its subdirectories. This prevents you from having to explicitly
allow the current directory everywhere. This implicit rule is applied first,
so you can modify or remove it using the normal include rules.
The rules are processed in order. This means you can explicitly allow a higher
directory and then take away permissions from sub-parts, or the reverse.
Note that all directory separators must be slashes (Unix-style) and not
backslashes. All directories should be relative to the source root and use
only lowercase.
"""
from __future__ import print_function
import os
import optparse
import pipes
import re
import sys
import copy
# Variable name used in the DEPS file to add or subtract include files from
# the module-level deps.
INCLUDE_RULES_VAR_NAME = "include_rules"
# Optionally present in the DEPS file to list subdirectories which should not
# be checked. This allows us to skip third party code, for example.
SKIP_SUBDIRS_VAR_NAME = "skip_child_includes"
# The maximum number of non-include lines we can see before giving up.
MAX_UNINTERESTING_LINES = 50
# The maximum line length, this is to be efficient in the case of very long
# lines (which can't be #includes).
MAX_LINE_LENGTH = 128
# Set to true for more output. This is set by the command line options.
VERBOSE = False
# This regular expression will be used to extract filenames from include
# statements.
EXTRACT_INCLUDE_PATH = re.compile('[ \t]*#[ \t]*(?:include|import)[ \t]+"(.*)"')
# In lowercase, using forward slashes as directory separators, ending in a
# forward slash. Set by the command line options.
BASE_DIRECTORY = ""
# The directories which contain the sources managed by git.
GIT_SOURCE_DIRECTORY = set()
# Specifies a single rule for an include, which can be either allow or disallow.
class Rule(object):
def __init__(self, allow, dir, source):
self._allow = allow
self._dir = dir
self._source = source
def __str__(self):
if (self._allow):
return '"+%s" from %s.' % (self._dir, self._source)
return '"-%s" from %s.' % (self._dir, self._source)
def ParentOrMatch(self, other):
"""Returns true if the input string is an exact match or is a parent
of the current rule. For example, the input "foo" would match "foo/bar"."""
return self._dir == other or self._dir.startswith(other + "/")
def ChildOrMatch(self, other):
"""Returns true if the input string would be covered by this rule. For
example, the input "foo/bar" would match the rule "foo"."""
return self._dir == other or other.startswith(self._dir + "/")
def ParseRuleString(rule_string, source):
"""Returns a tuple of a boolean indicating whether the directory is an allow
rule, and a string holding the directory name.
"""
if len(rule_string) < 1:
raise Exception('The rule string "%s" is too short\nin %s' %
(rule_string, source))
if rule_string[0] == "+":
return (True, rule_string[1:])
if rule_string[0] == "-":
return (False, rule_string[1:])
raise Exception('The rule string "%s" does not begin with a "+" or a "-"' %
rule_string)
class Rules:
def __init__(self):
"""Initializes the current rules with an empty rule list."""
self._rules = []
def __str__(self):
ret = "Rules = [\n"
ret += "\n".join([" %s" % x for x in self._rules])
ret += "]\n"
return ret
def AddRule(self, rule_string, source):
"""Adds a rule for the given rule string.
Args:
rule_string: The include_rule string read from the DEPS file to apply.
source: A string representing the location of that string (filename, etc.)
so that we can give meaningful errors.
"""
(add_rule, rule_dir) = ParseRuleString(rule_string, source)
# Remove any existing rules or sub-rules that apply. For example, if we're
# passed "foo", we should remove "foo", "foo/bar", but not "foobar".
self._rules = [x for x in self._rules if not x.ParentOrMatch(rule_dir)]
self._rules.insert(0, Rule(add_rule, rule_dir, source))
def DirAllowed(self, allowed_dir):
"""Returns a tuple (success, message), where success indicates if the given
directory is allowed given the current set of rules, and the message tells
why if the comparison failed."""
for rule in self._rules:
if rule.ChildOrMatch(allowed_dir):
# This rule applies.
if rule._allow:
return (True, "")
return (False, rule.__str__())
# No rules apply, fail.
return (False, "no rule applying")
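# Illustrative sketch (not part of the original script) of how rule ordering
# works: AddRule() inserts at the front of the list, so the most recently added
# rule that applies wins in DirAllowed().
#
#   rules = Rules()
#   rules.AddRule("+base", "example")
#   rules.AddRule("-base/evil", "example")
#   rules.DirAllowed("base/util")    # -> (True, "")
#   rules.DirAllowed("base/evil/x")  # -> (False, '"-base/evil" from example.')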
def ApplyRules(existing_rules, includes, cur_dir):
"""Applies the given include rules, returning the new rules.
Args:
existing_rules: A set of existing rules that will be combined.
    includes: The list of rules from the "include_rules" section of DEPS.
cur_dir: The current directory. We will create an implicit rule that
allows inclusion from this directory.
Returns: A new set of rules combining the existing_rules with the other
arguments.
"""
rules = copy.copy(existing_rules)
# First apply the implicit "allow" rule for the current directory.
if cur_dir.lower().startswith(BASE_DIRECTORY):
relative_dir = cur_dir[len(BASE_DIRECTORY) + 1:]
# Normalize path separators to slashes.
relative_dir = relative_dir.replace("\\", "/")
source = relative_dir
if len(source) == 0:
source = "top level" # Make the help string a little more meaningful.
rules.AddRule("+" + relative_dir, "Default rule for " + source)
else:
raise Exception("Internal error: base directory is not at the beginning" +
" for\n %s and base dir\n %s" %
(cur_dir, BASE_DIRECTORY))
# Last, apply the additional explicit rules.
for (index, rule_str) in enumerate(includes):
if not len(relative_dir):
rule_description = "the top level include_rules"
else:
rule_description = relative_dir + "'s include_rules"
rules.AddRule(rule_str, rule_description)
return rules
def ApplyDirectoryRules(existing_rules, dir_name):
"""Combines rules from the existing rules and the new directory.
Any directory can contain a DEPS file. Toplevel DEPS files can contain
module dependencies which are used by gclient. We use these, along with
additional include rules and implicit rules for the given directory, to
come up with a combined set of rules to apply for the directory.
Args:
existing_rules: The rules for the parent directory. We'll add-on to these.
dir_name: The directory name that the deps file may live in (if it exists).
This will also be used to generate the implicit rules.
Returns: A tuple containing: (1) the combined set of rules to apply to the
sub-tree, and (2) a list of all subdirectories that should NOT be
checked, as specified in the DEPS file (if any).
"""
  # Check for a .svn directory in this directory, or check whether this
  # directory is contained in the git source directories. This tells us if
  # it's a source directory that should be checked.
if not (os.path.exists(os.path.join(dir_name, ".svn")) or
(dir_name.lower() in GIT_SOURCE_DIRECTORY)):
return (None, [])
# Check the DEPS file in this directory.
if VERBOSE:
print("Applying rules from", dir_name)
def FromImpl(unused, unused2):
pass # NOP function so "From" doesn't fail.
def FileImpl(unused):
pass # NOP function so "File" doesn't fail.
class _VarImpl:
def __init__(self, local_scope):
self._local_scope = local_scope
def Lookup(self, var_name):
"""Implements the Var syntax."""
if var_name in self._local_scope.get("vars", {}):
return self._local_scope["vars"][var_name]
      raise Exception("Var is not defined: %s" % var_name)
local_scope = {}
global_scope = {
"File": FileImpl,
"From": FromImpl,
"Var": _VarImpl(local_scope).Lookup,
}
deps_file = os.path.join(dir_name, "DEPS")
if os.path.isfile(deps_file):
execfile(deps_file, global_scope, local_scope)
elif VERBOSE:
print(" No deps file found in", dir_name)
# Even if a DEPS file does not exist we still invoke ApplyRules
# to apply the implicit "allow" rule for the current directory
include_rules = local_scope.get(INCLUDE_RULES_VAR_NAME, [])
skip_subdirs = local_scope.get(SKIP_SUBDIRS_VAR_NAME, [])
return (ApplyRules(existing_rules, include_rules, dir_name), skip_subdirs)
def ShouldCheckFile(file_name):
"""Returns True if the given file is a type we want to check."""
checked_extensions = [
'.c',
'.cc',
'.h',
'.m',
'.mm',
# These are not the preferred extension in our codebase,
# but including them for good measure.
# (They do appear in the newlib toolchain + third_party libraries).
'.cpp',
'.hpp',
]
basename, extension = os.path.splitext(file_name)
return extension in checked_extensions
def CheckLine(rules, line):
"""Checks the given file with the given rule set.
Returns a tuple (is_include, illegal_description).
If the line is an #include directive the first value will be True.
If it is also an illegal include, the second value will be a string describing
the error. Otherwise, it will be None."""
found_item = EXTRACT_INCLUDE_PATH.match(line)
if not found_item:
return False, None # Not a match
include_path = found_item.group(1)
# Fix up backslashes in case somebody accidentally used them.
include_path.replace("\\", "/")
if include_path.find("/") < 0:
# Don't fail when no directory is specified. We may want to be more
# strict about this in the future.
if VERBOSE:
print(" WARNING: directory specified with no path: " + include_path)
return True, None
(allowed, why_failed) = rules.DirAllowed(include_path)
if not allowed:
if VERBOSE:
retval = "\nFor " + rules.__str__()
else:
retval = ""
return True, retval + ('Illegal include: "%s"\n Because of %s' %
(include_path, why_failed))
return True, None
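# Illustrative example (not part of the original script): with the rules from
# the sketch above, CheckLine(rules, '#include "base/util/foo.h"') returns
# (True, None), CheckLine(rules, '#include "base/evil/bar.h"') returns
# (True, 'Illegal include: ...'), and a non-#include line returns (False, None).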
def CheckFile(rules, file_name):
"""Checks the given file with the given rule set.
Args:
rules: The set of rules that apply to files in this directory.
file_name: The source file to check.
Returns: Either a string describing the error if there was one, or None if
the file checked out OK.
"""
if VERBOSE:
print("Checking: " + file_name)
ret_val = "" # We'll collect the error messages in here
last_include = 0
try:
cur_file = open(file_name, "r")
in_if0 = 0
for line_num in xrange(sys.maxint):
if line_num - last_include > MAX_UNINTERESTING_LINES:
break
cur_line = cur_file.readline(MAX_LINE_LENGTH)
if cur_line == "":
break
cur_line = cur_line.strip()
# Check to see if we're at / inside a #if 0 block
if cur_line == '#if 0':
in_if0 += 1
continue
if in_if0 > 0:
if cur_line.startswith('#if'):
in_if0 += 1
elif cur_line == '#endif':
in_if0 -= 1
continue
is_include, line_status = CheckLine(rules, cur_line)
if is_include:
last_include = line_num
if line_status is not None:
if len(line_status) > 0: # Add newline to separate messages.
line_status += "\n"
ret_val += line_status
cur_file.close()
  except IOError:
    # If open() itself failed there is nothing to close; files that were
    # opened successfully are closed on the normal path above.
    if VERBOSE:
      print("Unable to open file: " + file_name)
# Map empty string to None for easier checking.
if len(ret_val) == 0:
return None
return ret_val
def CheckDirectory(parent_rules, dir_name):
(rules, skip_subdirs) = ApplyDirectoryRules(parent_rules, dir_name)
if rules == None:
return True
# Collect a list of all files and directories to check.
files_to_check = []
dirs_to_check = []
success = True
contents = os.listdir(dir_name)
for cur in contents:
if cur in skip_subdirs:
continue # Don't check children that DEPS has asked us to skip.
full_name = os.path.join(dir_name, cur)
if os.path.isdir(full_name):
dirs_to_check.append(full_name)
elif ShouldCheckFile(full_name):
files_to_check.append(full_name)
# First check all files in this directory.
for cur in files_to_check:
file_status = CheckFile(rules, cur)
if file_status != None:
print("ERROR in " + cur + "\n" + file_status)
success = False
# Next recurse into the subdirectories.
for cur in dirs_to_check:
if not CheckDirectory(rules, cur):
success = False
return success
def GetGitSourceDirectory(root):
"""Returns a set of the directories to be checked.
Args:
root: The repository root where .git directory exists.
Returns:
A set of directories which contain sources managed by git.
"""
git_source_directory = set()
popen_out = os.popen("cd %s && git ls-files --full-name ." %
pipes.quote(root))
for line in popen_out.readlines():
dir_name = os.path.join(root, os.path.dirname(line))
# Add the directory as well as all the parent directories.
while dir_name != root:
git_source_directory.add(dir_name)
dir_name = os.path.dirname(dir_name)
git_source_directory.add(root)
return git_source_directory
def PrintUsage():
print("""Usage: python checkdeps.py [--root <root>] [tocheck]
--root Specifies the repository root. This defaults to "../../.." relative
to the script file. This will be correct given the normal location
of the script in "<root>/tools/checkdeps".
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything. Only one level deep is currently
supported, so you can say "chrome" but not "chrome/browser".
Examples:
python checkdeps.py
python checkdeps.py --root c:\\source chrome""")
def checkdeps(options, args):
global VERBOSE
if options.verbose:
VERBOSE = True
# Optional base directory of the repository.
global BASE_DIRECTORY
if not options.base_directory:
BASE_DIRECTORY = os.path.abspath(
os.path.join(os.path.abspath(os.path.dirname(__file__)), "../.."))
else:
BASE_DIRECTORY = os.path.abspath(options.base_directory)
# Figure out which directory we have to check.
if len(args) == 0:
# No directory to check specified, use the repository root.
start_dir = BASE_DIRECTORY
elif len(args) == 1:
# Directory specified. Start here. It's supposed to be relative to the
# base directory.
start_dir = os.path.abspath(os.path.join(BASE_DIRECTORY, args[0]))
else:
# More than one argument, we don't handle this.
PrintUsage()
return 1
print("Using base directory:", BASE_DIRECTORY)
print("Checking:", start_dir)
base_rules = Rules()
# The base directory should be lower case from here on since it will be used
# for substring matching on the includes, and we compile on case-insensitive
# systems. Plus, we always use slashes here since the include parsing code
# will also normalize to slashes.
BASE_DIRECTORY = BASE_DIRECTORY.lower()
BASE_DIRECTORY = BASE_DIRECTORY.replace("\\", "/")
start_dir = start_dir.replace("\\", "/")
if os.path.exists(os.path.join(BASE_DIRECTORY, ".git")):
global GIT_SOURCE_DIRECTORY
GIT_SOURCE_DIRECTORY = GetGitSourceDirectory(BASE_DIRECTORY)
success = CheckDirectory(base_rules, start_dir)
if not success:
print("\nFAILED\n")
return 1
print("\nSUCCESS\n")
return 0
def main():
option_parser = optparse.OptionParser()
option_parser.add_option("", "--root", default="", dest="base_directory",
help='Specifies the repository root. This defaults '
'to "../../.." relative to the script file, which '
'will normally be the repository root.')
option_parser.add_option("-v", "--verbose", action="store_true",
default=False, help="Print debug logging")
options, args = option_parser.parse_args()
return checkdeps(options, args)
if '__main__' == __name__:
sys.exit(main())
| endlessm/chromium-browser | native_client/tools/checkdeps/checkdeps.py | Python | bsd-3-clause | 17,715 | 0.008863 |
from typing import Optional
from gear.cloud_config import get_azure_config, get_gcp_config, get_global_config
from hailtop.aiocloud import aioazure, aiogoogle
from hailtop.aiotools.fs import AsyncFS, AsyncFSFactory
def get_identity_client(credentials_file: Optional[str] = None):
if credentials_file is None:
credentials_file = '/gsa-key/key.json'
cloud = get_global_config()['cloud']
if cloud == 'azure':
scopes = ['https://graph.microsoft.com/.default']
return aioazure.AzureGraphClient(
credentials_file=credentials_file,
scopes=scopes,
)
assert cloud == 'gcp', cloud
project = get_gcp_config().project
return aiogoogle.GoogleIAmClient(project, credentials_file=credentials_file)
def get_compute_client(credentials_file: Optional[str] = None):
if credentials_file is None:
credentials_file = '/gsa-key/key.json'
cloud = get_global_config()['cloud']
if cloud == 'azure':
azure_config = get_azure_config()
return aioazure.AzureComputeClient(azure_config.subscription_id, azure_config.resource_group)
assert cloud == 'gcp', cloud
project = get_gcp_config().project
return aiogoogle.GoogleComputeClient(project, credentials_file=credentials_file)
def get_cloud_async_fs(credentials_file: Optional[str] = None) -> AsyncFS:
if credentials_file is None:
credentials_file = '/gsa-key/key.json'
cloud = get_global_config()['cloud']
if cloud == 'azure':
return aioazure.AzureAsyncFS(credential_file=credentials_file)
assert cloud == 'gcp', cloud
return aiogoogle.GoogleStorageAsyncFS(credentials_file=credentials_file)
def get_cloud_async_fs_factory() -> AsyncFSFactory:
cloud = get_global_config()['cloud']
if cloud == 'azure':
return aioazure.AzureAsyncFSFactory()
assert cloud == 'gcp', cloud
return aiogoogle.GoogleStorageAsyncFSFactory()
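# Illustrative sketch (not part of the original module): callers pick the client
# that matches the global cloud config, e.g.
#
#   fs = get_cloud_async_fs()              # reads /gsa-key/key.json by default
#   compute = get_compute_client()
#
# Whether Azure or Google objects are returned depends solely on the 'cloud'
# value in the global config.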
| hail-is/hail | gear/gear/clients.py | Python | mit | 1,942 | 0.00206 |
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Dbus(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""D-Bus message bus"""
plugin_name = "dbus"
profiles = ('system',)
packages = ('dbus',)
def setup(self):
self.add_copy_spec([
"/etc/dbus-1",
"/var/lib/dbus/machine-id"
])
# vim: set et ts=4 sw=4 :
| nijinashok/sos | sos/plugins/dbus.py | Python | gpl-2.0 | 745 | 0 |
# Copyright (c) 2012 Citrix Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Aggregate admin API extension."""
import datetime
from webob import exc
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'aggregates')
def _get_context(req):
return req.environ['nova.context']
def get_host_from_body(fn):
"""Makes sure that the host exists."""
def wrapped(self, req, id, body, *args, **kwargs):
if len(body) == 1 and "host" in body:
host = body['host']
else:
raise exc.HTTPBadRequest()
return fn(self, req, id, host, *args, **kwargs)
return wrapped
class AggregateController(object):
"""The Host Aggregates API controller for the OpenStack API."""
def __init__(self):
self.api = compute_api.AggregateAPI()
def index(self, req):
"""Returns a list a host aggregate's id, name, availability_zone."""
context = _get_context(req)
authorize(context)
aggregates = self.api.get_aggregate_list(context)
return {'aggregates': [self._marshall_aggregate(a)['aggregate']
for a in aggregates]}
def create(self, req, body):
"""Creates an aggregate, given its name and availability_zone."""
context = _get_context(req)
authorize(context)
if len(body) != 1:
raise exc.HTTPBadRequest()
try:
host_aggregate = body["aggregate"]
name = host_aggregate["name"]
avail_zone = host_aggregate["availability_zone"]
except KeyError:
raise exc.HTTPBadRequest()
try:
utils.check_string_length(name, "Aggregate name", 1, 255)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
if len(host_aggregate) != 2:
raise exc.HTTPBadRequest()
try:
aggregate = self.api.create_aggregate(context, name, avail_zone)
except exception.AggregateNameExists as e:
LOG.info(e)
raise exc.HTTPConflict()
except exception.InvalidAggregateAction as e:
LOG.info(e)
raise
return self._marshall_aggregate(aggregate)
def show(self, req, id):
"""Shows the details of an aggregate, hosts and metadata included."""
context = _get_context(req)
authorize(context)
try:
aggregate = self.api.get_aggregate(context, id)
except exception.AggregateNotFound:
LOG.info(_("Cannot show aggregate: %s"), id)
raise exc.HTTPNotFound()
return self._marshall_aggregate(aggregate)
def update(self, req, id, body):
"""Updates the name and/or availability_zone of given aggregate."""
context = _get_context(req)
authorize(context)
if len(body) != 1:
raise exc.HTTPBadRequest()
try:
updates = body["aggregate"]
except KeyError:
raise exc.HTTPBadRequest()
if len(updates) < 1:
raise exc.HTTPBadRequest()
for key in updates.keys():
if key not in ["name", "availability_zone"]:
raise exc.HTTPBadRequest()
if 'name' in updates:
try:
utils.check_string_length(updates['name'], "Aggregate name", 1,
255)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
try:
aggregate = self.api.update_aggregate(context, id, updates)
except exception.AggregateNotFound:
LOG.info(_('Cannot update aggregate: %s'), id)
raise exc.HTTPNotFound()
return self._marshall_aggregate(aggregate)
def delete(self, req, id):
"""Removes an aggregate by id."""
context = _get_context(req)
authorize(context)
try:
self.api.delete_aggregate(context, id)
except exception.AggregateNotFound:
LOG.info(_('Cannot delete aggregate: %s'), id)
raise exc.HTTPNotFound()
def action(self, req, id, body):
_actions = {
'add_host': self._add_host,
'remove_host': self._remove_host,
'set_metadata': self._set_metadata,
}
for action, data in body.iteritems():
if action not in _actions.keys():
msg = _('Aggregates does not have %s action') % action
raise exc.HTTPBadRequest(explanation=msg)
return _actions[action](req, id, data)
raise exc.HTTPBadRequest(explanation=_("Invalid request body"))
@get_host_from_body
def _add_host(self, req, id, host):
"""Adds a host to the specified aggregate."""
context = _get_context(req)
authorize(context)
try:
aggregate = self.api.add_host_to_aggregate(context, id, host)
except (exception.AggregateNotFound, exception.ComputeHostNotFound):
LOG.info(_('Cannot add host %(host)s in aggregate %(id)s'),
{'host': host, 'id': id})
raise exc.HTTPNotFound()
except (exception.AggregateHostExists,
exception.InvalidAggregateAction) as e:
LOG.info(_('Cannot add host %(host)s in aggregate %(id)s'),
{'host': host, 'id': id})
raise exc.HTTPConflict(explanation=e.format_message())
return self._marshall_aggregate(aggregate)
@get_host_from_body
def _remove_host(self, req, id, host):
"""Removes a host from the specified aggregate."""
context = _get_context(req)
authorize(context)
try:
aggregate = self.api.remove_host_from_aggregate(context, id, host)
except (exception.AggregateNotFound, exception.AggregateHostNotFound,
exception.ComputeHostNotFound):
LOG.info(_('Cannot remove host %(host)s in aggregate %(id)s'),
{'host': host, 'id': id})
raise exc.HTTPNotFound()
except exception.InvalidAggregateAction:
LOG.info(_('Cannot remove host %(host)s in aggregate %(id)s'),
{'host': host, 'id': id})
raise exc.HTTPConflict()
return self._marshall_aggregate(aggregate)
def _set_metadata(self, req, id, body):
"""Replaces the aggregate's existing metadata with new metadata."""
context = _get_context(req)
authorize(context)
if len(body) != 1:
raise exc.HTTPBadRequest()
try:
metadata = body["metadata"]
except KeyError:
raise exc.HTTPBadRequest()
try:
aggregate = self.api.update_aggregate_metadata(context,
id, metadata)
except exception.AggregateNotFound:
LOG.info(_('Cannot set metadata %(metadata)s in aggregate %(id)s'),
{'metadata': metadata, 'id': id})
raise exc.HTTPNotFound()
return self._marshall_aggregate(aggregate)
def _marshall_aggregate(self, aggregate):
_aggregate = {}
for key, value in aggregate.items():
# NOTE(danms): The original API specified non-TZ-aware timestamps
if isinstance(value, datetime.datetime):
value = value.replace(tzinfo=None)
_aggregate[key] = value
return {"aggregate": _aggregate}
class Aggregates(extensions.ExtensionDescriptor):
"""Admin-only aggregate administration."""
name = "Aggregates"
alias = "os-aggregates"
namespace = "http://docs.openstack.org/compute/ext/aggregates/api/v1.1"
updated = "2012-01-12T00:00:00+00:00"
def __init__(self, ext_mgr):
ext_mgr.register(self)
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-aggregates',
AggregateController(),
member_actions={"action": "POST", })
resources.append(res)
return resources
| ntt-sic/nova | nova/api/openstack/compute/contrib/aggregates.py | Python | apache-2.0 | 8,962 | 0.000112 |
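# A compact sketch of the action-dispatch pattern in AggregateController.action()
# above: the request body carries exactly one action name, which is looked up in a
# handler table. BadRequest and the handler names here are illustrative only.
class BadRequest(Exception):
    pass
class MiniController:
    def _add_host(self, ident, data):
        return {'added': (ident, data)}
    def _remove_host(self, ident, data):
        return {'removed': (ident, data)}
    def action(self, ident, body):
        handlers = {'add_host': self._add_host, 'remove_host': self._remove_host}
        for name, data in body.items():  # the body holds a single action entry
            if name not in handlers:
                raise BadRequest('unknown action %s' % name)
            return handlers[name](ident, data)
        raise BadRequest('empty request body')
print(MiniController().action('agg-1', {'add_host': 'compute-7'}))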
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-07-02 11:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0022_query_amcat_options'),
]
operations = [
migrations.AlterField(
model_name='query',
name='amcat_query_id',
field=models.IntegerField(),
),
migrations.AlterUniqueTogether(
name='query',
unique_together=set([('system', 'amcat_query_id')]),
),
migrations.AlterModelTable(
name='querycache',
table=None,
),
]
| amcat/amcat-dashboard | dashboard/migrations/0023_auto_20180702_1140.py | Python | agpl-3.0 | 696 | 0 |
"""
__MT_post__indirectLink_S.py_____________________________________________________
Automatically generated AToM3 syntactic object (DO NOT MODIFY DIRECTLY)
Author: levi
Modified: Sun Aug 9 23:46:05 2015
_________________________________________________________________________________
"""
from ASGNode import *
from ATOM3Type import *
from ATOM3String import *
from graph_MT_post__indirectLink_S import *
class MT_post__indirectLink_S(ASGNode, ATOM3Type):
def __init__(self, parent = None):
ASGNode.__init__(self)
ATOM3Type.__init__(self)
self.superTypes = []
self.graphClass_ = graph_MT_post__indirectLink_S
self.isGraphObjectVisual = True
if(hasattr(self, '_setHierarchicalLink')):
self._setHierarchicalLink(False)
if(hasattr(self, '_setHierarchicalNode')):
self._setHierarchicalNode(False)
self.parent = parent
self.MT_label__=ATOM3String('', 20)
self.MT_pivotOut__=ATOM3String('', 20)
self.generatedAttributes = {'MT_label__': ('ATOM3String', ),
'MT_pivotOut__': ('ATOM3String', ) }
self.realOrder = ['MT_label__','MT_pivotOut__']
self.directEditing = [1,1]
def clone(self):
cloneObject = MT_post__indirectLink_S( self.parent )
for atr in self.realOrder:
cloneObject.setAttrValue(atr, self.getAttrValue(atr).clone() )
ASGNode.cloneActions(self, cloneObject)
return cloneObject
def copy(self, other):
ATOM3Type.copy(self, other)
for atr in self.realOrder:
self.setAttrValue(atr, other.getAttrValue(atr) )
ASGNode.copy(self, other)
def preCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preCondition(actionID, params)
else: return None
def postCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postCondition(actionID, params)
else: return None
def preAction (self, actionID, * params):
if actionID == self.CREATE:
self.autoIncrLabel(params)
if self.graphObject_:
return self.graphObject_.preAction(actionID, params)
else: return None
def postAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postAction(actionID, params)
else: return None
def QOCA(self, params):
"""
QOCA Constraint Template
NOTE: DO NOT select a POST/PRE action trigger
Constraints will be added/removed in a logical manner by other mechanisms.
"""
return # <--- Remove this if you want to use QOCA
# Get the high level constraint helper and solver
from Qoca.atom3constraints.OffsetConstraints import OffsetConstraints
oc = OffsetConstraints(self.parent.qocaSolver)
# Constraint only makes sense if there exists 2 objects connected to this link
if(not (self.in_connections_ and self.out_connections_)): return
# Get the graphical objects (subclass of graphEntity/graphLink)
graphicalObjectLink = self.graphObject_
graphicalObjectSource = self.in_connections_[0].graphObject_
graphicalObjectTarget = self.out_connections_[0].graphObject_
objTuple = (graphicalObjectSource, graphicalObjectTarget, graphicalObjectLink)
"""
Example constraint, see Kernel/QOCA/atom3constraints/OffsetConstraints.py
For more types of constraints
"""
oc.LeftExactDistance(objTuple, 20)
oc.resolve() # Resolve immediately after creating entity & constraint
def autoIncrLabel(self, params):
#===============================================================================
# Auto increment the label
#===============================================================================
# If there is already one, ignore
if not self.MT_label__.isNone(): return
      # Get the maximum label of all MT_post__ elements
label = 0
for nt in self.parent.ASGroot.listNodes:
if nt.startswith('MT_post__'):
for node in self.parent.ASGroot.listNodes[nt]:
currLabel = 0
try:
currLabel = int(node.MT_label__.getValue())
except:
pass
if currLabel > label:
label = currLabel
# The label of this instance will be the max label + 1
self.MT_label__.setValue(str(label + 1))
| levilucio/SyVOLT | GM2AUTOSAR_MM/MT_post__indirectLink_S.py | Python | mit | 4,553 | 0.028992 |
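# A small sketch of the auto-increment logic in autoIncrLabel() above: scan the
# existing string labels, keep the largest integer value, and hand out max + 1,
# silently skipping labels that are not numeric.
def next_label(existing_labels):
    label = 0
    for raw in existing_labels:
        try:
            value = int(raw)
        except (TypeError, ValueError):
            continue  # non-numeric labels are ignored, as in the generated code
        if value > label:
            label = value
    return str(label + 1)
print(next_label(['1', '3', 'x', '2']))  # prints '4'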
#! /usr/bin/env python
#
# python script to generate a valid stepfile for WRF cycling
#
import sys
import pandas
import calendar
filename = 'stepfile' # default filename
lleap = True # allow leap days (not used in some GCM calendars)
lecho = False
lperiod = False
dateargs = [] # list of date arguments passed to date_range
for arg in sys.argv[1:]:
if arg[:11] == '--interval=':
freq = arg[11:].lower() # remains a string and is interpreted by date_range
elif arg[:8] == '--steps=':
lperiod = True; periods = int(arg[8:]) + 1 # each step is bounded by two timestamps
elif arg == '-l' or arg == '--noleap':
    lleap = False # omit leap days to accommodate some GCM calendars
elif arg == '-e' or arg == '--echo':
lecho = True
elif arg == '-h' or arg == '--help':
print('')
print("Usage: "+sys.argv[0]+" [-e] [-h] [--interval=interval] [--steps=steps] begin-date [end-date]")
print(" Interval, begin-date and end-date or steps must be specified.")
print("")
print(" --interval= step spacing / interval (D=days, W=weeks, M=month)")
print(" --steps= number of steps in stepfile")
print(" -l | --noleap omit leap days (to accomodate some GCM calendars)")
print(" -e | --echo print steps to stdout instead of writing to stepfile")
print(" -h | --help print this message")
print('')
sys.exit(1)
else:
dateargs.append(arg)
# output patterns
lmonthly = False
dateform = '%Y-%m-%d_%H:%M:%S'
# N.B.: because pandas date_range always anchors intervals at the end of the month, we have to subtract one
# day and add it again later, in order to re-anchor at the first of the month
stepform = '%Y-%m-%d'
offset = pandas.DateOffset() # no offset
if 'w' in freq:
oo = 1 if '-sun' in freq else 0
offset = pandas.DateOffset(days=pandas.to_datetime(dateargs[0]).dayofweek + oo)
elif 'm' in freq:
lmonthly = True
stepform = '%Y-%m'
offset = pandas.DateOffset(days=pandas.to_datetime(dateargs[0]).day)
#print dateargs
begindate = pandas.to_datetime(dateargs[0]) - offset
# check input and generate datelist
if lperiod:
if len(dateargs) != 1: raise ValueError('Can only specify begin-date, if the number of periods is given.')
datelist = pandas.date_range(begindate, periods=periods, freq=freq) # generate datelist
else:
if len(dateargs) != 2: raise ValueError('Specify begin-date and end-date, if no number of periods is given.')
enddate = pandas.to_datetime(dateargs[1]) - offset
datelist = pandas.date_range(begindate, enddate, freq=freq) # generate datelist
# open file, if not writing to stdout
if not lecho: stepfile = open(filename, mode='w')
# iterate over dates (skip first)
lastdate = datelist[0] + offset # first element
llastleap = False
for date in datelist[1:]:
lcurrleap = False
currentdate = date + offset
# N.B.: offset is not the interval/frequency; it is an offset at the beginning of the month or week
if lmonthly:
mon = date.month +1
if mon == 2: maxdays = 29 if calendar.isleap(date.year) else 28
elif mon in [4, 6, 9, 11]: maxdays = 30
else: maxdays = 31
if currentdate > date + pandas.DateOffset(days=maxdays):
currentdate = date + pandas.DateOffset(days=maxdays)
# handle calendars without leap days (turn Feb. 29th into Mar. 1st)
if not lleap and calendar.isleap(currentdate.year) and ( currentdate.month==2 and currentdate.day==29 ):
lcurrleap = True
currentdate += pandas.DateOffset(days=1) # move one day ahead
# generate line for last step
# print currentdate.month,currentdate.day
if lleap or not (freq.lower()=='1d' and llastleap):
# skip if this is daily output, a leap day, and a non-leap-year calendar...
stepline = "{0:s} '{1:s}' '{2:s}'\n".format(lastdate.strftime(stepform),lastdate.strftime(dateform),
currentdate.strftime(dateform))
# write to appropriate output
if lecho: sys.stdout.write(stepline)
else: stepfile.write(stepline)
# remember last step
lastdate = currentdate
llastleap = lcurrleap
# close file
if not lecho: stepfile.close()
| aerler/WRF-Tools | Python/wrfrun/generateStepfile.py | Python | gpl-3.0 | 4,142 | 0.021004 |
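# A short demonstration (assuming pandas is installed) of the re-anchoring trick the
# script above relies on: month-based date_range anchors at month ends, so it
# subtracts a day-of-month offset first and adds it back to land on the 1st of each
# month. Newer pandas spells the month-end frequency 'ME' instead of 'M'.
import pandas as pd
start = pd.to_datetime('2000-01-01')
offset = pd.DateOffset(days=start.day)  # step back past the preceding month end
anchored = pd.date_range(start - offset, periods=4, freq='M') + offset
print(list(anchored.strftime('%Y-%m-%d')))
# ['2000-01-01', '2000-02-01', '2000-03-01', '2000-04-01']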
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Wrapper of Beam runners that's built for running and verifying e2e tests."""
from __future__ import absolute_import
from __future__ import print_function
import logging
import time
from apache_beam.internal import pickler
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TestOptions
from apache_beam.runners.dataflow.dataflow_runner import DataflowRunner
from apache_beam.runners.runner import PipelineState
__all__ = ['TestDataflowRunner']
# Dataflow takes up to 10 mins for the long tail of starting/stopping worker
# pool.
WAIT_IN_STATE_TIMEOUT = 10 * 60
_LOGGER = logging.getLogger(__name__)
class TestDataflowRunner(DataflowRunner):
def run_pipeline(self, pipeline, options):
"""Execute test pipeline and verify test matcher"""
test_options = options.view_as(TestOptions)
on_success_matcher = test_options.on_success_matcher
wait_duration = test_options.wait_until_finish_duration
is_streaming = options.view_as(StandardOptions).streaming
    # [BEAM-1889] Do not send this to remote workers; there is no need to
    # send this option to remote executors.
test_options.on_success_matcher = None
self.result = super(TestDataflowRunner, self).run_pipeline(
pipeline, options)
if self.result.has_job:
      # TODO(markflyhigh)(BEAM-1890): Use print since Nose doesn't show logs
# in some cases.
print('Worker logs: %s' % self.build_console_url(options))
try:
self.wait_until_in_state(PipelineState.RUNNING)
if is_streaming and not wait_duration:
_LOGGER.warning('Waiting indefinitely for streaming job.')
self.result.wait_until_finish(duration=wait_duration)
if on_success_matcher:
from hamcrest import assert_that as hc_assert_that
hc_assert_that(self.result, pickler.loads(on_success_matcher))
finally:
if not self.result.is_in_terminal_state():
self.result.cancel()
self.wait_until_in_state(PipelineState.CANCELLED)
return self.result
def build_console_url(self, options):
"""Build a console url of Dataflow job."""
project = options.view_as(GoogleCloudOptions).project
region_id = options.view_as(GoogleCloudOptions).region
job_id = self.result.job_id()
return (
'https://console.cloud.google.com/dataflow/jobsDetail/locations'
'/%s/jobs/%s?project=%s' % (region_id, job_id, project))
def wait_until_in_state(self, expected_state, timeout=WAIT_IN_STATE_TIMEOUT):
"""Wait until Dataflow pipeline enters a certain state."""
if not self.result.has_job:
raise IOError('Failed to get the Dataflow job id.')
start_time = time.time()
while time.time() - start_time <= timeout:
job_state = self.result.state
if self.result.is_in_terminal_state() or job_state == expected_state:
return job_state
time.sleep(5)
raise RuntimeError('Timeout after %d seconds while waiting for job %s '
'enters expected state %s. Current state is %s.' %
(timeout, self.result.job_id(),
expected_state, self.result.state))
| RyanSkraba/beam | sdks/python/apache_beam/runners/dataflow/test_dataflow_runner.py | Python | apache-2.0 | 4,039 | 0.003714 |
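# A stand-alone sketch of the wait_until_in_state() polling loop above: call a
# state-returning function until it reports the expected state or a terminal one,
# and raise once a timeout elapses. The state names here are illustrative, not
# Dataflow's PipelineState values.
import time
TERMINAL_STATES = {'DONE', 'FAILED', 'CANCELLED'}
def wait_for_state(get_state, expected_state, timeout=600, poll_interval=5):
    start_time = time.time()
    while time.time() - start_time <= timeout:
        state = get_state()
        if state == expected_state or state in TERMINAL_STATES:
            return state
        time.sleep(poll_interval)
    raise RuntimeError('timed out after %ds waiting for state %s'
                       % (timeout, expected_state))
states = iter(['PENDING', 'RUNNING'])
print(wait_for_state(lambda: next(states), 'RUNNING', timeout=30, poll_interval=0))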
from os import path
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from files.models import CaptionedFile
fixture_dir = path.join(path.abspath(path.dirname(__file__)), 'fixtures')
class CaptionedFileTestCase(TestCase):
def setUp(self):
self.captioned_file = CaptionedFile.objects.create(
caption="this is a file",
publication=path.join('pubtest.txt')
)
self.captioned_file.save()
def test_creation(self):
cf = CaptionedFile.objects.create(
caption="lo lo",
publication=path.join('pubtest.txt')
)
cf.save()
self.assertEqual(CaptionedFile.objects.count(), 2)
        # setUp() already created one
def test_update(self):
self.captioned_file.caption = "I like text files"
self.captioned_file.save()
cf = CaptionedFile.objects.get()
self.assertEqual(cf.caption, "I like text files")
def test_delete(self):
cf = CaptionedFile.objects.get()
cf.delete()
self.assertEqual(CaptionedFile.objects.count(), 0)
class MultiEncodedAdminFormTest(TestCase):
def setUp(self):
self.user = User(
username='admin',
is_staff=True,
is_superuser=True)
self.user.set_password('admin')
self.user.save()
self.create_url = reverse('admin2:example3_captioned_file_create')
| pydanny/django-admin2 | example/files/tests/test_models.py | Python | bsd-3-clause | 1,470 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# mainnav-reader - Version: 0.5.1
#
# Copyright (c) 2009-2013, Dennis Keitzel
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
from distutils.core import setup
from mainnav_reader.helper import check_requirements
if len(sys.argv) > 1 and sys.argv[1] == 'install':
check_requirements()
setup(
name='mainnav-reader',
version='0.5.1',
author='Dennis Keitzel',
author_email='dennis.keitzel@arcor.de',
url='http://code.google.com/p/mainnav-reader/',
description='This little tool has the ability to read out and delete tracklog data from mainnav gps devices',
license='BSD',
packages=['mainnav_reader'],
scripts=['mainnav-reader'],
)
| pbabik/mainnav-reader | setup.py | Python | bsd-2-clause | 1,980 | 0.002525 |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HUnitConnectCAG_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HUnitConnectCAG_ConnectedLHS
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HUnitConnectCAG_ConnectedLHS, self).__init__(name='HUnitConnectCAG_ConnectedLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """return True"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HUnitConnectCAG_ConnectedLHS')
self["equations"] = []
# Set the node attributes
# match class Channel(Channel) node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """return True"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__Channel"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Channel')
# Add the edges
self.add_edges([
])
# define evaluation methods for each match class.
def eval_attr11(self, attr_value, this):
return True
# define evaluation methods for each match association.
def constraint(self, PreNode, graph):
return True
| levilucio/SyVOLT | RSS2ATOM/contracts/unit/HUnitConnectCAG_ConnectedLHS.py | Python | mit | 1,301 | 0.032283 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Tatiana Mazur
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon.utils import fields
from horizon import workflows
from openstack_dashboard import api
class AddVPNServiceAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Name"))
description = forms.CharField(
initial="", required=False,
max_length=80, label=_("Description"))
router_id = forms.ChoiceField(label=_("Router"))
subnet_id = forms.ChoiceField(label=_("Subnet"))
admin_state_up = forms.BooleanField(label=_("Admin State"),
initial=True, required=False)
def __init__(self, request, *args, **kwargs):
super(AddVPNServiceAction, self).__init__(request, *args, **kwargs)
def populate_subnet_id_choices(self, request, context):
subnet_id_choices = [('', _("Select a Subnet"))]
try:
tenant_id = request.user.tenant_id
networks = api.neutron.network_list_for_tenant(request, tenant_id)
except Exception:
exceptions.handle(request,
_('Unable to retrieve networks list.'))
networks = []
for n in networks:
for s in n['subnets']:
subnet_id_choices.append((s.id, s.cidr))
self.fields['subnet_id'].choices = subnet_id_choices
return subnet_id_choices
def populate_router_id_choices(self, request, context):
router_id_choices = [('', _("Select a Router"))]
try:
routers = api.neutron.router_list(request)
except Exception:
exceptions.handle(request,
_('Unable to retrieve routers list.'))
routers = []
for r in routers:
router_id_choices.append((r.id, r.name))
self.fields['router_id'].choices = router_id_choices
return router_id_choices
class Meta:
name = _("Add New VPN Service")
permissions = ('openstack.services.network',)
help_text = _("Create VPN Service for current project.\n\n"
"Assign a name and description for the VPN Service. "
"Select a router and a subnet. "
"Admin State is Up (checked) by default."
)
class AddVPNServiceStep(workflows.Step):
action_class = AddVPNServiceAction
contributes = ("name", "description", "subnet_id",
"router_id", "admin_state_up")
def contribute(self, data, context):
context = super(AddVPNServiceStep, self).contribute(data, context)
if data:
return context
class AddVPNService(workflows.Workflow):
slug = "addvpnservice"
name = _("Add VPN Service")
finalize_button_name = _("Add")
success_message = _('Added VPN Service "%s".')
failure_message = _('Unable to add VPN Service "%s".')
success_url = "horizon:project:vpn:index"
default_steps = (AddVPNServiceStep,)
def format_status_message(self, message):
return message % self.context.get('name')
def handle(self, request, context):
try:
api.vpn.vpnservice_create(request, **context)
return True
except Exception:
return False
class AddIKEPolicyAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Name"))
description = forms.CharField(
initial="", required=False,
max_length=80, label=_("Description"))
auth_algorithm = forms.ChoiceField(label=_("Authorization algorithm"))
encryption_algorithm = forms.ChoiceField(label=_("Encryption algorithm"))
ike_version = forms.ChoiceField(label=_("IKE version"))
lifetime_units = forms.ChoiceField(label=_("Lifetime units for IKE keys"))
lifetime_value = forms.IntegerField(
min_value=60, label=_("Lifetime value for IKE keys"),
initial=3600,
help_text=_("Equal to or more than 60"))
pfs = forms.ChoiceField(label=_("Perfect Forward Secrecy"))
phase1_negotiation_mode = forms.ChoiceField(
label=_("IKE Phase1 negotiation mode"))
def __init__(self, request, *args, **kwargs):
super(AddIKEPolicyAction, self).__init__(request, *args, **kwargs)
auth_algorithm_choices = [("sha1", "sha1")]
self.fields['auth_algorithm'].choices = auth_algorithm_choices
encryption_algorithm_choices = [("3des", "3des"),
("aes-128", "aes-128"),
("aes-192", "aes-192"),
("aes-256", "aes-256")]
self.fields[
'encryption_algorithm'].choices = encryption_algorithm_choices
self.fields['encryption_algorithm'].initial = "aes-128"
# Currently this field has only one choice, so mark it as readonly.
self.fields['encryption_algorithm'].widget.attrs['readonly'] = True
ike_version_choices = [("v1", "v1"),
("v2", "v2")]
self.fields['ike_version'].choices = ike_version_choices
lifetime_units_choices = [("seconds", "seconds")]
self.fields['lifetime_units'].choices = lifetime_units_choices
# Currently this field has only one choice, so mark it as readonly.
self.fields['lifetime_units'].widget.attrs['readonly'] = True
pfs_choices = [("group2", "group2"),
("group5", "group5"),
("group14", "group14")]
self.fields['pfs'].choices = pfs_choices
self.fields['pfs'].initial = "group5"
phase1_neg_mode_choices = [("main", "main")]
self.fields[
'phase1_negotiation_mode'].choices = phase1_neg_mode_choices
# Currently this field has only one choice, so mark it as readonly.
self.fields['phase1_negotiation_mode'].widget.attrs['readonly'] = True
class Meta:
name = _("Add New IKE Policy")
permissions = ('openstack.services.network',)
help_text = _("Create IKE Policy for current project.\n\n"
"Assign a name and description for the IKE Policy. "
)
class AddIKEPolicyStep(workflows.Step):
action_class = AddIKEPolicyAction
contributes = ("name", "description", "auth_algorithm",
"encryption_algorithm", "ike_version",
"lifetime_units", "lifetime_value",
"pfs", "phase1_negotiation_mode")
def contribute(self, data, context):
context = super(AddIKEPolicyStep, self).contribute(data, context)
context.update({'lifetime': {'units': data['lifetime_units'],
'value': data['lifetime_value']}})
context.pop('lifetime_units')
context.pop('lifetime_value')
if data:
return context
class AddIKEPolicy(workflows.Workflow):
slug = "addikepolicy"
name = _("Add IKE Policy")
finalize_button_name = _("Add")
success_message = _('Added IKE Policy "%s".')
failure_message = _('Unable to add IKE Policy "%s".')
success_url = "horizon:project:vpn:index"
default_steps = (AddIKEPolicyStep,)
def format_status_message(self, message):
return message % self.context.get('name')
def handle(self, request, context):
try:
api.vpn.ikepolicy_create(request, **context)
return True
except Exception:
return False
class AddIPSecPolicyAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Name"))
description = forms.CharField(
initial="", required=False,
max_length=80, label=_("Description"))
auth_algorithm = forms.ChoiceField(label=_("Authorization algorithm"))
encapsulation_mode = forms.ChoiceField(label=_("Encapsulation mode"))
encryption_algorithm = forms.ChoiceField(label=_("Encryption algorithm"))
lifetime_units = forms.ChoiceField(label=_("Lifetime units"))
lifetime_value = forms.IntegerField(
min_value=60, label=_("Lifetime value for IKE keys "),
initial=3600,
help_text=_("Equal to or more than 60"))
pfs = forms.ChoiceField(label=_("Perfect Forward Secrecy"))
transform_protocol = forms.ChoiceField(label=_("Transform Protocol"))
def __init__(self, request, *args, **kwargs):
super(AddIPSecPolicyAction, self).__init__(request, *args, **kwargs)
auth_algorithm_choices = [("sha1", "sha1")]
self.fields['auth_algorithm'].choices = auth_algorithm_choices
# Currently this field has only one choice, so mark it as readonly.
self.fields['auth_algorithm'].widget.attrs['readonly'] = True
encapsulation_mode_choices = [("tunnel", "tunnel"),
("transport", "transport")]
self.fields['encapsulation_mode'].choices = encapsulation_mode_choices
encryption_algorithm_choices = [("3des", "3des"),
("aes-128", "aes-128"),
("aes-192", "aes-192"),
("aes-256", "aes-256")]
self.fields[
'encryption_algorithm'].choices = encryption_algorithm_choices
self.fields['encryption_algorithm'].initial = "aes-128"
lifetime_units_choices = [("seconds", "seconds")]
self.fields['lifetime_units'].choices = lifetime_units_choices
# Currently this field has only one choice, so mark it as readonly.
self.fields['lifetime_units'].widget.attrs['readonly'] = True
pfs_choices = [("group2", "group2"),
("group5", "group5"),
("group14", "group14")]
self.fields['pfs'].choices = pfs_choices
self.fields['pfs'].initial = "group5"
transform_protocol_choices = [("esp", "esp"),
("ah", "ah"),
("ah-esp", "ah-esp")]
self.fields['transform_protocol'].choices = transform_protocol_choices
class Meta:
name = _("Add New IPSec Policy")
permissions = ('openstack.services.network',)
help_text = _("Create IPSec Policy for current project.\n\n"
"Assign a name and description for the IPSec Policy. "
)
class AddIPSecPolicyStep(workflows.Step):
action_class = AddIPSecPolicyAction
contributes = ("name", "description", "auth_algorithm",
"encapsulation_mode", "encryption_algorithm",
"lifetime_units", "lifetime_value",
"pfs", "transform_protocol")
def contribute(self, data, context):
context = super(AddIPSecPolicyStep, self).contribute(data, context)
context.update({'lifetime': {'units': data['lifetime_units'],
'value': data['lifetime_value']}})
context.pop('lifetime_units')
context.pop('lifetime_value')
if data:
return context
class AddIPSecPolicy(workflows.Workflow):
slug = "addipsecpolicy"
name = _("Add IPSec Policy")
finalize_button_name = _("Add")
success_message = _('Added IPSec Policy "%s".')
failure_message = _('Unable to add IPSec Policy "%s".')
success_url = "horizon:project:vpn:index"
default_steps = (AddIPSecPolicyStep,)
def format_status_message(self, message):
return message % self.context.get('name')
def handle(self, request, context):
try:
api.vpn.ipsecpolicy_create(request, **context)
return True
except Exception:
return False
class AddIPSecSiteConnectionAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Name"))
description = forms.CharField(
initial="", required=False,
max_length=80, label=_("Description"))
vpnservice_id = forms.ChoiceField(
label=_("VPN Service associated with this connection"))
ikepolicy_id = forms.ChoiceField(
label=_("IKE Policy associated with this connection"))
ipsecpolicy_id = forms.ChoiceField(
label=_("IPSec Policy associated with this connection"))
peer_address = fields.IPField(
label=_("Peer gateway public IPv4/IPv6 Address or FQDN"),
help_text=_("Peer gateway public IPv4/IPv6 address or FQDN for "
"the VPN Connection"),
version=fields.IPv4 | fields.IPv6,
mask=False)
peer_id = fields.IPField(
label=_("Peer router identity for authentication (Peer ID)"),
help_text=_("Peer router identity for authentication. "
"Can be IPv4/IPv6 address, e-mail, key ID, or FQDN"),
version=fields.IPv4 | fields.IPv6,
mask=False)
peer_cidrs = fields.IPField(label=_("Remote peer subnet"),
help_text=_("Remote peer subnet address "
"with mask in CIDR format "
"(e.g. 20.1.0.0/24)"),
version=fields.IPv4 | fields.IPv6,
mask=True)
psk = forms.CharField(max_length=80,
label=_("Pre-Shared Key (PSK) string"))
def populate_ikepolicy_id_choices(self, request, context):
ikepolicy_id_choices = [('', _("Select IKE Policy"))]
try:
ikepolicies = api.vpn.ikepolicies_get(request)
except Exception:
exceptions.handle(request,
_('Unable to retrieve IKE Policies list.'))
ikepolicies = []
for p in ikepolicies:
ikepolicy_id_choices.append((p.id, p.name))
self.fields['ikepolicy_id'].choices = ikepolicy_id_choices
return ikepolicy_id_choices
def populate_ipsecpolicy_id_choices(self, request, context):
ipsecpolicy_id_choices = [('', _("Select IPSec Policy"))]
try:
ipsecpolicies = api.vpn.ipsecpolicies_get(request)
except Exception:
exceptions.handle(request,
_('Unable to retrieve IPSec Policies list.'))
ipsecpolicies = []
for p in ipsecpolicies:
ipsecpolicy_id_choices.append((p.id, p.name))
self.fields['ipsecpolicy_id'].choices = ipsecpolicy_id_choices
return ipsecpolicy_id_choices
def populate_vpnservice_id_choices(self, request, context):
vpnservice_id_choices = [('', _("Select VPN Service"))]
try:
vpnservices = api.vpn.vpnservices_get(request)
except Exception:
exceptions.handle(request,
_('Unable to retrieve VPN Services list.'))
vpnservices = []
for s in vpnservices:
vpnservice_id_choices.append((s.id, s.name))
self.fields['vpnservice_id'].choices = vpnservice_id_choices
return vpnservice_id_choices
class Meta:
name = _("Add New IPSec Site Connection")
permissions = ('openstack.services.network',)
help_text = _("Create IPSec Site Connection for current project.\n\n"
"Assign a name and description for the "
"IPSec Site Connection. "
"All fields in this tab are required."
)
class AddIPSecSiteConnectionStep(workflows.Step):
action_class = AddIPSecSiteConnectionAction
contributes = ("name", "description",
"vpnservice_id", "ikepolicy_id", "ipsecpolicy_id",
"peer_address", "peer_id", "peer_cidrs", "psk")
class AddIPSecSiteConnectionOptionalAction(workflows.Action):
mtu = forms.IntegerField(
min_value=68,
label=_("Maximum Transmission Unit size for the connection"),
initial=1500,
help_text=_("Equal to or more than 68 if the local subnet is IPv4. "
"Equal to or more than 1280 if the local subnet is IPv6."))
dpd_action = forms.ChoiceField(label=_("Dead peer detection actions"))
dpd_interval = forms.IntegerField(
min_value=1, label=_("Dead peer detection interval"),
initial=30,
help_text=_("Valid integer"))
dpd_timeout = forms.IntegerField(
min_value=1, label=_("Dead peer detection timeout"),
initial=120,
help_text=_("Valid integer greater than the DPD interval"))
initiator = forms.ChoiceField(label=_("Initiator state"))
admin_state_up = forms.BooleanField(label=_("Admin State"),
initial=True, required=False)
def __init__(self, request, *args, **kwargs):
super(AddIPSecSiteConnectionOptionalAction, self).__init__(
request, *args, **kwargs)
initiator_choices = [("bi-directional", "bi-directional"),
("response-only", "response-only")]
self.fields['initiator'].choices = initiator_choices
def populate_dpd_action_choices(self, request, context):
dpd_action_choices = [("hold", "hold"),
("clear", "clear"),
("disabled", "disabled"),
("restart", "restart"),
("restart-by-peer", "restart-by-peer")]
self.fields['dpd_action'].choices = dpd_action_choices
return dpd_action_choices
class Meta:
name = _("Optional Parameters")
permissions = ('openstack.services.network',)
help_text = _("Fields in this tab are optional. "
"You can configure the detail of "
"IPSec site connection created."
)
class AddIPSecSiteConnectionOptionalStep(workflows.Step):
action_class = AddIPSecSiteConnectionOptionalAction
contributes = ("dpd_action", "dpd_interval", "dpd_timeout",
"initiator", "mtu", "admin_state_up")
def contribute(self, data, context):
context = super(
AddIPSecSiteConnectionOptionalStep, self).contribute(data, context)
context.update({'dpd': {'action': data['dpd_action'],
'interval': data['dpd_interval'],
'timeout': data['dpd_timeout']}})
context.pop('dpd_action')
context.pop('dpd_interval')
context.pop('dpd_timeout')
if data:
return context
class AddIPSecSiteConnection(workflows.Workflow):
slug = "addipsecsiteconnection"
name = _("Add IPSec Site Connection")
finalize_button_name = _("Add")
success_message = _('Added IPSec Site Connection "%s".')
failure_message = _('Unable to add IPSec Site Connection "%s".')
success_url = "horizon:project:vpn:index"
default_steps = (AddIPSecSiteConnectionStep,
AddIPSecSiteConnectionOptionalStep)
def format_status_message(self, message):
return message % self.context.get('name')
def handle(self, request, context):
try:
api.vpn.ipsecsiteconnection_create(request, **context)
return True
except Exception:
return False
| Havate/havate-openstack | proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/dashboards/project/vpn/workflows.py | Python | apache-2.0 | 19,939 | 0 |
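# A minimal sketch of the populate_*_choices() pattern used by the workflow actions
# above: fetch options from a backend, degrade to an empty list on error, and keep a
# leading placeholder entry. fetch_routers() is a hypothetical stand-in for
# api.neutron.router_list(request).
def fetch_routers():
    return [{'id': 'r-1', 'name': 'edge'}, {'id': 'r-2', 'name': 'core'}]
def router_choices():
    choices = [('', 'Select a Router')]  # placeholder shown before a selection
    try:
        routers = fetch_routers()
    except Exception:
        routers = []  # fail soft so the form still renders
    for router in routers:
        choices.append((router['id'], router['name']))
    return choices
print(router_choices())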
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import unittest2
from jsonschema.exceptions import ValidationError
from st2common.models.system import actionchain
from st2tests.fixturesloader import FixturesLoader
FIXTURES_PACK = "generic"
TEST_FIXTURES = {
"actionchains": [
"chain1.yaml",
"malformedchain.yaml",
"no_default_chain.yaml",
"chain_with_vars.yaml",
"chain_with_publish.yaml",
]
}
FIXTURES = FixturesLoader().load_fixtures(
fixtures_pack=FIXTURES_PACK, fixtures_dict=TEST_FIXTURES
)
CHAIN_1 = FIXTURES["actionchains"]["chain1.yaml"]
MALFORMED_CHAIN = FIXTURES["actionchains"]["malformedchain.yaml"]
NO_DEFAULT_CHAIN = FIXTURES["actionchains"]["no_default_chain.yaml"]
CHAIN_WITH_VARS = FIXTURES["actionchains"]["chain_with_vars.yaml"]
CHAIN_WITH_PUBLISH = FIXTURES["actionchains"]["chain_with_publish.yaml"]
class ActionChainSchemaTest(unittest2.TestCase):
def test_actionchain_schema_valid(self):
chain = actionchain.ActionChain(**CHAIN_1)
self.assertEqual(len(chain.chain), len(CHAIN_1["chain"]))
self.assertEqual(chain.default, CHAIN_1["default"])
def test_actionchain_no_default(self):
chain = actionchain.ActionChain(**NO_DEFAULT_CHAIN)
self.assertEqual(len(chain.chain), len(NO_DEFAULT_CHAIN["chain"]))
self.assertEqual(chain.default, None)
def test_actionchain_with_vars(self):
chain = actionchain.ActionChain(**CHAIN_WITH_VARS)
self.assertEqual(len(chain.chain), len(CHAIN_WITH_VARS["chain"]))
self.assertEqual(len(chain.vars), len(CHAIN_WITH_VARS["vars"]))
def test_actionchain_with_publish(self):
chain = actionchain.ActionChain(**CHAIN_WITH_PUBLISH)
self.assertEqual(len(chain.chain), len(CHAIN_WITH_PUBLISH["chain"]))
self.assertEqual(
len(chain.chain[0].publish), len(CHAIN_WITH_PUBLISH["chain"][0]["publish"])
)
def test_actionchain_schema_invalid(self):
with self.assertRaises(ValidationError):
actionchain.ActionChain(**MALFORMED_CHAIN)
| nzlosh/st2 | st2common/tests/unit/test_actionchain_schema.py | Python | apache-2.0 | 2,701 | 0.00037 |
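# A small illustration (assuming the jsonschema package is installed) of the
# ValidationError behaviour the malformed-chain test above depends on: validating a
# document against a schema raises jsonschema.exceptions.ValidationError on mismatch.
# The schema below is a toy stand-in, not the real ActionChain schema.
from jsonschema import validate
from jsonschema.exceptions import ValidationError
schema = {
    'type': 'object',
    'required': ['chain'],
    'properties': {'chain': {'type': 'array'}},
}
validate(instance={'chain': []}, schema=schema)  # passes silently
try:
    validate(instance={'chain': 'not-a-list'}, schema=schema)
except ValidationError as e:
    print('rejected:', e.message)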
def first(l):
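    # Note: for a list, pop() with no argument removes and returns the *last*
    # element and mutates l; an empty sequence (or object without pop) returns None.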
try:
return l.pop()
except Exception:
return None
| jdavisp3/TigerShark | tigershark/facade/utils.py | Python | bsd-3-clause | 88 | 0.011364 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Base class and interface for tools.
'''
from __future__ import print_function
class Tool(object):
'''Base class for all tools. Tools should use their docstring (i.e. the
class-level docstring) for the help they want to have printed when they
are invoked.'''
#
# Interface (abstract methods)
#
def ShortDescription(self):
'''Returns a short description of the functionality of the tool.'''
raise NotImplementedError()
def Run(self, global_options, my_arguments):
'''Runs the tool.
Args:
global_options: object grit_runner.Options
my_arguments: [arg1 arg2 ...]
Return:
0 for success, non-0 for error
'''
raise NotImplementedError()
#
# Base class implementation
#
def __init__(self):
self.o = None
def ShowUsage(self):
'''Show usage text for this tool.'''
print(self.__doc__)
def SetOptions(self, opts):
self.o = opts
def Out(self, text):
'''Always writes out 'text'.'''
self.o.output_stream.write(text)
def VerboseOut(self, text):
'''Writes out 'text' if the verbose option is on.'''
if self.o.verbose:
self.o.output_stream.write(text)
def ExtraVerboseOut(self, text):
'''Writes out 'text' if the extra-verbose option is on.
'''
if self.o.extra_verbose:
self.o.output_stream.write(text)
| endlessm/chromium-browser | tools/grit/grit/tool/interface.py | Python | bsd-3-clause | 1,507 | 0.011944 |
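# A hedged sketch of a concrete tool built on the interface above: the class
# docstring doubles as the help text ShowUsage() prints. The import path is inferred
# from the file location above, and the options object is a fake carrying just the
# attributes SetOptions()/Out() expect.
import sys
from grit.tool.interface import Tool
class CountTool(Tool):
    '''Counts the arguments it is given and prints the total.'''
    def ShortDescription(self):
        return 'Count command-line arguments.'
    def Run(self, global_options, my_arguments):
        self.SetOptions(global_options)
        self.Out('counted %d arguments\n' % len(my_arguments))
        return 0
class FakeOptions:
    output_stream = sys.stdout
    verbose = False
    extra_verbose = False
print(CountTool().Run(FakeOptions(), ['a', 'b']))  # writes the count, returns 0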
"""
Global configuration for the problem settings
"""
import numpy as np
from scipy import stats
horizon = 300
runs = 40
DefaultConfiguration = {
"price_buy" : [1.2,2.1,3.3],
"price_sell" : [1,2,3],
"price_probabilities" : np.array([[0.8, 0.1, 0.1],[0.1, 0.8, 0.1],[0.1, 0.1, 0.8]]),
"initial_capacity" : 1,
"initial_inventory" : 0.5,
"degradation" : {"fun":"polynomial","charge":[0.0,0,0.01],
"discharge":[0.01,-0.02,0.01] },
"capacity_cost" : 1,
"change_capacity" : False # assume that the capacity does not change
}
def construct_martingale(prices, variance):
"""
    Constructs a problem definition with a martingale model of the transition probabilities.
The change in price is modeled as a normal distribution with zero mean and
the specified variance.
The capacity of the battery does in fact change
Parameters
----------
prices : array
**Sell** prices that correspond to states in the Martingale price state
process. **Buy** prices are 10% higher.
variance : float
Variance of the normal distribution
Returns
-------
out : dict
Configuration that corresponds to the martingale
"""
states = len(prices)
    # defines how many states the probability mass is spread over
spread = min(5,states-1)
if type(prices) is not np.ndarray:
prices = np.array(prices)
# relative transition probabilities
p = stats.norm(0,variance).pdf(np.arange(-spread,spread+1))
p = p / p.sum()
# add extra 0s to both ends of p
p = np.concatenate((np.zeros(states-spread-1), p, np.zeros(states-spread-1)))
P = [p[states-i-1:2*states-i-1] for i in range(states)]
P = np.array(P)
P = np.diag(1/P.sum(1)).dot(P)
configuration = {
"price_buy" : 1.1 * prices,
"price_sell" : prices,
"price_probabilities" : P,
"initial_capacity" : 1,
"initial_inventory" : 0.5,
"degradation" : {"fun":"polynomial","charge":[0.0,0,0.01],
"discharge":[0.01,0.02,0.01] },
"capacity_cost" : 1,
"change_capacity" : True # assume that the capacity does not change
}
return configuration
def construct_massdata(degrade):
"""
    Returns a problem definition based on what is described in the experimental
    section of the paper.
    This uses a simple uniform quantization of the energy prices.
    Parameters
----------
degrade : bool
Whether the battery degrades
"""
prices = np.array([25.0, 50.0, 75.0, 100.0, 125.0, 150.0, 175.0, 200.0, 250.0, 300.0])
P = np.array([[ 8.15584416e-01, 1.76623377e-01, 5.19480519e-03,
2.59740260e-03, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 4.70114171e-02, 8.72397582e-01, 7.25319006e-02,
7.38750839e-03, 0.00000000e+00, 6.71591672e-04,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 1.19904077e-03, 1.31894484e-01, 7.79376499e-01,
6.95443645e-02, 1.43884892e-02, 3.59712230e-03,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00],
[ 0.00000000e+00, 4.24528302e-02, 2.83018868e-01,
5.14150943e-01, 1.22641509e-01, 2.35849057e-02,
9.43396226e-03, 0.00000000e+00, 0.00000000e+00,
4.71698113e-03],
[ 0.00000000e+00, 2.15053763e-02, 9.67741935e-02,
2.68817204e-01, 4.30107527e-01, 1.29032258e-01,
4.30107527e-02, 1.07526882e-02, 0.00000000e+00,
0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00, 3.22580645e-02,
2.58064516e-01, 3.54838710e-01, 1.93548387e-01,
9.67741935e-02, 6.45161290e-02, 0.00000000e+00,
0.00000000e+00],
[ 0.00000000e+00, 7.14285714e-02, 1.42857143e-01,
0.00000000e+00, 7.14285714e-02, 2.14285714e-01,
2.85714286e-01, 1.42857143e-01, 7.14285714e-02,
0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00, 1.42857143e-01,
0.00000000e+00, 2.85714286e-01, 0.00000000e+00,
0.00000000e+00, 2.85714286e-01, 2.85714286e-01,
0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 2.50000000e-01, 2.50000000e-01,
2.50000000e-01, 0.00000000e+00, 2.50000000e-01,
0.00000000e+00],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
1.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00]])
if degrade:
degradation = {"fun":"polynomial","charge" : [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00142857142857143],
"discharge" : [0.0, 0.00500000000000000, -0.00750000000000000, 0.00500000000000000, -0.00125000000000000] }
else:
degradation = {"fun":"polynomial","charge" : [0.0],
"discharge" : [0.0] }
configuration = {
"price_buy" : 1.05 * prices,
"price_sell" : 0.95 * prices,
"price_probabilities" : P,
"initial_capacity" : 1,
"initial_inventory" : 0.5,
"degradation" : degradation,
"capacity_cost" : 20000,
"change_capacity" : True
}
return configuration
| marekpetrik/RAAM | raam/examples/inventory/configuration.py | Python | mit | 5,606 | 0.019265 |
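# A worked check (assuming numpy and scipy are available) of the banded transition
# matrix construct_martingale() builds above, rebuilt inline for a four-state price
# grid so the padding/slicing scheme and the row renormalisation are easy to verify.
import numpy as np
from scipy import stats
prices = np.array([1.0, 2.0, 3.0, 4.0])
variance = 1.0
states = len(prices)
spread = min(5, states - 1)  # how far the probability mass spreads
p = stats.norm(0, variance).pdf(np.arange(-spread, spread + 1))
p = p / p.sum()  # normalise the discretised kernel
p = np.concatenate((np.zeros(states - spread - 1), p, np.zeros(states - spread - 1)))
P = np.array([p[states - i - 1: 2 * states - i - 1] for i in range(states)])
P = np.diag(1 / P.sum(1)).dot(P)  # renormalise rows truncated at the boundary
assert np.allclose(P.sum(axis=1), 1.0)  # every row is a probability distribution
print(np.round(P, 3))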
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from ._testing import _make_credentials
from google.cloud.bigtable.cluster import Cluster
class TestInstance(unittest.TestCase):
PROJECT = "project"
INSTANCE_ID = "instance-id"
INSTANCE_NAME = "projects/" + PROJECT + "/instances/" + INSTANCE_ID
LOCATION_ID = "locid"
LOCATION = "projects/" + PROJECT + "/locations/" + LOCATION_ID
APP_PROFILE_PATH = (
"projects/" + PROJECT + "/instances/" + INSTANCE_ID + "/appProfiles/"
)
DISPLAY_NAME = "display_name"
LABELS = {"foo": "bar"}
OP_ID = 8915
OP_NAME = "operations/projects/{}/instances/{}operations/{}".format(
PROJECT, INSTANCE_ID, OP_ID
)
TABLE_ID = "table_id"
TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID
@staticmethod
def _get_target_class():
from google.cloud.bigtable.instance import Instance
return Instance
def _make_one(self, *args, **kwargs):
return self._get_target_class()(*args, **kwargs)
@staticmethod
def _get_target_client_class():
from google.cloud.bigtable.client import Client
return Client
def _make_client(self, *args, **kwargs):
return self._get_target_client_class()(*args, **kwargs)
def test_constructor_defaults(self):
client = object()
instance = self._make_one(self.INSTANCE_ID, client)
self.assertEqual(instance.instance_id, self.INSTANCE_ID)
self.assertEqual(instance.display_name, self.INSTANCE_ID)
self.assertIsNone(instance.type_)
self.assertIsNone(instance.labels)
self.assertIs(instance._client, client)
self.assertIsNone(instance.state)
def test_constructor_non_default(self):
from google.cloud.bigtable import enums
instance_type = enums.Instance.Type.DEVELOPMENT
state = enums.Instance.State.READY
labels = {"test": "test"}
client = object()
instance = self._make_one(
self.INSTANCE_ID,
client,
display_name=self.DISPLAY_NAME,
instance_type=instance_type,
labels=labels,
_state=state,
)
self.assertEqual(instance.instance_id, self.INSTANCE_ID)
self.assertEqual(instance.display_name, self.DISPLAY_NAME)
self.assertEqual(instance.type_, instance_type)
self.assertEqual(instance.labels, labels)
self.assertIs(instance._client, client)
self.assertEqual(instance.state, state)
def test__update_from_pb_success(self):
from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
from google.cloud.bigtable import enums
instance_type = enums.Instance.Type.PRODUCTION
state = enums.Instance.State.READY
instance_pb = data_v2_pb2.Instance(
display_name=self.DISPLAY_NAME,
type=instance_type,
labels=self.LABELS,
state=state,
)
instance = self._make_one(None, None)
self.assertIsNone(instance.display_name)
self.assertIsNone(instance.type_)
self.assertIsNone(instance.labels)
instance._update_from_pb(instance_pb)
self.assertEqual(instance.display_name, self.DISPLAY_NAME)
self.assertEqual(instance.type_, instance_type)
self.assertEqual(instance.labels, self.LABELS)
self.assertEqual(instance._state, state)
def test__update_from_pb_success_defaults(self):
from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
from google.cloud.bigtable import enums
instance_pb = data_v2_pb2.Instance(display_name=self.DISPLAY_NAME)
instance = self._make_one(None, None)
self.assertIsNone(instance.display_name)
self.assertIsNone(instance.type_)
self.assertIsNone(instance.labels)
instance._update_from_pb(instance_pb)
self.assertEqual(instance.display_name, self.DISPLAY_NAME)
self.assertEqual(instance.type_, enums.Instance.Type.UNSPECIFIED)
self.assertFalse(instance.labels)
def test__update_from_pb_no_display_name(self):
from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
instance_pb = data_v2_pb2.Instance()
instance = self._make_one(None, None)
self.assertIsNone(instance.display_name)
with self.assertRaises(ValueError):
instance._update_from_pb(instance_pb)
def test_from_pb_success(self):
from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
from google.cloud.bigtable import enums
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
instance_type = enums.Instance.Type.PRODUCTION
state = enums.Instance.State.READY
instance_pb = data_v2_pb2.Instance(
name=self.INSTANCE_NAME,
display_name=self.INSTANCE_ID,
type=instance_type,
labels=self.LABELS,
state=state,
)
klass = self._get_target_class()
instance = klass.from_pb(instance_pb, client)
self.assertIsInstance(instance, klass)
self.assertEqual(instance._client, client)
self.assertEqual(instance.instance_id, self.INSTANCE_ID)
self.assertEqual(instance.display_name, self.INSTANCE_ID)
self.assertEqual(instance.type_, instance_type)
self.assertEqual(instance.labels, self.LABELS)
self.assertEqual(instance._state, state)
def test_from_pb_bad_instance_name(self):
from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
instance_name = "INCORRECT_FORMAT"
instance_pb = data_v2_pb2.Instance(name=instance_name)
klass = self._get_target_class()
with self.assertRaises(ValueError):
klass.from_pb(instance_pb, None)
    def test_from_pb_project_mismatch(self):
from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
ALT_PROJECT = "ALT_PROJECT"
credentials = _make_credentials()
client = self._make_client(
project=ALT_PROJECT, credentials=credentials, admin=True
)
self.assertNotEqual(self.PROJECT, ALT_PROJECT)
instance_pb = data_v2_pb2.Instance(name=self.INSTANCE_NAME)
klass = self._get_target_class()
with self.assertRaises(ValueError):
klass.from_pb(instance_pb, client)
def test_name_property(self):
from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
        # Patch the API method.
client._instance_admin_client = api
instance = self._make_one(self.INSTANCE_ID, client)
self.assertEqual(instance.name, self.INSTANCE_NAME)
def test___eq__(self):
client = object()
instance1 = self._make_one(self.INSTANCE_ID, client)
instance2 = self._make_one(self.INSTANCE_ID, client)
self.assertEqual(instance1, instance2)
def test___eq__type_differ(self):
client = object()
instance1 = self._make_one(self.INSTANCE_ID, client)
instance2 = object()
self.assertNotEqual(instance1, instance2)
def test___ne__same_value(self):
client = object()
instance1 = self._make_one(self.INSTANCE_ID, client)
instance2 = self._make_one(self.INSTANCE_ID, client)
comparison_val = instance1 != instance2
self.assertFalse(comparison_val)
def test___ne__(self):
instance1 = self._make_one("instance_id1", "client1")
instance2 = self._make_one("instance_id2", "client2")
self.assertNotEqual(instance1, instance2)
def test_create_check_location_and_clusters(self):
instance = self._make_one(self.INSTANCE_ID, None)
with self.assertRaises(ValueError):
instance.create(location_id=self.LOCATION_ID, clusters=[object(), object()])
def test_create_check_serve_nodes_and_clusters(self):
instance = self._make_one(self.INSTANCE_ID, None)
with self.assertRaises(ValueError):
instance.create(serve_nodes=3, clusters=[object(), object()])
def test_create_check_default_storage_type_and_clusters(self):
instance = self._make_one(self.INSTANCE_ID, None)
with self.assertRaises(ValueError):
instance.create(default_storage_type=1, clusters=[object(), object()])
def _instance_api_response_for_create(self):
import datetime
from google.api_core import operation
from google.longrunning import operations_pb2
from google.protobuf.any_pb2 import Any
from google.cloud._helpers import _datetime_to_pb_timestamp
from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
from google.cloud.bigtable_admin_v2.proto import (
bigtable_instance_admin_pb2 as messages_v2_pb2,
)
from google.cloud.bigtable_admin_v2.types import instance_pb2
NOW = datetime.datetime.utcnow()
NOW_PB = _datetime_to_pb_timestamp(NOW)
metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB)
type_url = "type.googleapis.com/{}".format(
messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name
)
response_pb = operations_pb2.Operation(
name=self.OP_NAME,
metadata=Any(type_url=type_url, value=metadata.SerializeToString()),
)
response = operation.from_gapic(
response_pb,
mock.Mock(),
instance_pb2.Instance,
metadata_type=messages_v2_pb2.CreateInstanceMetadata,
)
project_path_template = "projects/{}"
location_path_template = "projects/{}/locations/{}"
instance_api = mock.create_autospec(
bigtable_instance_admin_client.BigtableInstanceAdminClient
)
instance_api.create_instance.return_value = response
instance_api.project_path = project_path_template.format
instance_api.location_path = location_path_template.format
return instance_api, response
def test_create(self):
from google.cloud.bigtable import enums
from google.cloud.bigtable_admin_v2.types import instance_pb2
import warnings
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
instance = self._make_one(
self.INSTANCE_ID,
client,
self.DISPLAY_NAME,
enums.Instance.Type.PRODUCTION,
self.LABELS,
)
instance_api, response = self._instance_api_response_for_create()
client._instance_admin_client = instance_api
serve_nodes = 3
with warnings.catch_warnings(record=True) as warned:
result = instance.create(
location_id=self.LOCATION_ID, serve_nodes=serve_nodes
)
cluster_pb = instance_pb2.Cluster(
location=instance_api.location_path(self.PROJECT, self.LOCATION_ID),
serve_nodes=serve_nodes,
default_storage_type=enums.StorageType.UNSPECIFIED,
)
instance_pb = instance_pb2.Instance(
display_name=self.DISPLAY_NAME,
type=enums.Instance.Type.PRODUCTION,
labels=self.LABELS,
)
cluster_id = "{}-cluster".format(self.INSTANCE_ID)
instance_api.create_instance.assert_called_once_with(
parent=instance_api.project_path(self.PROJECT),
instance_id=self.INSTANCE_ID,
instance=instance_pb,
clusters={cluster_id: cluster_pb},
)
self.assertEqual(len(warned), 1)
self.assertIs(warned[0].category, DeprecationWarning)
self.assertIs(result, response)
def test_create_w_clusters(self):
from google.cloud.bigtable import enums
from google.cloud.bigtable_admin_v2.types import instance_pb2
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
instance = self._make_one(
self.INSTANCE_ID,
client,
self.DISPLAY_NAME,
enums.Instance.Type.PRODUCTION,
self.LABELS,
)
instance_api, response = self._instance_api_response_for_create()
client._instance_admin_client = instance_api
# Perform the method and check the result.
cluster_id_1 = "cluster-1"
cluster_id_2 = "cluster-2"
location_id_1 = "location-id-1"
location_id_2 = "location-id-2"
serve_nodes_1 = 3
serve_nodes_2 = 5
clusters = [
Cluster(
cluster_id_1,
instance,
location_id=location_id_1,
serve_nodes=serve_nodes_1,
),
Cluster(
cluster_id_2,
instance,
location_id=location_id_2,
serve_nodes=serve_nodes_2,
),
]
result = instance.create(clusters=clusters)
cluster_pb_1 = instance_pb2.Cluster(
location=instance_api.location_path(self.PROJECT, location_id_1),
serve_nodes=serve_nodes_1,
default_storage_type=enums.StorageType.UNSPECIFIED,
)
cluster_pb_2 = instance_pb2.Cluster(
location=instance_api.location_path(self.PROJECT, location_id_2),
serve_nodes=serve_nodes_2,
default_storage_type=enums.StorageType.UNSPECIFIED,
)
instance_pb = instance_pb2.Instance(
display_name=self.DISPLAY_NAME,
type=enums.Instance.Type.PRODUCTION,
labels=self.LABELS,
)
instance_api.create_instance.assert_called_once_with(
parent=instance_api.project_path(self.PROJECT),
instance_id=self.INSTANCE_ID,
instance=instance_pb,
clusters={cluster_id_1: cluster_pb_1, cluster_id_2: cluster_pb_2},
)
self.assertIs(result, response)
def test_exists(self):
from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
from google.api_core import exceptions
api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
# Create response_pb
instance_name = client.instance_admin_client.instance_path(
self.PROJECT, self.INSTANCE_ID
)
response_pb = data_v2_pb2.Instance(name=instance_name)
# Patch the stub used by the API method.
client._instance_admin_client = api
instance_admin_client = client._instance_admin_client
instance_stub = instance_admin_client.transport
instance_stub.get_instance.side_effect = [
response_pb,
exceptions.NotFound("testing"),
exceptions.BadRequest("testing"),
]
# Perform the method and check the result.
non_existing_instance_id = "instance-id-2"
alt_instance_1 = self._make_one(self.INSTANCE_ID, client)
alt_instance_2 = self._make_one(non_existing_instance_id, client)
self.assertTrue(alt_instance_1.exists())
self.assertFalse(alt_instance_2.exists())
with self.assertRaises(exceptions.BadRequest):
alt_instance_2.exists()
def test_reload(self):
from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
from google.cloud.bigtable import enums
api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
instance = self._make_one(self.INSTANCE_ID, client)
# Create response_pb
DISPLAY_NAME = u"hey-hi-hello"
instance_type = enums.Instance.Type.PRODUCTION
response_pb = data_v2_pb2.Instance(
display_name=DISPLAY_NAME, type=instance_type, labels=self.LABELS
)
# Patch the stub used by the API method.
client._instance_admin_client = api
bigtable_instance_stub = client._instance_admin_client.transport
bigtable_instance_stub.get_instance.side_effect = [response_pb]
# Create expected_result.
expected_result = None # reload() has no return value.
# Check Instance optional config values before.
self.assertEqual(instance.display_name, self.INSTANCE_ID)
# Perform the method and check the result.
result = instance.reload()
self.assertEqual(result, expected_result)
        # Check Instance optional config values after.
self.assertEqual(instance.display_name, DISPLAY_NAME)
def _instance_api_response_for_update(self):
import datetime
from google.api_core import operation
from google.longrunning import operations_pb2
from google.protobuf.any_pb2 import Any
from google.cloud._helpers import _datetime_to_pb_timestamp
from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
from google.cloud.bigtable_admin_v2.proto import (
bigtable_instance_admin_pb2 as messages_v2_pb2,
)
from google.cloud.bigtable_admin_v2.types import instance_pb2
NOW = datetime.datetime.utcnow()
NOW_PB = _datetime_to_pb_timestamp(NOW)
metadata = messages_v2_pb2.UpdateInstanceMetadata(request_time=NOW_PB)
type_url = "type.googleapis.com/{}".format(
messages_v2_pb2.UpdateInstanceMetadata.DESCRIPTOR.full_name
)
response_pb = operations_pb2.Operation(
name=self.OP_NAME,
metadata=Any(type_url=type_url, value=metadata.SerializeToString()),
)
response = operation.from_gapic(
response_pb,
mock.Mock(),
instance_pb2.Instance,
metadata_type=messages_v2_pb2.UpdateInstanceMetadata,
)
instance_path_template = "projects/{project}/instances/{instance}"
instance_api = mock.create_autospec(
bigtable_instance_admin_client.BigtableInstanceAdminClient
)
instance_api.partial_update_instance.return_value = response
instance_api.instance_path = instance_path_template.format
return instance_api, response
def test_update(self):
from google.cloud.bigtable import enums
from google.protobuf import field_mask_pb2
from google.cloud.bigtable_admin_v2.types import instance_pb2
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
instance = self._make_one(
self.INSTANCE_ID,
client,
display_name=self.DISPLAY_NAME,
instance_type=enums.Instance.Type.DEVELOPMENT,
labels=self.LABELS,
)
instance_api, response = self._instance_api_response_for_update()
client._instance_admin_client = instance_api
result = instance.update()
instance_pb = instance_pb2.Instance(
name=instance.name,
display_name=instance.display_name,
type=instance.type_,
labels=instance.labels,
)
update_mask_pb = field_mask_pb2.FieldMask(
paths=["display_name", "type", "labels"]
)
instance_api.partial_update_instance.assert_called_once_with(
instance=instance_pb, update_mask=update_mask_pb
)
self.assertIs(result, response)
def test_update_empty(self):
from google.protobuf import field_mask_pb2
from google.cloud.bigtable_admin_v2.types import instance_pb2
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
instance = self._make_one(None, client)
instance_api, response = self._instance_api_response_for_update()
client._instance_admin_client = instance_api
result = instance.update()
instance_pb = instance_pb2.Instance(
name=instance.name,
display_name=instance.display_name,
type=instance.type_,
labels=instance.labels,
)
update_mask_pb = field_mask_pb2.FieldMask()
instance_api.partial_update_instance.assert_called_once_with(
instance=instance_pb, update_mask=update_mask_pb
)
self.assertIs(result, response)
def test_delete(self):
from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
instance = self._make_one(self.INSTANCE_ID, client)
instance_api = mock.create_autospec(
bigtable_instance_admin_client.BigtableInstanceAdminClient
)
instance_api.delete_instance.return_value = None
client._instance_admin_client = instance_api
result = instance.delete()
instance_api.delete_instance.assert_called_once_with(instance.name)
self.assertIsNone(result)
def test_get_iam_policy(self):
from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
from google.iam.v1 import policy_pb2
from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
instance = self._make_one(self.INSTANCE_ID, client)
version = 1
etag = b"etag_v1"
members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}]
iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
# Patch the stub used by the API method.
instance_api = mock.create_autospec(
bigtable_instance_admin_client.BigtableInstanceAdminClient
)
client._instance_admin_client = instance_api
instance_api.get_iam_policy.return_value = iam_policy
# Perform the method and check the result.
result = instance.get_iam_policy()
instance_api.get_iam_policy.assert_called_once_with(resource=instance.name)
self.assertEqual(result.version, version)
self.assertEqual(result.etag, etag)
admins = result.bigtable_admins
self.assertEqual(len(admins), len(members))
for found, expected in zip(sorted(admins), sorted(members)):
self.assertEqual(found, expected)
def test_set_iam_policy(self):
from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
from google.iam.v1 import policy_pb2
from google.cloud.bigtable.policy import Policy
from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
instance = self._make_one(self.INSTANCE_ID, client)
version = 1
etag = b"etag_v1"
members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}]
iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
# Patch the stub used by the API method.
instance_api = mock.create_autospec(
bigtable_instance_admin_client.BigtableInstanceAdminClient
)
instance_api.set_iam_policy.return_value = iam_policy_pb
client._instance_admin_client = instance_api
# Perform the method and check the result.
iam_policy = Policy(etag=etag, version=version)
iam_policy[BIGTABLE_ADMIN_ROLE] = [
Policy.user("user1@test.com"),
Policy.service_account("service_acc1@test.com"),
]
result = instance.set_iam_policy(iam_policy)
instance_api.set_iam_policy.assert_called_once_with(
resource=instance.name, policy=iam_policy_pb
)
self.assertEqual(result.version, version)
self.assertEqual(result.etag, etag)
admins = result.bigtable_admins
self.assertEqual(len(admins), len(members))
for found, expected in zip(sorted(admins), sorted(members)):
self.assertEqual(found, expected)
def test_test_iam_permissions(self):
from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
from google.iam.v1 import iam_policy_pb2
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
instance = self._make_one(self.INSTANCE_ID, client)
permissions = ["bigtable.tables.create", "bigtable.clusters.create"]
response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions)
instance_api = mock.create_autospec(
bigtable_instance_admin_client.BigtableInstanceAdminClient
)
instance_api.test_iam_permissions.return_value = response
client._instance_admin_client = instance_api
result = instance.test_iam_permissions(permissions)
self.assertEqual(result, permissions)
instance_api.test_iam_permissions.assert_called_once_with(
resource=instance.name, permissions=permissions
)
def test_cluster_factory(self):
from google.cloud.bigtable import enums
CLUSTER_ID = "{}-cluster".format(self.INSTANCE_ID)
LOCATION_ID = "us-central1-c"
SERVE_NODES = 3
STORAGE_TYPE = enums.StorageType.HDD
instance = self._make_one(self.INSTANCE_ID, None)
cluster = instance.cluster(
CLUSTER_ID,
location_id=LOCATION_ID,
serve_nodes=SERVE_NODES,
default_storage_type=STORAGE_TYPE,
)
self.assertIsInstance(cluster, Cluster)
self.assertEqual(cluster.cluster_id, CLUSTER_ID)
self.assertEqual(cluster.location_id, LOCATION_ID)
self.assertIsNone(cluster._state)
self.assertEqual(cluster.serve_nodes, SERVE_NODES)
self.assertEqual(cluster.default_storage_type, STORAGE_TYPE)
def test_list_clusters(self):
from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
from google.cloud.bigtable_admin_v2.proto import (
bigtable_instance_admin_pb2 as messages_v2_pb2,
)
from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
from google.cloud.bigtable.instance import Instance
from google.cloud.bigtable.instance import Cluster
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
instance = Instance(self.INSTANCE_ID, client)
failed_location = "FAILED"
cluster_id1 = "cluster-id1"
cluster_id2 = "cluster-id2"
cluster_path_template = "projects/{}/instances/{}/clusters/{}"
cluster_name1 = cluster_path_template.format(
self.PROJECT, self.INSTANCE_ID, cluster_id1
)
cluster_name2 = cluster_path_template.format(
self.PROJECT, self.INSTANCE_ID, cluster_id2
)
# Create response_pb
response_pb = messages_v2_pb2.ListClustersResponse(
failed_locations=[failed_location],
clusters=[
data_v2_pb2.Cluster(name=cluster_name1),
data_v2_pb2.Cluster(name=cluster_name2),
],
)
# Patch the stub used by the API method.
instance_api = mock.create_autospec(
bigtable_instance_admin_client.BigtableInstanceAdminClient
)
instance_api.list_clusters.side_effect = [response_pb]
instance_api.cluster_path = cluster_path_template.format
client._instance_admin_client = instance_api
# Perform the method and check the result.
clusters, failed_locations = instance.list_clusters()
cluster_1, cluster_2 = clusters
self.assertIsInstance(cluster_1, Cluster)
self.assertEqual(cluster_1.name, cluster_name1)
self.assertIsInstance(cluster_2, Cluster)
self.assertEqual(cluster_2.name, cluster_name2)
self.assertEqual(failed_locations, [failed_location])
def test_table_factory(self):
from google.cloud.bigtable.table import Table
app_profile_id = "appProfileId1262094415"
instance = self._make_one(self.INSTANCE_ID, None)
table = instance.table(self.TABLE_ID, app_profile_id=app_profile_id)
self.assertIsInstance(table, Table)
self.assertEqual(table.table_id, self.TABLE_ID)
self.assertEqual(table._instance, instance)
self.assertEqual(table._app_profile_id, app_profile_id)
def _list_tables_helper(self, table_name=None):
from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_data_v2_pb2
from google.cloud.bigtable_admin_v2.proto import (
bigtable_table_admin_pb2 as table_messages_v1_pb2,
)
from google.cloud.bigtable_admin_v2.gapic import (
bigtable_table_admin_client,
bigtable_instance_admin_client,
)
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
mock.Mock()
)
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
instance = self._make_one(self.INSTANCE_ID, client)
# Create response_pb
if table_name is None:
table_name = self.TABLE_NAME
response_pb = table_messages_v1_pb2.ListTablesResponse(
tables=[table_data_v2_pb2.Table(name=table_name)]
)
# Patch the stub used by the API method.
client._table_admin_client = table_api
client._instance_admin_client = instance_api
bigtable_table_stub = client._table_admin_client.transport
bigtable_table_stub.list_tables.side_effect = [response_pb]
# Create expected_result.
expected_table = instance.table(self.TABLE_ID)
expected_result = [expected_table]
# Perform the method and check the result.
result = instance.list_tables()
self.assertEqual(result, expected_result)
def test_list_tables(self):
self._list_tables_helper()
def test_list_tables_failure_bad_split(self):
with self.assertRaises(ValueError):
self._list_tables_helper(table_name="wrong-format")
def test_list_tables_failure_name_bad_before(self):
BAD_TABLE_NAME = (
"nonempty-section-before"
+ "projects/"
+ self.PROJECT
+ "/instances/"
+ self.INSTANCE_ID
+ "/tables/"
+ self.TABLE_ID
)
with self.assertRaises(ValueError):
self._list_tables_helper(table_name=BAD_TABLE_NAME)
def test_app_profile_factory(self):
from google.cloud.bigtable.enums import RoutingPolicyType
APP_PROFILE_ID_1 = "app-profile-id-1"
ANY = RoutingPolicyType.ANY
DESCRIPTION_1 = "routing policy any"
APP_PROFILE_ID_2 = "app-profile-id-2"
SINGLE = RoutingPolicyType.SINGLE
DESCRIPTION_2 = "routing policy single"
ALLOW_WRITES = True
CLUSTER_ID = "cluster-id"
instance = self._make_one(self.INSTANCE_ID, None)
app_profile1 = instance.app_profile(
APP_PROFILE_ID_1, routing_policy_type=ANY, description=DESCRIPTION_1
)
app_profile2 = instance.app_profile(
APP_PROFILE_ID_2,
routing_policy_type=SINGLE,
description=DESCRIPTION_2,
cluster_id=CLUSTER_ID,
allow_transactional_writes=ALLOW_WRITES,
)
self.assertEqual(app_profile1.app_profile_id, APP_PROFILE_ID_1)
self.assertIs(app_profile1._instance, instance)
self.assertEqual(app_profile1.routing_policy_type, ANY)
self.assertEqual(app_profile1.description, DESCRIPTION_1)
self.assertEqual(app_profile2.app_profile_id, APP_PROFILE_ID_2)
self.assertIs(app_profile2._instance, instance)
self.assertEqual(app_profile2.routing_policy_type, SINGLE)
self.assertEqual(app_profile2.description, DESCRIPTION_2)
self.assertEqual(app_profile2.cluster_id, CLUSTER_ID)
self.assertEqual(app_profile2.allow_transactional_writes, ALLOW_WRITES)
def test_list_app_profiles(self):
from google.api_core.page_iterator import Iterator
from google.api_core.page_iterator import Page
from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2
from google.cloud.bigtable.app_profile import AppProfile
class _Iterator(Iterator):
def __init__(self, pages):
super(_Iterator, self).__init__(client=None)
self._pages = pages
def _next_page(self):
if self._pages:
page, self._pages = self._pages[0], self._pages[1:]
return Page(self, page, self.item_to_value)
credentials = _make_credentials()
client = self._make_client(
project=self.PROJECT, credentials=credentials, admin=True
)
instance = self._make_one(self.INSTANCE_ID, client)
# Setup Expected Response
app_profile_path_template = "projects/{}/instances/{}/appProfiles/{}"
app_profile_id1 = "app-profile-id1"
app_profile_id2 = "app-profile-id2"
app_profile_name1 = app_profile_path_template.format(
self.PROJECT, self.INSTANCE_ID, app_profile_id1
)
app_profile_name2 = app_profile_path_template.format(
self.PROJECT, self.INSTANCE_ID, app_profile_id2
)
routing_policy = data_v2_pb2.AppProfile.MultiClusterRoutingUseAny()
app_profiles = [
data_v2_pb2.AppProfile(
name=app_profile_name1, multi_cluster_routing_use_any=routing_policy
),
data_v2_pb2.AppProfile(
name=app_profile_name2, multi_cluster_routing_use_any=routing_policy
),
]
iterator = _Iterator(pages=[app_profiles])
# Patch the stub used by the API method.
instance_api = mock.create_autospec(
bigtable_instance_admin_client.BigtableInstanceAdminClient
)
client._instance_admin_client = instance_api
instance_api.app_profile_path = app_profile_path_template.format
instance_api.list_app_profiles.return_value = iterator
# Perform the method and check the result.
app_profiles = instance.list_app_profiles()
app_profile_1, app_profile_2 = app_profiles
self.assertIsInstance(app_profile_1, AppProfile)
self.assertEqual(app_profile_1.name, app_profile_name1)
self.assertIsInstance(app_profile_2, AppProfile)
self.assertEqual(app_profile_2.name, app_profile_name2)
| tseaver/google-cloud-python | bigtable/tests/unit/test_instance.py | Python | apache-2.0 | 37,126 | 0.001104 |
"""
Defines the URL routes for this app.
"""
from django.conf import settings
from django.conf.urls import patterns, url
from ..profile_images.views import ProfileImageView
from .accounts.views import AccountDeactivationView, AccountViewSet
from .preferences.views import PreferencesView, PreferencesDetailView
from .verification_api.views import PhotoVerificationStatusView
ME = AccountViewSet.as_view({
'get': 'get',
})
ACCOUNT_LIST = AccountViewSet.as_view({
'get': 'list',
})
ACCOUNT_DETAIL = AccountViewSet.as_view({
'get': 'retrieve',
'patch': 'partial_update',
})
urlpatterns = patterns(
'',
url(r'^v1/me$', ME, name='own_username_api'),
url(r'^v1/accounts/{}$'.format(settings.USERNAME_PATTERN), ACCOUNT_DETAIL, name='accounts_api'),
url(r'^v1/accounts$', ACCOUNT_LIST, name='accounts_detail_api'),
url(
r'^v1/accounts/{}/image$'.format(settings.USERNAME_PATTERN),
ProfileImageView.as_view(),
name='accounts_profile_image_api'
),
url(
r'^v1/accounts/{}/deactivate/$'.format(settings.USERNAME_PATTERN),
AccountDeactivationView.as_view(),
name='accounts_deactivation'
),
url(
r'^v1/accounts/{}/verification_status/$'.format(settings.USERNAME_PATTERN),
PhotoVerificationStatusView.as_view(),
name='verification_status'
),
url(
r'^v1/preferences/{}$'.format(settings.USERNAME_PATTERN),
PreferencesView.as_view(),
name='preferences_api'
),
url(
r'^v1/preferences/{}/(?P<preference_key>[a-zA-Z0-9_]+)$'.format(settings.USERNAME_PATTERN),
PreferencesDetailView.as_view(),
name='preferences_detail_api'
),
)
| prarthitm/edxplatform | openedx/core/djangoapps/user_api/urls.py | Python | agpl-3.0 | 1,706 | 0.001758 |
#NVDAObjects/IAccessible/sysTreeView32.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2007-2010 Michael Curran <mick@kulgan.net>, James Teh <jamie@jantrid.net>
from ctypes import *
from ctypes.wintypes import *
import api
import winKernel
import controlTypes
import speech
import UIAHandler
from . import IAccessible
if UIAHandler.isUIAAvailable: from ..UIA import UIA
from .. import NVDAObject
from logHandler import log
import watchdog
TV_FIRST=0x1100
TVIS_STATEIMAGEMASK=0xf000
#Window messages
TVM_GETITEMSTATE=TV_FIRST+39
TVM_GETITEM=TV_FIRST+62
TVM_MAPACCIDTOHTREEITEM=TV_FIRST+42
TVM_MAPHTREEITEMTOACCID=TV_FIRST+43
TVM_GETNEXTITEM=TV_FIRST+10
#item mask flags
TVIF_CHILDREN=0x40
#Relation codes
TVGN_ROOT=0
TVGN_NEXT=1
TVGN_PREVIOUS=2
TVGN_PARENT=3
TVGN_CHILD=4
class TVItemStruct(Structure):
_fields_=[
('mask',c_uint),
('hItem',c_void_p),
('state',c_uint),
('stateMask',c_uint),
('pszText',LPWSTR),
('cchTextMax',c_int),
('iImage',c_int),
('iSelectedImage',c_int),
('cChildren',c_int),
('lParam',LPARAM),
]
class TreeView(IAccessible):
def _get_firstChild(self):
try:
return super(TreeView, self).firstChild
except:
# Broken commctrl 5 tree view.
return BrokenCommctrl5Item.getFirstItem(self)
class TreeViewItem(IAccessible):
def _get_role(self):
return controlTypes.ROLE_TREEVIEWITEM
def _get_treeview_hItem(self):
if not hasattr(self,'_treeview_hItem'):
self._treeview_hItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_MAPACCIDTOHTREEITEM,self.IAccessibleChildID,0)
if not self._treeview_hItem:
# Tree views from comctl < 6.0 use the hItem as the child ID.
self._treeview_hItem=self.IAccessibleChildID
return self._treeview_hItem
def _get_treeview_level(self):
return int(self.IAccessibleObject.accValue(self.IAccessibleChildID))
def _get_states(self):
states=super(TreeViewItem,self)._get_states()
hItem=self.treeview_hItem
itemStates=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETITEMSTATE,hItem,TVIS_STATEIMAGEMASK)
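		# TVM_GETITEMSTATE with TVIS_STATEIMAGEMASK returns the item's state image
		# index in bits 12-15; for checkbox tree views 1 means unchecked, 2 checked
		# and 3 partially checked, hence the shift and mask below.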
ch=(itemStates>>12)&3
if ch>0:
states.add(controlTypes.STATE_CHECKABLE)
if ch==2:
states.add(controlTypes.STATE_CHECKED)
elif ch==3:
states.add(controlTypes.STATE_HALFCHECKED)
return states
def _get_value(self):
return None
def _get_parent(self):
if self.IAccessibleChildID==0:
return super(TreeViewItem,self)._get_parent()
hItem=self.treeview_hItem
if not hItem:
return super(TreeViewItem,self)._get_parent()
parentItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_PARENT,hItem)
if parentItem<=0:
return super(TreeViewItem,self)._get_parent()
newID=watchdog.cancellableSendMessage(self.windowHandle,TVM_MAPHTREEITEMTOACCID,parentItem,0)
if not newID:
# Tree views from comctl < 6.0 use the hItem as the child ID.
newID=parentItem
return IAccessible(windowHandle=self.windowHandle,IAccessibleObject=self.IAccessibleObject,IAccessibleChildID=newID)
def _get_firstChild(self):
if self.IAccessibleChildID==0:
return super(TreeViewItem,self)._get_firstChild()
hItem=self.treeview_hItem
if not hItem:
return super(TreeViewItem,self)._get_firstChild()
childItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_CHILD,hItem)
if childItem<=0:
return super(TreeViewItem,self)._get_firstChild()
newID=watchdog.cancellableSendMessage(self.windowHandle,TVM_MAPHTREEITEMTOACCID,childItem,0)
if not newID:
# Tree views from comctl < 6.0 use the hItem as the child ID.
newID=childItem
return IAccessible(windowHandle=self.windowHandle,IAccessibleObject=self.IAccessibleObject,IAccessibleChildID=newID)
def _get_next(self):
if self.IAccessibleChildID==0:
return super(TreeViewItem,self)._get_next()
hItem=self.treeview_hItem
if not hItem:
return None
nextItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_NEXT,hItem)
if nextItem<=0:
return None
newID=watchdog.cancellableSendMessage(self.windowHandle,TVM_MAPHTREEITEMTOACCID,nextItem,0)
if not newID:
# Tree views from comctl < 6.0 use the hItem as the child ID.
newID=nextItem
return IAccessible(windowHandle=self.windowHandle,IAccessibleObject=self.IAccessibleObject,IAccessibleChildID=newID)
def _get_previous(self):
if self.IAccessibleChildID==0:
return super(TreeViewItem,self)._get_previous()
hItem=self.treeview_hItem
if not hItem:
return None
prevItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_PREVIOUS,hItem)
if prevItem<=0:
return None
newID=watchdog.cancellableSendMessage(self.windowHandle,TVM_MAPHTREEITEMTOACCID,prevItem,0)
if not newID:
# Tree views from comctl < 6.0 use the hItem as the child ID.
newID=prevItem
return IAccessible(windowHandle=self.windowHandle,IAccessibleObject=self.IAccessibleObject,IAccessibleChildID=newID)
def _get_children(self):
children=[]
child=self.firstChild
while child:
children.append(child)
child=child.next
return children
def _get_childCount(self):
hItem=self.treeview_hItem
if not hItem:
return 0
childItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_CHILD,hItem)
if childItem<=0:
return 0
numItems=0
while childItem>0:
numItems+=1
childItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_NEXT,childItem)
return numItems
def _get_positionInfo(self):
if self.IAccessibleChildID==0:
return super(TreeViewItem,self)._get_positionInfo()
info={}
info['level']=self.treeview_level
hItem=self.treeview_hItem
if not hItem:
return info
newItem=hItem
index=0
while newItem>0:
index+=1
newItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_PREVIOUS,newItem)
newItem=hItem
numItems=index-1
while newItem>0:
numItems+=1
newItem=watchdog.cancellableSendMessage(self.windowHandle,TVM_GETNEXTITEM,TVGN_NEXT,newItem)
info['indexInGroup']=index
info['similarItemsInGroup']=numItems
return info
def event_stateChange(self):
announceContains = self is api.getFocusObject() and controlTypes.STATE_EXPANDED in self.states and controlTypes.STATE_EXPANDED not in getattr(self,'_speakObjectPropertiesCache',{}).get('states',frozenset())
super(TreeViewItem,self).event_stateChange()
if announceContains:
speech.speakMessage(_("%s items")%self.childCount)
class BrokenCommctrl5Item(IAccessible):
"""Handle broken CommCtrl v5 SysTreeView32 items in 64 bit applications.
In these controls, IAccessible fails to retrieve any info, so we must retrieve it using UIA.
We do this by obtaining a UIA NVDAObject and redirecting properties to it.
We can't simply use UIA objects alone for these controls because UIA events are also broken.
"""
def __init__(self, _uiaObj=None, **kwargs):
# This class is being directly instantiated.
if not _uiaObj:
raise ValueError("Cannot instantiate directly without supplying _uiaObj")
self._uiaObj = _uiaObj
super(BrokenCommctrl5Item, self).__init__(**kwargs)
def initOverlayClass(self):
self._uiaObj = None
if UIAHandler.handler:
parent=super(BrokenCommctrl5Item, self).parent
if parent and parent.hasFocus:
try:
kwargs = {}
UIA.kwargsFromSuper(kwargs, relation="focus")
self._uiaObj = UIA(**kwargs)
except:
log.debugWarning("Retrieving UIA focus failed", exc_info=True)
def _get_role(self):
return self._uiaObj.role if self._uiaObj else controlTypes.ROLE_UNKNOWN
def _get_name(self):
return self._uiaObj.name if self._uiaObj else None
def _get_description(self):
return self._uiaObj.description if self._uiaObj else None
def _get_value(self):
return self._uiaObj.value if self._uiaObj else None
def _get_states(self):
return self._uiaObj.states if self._uiaObj else set()
def _get_positionInfo(self):
return self._uiaObj.positionInfo if self._uiaObj else {}
def _get_location(self):
return self._uiaObj.location if self._uiaObj else None
def _makeRelatedObj(self, uiaObj):
# We need to wrap related UIA objects so that the ancestry will return to IAccessible for the tree view itself.
if not uiaObj:
return None
return BrokenCommctrl5Item(IAccessibleObject=self.IAccessibleObject, IAccessibleChildID=self.IAccessibleChildID, windowHandle=self.windowHandle, _uiaObj=uiaObj)
def _get_parent(self):
if self._uiaObj:
uiaParent = self._uiaObj.parent
# If the parent is the tree view itself (root window object), just use super's parent. IAccessible isn't broken on the container itself.
if not uiaParent.UIAElement.cachedNativeWindowHandle:
return self._makeRelatedObj(uiaParent)
return super(BrokenCommctrl5Item, self).parent
def _get_next(self):
return self._makeRelatedObj(self._uiaObj.next) if self._uiaObj else None
def _get_previous(self):
return self._makeRelatedObj(self._uiaObj.previous) if self._uiaObj else None
def _get_firstChild(self):
return self._makeRelatedObj(self._uiaObj.firstChild) if self._uiaObj else None
def _get_lastChild(self):
return self._makeRelatedObj(self._uiaObj.lastChild) if self._uiaObj else None
def _get_children(self):
# Use the base algorithm, which uses firstChild and next.
return NVDAObject._get_children(self)
@classmethod
def getFirstItem(cls, treeObj):
"""Get an instance for the first item in a given tree view.
"""
if not UIAHandler.handler:
return None
# Get a UIA object for the tree view by getting the root object for the window.
try:
kwargs = {"windowHandle": treeObj.windowHandle}
UIA.kwargsFromSuper(kwargs)
uiaObj = UIA(**kwargs)
except:
log.debugWarning("Error retrieving UIA object for tree view", exc_info=True)
return None
# Get the first tree item.
uiaObj = uiaObj.firstChild
if not uiaObj:
return None
# The IAccessibleChildID for this object isn't really used.
# However, it must not be 0, as 0 is the tree view itself.
return cls(IAccessibleObject=treeObj.IAccessibleObject, IAccessibleChildID=1, windowHandle=treeObj.windowHandle, _uiaObj=uiaObj)
| daisymax/nvda | source/NVDAObjects/IAccessible/sysTreeView32.py | Python | gpl-2.0 | 10,467 | 0.040222 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 15 23:06:23 2017
@author: franklin
"""
# Your task here is to extract data from xml on authors of an article
# and add it to a list, one item for an author.
# See the provided data structure for the expected format.
# The tags for first name, surname and email should map directly
# to the dictionary keys
import xml.etree.ElementTree as ET
article_file = "data/exampleresearcharticle.xml"
def get_root(fname):
tree = ET.parse(fname)
return tree.getroot()
def get_author(root):
authors = []
for author in root.findall('./fm/bibl/aug/au'):
data = {
"fnm": None,
"snm": None,
"email": None
}
data["fnm"] = author.find('./fnm').text
data["snm"] = author.find('./snm').text
data["email"] = author.find('./email').text
authors.append(data)
return authors
def test():
solution = [{'fnm': 'Omer', 'snm': 'Mei-Dan', 'email': 'omer@extremegate.com'}, {'fnm': 'Mike', 'snm': 'Carmont', 'email': 'mcarmont@hotmail.com'}, {'fnm': 'Lior', 'snm': 'Laver', 'email': 'laver17@gmail.com'}, {'fnm': 'Meir', 'snm': 'Nyska', 'email': 'nyska@internet-zahav.net'}, {'fnm': 'Hagay', 'snm': 'Kammar', 'email': 'kammarh@gmail.com'}, {'fnm': 'Gideon', 'snm': 'Mann', 'email': 'gideon.mann.md@gmail.com'}, {'fnm': 'Barnaby', 'snm': 'Clarck', 'email': 'barns.nz@gmail.com'}, {'fnm': 'Eugene', 'snm': 'Kots', 'email': 'eukots@gmail.com'}]
root = get_root(article_file)
data = get_author(root)
assert data[0] == solution[0]
assert data[1]["fnm"] == solution[1]["fnm"]
test() | franklinsales/udacity-data-analyst-nanodegree | project3/class-works/data-wrangling/data-in-more-complex-formats/quiz-extracting-data-corrected.py | Python | mit | 1,669 | 0.001797 |
"""Benchmark runner.
Copyright 2013 by Rackspace Hosting, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from falcon import bench
def fail(returncode, e):
sys.stderr.write('\nERROR: %s\n\n' % e)
sys.exit(returncode)
def main():
try:
bench.main()
except KeyboardInterrupt:
fail(1, 'Interrupted, terminating benchmark')
except RuntimeError as e:
fail(1, e)
if __name__ == '__main__':
main()
| openilabs/falconlab | env/lib/python2.7/site-packages/falcon/cmd/bench.py | Python | mit | 944 | 0 |
# -*- coding: utf-8 -*-
# This file carries the module's version information which will be updated
# during execution of the installation script, setup.py. Distribution tarballs
# contain a pre-generated copy of this file.
__version__ = '0.2'
| EMS-TU-Ilmenau/fastmat | fastmat/version.py | Python | apache-2.0 | 245 | 0 |
# https://www.w3resource.com/python-exercises/
# 1. Write a Python program to print the following string in a specific format (see the output).
# Sample String : "Twinkle, twinkle, little star, How I wonder what you are! Up above the world so high, Like a diamond
# in the sky. Twinkle, twinkle, little star, How I wonder what you are" Output :
# Twinkle, twinkle, little star,
# How I wonder what you are!
# Up above the world so high,
# Like a diamond in the sky.
# Twinkle, twinkle, little star,
# How I wonder what you are
string = """
Twinkle, twinkle, little star,
\tHow I wonder what you are!
\t\tUp above the world so high,
\t\tLike a diamond in the sky.
Twinkle, twinkle, little star,
\tHow I wonder what you are
"""
print string
| dadavidson/Python_Lab | Python-w3resource/Python_Basic/ex01.py | Python | mit | 715 | 0.00979 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import cairo, Image
import gtk, gobject
from numpy import cos, sin, pi, sqrt, sort, square,array, zeros, diff,\
column_stack,ones, reshape, linspace, arctan2
from numpy import sum as npsum
from numpy.random import random, seed
from itertools import count
from speedup.speedup import pyx_collision_reject
#from speedup.speedup import pyx_growth_branch
from speedup.speedup import pyx_growth
from speedup.speedup import pyx_segment_attract
seed(4)
FNAME = './img/xx'
BACK = [1]*3
FRONT = [0,0,0,0.7]
CONTRASTA = [0.84,0.37,0] # orange
CONTRASTB = [0.53,0.53,1] # lightblue
CONTRASTC = [0.84,1,0]
PI = pi
TWOPI = 2.*pi
NMAX = 2*1e8
SIZE = 1500
ONE = 1./SIZE
STP = ONE*0.9
FARL = 30.*ONE
NEARL = 4.*ONE
GROW_NEAR_LIMIT = NEARL
MID = 0.5
LINEWIDTH = 3.*ONE
####
RENDER_ITT = 2000 # redraw this often
ZONEWIDTH = FARL/ONE
ZONES = int(SIZE/ZONEWIDTH)
class Render(object):
def __init__(self,n):
self.n = n
self.__init_cairo()
window = gtk.Window()
window.resize(self.n, self.n)
window.connect("destroy", self.__write_image_and_exit)
darea = gtk.DrawingArea()
darea.connect("expose-event", self.expose)
window.add(darea)
window.show_all()
self.darea = darea
self.num_img = 0
def clear_canvas(self):
self.ctx.set_source_rgb(*BACK)
self.ctx.rectangle(0,0,1,1)
self.ctx.fill()
def __write_image_and_exit(self,*args):
self.sur.write_to_png('on_exit.png')
gtk.main_quit(*args)
def __init_cairo(self):
sur = cairo.ImageSurface(cairo.FORMAT_ARGB32,self.n,self.n)
ctx = cairo.Context(sur)
ctx.scale(self.n,self.n)
ctx.set_source_rgb(*BACK)
ctx.rectangle(0,0,1,1)
ctx.fill()
self.sur = sur
self.ctx = ctx
def init_step(self,e):
self.step = e
#gobject.timeout_add(5,self.step_wrap)
gobject.idle_add(self.step_wrap)
self.steps = 0
def line(self,x1,y1,x2,y2):
self.ctx.set_source_rgba(*FRONT)
self.ctx.move_to(x1,y1)
self.ctx.line_to(x2,y2)
self.ctx.stroke()
def circle(self,x,y,r,fill=False):
self.ctx.arc(x,y,r,0,pi*2.)
if fill:
self.ctx.fill()
else:
self.ctx.stroke()
def circles(self,xx,yy,rr,fill=False):
if fill:
action = self.ctx.fill
else:
action = self.ctx.stroke
for x,y,r in zip(xx,yy,rr):
self.ctx.arc(x,y,r,0,TWOPI)
action()
def circle_stroke(self,x1,y1,x2,y2,r):
dx = x1-x2
dy = y1-y2
dd = sqrt(dx*dx+dy*dy)
n = int(dd/ONE)
n = n if n>2 else 2
a = arctan2(dy,dx)
scale = linspace(0,dd,n)
xp = x1-scale*cos(a)
yp = y1-scale*sin(a)
for x,y in zip(xp,yp):
self.ctx.arc(x,y,r,0,pi*2.)
self.ctx.fill()
def expose(self,*args):
cr = self.darea.window.cairo_create()
cr.set_source_surface(self.sur,0,0)
cr.paint()
def step_wrap(self,*args):
res = self.step()
self.steps += 1
if not self.steps%RENDER_ITT:
self.expose()
return res
class Line(object):
def __init__(self):
self.X = zeros((NMAX,2),'float')
self.SV = zeros((NMAX,2),'int')
self.SVMASK = zeros(NMAX,'int')
self.VS = {}
self.vnum = 0
self.snum = 0
self.sind = 0
self.ZV = [[] for i in xrange((ZONES+2)**2)]
self.VZ = zeros(NMAX,'int')
def _add_vertex(self,x):
"""
    add vertex located at x.
zone maps are updated.
"""
vnum = self.vnum
self.X[vnum,:] = x
z = get_z(x,ZONES)
self.ZV[z].append(vnum)
self.VZ[vnum] = z
self.vnum += 1
return self.vnum-1
def update_zone_maps(self):
"""
check if vertices have changed zone, and update those that have.
"""
vnum = self.vnum
zz = get_zz(self.X[:vnum,:],ZONES)
mask = (zz != self.VZ[:vnum]).nonzero()[0]
for bad_v in mask:
new_z = zz[bad_v]
old_z = self.VZ[bad_v]
new = [v for v in self.ZV[old_z] if v != bad_v]
self.ZV[old_z] = new
self.ZV[new_z].append(bad_v)
self.VZ[mask] = zz[mask]
def _add_segment(self,a,b):
"""
add new segment between vertices a,b.
"""
self.SV[self.sind,:] = [a,b]
self.SVMASK[self.sind] = 1
add = make_dict_list_add(self.VS)
add(a,self.sind)
add(b,self.sind)
self.sind += 1
self.snum += 1
return self.sind-1
def _add_vertex_segment(self,x,a):
"""
add new vertex x connected to vertex a with a new segment.
"""
v = self._add_vertex(x)
self._add_segment(v,a)
def _delete_segment(self,a):
"""
delete segment a and related connections.
"""
vv = self.SV[a,:]
self.SVMASK[a] = 0
self.snum -= 1
for v in vv:
if self.VS.has_key(v):
vs = [s for s in self.VS[v] if s!=a]
if len(vs)>0:
self.VS[v] = vs
else:
del(self.VS[v])
return vv
def split_segment(self,a):
"""
add new vertex, v, at the middle of segment a with vertices: [v0,v1]
creates new segments b,c such that: v0 -b- v -c- v1
"""
vv = self.SV[a,:]
midx = (self.X[vv[1],0] + self.X[vv[0],0])*0.5
midy = (self.X[vv[1],1] + self.X[vv[0],1])*0.5
#TODO: improve
newv = self._add_vertex([midx,midy])
self._delete_segment(a)
b = self._add_segment(vv[0],newv)
c = self._add_segment(vv[1],newv)
return newv, [b,c]
def make_dict_list_add(d):
def add(k,v):
if d.has_key(k):
d[k].append(v)
else:
d[k] = [v]
return add
def get_z(x,nz):
"""
find zone z of x. we have nz zones in each direction.
"""
i = 1+int(x[0]*nz)
j = 1+int(x[1]*nz)
z = i*nz+j
return z
def get_zz(xx,nz):
"""
same as get_z for a vector of points.
"""
ij = (xx*nz).astype('int')
zz = ij[:,0]*(nz+2) + ij[:,1]+1
return zz
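
# Illustrative sketch (not part of the original script): how a single point in
# [0,1)x[0,1) maps to a zone bucket via get_zz. The (nz+2) stride matches the
# ZV allocation of (ZONES+2)**2 buckets in Line.__init__; the helper name below
# is an assumption added purely for illustration.
def _zone_of_point(x, y, nz=ZONES):
  """return the zone bucket index of the single point (x,y)."""
  return get_zz(array([[x, y]]), nz)[0]
# e.g. SIZE=1500 and FARL=30*ONE give ZONES == 50, so
# _zone_of_point(0.5, 0.5) == 25*(50+2) + 25 + 1 == 1326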
def init_circle(l,ix,iy,r,n):
th = sort(random(n)*TWOPI)
rad = (0.9 + 0.1*(0.5-random(n)))*r
xx = column_stack( (ix+cos(th)*rad, iy+sin(th)*rad) )
vv = []
for x in xx:
vv.append(l._add_vertex(x))
for i in xrange(len(vv)-1):
seg = l._add_segment(vv[i],vv[i+1])
l._add_segment(vv[0],vv[-1])
def init_horizontal_line(l,x1,x2,y1,y2,n):
x = sort(x1+(x2-x1)*random(n))
y = y1 + (y2-y1)*random(n)
xx = column_stack((x,y))
vv = []
for x in xx:
vv.append(l._add_vertex(x))
for i in xrange(len(vv)-1):
seg = l._add_segment(vv[i],vv[i+1])
if i == 0:
first = seg
def main():
L = Line()
render = Render(SIZE)
init_circle(L,MID,MID,0.001,50)
#init_horizontal_line(L,MID-0.2,MID+0.2,MID-0.001,MID+0.001,100)
SX = zeros((NMAX,2),'float')
def show(render,l):
render.clear_canvas()
render.ctx.set_source_rgba(*FRONT)
render.ctx.set_line_width(LINEWIDTH)
for vv in l.SV[:l.sind,:][l.SVMASK[:l.sind]>0,:]:
render.circle_stroke(l.X[vv[0],0],l.X[vv[0],1],
l.X[vv[1],0],l.X[vv[1],1], ONE*2)
def step():
rnd1 = random(L.sind)
#rnd2 = random(L.sind)
pyx_growth(L,rnd1,GROW_NEAR_LIMIT)
#pyx_growth_branch(L,rnd1,rnd2,GROW_NEAR_LIMIT)
L.update_zone_maps()
if not render.steps%RENDER_ITT:
show(render,L)
print 'steps:',render.steps,'vnum:',L.vnum,'snum:',L.snum
fn = '{:s}_nearl{:0.0f}_itt{:07d}.png'
fn = fn.format(FNAME,FARL/ONE,render.steps)
render.sur.write_to_png(fn)
vnum = L.vnum
SX[:vnum,:] = 0.
pyx_segment_attract(L,SX[:vnum,:],NEARL)
pyx_collision_reject(L,SX[:vnum,:],FARL,ZONES)
SX[:vnum,:] *= STP
L.X[:vnum,:] += SX[:vnum,:]
return True
render.init_step(step)
gtk.main()
if __name__ == '__main__' :
if True:
import pstats, cProfile
fn = './profile/profile'
cProfile.run('main()',fn)
p = pstats.Stats(fn)
p.strip_dirs().sort_stats('cumulative').print_stats()
else:
main()
| inconvergent/differential_ani | differential.py | Python | mit | 7,824 | 0.034126 |
# -*- coding: utf-8 -*-
import classes.level_controller as lc
import classes.game_driver as gd
import classes.extras as ex
import classes.board
import random
import math
import pygame
class Board(gd.BoardGame):
def __init__(self, mainloop, speaker, config, screen_w, screen_h):
self.level = lc.Level(self,mainloop,999,1)
gd.BoardGame.__init__(self,mainloop,speaker,config,screen_w,screen_h,11,9)
def create_game_objects(self, level = 1):
self.board.draw_grid = False
color = ex.hsv_to_rgb(225,15,235)
color2 = (255,255,255)
self.col_r = (255,0,0)
self.col_g = (0,255,0)
self.col_b = (0,0,255)
self.col_k = (0,0,0)
self.col_e = (255,255,255)
colorkey = (2,2,2)
self.col_bg = (255,255,255) #self.col_k #(255,246,219)
data = [32,23]
#stretch width to fit the screen size
x_count = self.get_x_count(data[1],even=True)
if x_count > 32:
data[0] = x_count
self.data = data
self.points = 20
self.vis_buttons = [1,0,0,0,1,1,1,0,0]
self.mainloop.info.hide_buttonsa(self.vis_buttons)
self.layout.update_layout(data[0],data[1])
scale = self.layout.scale
self.board.level_start(data[0],data[1],scale)
self.board.board_bg.initcolor = self.col_bg
self.board.board_bg.color = self.col_bg
self.board.board_bg.update_me = True
self.board.moved = self.moved
self.choice_list = []
step = 255 / 20.0
for i in range(21):
self.choice_list.append(int(255 - i*step))
self.picked = []
for i in range(3):
self.picked.append(self.choice_list[random.randrange(0,len(self.choice_list))])
y = data[1]-3
self.rgb_g = [y,y,y]
self.rgbx3 = [self.col_k,self.col_k,self.col_k]
self.board.add_unit(1,y,2,3,classes.board.ImgAlphaShip,"",self.col_r,"light_r.png")
self.board.add_unit(4,y,2,3,classes.board.ImgAlphaShip,"",self.col_g,"light_g.png")
self.board.add_unit(7,y,2,3,classes.board.ImgAlphaShip,"",self.col_b,"light_b.png")
for each in self.board.ships:
each.outline = False
each.audible = False
each.image.set_colorkey(each.initcolor)
#add colour circles - canvas
self.board.add_unit(10,0,data[0]-10,data[1],classes.board.Label,"",self.col_e,"",0)
self.canvas = self.board.units[0]
self.canvas_center = [(self.canvas.grid_w*self.board.scale)//2,(self.canvas.grid_h*self.board.scale)//2]
#adding borders between the colour tubes
self.board.add_unit(0,0,1,data[1],classes.board.Label,"",self.col_bg,"",0)
self.board.add_unit(3,0,1,data[1],classes.board.Label,"",self.col_bg,"",0)
self.board.add_unit(6,0,1,data[1],classes.board.Label,"",self.col_bg,"",0)
self.board.add_unit(9,0,1,data[1],classes.board.Label,"",self.col_bg,"",0)
#adding colour guides
self.board.add_door(1,0,2,data[1],classes.board.Door,"",color,"",0)
self.board.units[-1].set_outline(self.col_r, 1)
self.board.add_door(4,0,2,data[1],classes.board.Door,"",color,"",0)
self.board.units[-1].set_outline(self.col_g, 1)
self.board.add_door(7,0,2,data[1],classes.board.Door,"",color,"",0)
self.board.units[-1].set_outline(self.col_b, 1)
#adding colour strips
self.board.add_door(1,data[1]-1,2,1,classes.board.Door,"",self.col_r,"",0)
self.board.add_door(4,data[1]-1,2,1,classes.board.Door,"",self.col_g,"",0)
self.board.add_door(7,data[1]-1,2,1,classes.board.Door,"",self.col_b,"",0)
#black background
self.board.add_door(1,0,2,data[1],classes.board.Door,"",self.col_k,"",0)
self.board.units[-1].image.set_colorkey(None)
self.board.add_door(4,0,2,data[1],classes.board.Door,"",self.col_k,"",0)
self.board.units[-1].image.set_colorkey(None)
self.board.add_door(7,0,2,data[1],classes.board.Door,"",self.col_k,"",0)
self.board.units[-1].image.set_colorkey(None)
for i in [5,6,7,8,9,10,11,12,13]:
if i>7:
self.board.units[i].image.set_colorkey(colorkey)
self.board.all_sprites_list.move_to_back(self.board.units[i])
else:
self.board.all_sprites_list.move_to_front(self.board.units[i])
self.canvas.set_outline((255,75,0),1)
self.canv = []
for i in range(4):
self.canv.append(pygame.Surface([self.canvas.grid_w*self.board.scale, self.canvas.grid_h*self.board.scale-1]))
self.board.all_sprites_list.move_to_back(self.board.board_bg)
self.mix()
def mix(self):
for i in range(3):
self.rgb_g[i] = self.board.ships[i].grid_y
self.update_sliders()
self.canv[3].fill(self.col_k)
ct = self.canvas_center
radius = 9*self.board.scale
radius2 = 5*self.board.scale
x = 1*self.board.scale
rect = [[ct[0],ct[1]-x],[ct[0]-x,ct[1]+x],[ct[0]+x,ct[1]+x]]
for i in range(3):
pygame.draw.circle(self.canv[i], self.rgbx3[i], rect[i], radius, 0)
self.canv[3].blit(self.canv[i],[0,0],special_flags = pygame.BLEND_ADD)
pygame.draw.circle(self.canv[3], self.picked, ct, radius2, 0)
self.canvas.painting = self.canv[3].copy()
self.canvas.update_me = True
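    # Note on mix(): pygame's BLEND_ADD sums colour channels, so where the red
    # (255,0,0), green (0,255,0) and blue (0,0,255) discs overlap, the canvas
    # shows their additive mix (e.g. red + green -> yellow (255,255,0)). The
    # small centre disc is self.picked, the target colour the player has to
    # reproduce with the three colour sliders.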
def update_sliders(self):
for i in range(3):
strip = self.board.units[i+8]
strip.grid_y = self.rgb_g[i]+3-3
strip.grid_h = self.data[1]-strip.grid_y+3
col = []
for each in strip.initcolor:
if each > 0:
if strip.grid_y == 20:
col.append(0)
elif strip.grid_y == 0:
col.append(255)
else:
step = 255 / 20.0
col.append(int(255 - (strip.grid_y) * step))
else:
col.append(0)
self.rgbx3[i] = col
strip.color = col
strip.pos_update()
strip.update_me = True
def moved(self):
self.mix()
def handle(self,event):
gd.BoardGame.handle(self, event) #send event handling up
def update(self,game):
game.fill((0,0,0))
gd.BoardGame.update(self, game) #rest of painting done by parent
def check_result(self):
r = self.rgbx3[0][0]
g = self.rgbx3[1][1]
b = self.rgbx3[2][2]
if self.picked != [r,g,b]:
help = ""
if self.picked[0] > r:
help += self.dp['more red'] + ", "
elif self.picked[0] < r:
help += self.dp['less red'] + ", "
else:
help += self.dp['red is ok'] + ", "
if self.picked[1] > g:
help += self.dp['more green'] + ", "
elif self.picked[1] < g:
help += self.dp['less green'] + ", "
else:
help += self.dp['green is ok'] + ", "
if self.picked[2] > b:
help += self.dp['more blue'] + ". "
elif self.picked[2] < b:
help += self.dp['less blue'] + ". "
else:
help += self.dp['blue is ok'] + ". "
self.say(help)
if self.points > 0:
self.points -= 1
self.level.try_again(silent = self.mainloop.speaker.talkative)
else:
self.update_score(self.points)
self.level.next_board()
| OriHoch/pysiogame | game_boards/game054.py | Python | gpl-3.0 | 7,891 | 0.034216 |
from os.path import join, expanduser
from subprocess import Popen
from unittest import skipIf
from unittest.mock import ANY, call, MagicMock, Mock
from genty import genty, genty_dataset
import re
from app.project_type.git import Git
from app.util.conf.configuration import Configuration
from app.util.process_utils import is_windows, get_environment_variable_setter_command
from test.framework.base_unit_test_case import BaseUnitTestCase
from test.framework.comparators import AnyStringMatching
@genty
class TestGit(BaseUnitTestCase):
def setUp(self):
super().setUp()
self.patch('app.project_type.git.fs.create_dir')
self.patch('os.unlink')
self.patch('os.symlink')
self.os_path_exists_mock = self.patch('app.project_type.git.os.path.exists')
self.os_path_exists_mock.return_value = False
self.os_path_isfile_mock = self.patch('app.project_type.git.os.path.isfile')
self.os_path_isfile_mock.return_value = False
def test_timing_file_path_happy_path(self):
git_env = Git("ssh://scm.dev.box.net/box/www/current", 'origin', 'refs/changes/78/151978/27')
actual_timing_file_sys_path = git_env.timing_file_path('QUnit')
expected_timing_file_sys_path = join(
Configuration['base_directory'],
'timings',
'master',
'scm.dev.box.net',
'box',
'www',
'current',
'QUnit.timing.json',
)
self.assertEquals(expected_timing_file_sys_path, actual_timing_file_sys_path)
def test_execute_command_in_project_specifies_cwd_if_exists(self):
self.os_path_exists_mock.return_value = True
project_type_popen_patch = self._patch_popen()
fake_project_directory = 'proj_dir'
fake_command = 'some_command'
git_env = Git("ssh://scm.dev.box.net/box/www/current", 'origin', 'refs/changes/78/151978/27')
git_env.project_directory = fake_project_directory
git_env.execute_command_in_project(fake_command)
env_setter = get_environment_variable_setter_command('PROJECT_DIR', fake_project_directory)
project_type_popen_patch.assert_called_once_with(
'{} {}'.format(env_setter, fake_command),
cwd=fake_project_directory,
shell=ANY,
stdout=ANY,
stderr=ANY,
start_new_session=ANY,
)
def test_execute_command_in_project_type_specifies_cwd_if_doesnt_exist(self):
project_type_popen_patch = self._patch_popen()
fake_project_directory = 'proj_dir'
fake_command = 'some_command'
git_env = Git("ssh://scm.dev.box.net/box/www/current", 'origin', 'refs/changes/78/151978/27')
git_env.project_directory = fake_project_directory
git_env.execute_command_in_project(fake_command)
env_setter = get_environment_variable_setter_command('PROJECT_DIR', fake_project_directory)
project_type_popen_patch.assert_called_once_with(
'{} {}'.format(env_setter, fake_command),
cwd=None,
shell=ANY,
stdout=ANY,
stderr=ANY,
start_new_session=ANY,
)
@genty_dataset(
regular_path=(
'http://scm.example.com/path/to/project',
join('scm.example.com', 'path', 'to', 'project')
),
with_netloc=(
'ssh://scm.dev.box.net:12345/awesome-project',
join('scm.dev.box.net12345', 'awesomeproject')
),
no_netloc=(
'git.dev.box.net:Productivity/ClusterRunnerHealthCheck',
join('git.dev.box.net', 'Productivity', 'ClusterRunnerHealthCheck')
),
)
def test_get_full_repo_directory(self, url, expected_repo_path_without_base):
Configuration['repo_directory'] = join(expanduser('~'), '.clusterrunner', 'repos')
expected_repo_path = join(
Configuration['repo_directory'],
expected_repo_path_without_base,
)
actual_repo_path = Git.get_full_repo_directory(url)
self.assertEqual(expected_repo_path, actual_repo_path)
def test_get_timing_file_directory(self):
Configuration['timings_directory'] = join(expanduser('~'), '.clusterrunner', 'timing')
url = 'http://scm.example.com/path/to/project'
actual_timings_sys_path = Git.get_timing_file_directory(url)
expected_timings_sys_path = join(
Configuration['timings_directory'],
'scm.example.com',
'path',
'to',
'project',
)
self.assertEqual(expected_timings_sys_path, actual_timings_sys_path)
def test_get_repo_directory_removes_colon_from_directory_if_exists(self):
Configuration['repo_directory'] = join(expanduser('~'), 'tmp', 'repos')
git = Git("some_remote_value", 'origin', 'ref/to/some/branch')
actual_repo_directory = git.get_full_repo_directory('ssh://source_control.cr.com:1234/master')
expected_repo_directory = join(
Configuration['repo_directory'],
'source_control.cr.com1234',
'master'
)
self.assertEqual(expected_repo_directory, actual_repo_directory)
def test_get_timing_file_directory_removes_colon_from_directory_if_exists(self):
Configuration['timings_directory'] = join(expanduser('~'), 'tmp', 'timings')
git = Git("some_remote_value", 'origin', 'ref/to/some/branch')
actual_timing_directory = git.get_timing_file_directory('ssh://source_control.cr.com:1234/master')
expected_timing_directory = join(
Configuration['timings_directory'],
'source_control.cr.com1234',
'master',
)
self.assertEqual(expected_timing_directory, actual_timing_directory)
@genty_dataset(
shallow_clone_false=(False, True),
shallow_clone_true=(True, False),
)
def test_fetch_project_with_pre_shallow_cloned_repo(self, shallow_clone, should_delete_clone):
Configuration['shallow_clones'] = shallow_clone
self.os_path_isfile_mock.return_value = True
self.os_path_exists_mock.return_value = True
mock_fs = self.patch('app.project_type.git.fs')
mock_rmtree = self.patch('shutil.rmtree')
git = Git('url')
git._repo_directory = 'fake/repo_path'
git._execute_and_raise_on_failure = MagicMock()
git.execute_command_in_project = Mock(return_value=('', 0))
mock_fs.create_dir.call_count = 0 # only measure calls made in _fetch_project
mock_rmtree.call_count = 0
git._fetch_project()
if should_delete_clone:
mock_rmtree.assert_called_once_with('fake/repo_path')
else:
self.assertFalse(mock_rmtree.called)
@genty_dataset(
failed_rev_parse=(1, True),
successful_rev_parse=(0, False),
)
def test_repo_is_cloned_if_and_only_if_rev_parse_fails(self, rev_parse_return_code, expect_git_clone_call):
mock_popen = self._patch_popen({
'git rev-parse$': _FakePopenResult(return_code=rev_parse_return_code)
})
Configuration['repo_directory'] = '/repo-directory'
git = Git(url='http://original-user-specified-url.test/repo-path/repo-name')
git.fetch_project()
git_clone_call = call(AnyStringMatching('git clone'), start_new_session=ANY,
stdout=ANY, stderr=ANY, cwd=ANY, shell=ANY)
if expect_git_clone_call:
self.assertIn(git_clone_call, mock_popen.call_args_list, 'If "git rev-parse" returns a failing exit code, '
'"git clone" should be called.')
else:
self.assertNotIn(git_clone_call, mock_popen.call_args_list, 'If "git rev-parse" returns a successful exit '
'code, "git clone" should not be called.')
@genty_dataset(
shallow_clone=(True,),
no_shallow_clone=(False,),
)
def test_fetch_project_passes_depth_parameter_for_shallow_clone_configuration(self, shallow_clone):
Configuration['shallow_clones'] = shallow_clone
self.os_path_isfile_mock.return_value = False
self.os_path_exists_mock.return_value = False
mock_popen = self._patch_popen({'git rev-parse$': _FakePopenResult(return_code=1)})
git = Git(url='http://original-user-specified-url.test/repo-path/repo-name')
git.fetch_project()
git_clone_call = call(AnyStringMatching('git clone --depth=1'), start_new_session=ANY,
stdout=ANY, stderr=ANY, cwd=ANY, shell=ANY)
if shallow_clone:
self.assertIn(git_clone_call, mock_popen.call_args_list, 'If shallow cloning, the --depth=1 parameter '
'should be present.')
else:
self.assertNotIn(git_clone_call, mock_popen.call_args_list, 'If deep cloning, the --depth=1 parameter '
'must be absent.')
@genty_dataset(
strict_host_checking_is_on=(True,),
strict_host_checking_is_off=(False,),
)
def test_execute_git_command_auto_sets_strict_host_option_correctly(self, strict_host_check_setting):
Configuration['git_strict_host_key_checking'] = strict_host_check_setting
popen_mock = self._patch_popen()
git = Git(url='http://some-user-url.com/repo-path/repo-name')
git._execute_git_command_in_repo_and_raise_on_failure('fakecmd')
if strict_host_check_setting:
expected_ssh_arg = '-o StrictHostKeyChecking=yes'
else:
expected_ssh_arg = '-o StrictHostKeyChecking=no'
expected_call = call(AnyStringMatching(expected_ssh_arg),
start_new_session=ANY, stdout=ANY, stderr=ANY, cwd=ANY, shell=ANY)
self.assertIn(expected_call, popen_mock.call_args_list, 'Executed git command should include the correct '
'option for StrictHostKeyChecking.')
@skipIf(is_windows(), 'Skipping test for cloning repo from master on Windows')
def test_slave_param_overrides_returns_expected(self):
Configuration['get_project_from_master'] = True
Configuration['repo_directory'] = '/repo-directory'
self._patch_popen({
'git rev-parse FETCH_HEAD': _FakePopenResult(stdout='deadbee123\n')
})
git = Git(url='http://original-user-specified-url.test/repo-path/repo-name')
git.fetch_project()
actual_overrides = git.slave_param_overrides()
expected_overrides = {
'url': 'ssh://fake_hostname/repodirectory/originaluserspecifiedurl.test/repopath/reponame',
'branch': 'refs/clusterrunner/deadbee123',
}
self.assertEqual(expected_overrides, actual_overrides, 'Slave param overrides from Git object should match'
'expected.')
def test_slave_param_overrides_when_get_project_from_master_is_disabled(self):
Configuration['get_project_from_master'] = False
git = Git(url='http://original-user-specified-url.test/repo-path/repo-name')
actual_overrides = git.slave_param_overrides()
self.assertFalse(
'url' in actual_overrides,
'"url" should not be in the params to override when "get_project_from_master" is False',
)
self.assertFalse(
'branch' in actual_overrides,
'"branch" should not be in the params to override when "get_project_from_master" is False',
)
def _patch_popen(self, command_to_result_map=None):
"""
Mock out calls to Popen to inject fake results for specific command strings.
:param command_to_result_map: A dict that maps a command string regex to a _FakePopenResult object
:type command_to_result_map: dict[str, _FakePopenResult]
:return: The patched popen constructor mock
:rtype: MagicMock
"""
command_to_result_map = command_to_result_map or {}
self.patch('app.project_type.project_type.TemporaryFile', new=lambda: Mock())
project_type_popen_patch = self.patch('app.project_type.project_type.Popen_with_delayed_expansion')
def fake_popen_constructor(command, stdout, stderr, *args, **kwargs):
fake_result = _FakePopenResult() # default value
for command_regex in command_to_result_map:
if re.search(command_regex, command):
fake_result = command_to_result_map[command_regex]
break
stdout.read.return_value = fake_result.stdout.encode()
return Mock(spec=Popen, returncode=fake_result.return_code)
project_type_popen_patch.side_effect = fake_popen_constructor
return project_type_popen_patch
class _FakePopenResult:
def __init__(self, return_code=0, stdout='', stderr=''):
self.return_code = return_code
self.stdout = stdout
self.stderr = stderr
| box/ClusterRunner | test/unit/project_type/test_git.py | Python | apache-2.0 | 13,290 | 0.004138 |
'''
Created on Mar 22, 2011
@author: jeroen
'''
import os
from fileinfo import FileInfo
from bytesize import ByteSize
class DirInfo(object):
'''
    Simple class to represent a directory and obtain data about it when needed.
'''
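    # Illustrative usage sketch (hypothetical path): data is only gathered on first access.
    #
    #     info = DirInfo('/path/to/project', recursive=True)
    #     info.get_line_count()   # triggers the directory scan
    #     info.get_totalsize()    # served from the cached scan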
def __init__(self, path, recursive=False):
'''
Constructor
'''
self._path = path
self._initiated = False
self._recursive = recursive
self._files = []
self._dirs = []
self._filecount = 0
self._dircount = 0
self._totalsize = -1
self._codelines = 0
self._commentlines = 0
self._whitespacelines = 0
    def _init_if_needed(self):
        '''
        Check if the dir data is cached, and if not, obtain it.
        '''
if not self._initiated:
self._initiated = True
self._get_dir_info(self._path)
def _get_dir_info(self, rootdir):
for item in os.listdir(rootdir):
fullname = os.path.join(rootdir, item)
if not item.startswith('.') and not os.path.islink(fullname):
if os.path.isdir(fullname):
dir = DirInfo(fullname, self._recursive)
self._dirs.append(dir)
self._dircount += 1
if self._recursive:
self._filecount += dir.get_filecount()
self._dircount += dir.get_dircount()
self._totalsize += dir.get_totalsize()
self._codelines += dir.get_code_lines()
self._commentlines += dir.get_comment_lines()
self._whitespacelines += dir.get_whitespace_lines()
else:
file = FileInfo(rootdir, item)
self._files.append(file)
self._filecount += 1
self._totalsize += file.get_filesize()
self._codelines += file.get_code_lines()
self._commentlines += file.get_comment_lines()
self._whitespacelines += file.get_whitespace_lines()
def __repr__(self, recursive=None):
self.set_recursive(recursive)
self._init_if_needed()
return "%s (%s dirs, %s files, %s lines: %s code, %s comment, %s empty) %s" % (
self._path,
self._dircount,
self._filecount,
self.get_line_count(),
self.get_code_lines(),
self.get_comment_lines(),
self.get_whitespace_lines(),
ByteSize(self._totalsize).__repr__()
)
    def set_recursive(self, recursive):
        '''
        Set whether the directory should report data obtained recursively,
        or only look at what's directly in it. Note that changing the
        recursive setting invalidates the cached info.
        '''
if recursive is not None and recursive != self._recursive:
self._recursive = recursive
self._initiated = False
def get_files(self):
self._init_if_needed()
return self._files
def get_dirs(self):
self._init_if_needed()
return self._dirs
def get_path(self):
return self._path
def get_totalsize(self):
self._init_if_needed()
return self._totalsize
def get_code_lines(self):
self._init_if_needed()
return self._codelines
def get_comment_lines(self):
self._init_if_needed()
return self._commentlines
def get_whitespace_lines(self):
self._init_if_needed()
return self._whitespacelines
def get_line_count(self):
self._init_if_needed()
return self._codelines + self._commentlines + self._whitespacelines
def get_filecount(self):
self._init_if_needed()
return self._filecount
def get_dircount(self):
self._init_if_needed()
        return self._dircount
| JeroenDeDauw/phpstat | src/phpstat/dirinfo.py | Python | gpl-3.0 | 4,098 | 0.006833 |
from math import log
from PIL import Image, ImageDraw
my_data = [['slashdot', 'USA', 'yes', 18, 'None'],
['google', 'France', 'yes', 23, 'Premium'],
['digg', 'USA', 'yes', 24, 'Basic'],
['kiwitobes', 'France', 'yes', 23, 'Basic'],
['google', 'UK', 'no', 21, 'Premium'],
['(direct)', 'New Zealand', 'no', 12, 'None'],
['(direct)', 'UK', 'no', 21, 'Basic'],
['google', 'USA', 'no', 24, 'Premium'],
['slashdot', 'France', 'yes', 19, 'None'],
['digg', 'USA', 'no', 18, 'None'],
['google', 'UK', 'no', 18, 'None'],
['kiwitobes', 'UK', 'no', 19, 'None'],
['digg', 'New Zealand', 'yes', 12, 'Basic'],
['slashdot', 'UK', 'no', 21, 'None'],
['google', 'UK', 'yes', 18, 'Basic'],
['kiwitobes', 'France', 'yes', 19, 'Basic']]
class decisionnode(object):
"""docstring for decisionnode"""
def __init__(self, col=-1, value=None, results=None, tb=None, fb=None):
self.col = col
self.value = value
self.results = results
self.tb = tb
self.fb = fb
def divideset(rows, column, value):
split_function = None
if isinstance(value, int) or isinstance(value, float):
split_function = lambda row: row[column] >= value
else:
split_function = lambda row: row[column] == value
set1 = [row for row in rows if split_function(row)]
set2 = [row for row in rows if not split_function(row)]
return (set1, set2)
def uniquecounts(rows):
results = {}
for row in rows:
r = row[-1]
results.setdefault(r, 0)
results[r] += 1
return results
def giniimpurity(rows):
total = len(rows)
counts = uniquecounts(rows)
imp = 0
for k1 in counts:
p1 = float(counts[k1]) / total
imp += p1 * (1 - p1)
return imp
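# Example for giniimpurity above: it computes sum_k p_k * (1 - p_k), which equals
# 1 - sum_k p_k ** 2. A set split 50/50 between two classes gives
# 0.5 * 0.5 + 0.5 * 0.5 = 0.5, while a pure set gives 0.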
def entropy(rows):
total = len(rows)
log2 = lambda x: log(x) / log(2)
results = uniquecounts(rows)
ent = 0.0
for r in results.keys():
p = float(results[r]) / total
ent -= p * log2(p)
return ent
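# Example for entropy above: it computes H = -sum_k p_k * log2(p_k). A 50/50 two-class
# split gives -(0.5 * -1) - (0.5 * -1) = 1.0 bit, while a pure set gives 0.0.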
def buildtree(rows, scoref=entropy):
if len(rows) == 0:
return decisionnode()
current_score = scoref(rows)
best_gain = 0.0
best_criteria = None
best_sets = None
column_count = len(rows[0]) - 1
for col in xrange(column_count):
column_values = {}
for row in rows:
column_values.setdefault(row[col], 1)
for value in column_values.keys():
(set1, set2) = divideset(rows, col, value)
p = float(len(set1)) / len(rows)
gain = current_score - p * scoref(set1) - (1 - p) * scoref(set2)
if gain > best_gain and len(set1) > 0 and len(set2) > 0:
best_gain = gain
best_criteria = (col, value)
best_sets = (set1, set2)
if best_gain > 0:
trueBranch = buildtree(best_sets[0])
falseBranch = buildtree(best_sets[1])
return decisionnode(col=best_criteria[0], value=best_criteria[1], tb=trueBranch, fb=falseBranch)
else:
return decisionnode(results=uniquecounts(rows))
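# buildtree greedily picks the (column, value) split that maximises information gain,
# gain = score(rows) - p * score(set1) - (1 - p) * score(set2) with p = len(set1) / len(rows),
# and recurses until no split improves the score.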
def printtree(tree, indent=''):
    if tree.results is not None:
print str(tree.results)
else:
print '%s:%s?' % (str(tree.col), str(tree.value))
print indent + 'T->'
printtree(tree.tb, indent + '--')
print indent + 'F->'
printtree(tree.fb, indent + '--')
def getwidth(trees):
if trees.tb is None and trees.fb is None:
return 1
else:
return getwidth(trees.tb) + getwidth(trees.fb)
def getdepth(trees):
if trees.tb is None and trees.fb is None:
return 0
else:
return max(getdepth(trees.tb), getdepth(trees.fb)) + 1
def drawtrees(trees, jpeg='trees.jpg', widdelta=100, depdelta=100):
w = getwidth(trees) * widdelta
h = getdepth(trees) * depdelta + 120
img = Image.new('RGB', (w, h), (255, 255, 255))
draw = ImageDraw.Draw(img)
drawnode(draw, trees, w / 2, 20, widdelta, depdelta)
img.save(jpeg, 'JPEG')
def drawnode(draw, trees, x, y, widdelta=100, depdelta=100):
if trees.results is None:
wf = getwidth(trees.fb) * widdelta
wt = getwidth(trees.tb) * widdelta
left = x - (wf + wt) / 2
right = x + (wf + wt) / 2
if isinstance(trees.value, int) or isinstance(trees.value, float):
draw.text((x - 20, y - 10), '%s:>=%s?\n' %
(str(trees.col), str(trees.value)), (0, 0, 0))
else:
draw.text((x - 20, y - 10), '%s:==%s?\n' %
(str(trees.col), str(trees.value)), (0, 0, 0))
draw.line((x, y, left + wf / 2, y + depdelta), fill=(255, 0, 0))
draw.line((x, y, right - wt / 2, y + depdelta), fill=(255, 0, 0))
drawnode(draw, trees.fb, left + wf / 2,
y + depdelta, widdelta, depdelta)
drawnode(draw, trees.tb, right - wt / 2,
y + depdelta, widdelta, depdelta)
else:
txt = ' \n'.join(['%s:%d' % v for v in trees.results.items()])
draw.text((x - 20, y), txt, (0, 0, 0))
def classify(obs, tree):
if tree.results is not None:
return tree.results
else:
v = obs[tree.col]
branch = None
if isinstance(v, int) or isinstance(v, float):
if v >= tree.value:
branch = tree.tb
else:
branch = tree.fb
else:
if v == tree.value:
branch = tree.tb
else:
branch = tree.fb
return classify(obs, branch)
def prune(tree, mingain):
if tree.tb.results is None:
prune(tree.tb, mingain)
if tree.fb.results is None:
prune(tree.fb, mingain)
if tree.tb.results is not None and tree.fb.results is not None:
tb, fb = [], []
for v, c in tree.tb.results.items():
tb += [[v]] * c
for v, c in tree.fb.results.items():
fb += [[v]] * c
delta = entropy(tb + fb) - (entropy(tb) + entropy(fb)) / 2
if delta < mingain:
tree.tb, tree.fb = None, None
tree.results = uniquecounts(tb + fb)
def mdclassify(obs, tree):
if tree.results is not None:
return tree.results
else:
v=obs[tree.col]
if v is None:
tr, fr = mdclassify(obs, tree.tb), mdclassify(obs, tree.fb)
tcount = sum(tr.values())
fcount = sum(fr.values())
tw = float(tcount)/(tcount+fcount)
fw = float(fcount)/(tcount+fcount)
result = {}
for k,v in tr.items():
result.setdefault(k, v*tw)
for k,v in fr.items():
result.setdefault(k, 0)
result[k] += v*fw
return result
else:
if isinstance(v, int) or isinstance(v, float):
if v>=tree.value: branch = tree.tb
else: branch = tree.fb
else:
if v == tree.value: branch = tree.tb
else: branch = tree.fb
return mdclassify(obs, branch)
def variance(rows):
if len(rows)==0:
return 0
data = [float(row[-1]) for row in rows]
mean = sum(data)/len(data)
variance = sum([(d-mean)**2 for d in data])/len(data)
    return variance
| luoshao23/ML_algorithm | Decission_Tree/tree.py | Python | mit | 7,395 | 0.002299 |
"""Tests for coil."""
| marineam/coil | coil/test/__init__.py | Python | mit | 22 | 0 |
from django.contrib.auth.models import User
from pandas import read_csv, notnull, DataFrame
from numpy import isnan
from django.test import TestCase
from rhizome.models.campaign_models import Campaign, CampaignType, \
DataPointComputed, AggDataPoint
from rhizome.models.location_models import Location, LocationType, \
LocationTree
from rhizome.models.indicator_models import Indicator, IndicatorTag, \
IndicatorToTag, CalculatedIndicatorComponent
from rhizome.models.document_models import Document, SourceSubmission
from rhizome.models.datapoint_models import DataPoint
from rhizome.cache_meta import LocationTreeCache
from rhizome.tests.setup_helpers import TestSetupHelpers
class AggRefreshTestCase(TestCase):
'''
'''
def __init__(self, *args, **kwargs):
super(AggRefreshTestCase, self).__init__(*args, **kwargs)
def setUp(self):
self.ts = TestSetupHelpers()
data_df = read_csv('rhizome/tests/_data/calc_data.csv')
self.create_metadata()
self.user = User.objects.get(username="test")
self.test_df = data_df[data_df['is_raw'] == 1]
self.target_df = data_df[data_df['is_raw'] == 0]
self.campaign_id = Campaign.objects.all()[0].id
self.top_lvl_location = Location.objects.filter(name='Nigeria')[0]
ltr = LocationTreeCache()
ltr.main()
def create_metadata(self):
'''
        Creating the Indicator, Location, Campaign and other metadata needed for
        the system to aggregate / calculate.
'''
read_csv('rhizome/tests/_data/campaigns.csv')
location_df = read_csv('rhizome/tests/_data/locations.csv')
indicator_df = read_csv('rhizome/tests/_data/indicators.csv')
user_id = User.objects.create_user('test', 'john@john.com', 'test').id
self.location_type1 = LocationType.objects.create(admin_level=0,
name="country", id=1)
self.location_type2 = LocationType.objects.create(admin_level=1,
name="province", id=2)
campaign_type1 = CampaignType.objects.create(name='test')
self.locations = self.model_df_to_data(location_df, Location)
self.indicators = self.model_df_to_data(indicator_df, Indicator)
ind_tag = IndicatorTag.objects.create(tag_name='Polio')
sub_tag = IndicatorTag.objects.create(tag_name='Polio Management',
parent_tag_id=ind_tag.id)
ind_to_tag_batch = [IndicatorToTag(
**{'indicator_tag_id': sub_tag.id, 'indicator_id': ind.id}) for ind in self.indicators]
IndicatorToTag.objects.bulk_create(ind_to_tag_batch)
self.campaign_id = Campaign.objects.create(
start_date='2016-01-01',
end_date='2016-01-02',
campaign_type_id=campaign_type1.id
).id
document = Document.objects.create(
doc_title='test',
created_by_id=user_id,
guid='test')
self.ss = SourceSubmission.objects.create(
document_id=document.id,
submission_json='',
row_number=0,
data_date='2016-01-01'
).id
def model_df_to_data(self, model_df, model):
meta_ids = []
non_null_df = model_df.where((notnull(model_df)), None)
list_of_dicts = non_null_df.transpose().to_dict()
for row_ix, row_dict in list_of_dicts.iteritems():
row_id = model.objects.create(**row_dict)
meta_ids.append(row_id)
return meta_ids
def create_raw_datapoints(self):
for row_ix, row_data in self.test_df.iterrows():
dp_id = self.create_datapoint(row_data.location_id, row_data
.data_date, row_data.indicator_id, row_data.value)
# def create_datapoint(self, **kwargs):
def create_datapoint(self, location_id, data_date, indicator_id, value):
'''
Right now this is being performed as a database insert. I would like to
        test this against the data entry resource, but this will do for now
in order to test caching.
'''
document_id = Document.objects.get(doc_title='test').id
ss_id = SourceSubmission.objects.get(document_id=document_id).id
dp = DataPoint.objects.create(
location_id=location_id,
data_date=data_date,
indicator_id=indicator_id,
campaign_id=self.campaign_id,
value=value,
source_submission_id=ss_id,
unique_index=str(location_id) + str(data_date) +
str(self.campaign_id) + str(indicator_id)
)
return dp
def test_location_aggregation(self):
'''
        Using the calc_data.csv, create a test_df and target_df. Ensure that
        the aggregation and calculation are working properly by ingesting the
        stored data, running the cache, and checking that the calculated data
        for the aggregate location (parent location, in this case Nigeria) is as
        expected.
        In addition to the datapoints in the test file, I insert a null value
        to ensure that nulls won't corrupt the calculation.
python manage.py test rhizome.tests.test_agg.AggRefreshTestCase.
test_location_aggregation --settings=rhizome.settings.test
'''
self.create_raw_datapoints()
indicator_id, data_date, raw_location_id,\
agg_location_id, null_location_id, NaN_location_id = \
22, '2016-01-01', 12910, 12907, 12928, 12913
location_ids = Location.objects.filter(
parent_location_id=agg_location_id).values_list('id', flat=True)
DataPoint.objects.filter(
indicator_id=indicator_id,
# data_date = data_date,
location_id=null_location_id
).update(value=None)
DataPoint.objects.filter(
indicator_id=indicator_id,
# data_date = data_date,
location_id=NaN_location_id
).update(value='NaN')
dps = DataPoint.objects.filter(
indicator_id=indicator_id,
# data_date = data_date,
location_id__in=location_ids,
value__isnull=False
).values_list('id', 'value')
sum_dp_value = sum([y for x, y in dps if not isnan(y)])
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
#################################################
## ensure that raw data gets into AggDataPoint ##
#################################################
raw_value = DataPoint.objects.get(
# data_date = data_date,
indicator_id=indicator_id,
location_id=raw_location_id)\
.value
ind_obj = Indicator.objects.get(id=indicator_id)
raw_value_in_agg = AggDataPoint.objects.get(
# data_date = data_date,
indicator_id=indicator_id,
location_id=raw_location_id)\
.value
self.assertEqual(raw_value, raw_value_in_agg)
#############################################
## ensure that the aggregated data gets in ##
#############################################
loc_tree_df = DataFrame(list(LocationTree.objects.all().values()))
agg_df = DataFrame(list(AggDataPoint.objects.filter(\
indicator_id=indicator_id,\
campaign_id=self.campaign_id
).values()))
agg_value = AggDataPoint.objects.get(
indicator_id=indicator_id,
campaign_id=self.campaign_id,
location_id=agg_location_id
).value
self.assertEqual(agg_value, sum_dp_value)
######################################################
## ensure that any raw data will override aggregate ##
######################################################
override_value = 909090
agg_override_dp = self.create_datapoint(agg_location_id, data_date,
indicator_id, override_value)
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
override_value_in_agg = AggDataPoint.objects.get(
campaign_id=self.campaign_id,
indicator_id=indicator_id,
location_id=agg_location_id).value
self.assertEqual(override_value, override_value_in_agg)
###########################################
# ensure that percentages do not aggregate
###########################################
pct_ind = Indicator.objects.create(
name='pct missed',
short_name='pct_missed',
description='missed pct',
data_format='pct',
source_name='my brain',
)
dp_1 = DataPoint.objects.create(
indicator_id=pct_ind.id,
location_id=location_ids[0],
campaign_id=self.campaign_id,
data_date=data_date,
value=.2,
source_submission_id=self.ss,
unique_index=1
)
dp_2 = DataPoint.objects.create(
indicator_id=pct_ind.id,
location_id=location_ids[1],
campaign_id=self.campaign_id,
data_date=data_date,
value=.6,
source_submission_id=self.ss,
unique_index=2
)
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
try:
agg_dp_qs = AggDataPoint.objects.get(
location_id=agg_location_id,
indicator_id=pct_ind,
campaign_id=self.campaign_id,
)
error_ocurred = False
except AggDataPoint.DoesNotExist:
error_ocurred = True
self.assertTrue(error_ocurred)
def test_raw_data_to_computed(self):
'''
        This just makes sure that any data in the datapoint table gets into the
        calculated datapoint table. That is, if I insert a value for missed
        children in Borno, the same exact data should be in the
        datapoint_with_computed table no matter what.
'''
self.create_raw_datapoints()
indicator_id, data_date, raw_location_id,\
agg_location_id, campaign_id = 22, '2016-01-01', 12910, 12907, 1
location_ids = Location.objects.filter(
parent_location_id=agg_location_id).values_list('id', flat=True)
dp_values = DataPoint.objects.filter(
indicator_id=indicator_id,
data_date=data_date,
location_id__in=location_ids
).values_list('value', flat=True)
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
############################################################
## ensure that raw data gets into datapoint_with_computed ##
############################################################
raw_value = DataPoint.objects.get(data_date=data_date,
indicator_id=indicator_id,
location_id=raw_location_id)\
.value
raw_value_in_agg = DataPointComputed.objects.get(
campaign_id=self.campaign_id,
indicator_id=indicator_id,
location_id=raw_location_id)\
.value
self.assertEqual(raw_value, raw_value_in_agg)
def test_sum_and_pct(self):
'''
The system uses the "PART_TO_BE_SUMMED" edge type in order to create
indicators such that the sum of:
- Number Missed
- Missed due to other reasons(24)
- Child Absent(251)
- Not in Plan (267)
- Not Visited (268)
- Non Compliance(264)
gives us: All Missed Children (21)
as well as: pct missed children due to refusal (166)
        Here we create new metadata so we can test this functionality for an
        abstracted use case and check that
        1. We can SUM indicators
        2. We can use the result of #1 as the denominator for a percentage
        calculation.
'''
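        # Worked example with the values used below: 303 + 808 + 909 = 2020 deaths in
        # total, so the hunger percentage should come out as 909 / 2020 = 0.45.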
Indicator.objects.all().delete()
data_date, location_id, agg_location_id = '2016-01-01', 12910, 12907
val_1, val_2, val_3 = 303, 808, 909
## create the parent and sub indicators ##
parent_indicator = Indicator.objects.create(
name='Number of Avoidable Deaths',
short_name='Number of Avoidable Deaths',
data_format='int'
)
sub_indicator_1 = Indicator.objects.create(
name='Number of Deaths due to Conflict',
short_name='Number of Deaths due to Conflict',
data_format='int'
)
sub_indicator_2 = Indicator.objects.create(
name='Number of Deaths due to Malaria',
short_name='Number of Deaths due to Malaria',
data_format='int'
)
sub_indicator_3 = Indicator.objects.create(
name='Number of Deaths due to Hunger',
short_name='Number of Deaths due to Hunger',
data_format='int'
)
pct_indicator = Indicator.objects.create(
name='pct of Deaths due to Hunger',
short_name='pct of Deaths due to Hunger',
data_format='pct'
)
        ## FOR SUM OF PARTS CALCULATIONS ##
indicator_calc_1 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_1.id,
calculation='PART_TO_BE_SUMMED'
)
indicator_calc_2 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_2.id,
calculation='PART_TO_BE_SUMMED'
)
indicator_calc_3 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_3.id,
calculation='PART_TO_BE_SUMMED'
)
## FOR PART OVER WHOLE CALCULATIONS ##
indicator_calc_numerator = CalculatedIndicatorComponent.objects.create(
indicator_id=pct_indicator.id,
indicator_component_id=sub_indicator_3.id,
calculation='NUMERATOR'
)
indicator_calc_denominator = CalculatedIndicatorComponent.objects.create(
indicator_id=pct_indicator.id,
indicator_component_id=parent_indicator.id,
calculation='DENOMINATOR'
)
ss_id = SourceSubmission.objects.all()[0].id
## create the datapoints ##
dp_1 = DataPoint.objects.create(
indicator_id=sub_indicator_1.id,
data_date=data_date,
location_id=location_id,
campaign_id=self.campaign_id,
value=val_1,
source_submission_id=ss_id,
unique_index=1
)
dp_2 = DataPoint.objects.create(
indicator_id=sub_indicator_2.id,
data_date=data_date,
location_id=location_id,
campaign_id=self.campaign_id,
value=val_2,
source_submission_id=ss_id,
unique_index=2
)
dp_3 = DataPoint.objects.create(
indicator_id=sub_indicator_3.id,
data_date=data_date,
location_id=location_id,
campaign_id=self.campaign_id,
value=val_3,
source_submission_id=ss_id,
unique_index=3
)
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
calc_value_sum = DataPointComputed.objects.get(
indicator_id=parent_indicator.id,
campaign_id=self.campaign_id,
location_id=location_id
).value
calc_value_pct = DataPointComputed.objects.get(
indicator_id=pct_indicator.id,
campaign_id=self.campaign_id,
location_id=location_id
).value
# test SUM calculation
sum_target_value = val_1 + val_2 + val_3
self.assertEqual(calc_value_sum, sum_target_value)
        # test part over whole calculation
pct_target_value = val_3 / float(sum_target_value)
self.assertEqual(calc_value_pct, pct_target_value)
def test_part_of_difference(self):
'''
see here: rhizome.work/manage_system/manage/indicator/187
        We use this calculation type to compute:
WHOLE_OF_DIFFERENCE(x) - PART_OF_DIFFERENCE(y)
-----------------------------------------
WHOLE_OF_DIFFERENCE(x)
'''
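        # Worked example with the values used below: x = 303, y = 808, so the expected
        # result is (303 - 808) / 303 ≈ -1.6667.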
data_date, location_id, agg_location_id = '2016-01-01', 12910, 12907
x, y = 303.00, 808.00
## create the parent and sub indicators ##
parent_indicator = Indicator.objects.create(
name='Refsual Conversion',
short_name='Refsual Conversion',
data_format='pct'
)
sub_indicator_part = Indicator.objects.create(
name='Refusals After Revisit',
short_name='Refusals After Revisit',
data_format='int'
)
sub_indicator_denom = Indicator.objects.create(
name='Refusals Before Revisit',
short_name='Refusals Before Revisit',
data_format='int'
)
        ## FOR SUM OF PARTS CALCULATIONS ##
indicator_calc_1 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_part.id,
calculation='PART_OF_DIFFERENCE'
)
indicator_calc_3 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_denom.id,
calculation='WHOLE_OF_DIFFERENCE'
)
ss_id = SourceSubmission.objects.all()[0].id
## create the datapoints ##
dp_1 = DataPoint.objects.create(
indicator_id=sub_indicator_denom.id,
data_date=data_date,
location_id=location_id,
campaign_id=self.campaign_id,
value=x,
source_submission_id=ss_id,
unique_index=1
)
dp_2 = DataPoint.objects.create(
indicator_id=sub_indicator_part.id,
data_date=data_date,
location_id=location_id,
campaign_id=self.campaign_id,
value=y,
source_submission_id=ss_id,
unique_index=2
)
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
calc_value = DataPointComputed.objects.get(
indicator_id=parent_indicator.id,
campaign_id=self.campaign_id,
location_id=location_id
).value
        # test part over whole of difference calculation
target_value = (x - y) / x
self.assertEqual(round(calc_value, 4), round(target_value, 4))
def test_missing_part_of_sum(self):
data_date, location_id, agg_location_id = '2016-01-01', 12910, 12907
val_1, val_2 = 101, 102
## create the parent and sub indicators ##
parent_indicator = Indicator.objects.create(
name='Number of Missing Children',
short_name='Number of Avoidable Deaths',
data_format='int'
)
sub_indicator_1 = Indicator.objects.create(
name='Number Missing Due to Refusal',
short_name='Number Missing Due to Refusal',
data_format='int'
)
sub_indicator_2 = Indicator.objects.create(
name='Number Missing Due to Absence',
short_name='Number Missing Due to Absence',
data_format='int'
)
sub_indicator_3 = Indicator.objects.create(
name='Number Missing Due to ??',
short_name='Number Missing Due to ??',
data_format='int'
)
indicator_calc_1 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_1.id,
calculation='PART_TO_BE_SUMMED'
)
indicator_calc_2 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_2.id,
calculation='PART_TO_BE_SUMMED'
)
indicator_calc_3 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_3.id,
calculation='PART_TO_BE_SUMMED'
)
ss_id = SourceSubmission.objects.all()[0].id
        ## create the datapoints. We're only adding data points for ##
        ## two of the three indicators that are mapped as parts to be summed ##
dp_1 = DataPoint.objects.create(
indicator_id=sub_indicator_1.id,
data_date=data_date,
location_id=location_id,
campaign_id=self.campaign_id,
value=val_1,
source_submission_id=ss_id,
unique_index=1
)
dp_2 = DataPoint.objects.create(
indicator_id=sub_indicator_2.id,
data_date=data_date,
location_id=location_id,
campaign_id=self.campaign_id,
value=val_2,
source_submission_id=ss_id,
unique_index=2
)
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
calc_value_sum = DataPointComputed.objects.get(
indicator_id=parent_indicator.id,
campaign_id=self.campaign_id,
location_id=location_id
).value
sum_target_value = val_1 + val_2
self.assertEqual(calc_value_sum, sum_target_value)
def test_recursive_sum(self):
'''
Consider the case in which we have "number of missed children" which is
the sum of "missed children due to absence", "missed children due to
refusal", and "missed children due to child absence."
Now consider that "missed children due to refusal" is also generated
from the sum of "refusal due to religious reasons", "refusal due to
too many rounds", "refusal due to - unhappy with team " (see more here:
http://rhizome.work/manage_system/manage/indicator/264).
There are two levels here and this test aims to cover this use case.
'''
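        # Worked example with the values inserted below: the top-level indicator should
        # equal 33 + 44 + 44 + 55 + 66 + 77 + 88 = 407, while sub_indicator_2 keeps its
        # own raw value of 33 rather than the 77 + 88 sum of its children.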
data_date, location_id = '2016-01-01', 12910
Indicator.objects.all().delete()
parent_indicator = Indicator.objects.create(
name='Number of Avoidable Deaths',
short_name='Number of Avoidable Deaths',
data_format='int'
)
sub_indicator_1 = Indicator.objects.create(
name='Number of Deaths due to Conflict',
short_name='Number of Deaths due to Conflict',
data_format='int'
)
sub_sub_indicator_1 = Indicator.objects.create(
name='Number Conflict Deaths - Children',
short_name='Conflict Deaths - Children',
data_format='int'
)
sub_sub_indicator_2 = Indicator.objects.create(
name='Number of Adult Civilian Deaths',
short_name='Number of Adult Civilian Deaths',
data_format='int'
)
sub_sub_indicator_3 = Indicator.objects.create(
name='Number of Conflict Deaths - Militants',
short_name='Conflict Deaths - Militants',
data_format='int'
)
sub_indicator_2 = Indicator.objects.create(
name='Number of Deaths due to Malaria',
short_name='Number of Deaths due to Malaria',
data_format='int'
)
sub_indicator_2_sub_1 = Indicator.objects.create(
name='Number of Deaths due to Malaria -- Child had No Net',
short_name='Number of Deaths due to Malaria -- no net',
data_format='int'
)
sub_indicator_2_sub_2 = Indicator.objects.create(
name='Number of Deaths due to Malaria -- Child had No Medicine',
short_name='Number of Deaths due to Malaria -- no Medicie',
data_format='int'
)
sub_indicator_3 = Indicator.objects.create(
name='Number of Deaths due to Hunger',
short_name='Number of Deaths due to Hunger',
data_format='int'
)
        ## FOR SUM OF PARTS CALCULATIONS ##
indicator_calc_1 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_1.id,
calculation='PART_TO_BE_SUMMED'
)
indicator_calc_2 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_2.id,
calculation='PART_TO_BE_SUMMED'
)
indicator_calc_3 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_3.id,
calculation='PART_TO_BE_SUMMED'
)
## 2nd layer of indicator calculation ##
sub_indicator_calc_1 = CalculatedIndicatorComponent.objects.create(
indicator_id=sub_indicator_1.id,
indicator_component_id=sub_sub_indicator_1.id,
calculation='PART_TO_BE_SUMMED'
)
sub_indicator_calc_2 = CalculatedIndicatorComponent.objects.create(
indicator_id=sub_indicator_1.id,
indicator_component_id=sub_sub_indicator_2.id,
calculation='PART_TO_BE_SUMMED'
)
sub_indicator_calc_3 = CalculatedIndicatorComponent.objects.create(
indicator_id=sub_indicator_1.id,
indicator_component_id=sub_sub_indicator_3.id,
calculation='PART_TO_BE_SUMMED'
)
## 2nd layer of indicator calculation ##
sub_indicator_calc_1 = CalculatedIndicatorComponent.objects.create(
indicator_id=sub_indicator_2.id,
indicator_component_id=sub_indicator_2_sub_1.id,
calculation='PART_TO_BE_SUMMED'
)
sub_indicator_calc_2 = CalculatedIndicatorComponent.objects.create(
indicator_id=sub_indicator_2.id,
indicator_component_id=sub_indicator_2_sub_2.id,
calculation='PART_TO_BE_SUMMED'
)
## create all the datapoints ##
values_to_insert = {
sub_indicator_2.id: 33,
sub_indicator_3.id: 44,
sub_sub_indicator_1.id: 44,
sub_sub_indicator_2.id: 55,
sub_sub_indicator_3.id: 66,
sub_indicator_2_sub_1.id: 77,
sub_indicator_2_sub_2.id: 88,
}
for k, v in values_to_insert.iteritems():
self.create_datapoint(location_id, data_date, k, v)
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
parent_indicator_target_value = sum(values_to_insert.values())
parent_indicator_1_actual_value = DataPointComputed.objects.get(
location_id=location_id,
indicator_id=parent_indicator,
).value
self.assertEqual(parent_indicator_1_actual_value,
parent_indicator_target_value)
# test that a parent overrides the sum of its children when there
        ## are multiple levels of indicator calculations ##
sub_2_target_val = values_to_insert[sub_indicator_2.id]
sub_2_actual_val = DataPointComputed.objects.get(
location_id=location_id,
indicator_id=sub_indicator_2.id,
).value
self.assertEqual(sub_2_target_val, sub_2_actual_val)
def test_boolean_aggregation(self):
        # create a boolean indicator
boolean_indicator = Indicator.objects.create(
name='Is Controlled by "Anti Governemnt Elements"',
short_name='Is at War',
data_format='bool'
)
        # find the locations for which we should store raw data. For instance,
        # if it is 'district is at war', then we don't expect data stored at
        # the province level. Here though, we get all children of a particular
        # parent.
locations = Location.objects.filter(
parent_location_id=self.top_lvl_location.id)
        # split the data into 1 value being false, the rest being true.
        # this alludes to the fact that the parent location should have a value
        # that is something like [ 1 - 1 / len(data) ]
false_loc_id = locations[0].id
true_loc_list = locations[1:]
## create the true and false datapoints ##
false_datapoint = DataPoint.objects.create(
campaign_id=self.campaign_id,
location_id=false_loc_id,
indicator_id=boolean_indicator.id,
source_submission_id=self.ss,
value=0
)
true_datapoint_batch = [DataPoint(**{
'campaign_id': self.campaign_id,
'location_id': loc.id,
'indicator_id': boolean_indicator.id,
'source_submission_id': self.ss,
'value': 1,
'unique_index': str(self.campaign_id) + str(boolean_indicator.id) + str(loc.id)
}) for loc in true_loc_list]
DataPoint.objects.bulk_create(true_datapoint_batch)
        # run the agg refresh (this is the code that will actually transform
        # the booleans to numerics).
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
        # now get the expected aggregated data and compare it with the percentage
# value that we expect given how we split up the locations above.
dwc_value = DataPointComputed.objects.get(
location_id=self.top_lvl_location.id,
campaign_id=self.campaign_id,
indicator=boolean_indicator.id
).value
expected_value = 1 - (1.0 / len(locations))
self.assertEqual(expected_value, dwc_value)
def test_calculated_indicator_agg(self):
Indicator.objects.all().delete()
data_date, agg_location_id = '2016-01-01', 12907
child_locations = Location.objects.filter(
parent_location_id=agg_location_id)
location_id = child_locations[0].id
location_id_2 = child_locations[1].id
## create the parent and sub indicators ##
parent_indicator = Indicator.objects.create(
name='Number of Avoidable Deaths',
short_name='Number of Avoidable Deaths',
data_format='int'
)
sub_indicator_1 = Indicator.objects.create(
name='Number of Deaths due to Conflict',
short_name='Number of Deaths due to Conflict',
data_format='int'
)
pct_indicator = Indicator.objects.create(
name='pct of Deaths due to Conflict',
short_name='pct of Deaths due to Conflict',
data_format='pct'
)
## FOR PART OVER WHOLE CALCULATIONS ##
indicator_calc_numerator = CalculatedIndicatorComponent.objects.create(
indicator_id=pct_indicator.id,
indicator_component_id=sub_indicator_1.id,
calculation='NUMERATOR'
)
indicator_calc_denominator = CalculatedIndicatorComponent.objects.create(
indicator_id=pct_indicator.id,
indicator_component_id=parent_indicator.id,
calculation='DENOMINATOR'
)
val_1 = 32
val_2 = 100
val_1_loc_2 = 48
val_2_loc_2 = 200
ss_id = SourceSubmission.objects.all()[0].id
## create the datapoints ##
dp_1 = DataPoint.objects.create(
indicator_id=sub_indicator_1.id,
data_date=data_date,
location_id=location_id,
campaign_id=self.campaign_id,
value=val_1,
source_submission_id=ss_id,
unique_index=1
)
dp_2 = DataPoint.objects.create(
indicator_id=parent_indicator.id,
data_date=data_date,
location_id=location_id,
campaign_id=self.campaign_id,
value=val_2,
source_submission_id=ss_id,
unique_index=2
)
dp_1_loc_2 = DataPoint.objects.create(
indicator_id=sub_indicator_1.id,
data_date=data_date,
location_id=location_id_2,
campaign_id=self.campaign_id,
value=val_1_loc_2,
source_submission_id=ss_id,
unique_index=3
)
dp_2_loc_2 = DataPoint.objects.create(
indicator_id=parent_indicator.id,
data_date=data_date,
location_id=location_id_2,
campaign_id=self.campaign_id,
value=val_2_loc_2,
source_submission_id=ss_id,
unique_index=4
)
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
calc_value_pct = DataPointComputed.objects.get(
indicator_id=pct_indicator.id,
campaign_id=self.campaign_id,
location_id=location_id
).value
calc_value_pct_2 = DataPointComputed.objects.get(
indicator_id=pct_indicator.id,
campaign_id=self.campaign_id,
location_id=location_id_2
).value
# test part over whole calculation for child locations
pct_target_value = val_1 / float(val_2)
self.assertEqual(calc_value_pct, pct_target_value)
pct_target_value_2 = val_1_loc_2 / float(val_2_loc_2)
self.assertEqual(calc_value_pct_2, pct_target_value_2)
# make sure that part over whole aggregates as well
total_dp = DataPointComputed.objects.get(
indicator_id=parent_indicator.id,
campaign_id=self.campaign_id,
location_id=agg_location_id).value
self.assertEqual(total_dp, val_2 + val_2_loc_2)
try:
pct_dp = DataPointComputed.objects.get(
indicator_id=pct_indicator.id,
campaign_id=self.campaign_id,
location_id=agg_location_id).value
        except DataPointComputed.DoesNotExist:
            self.fail("aggregation did not work")
self.assertEqual(round(pct_dp, 5), round(
(val_1 + val_1_loc_2) / float(val_2 + val_2_loc_2), 5))
def test_multiple_calculations(self):
num_seen = Indicator.objects.create(
name='number children seen',
short_name='number children seen',
data_format='int'
)
num_vacc = Indicator.objects.create(
name='number children vaccinated',
short_name='number children vaccinated',
data_format='int'
)
num_missed = Indicator.objects.create(
name='number children missed',
short_name='number children missed',
data_format='int'
)
pct_missed = Indicator.objects.create(
name='pct childrent missed',
short_name='pct children missed',
data_format='pct'
)
indicator_calc_numerator = CalculatedIndicatorComponent.objects.create(
indicator_id=pct_missed.id,
indicator_component_id=num_missed.id,
calculation='NUMERATOR'
)
indicator_calc_denominator = CalculatedIndicatorComponent.objects.create(
indicator_id=pct_missed.id,
indicator_component_id=num_seen.id,
calculation='DENOMINATOR'
)
indicator_calc_part_of_diff = CalculatedIndicatorComponent.objects.create(
indicator_id=pct_missed.id,
indicator_component_id=num_vacc.id,
calculation='PART_OF_DIFFERENCE'
)
indicator_calc_part_of_whole = CalculatedIndicatorComponent.objects.create(
indicator_id=pct_missed.id,
indicator_component_id=num_seen.id,
calculation='WHOLE_OF_DIFFERENCE'
)
num_missed_val = 45.0
num_seen_val = 100.0
num_vacc_val = 55.0
ss_id = SourceSubmission.objects.all()[0].id
dp_num_missed = DataPoint.objects.create(
indicator_id=num_missed.id,
location_id=self.top_lvl_location.id,
campaign_id=self.campaign_id,
value=num_missed_val,
source_submission_id=ss_id,
unique_index=3
)
dp_num_seen = DataPoint.objects.create(
indicator_id=num_seen.id,
location_id=self.top_lvl_location.id,
campaign_id=self.campaign_id,
value=num_seen_val,
source_submission_id=ss_id,
unique_index=4
)
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
        # check that the numerator and denominator options work
cdp_pct_missed_1 = DataPointComputed.objects.filter(
indicator_id=pct_missed.id)[0]
self.assertEqual(cdp_pct_missed_1.value,
num_missed_val / float(num_seen_val))
dp_num_vaccinated = DataPoint.objects.create(
indicator_id=num_vacc.id,
location_id=self.top_lvl_location.id,
campaign_id=self.campaign_id,
value=num_vacc_val,
source_submission_id=ss_id,
unique_index=5
)
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
# check that this works when we can do whole/part of difference
cdp_pct_missed_2 = DataPointComputed.objects.filter(
indicator_id=pct_missed.id)[0]
        # expected value: 1.0 - float(num_vacc_val) / float(num_seen_val) = 1.0 - 55 / 100 = 0.45
self.assertEqual(cdp_pct_missed_2.value, 0.45)
# check that this works when we can only do whole/part of difference
DataPoint.objects.filter(indicator_id=num_missed.id).delete()
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
cdp_pct_missed_3 = DataPointComputed.objects.filter(
indicator_id=pct_missed.id)[0]
self.assertEqual(cdp_pct_missed_3.value, 0.45)
| unicef/rhizome | rhizome/tests/test_agg.py | Python | agpl-3.0 | 38,994 | 0.001795 |
"""AWS plugin for integration tests."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ..util import (
ApplicationError,
display,
is_shippable,
ConfigParser,
)
from . import (
CloudProvider,
CloudEnvironment,
CloudEnvironmentConfig,
)
from ..core_ci import (
AnsibleCoreCI,
)
class AwsCloudProvider(CloudProvider):
"""AWS cloud provider plugin. Sets up cloud resources before delegation."""
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
if os.path.isfile(self.config_static_path):
return
aci = self._create_ansible_core_ci()
if os.path.isfile(aci.ci_key):
return
if is_shippable():
return
super(AwsCloudProvider, self).filter(targets, exclude)
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(AwsCloudProvider, self).setup()
aws_config_path = os.path.expanduser('~/.aws')
if os.path.exists(aws_config_path) and not self.args.docker and not self.args.remote:
raise ApplicationError('Rename "%s" or use the --docker or --remote option to isolate tests.' % aws_config_path)
if not self._use_static_config():
self._setup_dynamic()
def _setup_dynamic(self):
"""Request AWS credentials through the Ansible Core CI service."""
display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
config = self._read_config_template()
aci = self._create_ansible_core_ci()
response = aci.start()
if not self.args.explain:
credentials = response['aws']['credentials']
values = dict(
ACCESS_KEY=credentials['access_key'],
SECRET_KEY=credentials['secret_key'],
SECURITY_TOKEN=credentials['session_token'],
REGION='us-east-1',
)
display.sensitive.add(values['SECRET_KEY'])
display.sensitive.add(values['SECURITY_TOKEN'])
config = self._populate_config_template(config, values)
self._write_config(config)
def _create_ansible_core_ci(self):
"""
:rtype: AnsibleCoreCI
"""
return AnsibleCoreCI(self.args, 'aws', 'sts', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
class AwsCloudEnvironment(CloudEnvironment):
"""AWS cloud environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self):
"""
:rtype: CloudEnvironmentConfig
"""
parser = ConfigParser()
parser.read(self.config_path)
ansible_vars = dict(
resource_prefix=self.resource_prefix,
)
ansible_vars.update(dict(parser.items('default')))
display.sensitive.add(ansible_vars.get('aws_secret_key'))
display.sensitive.add(ansible_vars.get('security_token'))
if 'aws_cleanup' not in ansible_vars:
ansible_vars['aws_cleanup'] = not self.managed
env_vars = {'ANSIBLE_DEBUG_BOTOCORE_LOGS': 'True'}
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
callback_plugins=['aws_resource_actions'],
)
def on_failure(self, target, tries):
"""
:type target: TestTarget
:type tries: int
"""
if not tries and self.managed:
display.notice('If %s failed due to permissions, the IAM test policy may need to be updated. '
'For help, consult @mattclay or @gundalow on GitHub or #ansible-devel on IRC.' % target.name)
| thaim/ansible | test/lib/ansible_test/_internal/cloud/aws.py | Python | mit | 3,947 | 0.00228 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListServices
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-service-usage
# [START serviceusage_v1_generated_ServiceUsage_ListServices_async]
from google.cloud import service_usage_v1
async def sample_list_services():
# Create a client
client = service_usage_v1.ServiceUsageAsyncClient()
# Initialize request argument(s)
request = service_usage_v1.ListServicesRequest(
)
# Make the request
page_result = client.list_services(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END serviceusage_v1_generated_ServiceUsage_ListServices_async]
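# Example driver (illustrative, not part of the generated sample): the coroutine above
# can be run from a script with the standard library event loop, for example:
#
#     import asyncio
#     asyncio.run(sample_list_services())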
| googleapis/python-service-usage | samples/generated_samples/serviceusage_v1_generated_service_usage_list_services_async.py | Python | apache-2.0 | 1,497 | 0.000668 |
"""
The MIT License
Copyright (c) 2007 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs, parse_qsl
except ImportError:
from cgi import parse_qs, parse_qsl
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occured.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s, safe='~')
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {
'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret
}
return urllib.urlencode(data)
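# Illustrative usage (hypothetical credentials issued by the service provider):
#
#     consumer = Consumer(key='my-app-key', secret='my-app-secret')
#     print consumer        # urlencoded oauth_consumer_key / oauth_consumer_secret pair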
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
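# Illustrative usage (hypothetical values): a token round-trips through its string
# form, which makes it easy to persist between the request and access steps.
#
#     token = Token(key='request-key', secret='request-secret')
#     token.set_callback('http://example.com/ready')
#     restored = Token.from_string(token.to_string())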
def setter(attr):
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
http_method = HTTP_METHOD
http_url = None
version = VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None):
if method is not None:
self.method = method
if url is not None:
self.url = url
if parameters is not None:
self.update(parameters)
@setter
def url(self, value):
parts = urlparse.urlparse(value)
scheme, netloc, path = parts[:3]
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme != 'http' and scheme != 'https':
raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
value = '%s://%s%s' % (scheme, netloc, path)
self.__dict__['url'] = value
@setter
def method(self, value):
self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
return self['oauth_timestamp'], self['oauth_nonce']
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.iteritems()
if not k.startswith('oauth_')])
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
oauth_params = ((k, v) for k, v in self.items()
if k.startswith('oauth_'))
stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
params_header = ', '.join(header_params)
auth_header = 'OAuth realm="%s"' % realm
if params_header:
auth_header = "%s, %s" % (auth_header, params_header)
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
return self.encode_postdata(self)
def encode_postdata(self, data):
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urllib.urlencode(data, True)
def to_url(self):
"""Serialize as a URL for a GET request."""
return '%s?%s' % (self.url, self.to_postdata())
def get_parameter(self, parameter):
ret = self.get(parameter)
if ret is None:
raise Error('Parameter not found: %s' % parameter)
return ret
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
items = [(k, v) for k, v in self.items() if k != 'oauth_signature']
encoded_str = urllib.urlencode(sorted(items), True)
# Encode signature parameters per Oauth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20')
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = cls._split_header(auth_header)
parameters.update(header_params)
except:
raise Error('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
return Request(http_method, http_url, parameters)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
@staticmethod
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
@staticmethod
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = parse_qs(param_str, keep_blank_values=False)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
class Server(object):
"""A skeletal implementation of a service provider, providing protected
resources to requests from authorized consumers.
This class implements the logic to check requests for authorization. You
can use it with your web server or web framework to protect certain
resources with OAuth.
"""
timestamp_threshold = 300 # In seconds, five minutes.
version = VERSION
signature_methods = None
def __init__(self, signature_methods=None):
self.signature_methods = signature_methods or {}
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.name] = signature_method
return self.signature_methods
def verify_request(self, request, consumer, token):
"""Verifies an api call and checks all the parameters."""
version = self._get_version(request)
self._check_signature(request, consumer, token)
parameters = request.get_nonoauth_parameters()
return parameters
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _get_version(self, request):
"""Verify the correct version request for this server."""
try:
version = request.get_parameter('oauth_version')
except:
version = VERSION
if version and version != self.version:
raise Error('OAuth version %s not supported.' % str(version))
return version
def _get_signature_method(self, request):
"""Figure out the signature with some defaults."""
try:
signature_method = request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
            raise Error('Signature method %s is not supported. Try one of the following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_verifier(self, request):
return request.get_parameter('oauth_verifier')
def _check_signature(self, request, consumer, token):
timestamp, nonce = request._get_timestamp_nonce()
self._check_timestamp(timestamp)
signature_method = self._get_signature_method(request)
try:
signature = request.get_parameter('oauth_signature')
except:
raise MissingSignature('Missing oauth_signature.')
# Validate the signature.
valid = signature_method.check(request, consumer, token, signature)
if not valid:
key, base = signature_method.signing_base(request, consumer, token)
raise Error('Invalid signature. Expected signature base '
'string: %s' % base)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
raise Error('Expired timestamp: given %d and now %s has a '
'greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold))
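# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only, not part of the original module.
# Typical provider-side verification with the Server class above, assuming the
# consumer and token lookup is handled by the surrounding web framework:
#
#   server = Server()
#   server.add_signature_method(SignatureMethod_HMAC_SHA1())
#   oauth_request = Request.from_request('GET', requested_url,
#                                        headers=request_headers,
#                                        query_string=query_string)
#   extra_params = server.verify_request(oauth_request, consumer, token)
# ---------------------------------------------------------------------------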
class Client(httplib2.Http):
"""OAuthClient is a worker to attempt to execute a request."""
def __init__(self, consumer, token=None, cache=None, timeout=None,
proxy_info=None):
if consumer is not None and not isinstance(consumer, Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, Token):
raise ValueError("Invalid token.")
self.consumer = consumer
self.token = token
self.method = SignatureMethod_HMAC_SHA1()
httplib2.Http.__init__(self, cache=cache, timeout=timeout,
proxy_info=proxy_info)
def set_signature_method(self, method):
if not isinstance(method, SignatureMethod):
raise ValueError("Invalid signature method.")
self.method = method
def request(self, uri, method="GET", body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None,
force_auth_header=False):
if not isinstance(headers, dict):
headers = {}
if body and method == "POST":
parameters = dict(parse_qsl(body))
elif method == "GET":
parsed = urlparse.urlparse(uri)
parameters = parse_qs(parsed.query)
else:
parameters = None
req = Request.from_consumer_and_token(self.consumer, token=self.token,
http_method=method, http_url=uri, parameters=parameters)
req.sign_request(self.method, self.consumer, self.token)
if force_auth_header:
# ensure we always send Authorization
headers.update(req.to_header())
if method == "POST":
if not force_auth_header:
body = req.to_postdata()
else:
body = req.encode_postdata(req.get_nonoauth_parameters())
headers['Content-Type'] = 'application/x-www-form-urlencoded'
elif method == "GET":
if not force_auth_header:
uri = req.to_url()
else:
if not force_auth_header:
# don't call update twice.
headers.update(req.to_header())
return httplib2.Http.request(self, uri, method=method, body=body,
headers=headers, redirections=redirections,
connection_type=connection_type)
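# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only, not part of the original module.
# Typical three-legged use of the Client class above once an access token has
# been obtained, assuming the Consumer and Token classes defined earlier in
# this module take (key, secret):
#
#   client = Client(Consumer('consumer-key', 'consumer-secret'),
#                   Token('access-token', 'token-secret'))
#   resp, content = client.request('http://example.com/api/photos',
#                                  method='GET')
# ---------------------------------------------------------------------------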
class SignatureMethod(object):
"""A way of signing requests.
The OAuth protocol lets consumers and service providers pick a way to sign
requests. This interface shows the methods expected by the other `oauth`
modules for signing requests. Subclass it and implement its methods to
provide a new way to sign requests.
"""
def signing_base(self, request, consumer, token):
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError
def sign(self, request, consumer, token):
"""Returns the signature for the given request, based on the consumer
and token also provided.
You should use your implementation of `signing_base()` to build the
message to sign. Otherwise it may be less useful for debugging.
"""
raise NotImplementedError
def check(self, request, consumer, token, signature):
"""Returns whether the given signature is the correct signature for
the given consumer and token signing the given request."""
built = self.sign(request, consumer, token)
return built == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
name = 'HMAC-SHA1'
def signing_base(self, request, consumer, token):
sig = (
escape(request.method),
escape(request.url),
escape(request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def sign(self, request, consumer, token):
"""Builds the base signature string."""
key, raw = self.signing_base(request, consumer, token)
# HMAC object.
try:
import hashlib # 2.5
hashed = hmac.new(key, raw, hashlib.sha1)
except ImportError:
import sha # Deprecated
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
name = 'PLAINTEXT'
def signing_base(self, request, consumer, token):
"""Concatenates the consumer key and secret with the token's
secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def sign(self, request, consumer, token):
key, raw = self.signing_base(request, consumer, token)
return raw
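# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only, not part of the original module.
# A minimal two-legged signing flow, assuming the Consumer class defined
# earlier in this module takes (key, secret).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    _consumer = Consumer('example-key', 'example-secret')
    _request = Request.from_consumer_and_token(
        _consumer, http_method='GET', http_url='http://example.com/photos')
    _request.sign_request(SignatureMethod_HMAC_SHA1(), _consumer, None)
    assert 'Authorization' in _request.to_header()
    assert _request.to_url().startswith('http://example.com/photos?')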
| b0nk/botxxy | src/oauth2.py | Python | gpl-2.0 | 23,431 | 0.001536 |
import ctypes
libc = ctypes.CDLL("/usr/lib/libc.dylib")
print(libc.rand())
print(libc.time())
cPrintF = libc.printf
value = b"I'm a C function!"
print(value)
printValue = ctypes.c_char_p(value)
print(printValue.value)
print(printValue)
cPrintF("%s", printValue)
| torchhound/projects | python/ffi.py | Python | gpl-3.0 | 264 | 0 |
#!/usr/bin/env python
"""
A convenience wrapper around the mysql connector.
This file is part of open-ihm.
open-ihm is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
open-ihm is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with open-ihm. If not, see <http://www.gnu.org/licenses/>.
"""
import includes.mysql.connector as connector
import data.database
# refactored to remove duplicate code while
# providing same interface as before.
class Database(data.database.Database):
pass
| snim2mirror/openihm | src/openihm/model/database.py | Python | lgpl-3.0 | 949 | 0.002107 |
#!/usr/bin/env python
import sys
sys.path.extend(['.', '..'])
import unittest
suite = unittest.TestLoader().loadTestsFromNames(
[
'test_c_lexer',
'test_c_ast',
'test_general',
'test_c_parser',
]
)
unittest.TextTestRunner(verbosity=1).run(suite)
| kk1987/pycparser | tests/all_tests.py | Python | bsd-3-clause | 294 | 0.006803 |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class hostname(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/hostname. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines TLV 137.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "hostname"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"hostname",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/hostname/state (container)
YANG Description: State parameters of ISIS TLV 137.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/hostname/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of ISIS TLV 137.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
from . import state
class hostname(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/hostname. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines TLV 137.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "hostname"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"hostname",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/hostname/state (container)
YANG Description: State parameters of ISIS TLV 137.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/hostname/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters of ISIS TLV 137.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
| napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/hostname/__init__.py | Python | apache-2.0 | 11,660 | 0.001286 |
from steps.bdd_test_util import cli_call
def after_scenario(context, scenario):
if 'doNotDecompose' in scenario.tags:
print("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, context.compose_yaml))
else:
if 'compose_yaml' in context:
print("Decomposing with yaml '{0}' after scenario {1}, ".format(context.compose_yaml, scenario.name))
context.compose_output, context.compose_error, context.compose_returncode = \
cli_call(context, ["docker-compose", "-f", context.compose_yaml, "kill"], expect_success=True)
context.compose_output, context.compose_error, context.compose_returncode = \
cli_call(context, ["docker-compose", "-f", context.compose_yaml, "rm","-f"], expect_success=True)
# now remove any other containers (chaincodes)
context.compose_output, context.compose_error, context.compose_returncode = \
cli_call(context, ["docker", "ps", "-qa"], expect_success=True)
if context.compose_returncode == 0:
# Remove each container
for containerId in context.compose_output.splitlines():
#print("docker rm {0}".format(containerId))
context.compose_output, context.compose_error, context.compose_returncode = \
cli_call(context, ["docker", "rm", containerId], expect_success=True)
| ghaskins/obc-peer | openchain/peer/bddtests/environment.py | Python | apache-2.0 | 1,357 | 0.034635 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from airflow.utils import timezone
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta # flake8: noqa: F401 for doctest
import six
from croniter import croniter
cron_presets = {
'@hourly': '0 * * * *',
'@daily': '0 0 * * *',
'@weekly': '0 0 * * 0',
'@monthly': '0 0 1 * *',
'@yearly': '0 0 1 1 *',
}
def date_range(start_date, end_date=None, num=None, delta=None):
"""
Get a set of dates as a list based on a start, end and delta, delta
can be something that can be added to ``datetime.datetime``
or a cron expression as a ``str``
:param start_date: anchor date to start the series from
:type start_date: datetime.datetime
:param end_date: right boundary for the date range
:type end_date: datetime.datetime
    :param num: as an alternative to end_date, you can specify the number of
        entries you want in the range. This number can be negative,
output will always be sorted regardless
:type num: int
>>> date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta=timedelta(1))
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0),
datetime.datetime(2016, 1, 3, 0, 0)]
>>> date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta='0 0 * * *')
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0),
datetime.datetime(2016, 1, 3, 0, 0)]
>>> date_range(datetime(2016, 1, 1), datetime(2016, 3, 3), delta="0 0 0 * *")
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 2, 1, 0, 0),
datetime.datetime(2016, 3, 1, 0, 0)]
"""
if not delta:
return []
if end_date and start_date > end_date:
raise Exception("Wait. start_date needs to be before end_date")
if end_date and num:
raise Exception("Wait. Either specify end_date OR num")
if not end_date and not num:
end_date = timezone.utcnow()
delta_iscron = False
tz = start_date.tzinfo
if isinstance(delta, six.string_types):
delta_iscron = True
start_date = timezone.make_naive(start_date, tz)
cron = croniter(delta, start_date)
elif isinstance(delta, timedelta):
delta = abs(delta)
dates = []
if end_date:
if timezone.is_naive(start_date):
end_date = timezone.make_naive(end_date, tz)
while start_date <= end_date:
if timezone.is_naive(start_date):
dates.append(timezone.make_aware(start_date, tz))
else:
dates.append(start_date)
if delta_iscron:
start_date = cron.get_next(datetime)
else:
start_date += delta
else:
for _ in range(abs(num)):
if timezone.is_naive(start_date):
dates.append(timezone.make_aware(start_date, tz))
else:
dates.append(start_date)
if delta_iscron:
if num > 0:
start_date = cron.get_next(datetime)
else:
start_date = cron.get_prev(datetime)
else:
if num > 0:
start_date += delta
else:
start_date -= delta
return sorted(dates)
def round_time(dt, delta, start_date=timezone.make_aware(datetime.min)):
"""
Returns the datetime of the form start_date + i * delta
which is closest to dt for any non-negative integer i.
Note that delta may be a datetime.timedelta or a dateutil.relativedelta
>>> round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
datetime.datetime(2015, 1, 1, 0, 0)
>>> round_time(datetime(2015, 1, 2), relativedelta(months=1))
datetime.datetime(2015, 1, 1, 0, 0)
>>> round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 16, 0, 0)
>>> round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 15, 0, 0)
>>> round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 14, 0, 0)
>>> round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 14, 0, 0)
"""
if isinstance(delta, six.string_types):
# It's cron based, so it's easy
tz = start_date.tzinfo
start_date = timezone.make_naive(start_date, tz)
cron = croniter(delta, start_date)
prev = cron.get_prev(datetime)
if prev == start_date:
return timezone.make_aware(start_date, tz)
else:
return timezone.make_aware(prev, tz)
# Ignore the microseconds of dt
dt -= timedelta(microseconds=dt.microsecond)
# We are looking for a datetime in the form start_date + i * delta
# which is as close as possible to dt. Since delta could be a relative
# delta we don't know its exact length in seconds so we cannot rely on
# division to find i. Instead we employ a binary search algorithm, first
    # finding an upper and lower limit and then bisecting the interval until
# we have found the closest match.
# We first search an upper limit for i for which start_date + upper * delta
# exceeds dt.
upper = 1
while start_date + upper * delta < dt:
# To speed up finding an upper limit we grow this exponentially by a
# factor of 2
upper *= 2
# Since upper is the first value for which start_date + upper * delta
    # exceeds dt, upper // 2 is below dt and therefore forms a lower limit
# for the i we are looking for
lower = upper // 2
    # We now continue to bisect the interval between
# start_date + lower * delta and start_date + upper * delta
# until we find the closest value
while True:
# Invariant: start + lower * delta < dt <= start + upper * delta
# If start_date + (lower + 1)*delta exceeds dt, then either lower or
# lower+1 has to be the solution we are searching for
if start_date + (lower + 1) * delta >= dt:
# Check if start_date + (lower + 1)*delta or
# start_date + lower*delta is closer to dt and return the solution
if (
(start_date + (lower + 1) * delta) - dt <=
dt - (start_date + lower * delta)):
return start_date + (lower + 1) * delta
else:
return start_date + lower * delta
        # We bisect the interval and either replace the lower or upper
# limit with the candidate
candidate = lower + (upper - lower) // 2
if start_date + candidate * delta >= dt:
upper = candidate
else:
lower = candidate
# in the special case when start_date > dt the search for upper will
# immediately stop for upper == 1 which results in lower = upper // 2 = 0
# and this function returns start_date.
def infer_time_unit(time_seconds_arr):
"""
Determine the most appropriate time unit for an array of time durations
specified in seconds.
e.g. 5400 seconds => 'minutes', 36000 seconds => 'hours'
"""
if len(time_seconds_arr) == 0:
return 'hours'
max_time_seconds = max(time_seconds_arr)
if max_time_seconds <= 60 * 2:
return 'seconds'
elif max_time_seconds <= 60 * 60 * 2:
return 'minutes'
elif max_time_seconds <= 24 * 60 * 60 * 2:
return 'hours'
else:
return 'days'
def scale_time_units(time_seconds_arr, unit):
"""
Convert an array of time durations in seconds to the specified time unit.
"""
if unit == 'minutes':
return list(map(lambda x: x * 1.0 / 60, time_seconds_arr))
elif unit == 'hours':
return list(map(lambda x: x * 1.0 / (60 * 60), time_seconds_arr))
elif unit == 'days':
return list(map(lambda x: x * 1.0 / (24 * 60 * 60), time_seconds_arr))
return time_seconds_arr
def days_ago(n, hour=0, minute=0, second=0, microsecond=0):
"""
Get a datetime object representing `n` days ago. By default the time is
set to midnight.
"""
today = timezone.utcnow().replace(
hour=hour,
minute=minute,
second=second,
microsecond=microsecond)
return today - timedelta(days=n)
def parse_execution_date(execution_date_str):
"""
Parse execution date string to datetime object.
"""
return timezone.parse(execution_date_str)
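# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only, not part of the original module.
# A quick exercise of the helpers above; the values follow the doctests.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    print(date_range(datetime(2016, 1, 1), datetime(2016, 1, 3),
                     delta=timedelta(1)))
    durations = [90.0, 5400.0, 36000.0]            # seconds
    unit = infer_time_unit(durations)              # 'hours' for these values
    print(unit, scale_time_units(durations, unit))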
| sid88in/incubator-airflow | airflow/utils/dates.py | Python | apache-2.0 | 9,508 | 0.000947 |
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Customizations for the cloudsearchdomain command.
This module customizes the cloudsearchdomain command:
* Add validation that --endpoint-url is required.
"""
def register_cloudsearchdomain(cli):
cli.register_last('calling-command.cloudsearchdomain',
validate_endpoint_url)
def validate_endpoint_url(parsed_globals, **kwargs):
if parsed_globals.endpoint_url is None:
        raise ValueError(
"--endpoint-url is required for cloudsearchdomain commands")
| LockScreen/Backend | venv/lib/python2.7/site-packages/awscli/customizations/cloudsearchdomain.py | Python | mit | 1,074 | 0.000931 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Description:
    Plot the delay evolution during a run,
    for multiple runs, with the run time
    (in seconds) shown on the X axis.
@author: dspaccapeli
"""
#imports to manage the sql db
import sqlite3 as lite
import pandas as pd
#to make the plot show-up from command line
import matplotlib.pyplot as plt
#to get multiple evolution of delay
from random import shuffle
#connect to the database
db_connection = lite.connect('DATABASE_PATH')
#open the cursor to start querying the database - read ops
read_curs = db_connection.cursor()
#this is line 550
route_id = 2550
#refine query
hh_start = 15
hh_end = 2
wk_start = 1
wk_end = 5
direction = 1
#select delay and elapsed run time for the chosen route, direction and hours
df = pd.read_sql_query("SELECT delay, sch_time-start_time as time, start_time as begin from hsl where route_id=%s and direction=%s or hour>=%s and hour<=%s order by time" % \
(route_id, direction, hh_start, hh_end), db_connection)
#select a list of all the different start_time -> they identify daily departures for a bus (~run_code)
unq_dep = df.begin.unique()
#init 9 plots
for count in [1, 2, 3, 4, 5, 6, 7, 8, 9]:
#take a random run_code
shuffle(unq_dep)
i=0
for x in unq_dep:
i+=1
#for each run_code
temp = df[df['begin'] == x]
#plot evolution of the delay
plt.plot(temp['time'], temp['delay'], alpha=0.6)
#plt.scatter(temp['time'], temp['delay'], alpha=0.7)
#up to a max of 5 lines
if i==10:
break
plt.suptitle('Delay progression between %s and %s during the week' % (hh_start, hh_end))
plt.xlabel('run time')
plt.ylabel('delay')
plt.savefig(str(count), ext="png")
plt.clf()
#uncomment if you want to do it cumulatively
#plt.suptitle('Delay progression between %s and %s during the week' % (hh_start, hh_end))
#plt.xlabel('run time')
#plt.ylabel('delay')
#plt.savefig(str(count), ext="png")
#plt.clf()
| dspaccapeli/bus-arrival | visualization/plot_delay_evo.py | Python | gpl-3.0 | 1,979 | 0.016675 |
# coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.contrib.node.subsystems.command import command_gen
LOG = logging.getLogger(__name__)
PACKAGE_MANAGER_NPM = 'npm'
PACKAGE_MANAGER_YARNPKG = 'yarnpkg'
PACKAGE_MANAGER_YARNPKG_ALIAS = 'yarn'
VALID_PACKAGE_MANAGERS = [PACKAGE_MANAGER_NPM, PACKAGE_MANAGER_YARNPKG, PACKAGE_MANAGER_YARNPKG_ALIAS]
# TODO: Change to enum type when migrated to Python 3.4+
class PackageInstallationTypeOption(object):
PROD = 'prod'
DEV = 'dev'
PEER = 'peer'
BUNDLE = 'bundle'
OPTIONAL = 'optional'
NO_SAVE = 'not saved'
class PackageInstallationVersionOption(object):
EXACT = 'exact'
TILDE = 'tilde'
class PackageManager(object):
"""Defines node package manager functionalities."""
def __init__(self, name, tool_installations):
self.name = name
self.tool_installations = tool_installations
def _get_installation_args(self, install_optional, production_only, force):
"""Returns command line args for installing package.
:param install_optional: True to request install optional dependencies.
:param production_only: True to only install production dependencies, i.e.
ignore devDependencies.
:param force: True to force re-download dependencies.
:rtype: list of strings
"""
raise NotImplementedError
def _get_run_script_args(self):
"""Returns command line args to run a package.json script.
:rtype: list of strings
"""
raise NotImplementedError
def _get_add_package_args(self, package, type_option, version_option):
"""Returns command line args to add a node pacakge.
:rtype: list of strings
"""
raise NotImplementedError()
def run_command(self, args=None, node_paths=None):
"""Returns a command that when executed will run an arbitury command via package manager."""
return command_gen(
self.tool_installations,
self.name,
args=args,
node_paths=node_paths
)
def install_module(
self,
install_optional=False,
production_only=False,
force=False,
node_paths=None):
"""Returns a command that when executed will install node package.
:param install_optional: True to install optional dependencies.
:param production_only: True to only install production dependencies, i.e.
ignore devDependencies.
:param force: True to force re-download dependencies.
    :param node_paths: A list of paths that should be included in $PATH when
running installation.
"""
args=self._get_installation_args(
install_optional=install_optional,
production_only=production_only,
force=force)
return self.run_command(args=args, node_paths=node_paths)
def run_script(self, script_name, script_args=None, node_paths=None):
"""Returns a command to execute a package.json script.
    :param script_name: Name of the script to run. Note that the script name 'test'
can be used to run node tests.
:param script_args: Args to be passed to package.json script.
    :param node_paths: A list of paths that should be included in $PATH when
running the script.
"""
# TODO: consider add a pants.util function to manipulate command line.
package_manager_args = self._get_run_script_args()
package_manager_args.append(script_name)
if script_args:
package_manager_args.append('--')
package_manager_args.extend(script_args)
return self.run_command(args=package_manager_args, node_paths=node_paths)
def add_package(
self,
package,
node_paths=None,
type_option=PackageInstallationTypeOption.PROD,
version_option=None):
"""Returns a command that when executed will add a node package to current node module.
:param package: string. A valid npm/yarn package description. The accepted forms are
package-name, package-name@version, package-name@tag, file:/folder, file:/path/to.tgz
https://url/to.tgz
    :param node_paths: A list of paths that should be included in $PATH when
running the script.
:param type_option: A value from PackageInstallationTypeOption that indicates the type
of package to be installed. Default to 'prod', which is a production dependency.
:param version_option: A value from PackageInstallationVersionOption that indicates how
to match version. Default to None, which uses package manager default.
"""
args=self._get_add_package_args(
package,
type_option=type_option,
version_option=version_option)
return self.run_command(args=args, node_paths=node_paths)
def run_cli(self, cli, args=None, node_paths=None):
"""Returns a command that when executed will run an installed cli via package manager."""
cli_args = [cli]
if args:
cli_args.append('--')
cli_args.extend(args)
return self.run_command(args=cli_args, node_paths=node_paths)
class PackageManagerYarnpkg(PackageManager):
def __init__(self, tool_installation):
super(PackageManagerYarnpkg, self).__init__(PACKAGE_MANAGER_YARNPKG, tool_installation)
def _get_run_script_args(self):
return ['run']
def _get_installation_args(self, install_optional, production_only, force):
return_args = ['--non-interactive']
if not install_optional:
return_args.append('--ignore-optional')
if production_only:
return_args.append('--production=true')
if force:
return_args.append('--force')
return return_args
def _get_add_package_args(self, package, type_option, version_option):
return_args = ['add', package]
package_type_option = {
PackageInstallationTypeOption.PROD: '', # Yarn save production is the default.
PackageInstallationTypeOption.DEV: '--dev',
PackageInstallationTypeOption.PEER: '--peer',
PackageInstallationTypeOption.OPTIONAL: '--optional',
PackageInstallationTypeOption.BUNDLE: None,
PackageInstallationTypeOption.NO_SAVE: None,
}.get(type_option)
if package_type_option is None:
logging.warning('{} does not support {} packages, ignored.'.format(self.name, type_option))
elif package_type_option: # Skip over '' entries
return_args.append(package_type_option)
package_version_option = {
PackageInstallationVersionOption.EXACT: '--exact',
PackageInstallationVersionOption.TILDE: '--tilde',
}.get(version_option)
if package_version_option is None:
LOG.warning(
'{} does not support install with {} version, ignored'.format(self.name, version_option))
elif package_version_option: # Skip over '' entries
return_args.append(package_version_option)
return return_args
class PackageManagerNpm(PackageManager):
def __init__(self, tool_installation):
super(PackageManagerNpm, self).__init__(PACKAGE_MANAGER_NPM, tool_installation)
def _get_run_script_args(self):
return ['run-script']
def _get_installation_args(self, install_optional, production_only, force):
return_args = ['install']
if not install_optional:
return_args.append('--no-optional')
if production_only:
return_args.append('--production')
if force:
return_args.append('--force')
return return_args
def _get_add_package_args(self, package, type_option, version_option):
return_args = ['install', package]
package_type_option = {
PackageInstallationTypeOption.PROD: '--save-prod',
PackageInstallationTypeOption.DEV: '--save-dev',
PackageInstallationTypeOption.PEER: None,
PackageInstallationTypeOption.OPTIONAL: '--save-optional',
PackageInstallationTypeOption.BUNDLE: '--save-bundle',
PackageInstallationTypeOption.NO_SAVE: '--no-save',
}.get(type_option)
if package_type_option is None:
logging.warning('{} does not support {} packages, ignored.'.format(self.name, type_option))
elif package_type_option: # Skip over '' entries
return_args.append(package_type_option)
package_version_option = {
PackageInstallationVersionOption.EXACT: '--save-exact',
PackageInstallationVersionOption.TILDE: None,
}.get(version_option)
if package_version_option is None:
LOG.warning(
'{} does not support install with {} version, ignored.'.format(self.name, version_option))
elif package_version_option: # Skip over '' entries
return_args.append(package_version_option)
return return_args
def run_cli(self, cli, args=None, node_paths=None):
raise RuntimeError('npm does not support run cli directly. Please use Yarn instead.')
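# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only, not part of the original module.
# `yarn_installation` stands in for the tool-installation value the node
# subsystem normally passes to these constructors; each call below returns a
# command object built by command_gen for the caller to execute:
#
#   yarn = PackageManagerYarnpkg(yarn_installation)
#   install_cmd = yarn.install_module(production_only=True,
#                                     node_paths=['/opt/node/bin'])
#   add_cmd = yarn.add_package(
#       'lodash@4.17.4',
#       type_option=PackageInstallationTypeOption.DEV,
#       version_option=PackageInstallationVersionOption.EXACT)
#   test_cmd = yarn.run_script('test', script_args=['--verbose'])
# ---------------------------------------------------------------------------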
| UnrememberMe/pants | contrib/node/src/python/pants/contrib/node/subsystems/package_managers.py | Python | apache-2.0 | 8,798 | 0.007729 |
#!/usr/bin/env python
"""Shared classes between the client and the server for plist parsing."""
import calendar
import datetime
from binplist import binplist
from grr.lib import lexer
from grr.lib import objectfilter
class PlistFilterParser(objectfilter.Parser):
"""Plist specific filter parser.
Because we will be filtering dictionaries and the path components will be
matched against dictionary keys, we must be more permissive with attribute
names.
This parser allows path components to be enclosed in double quotes to allow
for spaces, dots or even raw hex-escaped data in them, such as:
"My\x20first\x20path component".2nd."TH.IRD" contains "Google"
We store the attribute name as a list of paths into the object instead of as
a simple string that will be chunked in objectfilter.
"""
tokens = [
# Operators and related tokens
lexer.Token("INITIAL", r"\@[\w._0-9]+",
"ContextOperator,PushState", "CONTEXTOPEN"),
lexer.Token("INITIAL", r"[^\s\(\)]", "PushState,PushBack", "ATTRIBUTE"),
lexer.Token("INITIAL", r"\(", "PushState,BracketOpen", None),
lexer.Token("INITIAL", r"\)", "BracketClose", "BINARY"),
# Context
lexer.Token("CONTEXTOPEN", r"\(", "BracketOpen", "INITIAL"),
# Double quoted string
lexer.Token("STRING", "\"", "PopState,StringFinish", None),
lexer.Token("STRING", r"\\x(..)", "HexEscape", None),
lexer.Token("STRING", r"\\(.)", "StringEscape", None),
lexer.Token("STRING", r"[^\\\"]+", "StringInsert", None),
# Single quoted string
lexer.Token("SQ_STRING", "'", "PopState,StringFinish", None),
lexer.Token("SQ_STRING", r"\\x(..)", "HexEscape", None),
lexer.Token("SQ_STRING", r"\\(.)", "StringEscape", None),
lexer.Token("SQ_STRING", r"[^\\']+", "StringInsert", None),
# Basic expression
lexer.Token("ATTRIBUTE", r"\.", "AddAttributePath", "ATTRIBUTE"),
lexer.Token("ATTRIBUTE", r"\s+", "AddAttributePath", "OPERATOR"),
lexer.Token("ATTRIBUTE", "\"", "PushState,StringStart", "STRING"),
lexer.Token("ATTRIBUTE",
r"[\w_0-9\-]+",
"StringStart,StringInsert",
"ATTRIBUTE"),
lexer.Token("OPERATOR", r"(\w+|[<>!=]=?)", "StoreOperator", "ARG"),
lexer.Token("ARG", r"(\d+\.\d+)", "InsertFloatArg", "ARG"),
lexer.Token("ARG", r"(0x\d+)", "InsertInt16Arg", "ARG"),
lexer.Token("ARG", r"(\d+)", "InsertIntArg", "ARG"),
lexer.Token("ARG", "\"", "PushState,StringStart", "STRING"),
lexer.Token("ARG", "'", "PushState,StringStart", "SQ_STRING"),
# When the last parameter from arg_list has been pushed
# State where binary operators are supported (AND, OR)
lexer.Token("BINARY", r"(?i)(and|or|\&\&|\|\|)",
"BinaryOperator", "INITIAL"),
# - We can also skip spaces
lexer.Token("BINARY", r"\s+", None, None),
# - But if it's not "and" or just spaces we have to go back
lexer.Token("BINARY", ".", "PushBack,PopState", None),
# Skip whitespace.
lexer.Token(".", r"\s+", None, None),
]
def StringFinish(self, **_):
"""StringFinish doesn't act on ATTRIBUTEs here."""
if self.state == "ARG":
return self.InsertArg(string=self.string)
def AddAttributePath(self, **_):
"""Adds a path component to the current attribute."""
attribute_path = self.current_expression.attribute
if not attribute_path:
attribute_path = []
attribute_path.append(self.string)
self.current_expression.SetAttribute(attribute_path)
class PlistExpander(objectfilter.ValueExpander):
"""A custom expander specific to plists."""
def _GetValue(self, obj, attr_name):
try:
return obj.get(attr_name, None)
except AttributeError:
# This is no dictionary... are we a list of dictionaries?
return [item.get(attr_name, None) for item in obj]
def _AtNonLeaf(self, attr_value, path):
"""Makes dictionaries expandable when dealing with plists."""
if isinstance(attr_value, dict):
for value in self.Expand(attr_value, path[1:]):
yield value
else:
for v in objectfilter.ValueExpander._AtNonLeaf(self, attr_value, path):
yield v
class PlistFilterImplementation(objectfilter.BaseFilterImplementation):
FILTERS = {}
FILTERS.update(objectfilter.BaseFilterImplementation.FILTERS)
FILTERS.update({"ValueExpander": PlistExpander})
def PlistValueToPlainValue(plist):
"""Takes the plist contents generated by binplist and returns a plain dict.
binplist uses rich types to express some of the plist types. We need to
convert them to types that RDFValueArray will be able to transport.
Args:
plist: A plist to convert.
Returns:
A simple python type.
"""
if isinstance(plist, dict):
ret_value = dict()
for key, value in plist.items():
ret_value[key] = PlistValueToPlainValue(value)
return ret_value
elif isinstance(plist, list):
return [PlistValueToPlainValue(value) for value in plist]
elif isinstance(plist, binplist.RawValue):
return plist.value
elif (isinstance(plist, binplist.CorruptReference)
or isinstance(plist, binplist.UnknownObject)):
return None
elif isinstance(plist, datetime.datetime):
return (calendar.timegm(plist.utctimetuple()) * 1000000) + plist.microsecond
return plist
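# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only, not part of the original module,
# and it assumes the usual objectfilter Parse/Compile/Matches API. A filter
# string is parsed and then matched against the plain dict produced by
# PlistValueToPlainValue:
#
#   query = 'LSEnvironment.PATH contains "/usr/local"'
#   matcher = PlistFilterParser(query).Parse().Compile(
#       PlistFilterImplementation)
#   matcher.Matches({"LSEnvironment": {"PATH": "/usr/local/bin:/usr/bin"}})
# ---------------------------------------------------------------------------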
| darrenbilby/grr | lib/plist.py | Python | apache-2.0 | 5,402 | 0.004813 |
from math import cos, sin, pi
from example_util import get_filename
from glc import Gif
def draw(l, surf, ctx, t):
xpos = cos(t * 2 * pi) * 100 + surf.get_width() * 0.5
ypos = sin(t * 2 * pi) * 100 + surf.get_height() * 0.5
w, h = 100, 100
ctx.set_source_rgb(0, 0, 0)
ctx.translate(xpos, ypos)
ctx.translate(-w * 0.5, -h * 0.5)
ctx.rectangle(0, 0, w, w)
ctx.fill()
with Gif(get_filename(__file__), after_render=draw) as a:
a.save()
| leovoel/glc.py | examples/custom_rendering.py | Python | mit | 473 | 0.002114 |
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.authentication import (BasicAuthentication,
SessionAuthentication)
from django.db import transaction
from storageadmin.auth import DigestAuthentication
from storageadmin.models import (Share, SambaShare, NFSExport, Disk,
IscsiTarget)
from storageadmin.util import handle_exception
from storageadmin.serializers import IscsiSerializer
from system.iscsi import export_iscsi
from fs.btrfs import mount_share
import logging
logger = logging.getLogger(__name__)
class ShareIscsiView(APIView):
def get(self, request, sname):
try:
share = Share.objects.get(name=sname)
if (IscsiTarget.objects.filter(share=share).exists()):
iscsi_o = IscsiTarget.objects.get(share=share)
iscsi_serializer = IscsiSerializer(iscsi_o)
return Response(iscsi_serializer.data)
return Response()
except Exception, e:
handle_exception(e, request)
@transaction.commit_on_success
def post(self, request, sname):
try:
share = Share.objects.get(name=sname)
if (SambaShare.objects.filter(share=share).exists()):
raise Exception('Already exported via Samba')
if (NFSExport.objects.filter(share=share).exists()):
raise Exception('Already exported via nfs')
if (IscsiTarget.objects.filter(share=share).exists()):
raise Exception('Already exported via iscsi')
options = {
'tname': 'fooscsi',
'tid': -1,
'dev_name': 'iscsi.img',
'dev_size': 10,
}
if ('tname' in request.data):
options['tname'] = request.data['tname']
if ('tid' in request.data):
try:
options['tid'] = int(request.data['tid'])
except:
raise Exception('tid must be an integer')
pool_device = Disk.objects.filter(pool=share.pool)[0].name
mnt_pt = '/mnt2/' + share.name
mount_share(share.name, pool_device, mnt_pt)
dev_name = mnt_pt + '/' + options['dev_name']
export_iscsi(options['tid'], options['tname'], options['tid'],
dev_name, options['dev_size'])
iscsi_target = IscsiTarget(share=share, tid=options['tid'],
tname=options['tname'],
dev_name=dev_name,
dev_size=options['dev_size'])
iscsi_target.save()
iscsi_serializer = IscsiSerializer(iscsi_target)
return Response(iscsi_serializer.data)
except Exception, e:
handle_exception(e, request)
@transaction.commit_on_success
def delete(self, request, sname):
try:
share = Share.objects.get(name=sname)
iscsi_target = IscsiTarget.objects.get(share=share)
iscsi_target.delete()
return Response()
except Exception, e:
handle_exception(e, request)
| sbrichards/rockstor-core | src/rockstor/storageadmin/views/share_iscsi.py | Python | gpl-3.0 | 4,001 | 0.00025 |
# small script for replaying recorded stock and option ticker data from a sqlite db into the arnovich core server
from optparse import OptionParser
import sqlite3
import time
import string
import arnovich.core as core
def parse_command_args():
parser = OptionParser()
parser.add_option("-t", "--ticker", action="append", type="string", dest="tickers")
parser.add_option("--from", action="store", type="string", dest="fromdate", default="")
parser.add_option("--to", action="store", type="string", dest="todate", default="")
parser.add_option("--db", action="store", dest="dbfile")
parser.add_option("--wait", action="store", type="int", dest="wait", default=1)
(options, args) = parser.parse_args()
return (options.dbfile, options.tickers, options.fromdate, options.todate, options.wait)
def find_dates(dbfile, tickers, fromdate, todate, wait):
if fromdate != "":
fromtime = time.mktime(time.strptime(fromdate, "%Y-%m-%d %H:%M:%S"))
if todate != "":
totime = time.mktime(time.strptime(todate, "%Y-%m-%d %H:%M:%S"))
connection = core.connection()
sql_tickers = string.join(tickers, "\",\"")
conn = sqlite3.connect(dbfile)
c = conn.cursor()
d = conn.cursor()
c.execute("select ticker_id, ticker from stocks_static_data where ticker in (\""+sql_tickers+"\")")
prevtime = 0
for ticker_id in c:
#should check if it already exists using get_ticker
srv_id = connection.add_ticker(str(ticker_id[1]))
srv_id_opt = connection.add_ticker(str(ticker_id[1])+"_options")
if (fromdate == "") or (todate == ""):
d.execute("select date, data from stocks_data where ticker_id="+str(ticker_id[0])+" ORDER BY date")
else:
d.execute("select date, data from stocks_data where ticker_id="+str(ticker_id[0])+" and (date > "+str(fromtime)+" AND date < "+str(totime)+")")
for r in d:
rowdate = str(r[0])
rowdata = str(r[1])
rowtime = float(r[0])
if prevtime == 0:
prevtime = rowtime
connection.push_ticker(srv_id, rowdata)
vcursor = conn.cursor()
vcursor.execute("select data from options_data where ticker_id="+str(ticker_id[0])+" and date="+rowdate)
for row in vcursor:
connection.push_ticker(srv_id_opt, str(row[0]))
#TODO make this better: take exec time into consideration
time.sleep((rowtime-prevtime)/wait)
prevtime = rowtime
c.close()
def main():
(dbfile, tickers, fromdate, todate, wait) = parse_command_args()
find_dates(dbfile, tickers, fromdate, todate, wait)
if __name__ == "__main__":
main()
| arnovich/core | test/yahoo/py/extract_to_srv.py | Python | bsd-3-clause | 2,381 | 0.025619 |
import sys
class Solution:
    # @param {integer} s
    # @param {integer[]} nums
    # @return {integer}
    # Sliding-window (two-pointer) scan: grow the window [i, j] until its sum
    # reaches s, then shrink it from the left while tracking the minimum length.
    def minSubArrayLen(self, s, nums):
i = 0
j = -1
n = len(nums)
t = 0
min_len = sys.maxint
while(i<n and j <n):
if t < s:
j += 1
if j >=n :
break
t += nums[j]
else:
if min_len > (j-i+1):
min_len = j-i+1
t -= nums[i]
i += 1
if min_len == sys.maxint:
return 0
else:
return min_len | saai/codingbitch | twoPointers/minSubArrayLen.py | Python | mit | 623 | 0.008026 |
__version__ = "0.0.2a3"
| trueneu/swiss-knife | swk_plugins/swk_casp/swk_casp/version.py | Python | gpl-3.0 | 24 | 0 |
from __future__ import division
import numpy as np
from numpy.lib.stride_tricks import as_strided as ast
### striding data for efficient AR computations
def AR_striding(data,nlags):
# I had some trouble with views and as_strided, so copy if not contiguous
data = np.asarray(data)
if not data.flags.c_contiguous:
data = data.copy(order='C')
if data.ndim == 1:
data = np.reshape(data,(-1,1))
sz = data.dtype.itemsize
return ast(
data,
shape=(data.shape[0]-nlags,data.shape[1]*(nlags+1)),
strides=(data.shape[1]*sz,sz))
def undo_AR_striding(strided_data,nlags):
sz = strided_data.dtype.itemsize
return ast(
strided_data,
shape=(strided_data.shape[0]+nlags,strided_data.shape[1]//(nlags+1)),
strides=(strided_data.shape[1]//(nlags+1)*sz,sz))
### analyzing AR coefficient matrices
def canonical_matrix(A):
# NOTE: throws away affine part
D, nlags, _ = dimensions(A)
mat = np.zeros((D*nlags,D*nlags))
mat[:-D,D:] = np.eye(D*(nlags-1))
mat[-D:,:] = A[:,:D*nlags]
return mat
def eval_siso_transfer_function(A,from_idx,to_idx,freqs):
D, _, _ = dimensions(A)
assert 0 <= from_idx < D and 0 <= to_idx < D
bigA = canonical_matrix(A)
I = np.eye(bigA.shape[0])
zs = np.exp(1j*np.array(freqs))
return np.array(
[np.linalg.inv(z*I-bigA)[-D:,-2*D:-D][to_idx,from_idx]
for z in zs])
def is_affine(A):
return bool(A.shape[1] % A.shape[0])
def is_stable(A):
bigA = canonical_matrix(A)
return np.all(np.abs(np.linalg.eigvals(bigA)) < 1.)
def dimensions(A):
if is_affine(A):
A = A[:,:-1]
D, nlags = A.shape[0], A.shape[1] // A.shape[0]
return D, nlags, is_affine(A)
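# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only, not part of the original module.
# AR_striding turns a (T, D) series into overlapping lag windows of shape
# (T - nlags, D * (nlags + 1)); undo_AR_striding inverts the operation.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    data = np.arange(10.).reshape(5, 2)        # T=5 observations, D=2
    strided = AR_striding(data, nlags=2)
    assert strided.shape == (3, 6)             # rows hold [x_t, x_{t+1}, x_{t+2}]
    assert np.allclose(undo_AR_striding(strided, 2), data)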
| mattjj/pyhsmm-autoregressive | autoregressive/util.py | Python | gpl-2.0 | 1,782 | 0.016835 |
import unittest
import numpy as np
import pyoptima as opt
class SimulatedAnnealingTest(unittest.TestCase):
def test_with_parabola(self):
""" Test with a simple parabolic function with 2 variables """
def neighbour_func(params):
new_params = params
params['x0'] += np.random.uniform(-1., 1.)
params['x1'] += np.random.uniform(-1., 1.)
return new_params
hyper_params = {
'temperature_func': lambda t, i: t/np.log(i+2),
'neighbour_func': neighbour_func,
'initial_temp': 1000000.0
}
params = {}
params["x0"] = np.random.uniform(-10., 10.)
params["x1"] = np.random.uniform(-10., 10.)
s = opt.SimulatedAnnealing(params, hyper_params)
s.optimize(opt.parabola, 100000)
bst_solution = s.get_best_parameters()
self.assertAlmostEqual(bst_solution['x0'], 0, 2)
self.assertAlmostEqual(bst_solution['x1'], 0, 2)
if __name__ == "__main__":
unittest.main()
| samueljackson92/metaopt | python_tests/simulated_annealing_test.py | Python | mit | 1,038 | 0.000963 |
import angr
import claripy
import os
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
def test_i386():
p = angr.Project(os.path.join(test_location, 'i386', 'test_strcasecmp'), auto_load_libs=False)
arg1 = claripy.BVS('arg1', 20*8)
s = p.factory.entry_state(args=("test_strcasecmp", arg1))
sm = p.factory.simulation_manager(s)
sm.explore()
sm.move('deadended', 'found', filter_func=lambda s: b"Welcome" in s.posix.dumps(1))
assert len(sm.found) == 1
f = sm.found[0]
sol = f.solver.eval(arg1, cast_to=bytes)
assert b'\x00' in sol
assert sol[:sol.index(b'\x00')].lower() == b'letmein'
assert b'wchar works' in f.posix.dumps(1)
if __name__ == "__main__":
test_i386()
| angr/angr | tests/test_strcasecmp.py | Python | bsd-2-clause | 780 | 0.00641 |
from django.contrib import admin
from priorityDB.models import *
# Register your models here
# For more information on this file, see
# https://docs.djangoproject.com/en/dev/intro/tutorial02/
class TaskHistoryInline(admin.StackedInline):
model = TaskHistory
extra = 0
class EventAdmin(admin.ModelAdmin):
inlines = [TaskHistoryInline]
admin.site.register(Event, EventAdmin)
admin.site.register(Task)
| lowellbander/ngVote | priorityDB/priorityDB/admin.py | Python | gpl-2.0 | 418 | 0.014354 |
#
# Copyright (c) 2015 Autodesk Inc.
# All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script illustrates how we handle process-level sanity checks. This is an optional feature that lets you customize
how ochopod decides that whatever you are running is healthy: you could, for instance, curl the process or run a
script.
Too many consecutive sanity-check failures will turn the pod off (which can be seen in the CLI). To try it out, start
a local standalone Zookeeper server and run "python sanity.py".
"""
from ochopod.bindings.generic.marathon import Pod
from ochopod.models.piped import Actor as Piped
if __name__ == '__main__':
class Strategy(Piped):
#
        # - by default ochopod allows only a single sanity check to fail before turning the pod off
        # - you can specify both how many failures you are willing to tolerate and how much time should pass in between
        # - here we tolerate up to 3 sanity-check failures in a row with 5 seconds between each
#
checks = 3
check_every = 5.0
def sanity_check(self, _):
#
# - this optional callback will be invoked by ochopod on a regular basis
            # - you can do whatever you want inside; the goal is simply not to throw
# - you can for instance simply assert if something is not right
# - let's make it fail for the sake of illustration
# - the callback will be invoked (and will blow up) every 5 seconds up to 3 times
#
assert 0, 'let us fail the sanity check just for fun'
def configure(self, _):
#
# - just go to sleep, the point is not to run anything meaningful
# - the sanity-check will keep failing until the pod turns off
#
return 'sleep 3600', {}
#
# - if you run this script locally you will notice the pod will turn off after around 15 seconds
# - simply type CTRL-C to exit
#
Pod().boot(Strategy, local=1)
| gomezsan/ochopod | examples/sanity.py | Python | apache-2.0 | 2,544 | 0.003931 |
import gamerocket
from flask import Flask, request, render_template
app = Flask(__name__)
gamerocket.Configuration.configure(gamerocket.Environment.Development,
apiKey = "your_apiKey",
secretKey = "your_secretKey")
@app.route("/")
def form():
return render_template("form.html")
@app.route("/create_player", methods=["POST"])
def create_player():
result = gamerocket.Player.create({
"name":request.form["name"],
"locale":request.form["locale"]
})
if result.is_success:
return "<h1>Success! Player ID: " + result.player.id + "</h1>"
else:
return "<h1>Error " + result.error + ": " + result.error_description + "</h1>"
if __name__ == '__main__':
    app.run(debug=True)
| workbandits/gamerocket-python-guide | 1_create_player/src/app.py | Python | mit | 837 | 0.016726
'''
Simple parser that extracts a webpage's content and hyperlinks.
'''
import urllib2
import re
class Parser():
def __init__(self):
pass
def parse(self, url):
        f = urllib2.urlopen(url)
        text = f.read()  # get the page's contents
        f.close()
        # use re.findall to extract every href target as a link
        links = re.findall('href=[\'"]?([^\'" >]+)', text)
        return text, links
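if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): fetch a single
    # page and report what the parser returned. The URL is a placeholder.
    parser = Parser()
    text, links = parser.parse('http://example.com')
    print '%d bytes fetched, %d links found' % (len(text), len(links))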
| nyu-dl/WebNav | simple_parser.py | Python | bsd-3-clause | 399 | 0.010025 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from . import ObjectCreationParameters
__author__ = 'Shamal Faily'
class ResponseParameters(ObjectCreationParameters.ObjectCreationParameters):
def __init__(self,respName,respRisk,tags,cProps,rType):
ObjectCreationParameters.ObjectCreationParameters.__init__(self)
self.theName = respName
self.theTags = tags
self.theRisk = respRisk
self.theEnvironmentProperties = cProps
self.theResponseType = rType
def name(self): return self.theName
def tags(self): return self.theTags
def risk(self): return self.theRisk
def environmentProperties(self): return self.theEnvironmentProperties
def responseType(self): return self.theResponseType
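def _example_response_parameters():
  # Hedged usage sketch (not part of the original module): every argument
  # value below is an illustrative assumption rather than real cairis data.
  return ResponseParameters(respName='Lock account after repeated failures',
                            respRisk='Credential compromise',
                            tags=['prevention'],
                            cProps=[],
                            rType='Prevent')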
| nathanbjenx/cairis | cairis/core/ResponseParameters.py | Python | apache-2.0 | 1,469 | 0.008169 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
subscription_id: str,
resource_group_name: str,
spark_configuration_name: str,
workspace_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sparkconfigurations/{sparkConfigurationName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"sparkConfigurationName": _SERIALIZER.url("spark_configuration_name", spark_configuration_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class SparkConfigurationOperations(object):
"""SparkConfigurationOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
spark_configuration_name: str,
workspace_name: str,
**kwargs: Any
) -> "_models.SparkConfigurationResource":
"""Get SparkConfiguration by name.
Get SparkConfiguration by name in a workspace.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param spark_configuration_name: SparkConfiguration name.
:type spark_configuration_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SparkConfigurationResource, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.SparkConfigurationResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SparkConfigurationResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
spark_configuration_name=spark_configuration_name,
workspace_name=workspace_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SparkConfigurationResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sparkconfigurations/{sparkConfigurationName}'} # type: ignore
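def _example_get_spark_configuration():  # pragma: no cover
    """Hedged usage sketch, not part of the generated client.
    It shows how the operation above is usually reached through the service
    client. The operation-group attribute name (``spark_configuration``) and
    every resource name below are assumptions for illustration only.
    """
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.synapse import SynapseManagementClient
    client = SynapseManagementClient(DefaultAzureCredential(), subscription_id="<subscription-id>")
    return client.spark_configuration.get(
        resource_group_name="<resource-group>",
        spark_configuration_name="<spark-configuration>",
        workspace_name="<workspace>",
    )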
| Azure/azure-sdk-for-python | sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/operations/_spark_configuration_operations.py | Python | mit | 6,172 | 0.004213 |
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2011> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.http import HttpResponse
def index(request):
return HttpResponse('OK')
| scaphe/lettuce-dirty | tests/integration/django/couves/leaves/views.py | Python | gpl-3.0 | 872 | 0.001148 |
#
# THIS IS WORK IN PROGRESS
#
# The Python Imaging Library
# $Id: PcfFontFile.py 2134 2004-10-06 08:55:20Z fredrik $
#
# portable compiled font file parser
#
# history:
# 1997-08-19 fl created
# 2003-09-13 fl fixed loading of unicode fonts
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1997-2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
import Image
import FontFile
import string
# --------------------------------------------------------------------
# declarations
PCF_MAGIC = 0x70636601 # "\x01fcp"
PCF_PROPERTIES = (1<<0)
PCF_ACCELERATORS = (1<<1)
PCF_METRICS = (1<<2)
PCF_BITMAPS = (1<<3)
PCF_INK_METRICS = (1<<4)
PCF_BDF_ENCODINGS = (1<<5)
PCF_SWIDTHS = (1<<6)
PCF_GLYPH_NAMES = (1<<7)
PCF_BDF_ACCELERATORS = (1<<8)
BYTES_PER_ROW = [
lambda bits: ((bits+7) >> 3),
lambda bits: ((bits+15) >> 3) & ~1,
lambda bits: ((bits+31) >> 3) & ~3,
lambda bits: ((bits+63) >> 3) & ~7,
]
def l16(c):
return ord(c[0]) + (ord(c[1])<<8)
def l32(c):
return ord(c[0]) + (ord(c[1])<<8) + (ord(c[2])<<16) + (ord(c[3])<<24)
def b16(c):
return ord(c[1]) + (ord(c[0])<<8)
def b32(c):
return ord(c[3]) + (ord(c[2])<<8) + (ord(c[1])<<16) + (ord(c[0])<<24)
def sz(s, o):
return s[o:string.index(s, "\0", o)]
##
# Font file plugin for the X11 PCF format.
class PcfFontFile(FontFile.FontFile):
name = "name"
def __init__(self, fp):
magic = l32(fp.read(4))
if magic != PCF_MAGIC:
raise SyntaxError, "not a PCF file"
FontFile.FontFile.__init__(self)
count = l32(fp.read(4))
self.toc = {}
for i in range(count):
type = l32(fp.read(4))
self.toc[type] = l32(fp.read(4)), l32(fp.read(4)), l32(fp.read(4))
self.fp = fp
self.info = self._load_properties()
metrics = self._load_metrics()
bitmaps = self._load_bitmaps(metrics)
encoding = self._load_encoding()
#
# create glyph structure
for ch in range(256):
ix = encoding[ch]
if ix is not None:
x, y, l, r, w, a, d, f = metrics[ix]
glyph = (w, 0), (l, d-y, x+l, d), (0, 0, x, y), bitmaps[ix]
self.glyph[ch] = glyph
def _getformat(self, tag):
format, size, offset = self.toc[tag]
fp = self.fp
fp.seek(offset)
format = l32(fp.read(4))
if format & 4:
i16, i32 = b16, b32
else:
i16, i32 = l16, l32
return fp, format, i16, i32
def _load_properties(self):
#
# font properties
properties = {}
fp, format, i16, i32 = self._getformat(PCF_PROPERTIES)
nprops = i32(fp.read(4))
# read property description
p = []
for i in range(nprops):
p.append((i32(fp.read(4)), ord(fp.read(1)), i32(fp.read(4))))
if nprops & 3:
fp.seek(4 - (nprops & 3), 1) # pad
data = fp.read(i32(fp.read(4)))
for k, s, v in p:
k = sz(data, k)
if s:
v = sz(data, v)
properties[k] = v
return properties
def _load_metrics(self):
#
# font metrics
metrics = []
fp, format, i16, i32 = self._getformat(PCF_METRICS)
append = metrics.append
if (format & 0xff00) == 0x100:
# "compressed" metrics
for i in range(i16(fp.read(2))):
left = ord(fp.read(1)) - 128
right = ord(fp.read(1)) - 128
width = ord(fp.read(1)) - 128
ascent = ord(fp.read(1)) - 128
descent = ord(fp.read(1)) - 128
xsize = right - left
ysize = ascent + descent
append(
(xsize, ysize, left, right, width,
ascent, descent, 0)
)
else:
# "jumbo" metrics
for i in range(i32(fp.read(4))):
left = i16(fp.read(2))
right = i16(fp.read(2))
width = i16(fp.read(2))
ascent = i16(fp.read(2))
descent = i16(fp.read(2))
attributes = i16(fp.read(2))
xsize = right - left
ysize = ascent + descent
append(
(xsize, ysize, left, right, width,
ascent, descent, attributes)
)
return metrics
def _load_bitmaps(self, metrics):
#
# bitmap data
bitmaps = []
fp, format, i16, i32 = self._getformat(PCF_BITMAPS)
nbitmaps = i32(fp.read(4))
if nbitmaps != len(metrics):
raise IOError, "Wrong number of bitmaps"
offsets = []
for i in range(nbitmaps):
offsets.append(i32(fp.read(4)))
bitmapSizes = []
for i in range(4):
bitmapSizes.append(i32(fp.read(4)))
byteorder = format & 4 # non-zero => MSB
bitorder = format & 8 # non-zero => MSB
padindex = format & 3
bitmapsize = bitmapSizes[padindex]
offsets.append(bitmapsize)
data = fp.read(bitmapsize)
pad = BYTES_PER_ROW[padindex]
mode = "1;R"
if bitorder:
mode = "1"
for i in range(nbitmaps):
x, y, l, r, w, a, d, f = metrics[i]
b, e = offsets[i], offsets[i+1]
bitmaps.append(
Image.fromstring("1", (x, y), data[b:e], "raw", mode, pad(x))
)
return bitmaps
def _load_encoding(self):
# map character code to bitmap index
encoding = [None] * 256
fp, format, i16, i32 = self._getformat(PCF_BDF_ENCODINGS)
firstCol, lastCol = i16(fp.read(2)), i16(fp.read(2))
firstRow, lastRow = i16(fp.read(2)), i16(fp.read(2))
default = i16(fp.read(2))
nencoding = (lastCol - firstCol + 1) * (lastRow - firstRow + 1)
for i in range(nencoding):
encodingOffset = i16(fp.read(2))
if encodingOffset != 0xFFFF:
try:
encoding[i+firstCol] = encodingOffset
except IndexError:
break # only load ISO-8859-1 glyphs
return encoding
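##
# Hedged usage sketch, not part of the original module. It assumes the
# FontFile base class exposes a save() method that writes the compiled
# ".pil"/".pbm" pair, as the PIL FontFile module of this era did; the file
# names are placeholders.
def _example_compile_pcf():
    fp = open("example.pcf", "rb")
    font = PcfFontFile(fp)
    font.save("example")  # expected to emit example.pil plus bitmap data
    fp.close()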
| KAMI911/loec | examples/Sharpen/binaries-windows-python26/PcfFontFile.py | Python | gpl-3.0 | 6,642 | 0.004968 |