text | repo_name | path | language | license | size | score
---|---|---|---|---|---|---
stringlengths 6–947k | stringlengths 5–100 | stringlengths 4–231 | stringclasses (1 value) | stringclasses (15 values) | int64 6–947k | float64 0–0.34
#
# Collective Knowledge (individual environment - setup)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, Grigori.Fursin@cTuning.org, http://fursin.net
#
import os
extra_dirs=['C:\\msys64', 'C:\\tools\\msys2', 'D:\\msys64', 'D:\\tools\\msys2']
##############################################################################
# customize directories to automatically find and register software
def dirs(i):
hosd=i['host_os_dict']
phosd=hosd.get('ck_name','')
dirs=i.get('dirs', [])
if phosd=='win':
for d in extra_dirs:
if os.path.isdir(d):
dirs.append(d)
return {'return':0, 'dirs':dirs}
##############################################################################
# parse software version
def parse_version(i):
lst=i['output']
ver=''
for q in lst:
q=q.strip()
if q!='':
j=q.lower().find('version ')
if j>0:
ver=q[j+8:].strip()
j2=ver.find(' ')
if j2>0:
ver=ver[:j2]
ver=ver.strip()
break
return {'return':0, 'version':ver}
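# Worked example (hypothetical tool output), showing what parse_version extracts:
#   parse_version({'output': ['GNU bash, version 4.4.23(1)-release (x86_64-pc-msys)']})
#   -> {'return': 0, 'version': '4.4.23(1)-release'}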
##############################################################################
# setup environment
def setup(i):
s=''
cus=i['customize']
env=i['env']
fp=cus.get('full_path','')
ep=cus['env_prefix']
if fp=='':
return {'return':1, 'error':'full path required by the soft customization script is empty'}
p1=os.path.dirname(fp)
p2=os.path.dirname(p1)
p3=os.path.dirname(p2)
env[ep]=p3
env[ep+'_BIN']=p1
env[ep+'_BASH']=fp
return {'return':0, 'bat':s}
ctuning/ck-env | soft/env.msys2/customize.py | Python | bsd-3-clause | 1,741 | 0.029868
#! /usr/bin/python
'''
@author: Alister Maguire
Given a counts file and a taxa file, condense
repeated genera and their counts, and output
a file that maps genus names to their counts
for each experiment.
'''
import argparse
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
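# For example, is_number("3.14") is True while is_number("OTU_17") is False;
# this is what separates numeric OTU/count fields from genus-name fragments below.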
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("counts_file")
parser.add_argument("taxa_file")
args = parser.parse_args()
taxa_f = open(args.taxa_file, "r")
counts_f = open(args.counts_file, "r")
condensed_counts = []
genus_dct = {}
genus_lst = []
count_lst = []
count_dct = {}
taxa = taxa_f.readlines()
counts = counts_f.readlines()
for c in counts:
count_lst.append(c.split())
#create a dictionary that associates
#experiment IDs with lists for counts
c_size = len(counts)
for i in range(1, c_size):
count_dct[count_lst[i][0]] = []
#retrieve the genus names and their
#associated OTU values (look for repeats)
for i in range(len(taxa)):
taxa[i] = taxa[i].split()
j = -3
genus = taxa[i][j]
j -= 1
#condense genus names that have been
#split into pieces
while not is_number(taxa[i][j]):
genus = taxa[i][j] + " " + genus
j -= 1
#if genus in exempt:
# continue
if genus not in genus_dct:
genus_dct[genus] = []
genus_dct[genus].append(taxa[i][0])
genus_lst.append(genus)
g_size = len(genus_lst)
#create a list for condensed counts
#that we can use to map genera to their counts
for i in range(1, len(count_lst)):
condensed_counts.append([])
condensed_counts[i-1] = ([0]*(g_size+1))
for i in range(0, g_size):
for j in range(1, len(count_lst)):
total = 0
for otu in genus_dct[genus_lst[i]]:
#the otu number is an index into the counts list
idx = int(otu[3:]) + 1
total += int(count_lst[j][idx])
condensed_counts[j-1][0] = count_lst[j][0]
condensed_counts[j-1][i+1] = total #offset by one: column 0 holds the experiment ID
genus_counts_f = open("condensed_counts.txt", "w+")
#Write the new file that associates genus names
#with experiment counts. The first line of the
#file contains all of the genus names, and the position
#of this name is an index into the experiment counts.
#The following lines are of the form
# Experiment_ID, count0, count1, ...., countn
#
genus_keys = ""
for genus in genus_lst:
genus_keys = genus_keys + ", " + genus
genus_keys = genus_keys[2:] + "\n"
genus_counts_f.write(genus_keys)
for row in condensed_counts:
exp_counts = ""
for col in row:
exp_counts = exp_counts + ", " + str(col)
exp_counts = exp_counts[2:] + "\n"
genus_counts_f.write(exp_counts)
genus_counts_f.close()
taxa_f.close()
counts_f.close()
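# Typical invocation (file names are hypothetical):
#   python condense.py counts.txt taxa.txt
# The condensed genus-to-counts table is written to condensed_counts.txt.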
aowen87/PhyloViewer | src/condense.py | Python | gpl-3.0 | 3,122 | 0.012812
#!C:\Python27
# sslsa.py
# Structural Superimposition of Local Sequence Alignment
# A program which finds out whether a local sequence
# alignment of two protein sequences also implies structural
# similarity of the aligned parts
import os
import sys
import glob
from Bio.PDB import *
from Bio import pairwise2
from Bio.SubsMat import MatrixInfo as matlist
# Obtain structures directory
str_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'structures')
# Create it if it doesn't exist
if not os.path.isdir(str_dir):
os.makedirs(str_dir)
# Get PDB IDs from the user
if len(sys.argv) > 2:
pdb_ids = [sys.argv[1], sys.argv[2]]
else:
sys.exit("Two separate valid PDB IDs must be given")
# Initiate PDB list object
pdb_list = PDBList(server="http://www.rcsb.org/pdb/files")
# Retrieve PDB files from the server to structures directory
pdb_list.retrieve_pdb_file(pdb_ids[0], obsolete=False, pdir=str_dir)
pdb_list.retrieve_pdb_file(pdb_ids[1], obsolete=False, pdir=str_dir)
# Generate PDB file paths
pdb_paths = [''.join(glob.glob(os.path.join(str_dir, '*' + pdb_ids[0] + '.ent'))), ''.join(glob.glob(os.path.join(str_dir, '*' + pdb_ids[1] + '.ent')))]
# Initiate PDB parser object
pdb_parser = PDBParser(QUIET=True)
# Generate PDB structures using PDB parser
pdb_strs = [pdb_parser.get_structure(pdb_ids[0], pdb_paths[0]), pdb_parser.get_structure(pdb_ids[1], pdb_paths[1])]
# Initiate an empty list for storing PDB sequences
pdb_seqs = ["", ""]
# Initiate CA polypeptide builder used to get sequences of each protein
ppb = CaPPBuilder()
for i in range(len(pdb_seqs)):
for pp in ppb.build_peptides(pdb_strs[i]):
pdb_seqs[i] += str(pp.get_sequence())
# Get BLOSUM62 matrix
matrix = matlist.blosum62
# Set gap penalties or get from the user
if len(sys.argv) >= 4:
gap_open = int(sys.argv[3])
else:
gap_open = -10
if len(sys.argv) == 5:
gap_extend = int(sys.argv[4])
else:
gap_extend = -5
# Do the pairwise alignment and get alignments
alns = pairwise2.align.localds(pdb_seqs[0], pdb_seqs[1], matrix, gap_open, gap_extend)
# Obtain the best alignment
best_aln = alns[0]
# Decompose best alignment into its components
aln_first, aln_second, score, begin, end = best_aln
# Print the alignment and alignment length
print aln_first[begin:end] + "\n" + aln_second[begin:end]
print "Alignment length: " + str(end - begin)
# Initiate an empty list to store atom objects
pdb_atms = [[], []]
for i in range(len(pdb_atms)):
# Get only the first model and use it
model = pdb_strs[i][0]
for chain in model:
for residue in chain:
# Only if the residue has CA atom
if "CA" in residue:
# Append the atom object
pdb_atms[i].append(residue["CA"])
# Initiate another empty string for mapping the atom objects
pdb_atms_mapped = [[], []]
# i is the index for the two alignments, j is for the first
# atom object list and k is for the other atom object list
i, j, k = 0, 0, 0
while i < len(aln_first[:end]):
# Check if there is no gap in either part of
# the alignment because there will be no atom
# for ones with -
if aln_first[i] != "-" and aln_second[i] != "-":
# Check if it's the beginning of the alignment
# here's where we need to start mapping
if i >= begin:
# Append the atom objects accordingly
pdb_atms_mapped[0].append(pdb_atms[0][j])
pdb_atms_mapped[1].append(pdb_atms[1][k])
# Move j to the next amino acid if it wasn't a gap
# that is we put its atom object in the previous
# step. If it's a gap, stay at the same atom object
if aln_first[i] != "-":
j += 1
# Move k to the next amino acid if it wasn't a gap
# that is we put its atom object in the previous
# step. If it's a gap, stay at the same atom object
if aln_second[i] != "-":
k += 1
# Move i to the next amino acid in the alignment
# because we process it no matter what
i += 1
# Initiate the superimposer
superimposer = Superimposer()
# Set (translate/rotate) atoms minimizing RMSD
superimposer.set_atoms(pdb_atms_mapped[0], pdb_atms_mapped[1])
# Print RMSD
print "RMSD: " + str(superimposer.rms)
gungorbudak/sslsa | sslsa.py | Python | mit | 4,211 | 0.00095
import numpy
import pytest
import theano
class TestInputLayer:
@pytest.fixture
def layer(self):
from lasagne.layers.input import InputLayer
return InputLayer((3, 2))
def test_input_var(self, layer):
assert layer.input_var.ndim == 2
def test_get_output_shape(self, layer):
assert layer.get_output_shape() == (3, 2)
def test_get_output_without_arguments(self, layer):
assert layer.get_output() is layer.input_var
def test_get_output_input_is_variable(self, layer):
variable = theano.Variable("myvariable")
assert layer.get_output(variable) is variable
def test_get_output_input_is_array(self, layer):
input = [[1,2,3]]
output = layer.get_output(input)
assert numpy.all(output.eval() == input)
def test_get_output_input_is_a_mapping(self, layer):
input = {layer: theano.tensor.matrix()}
assert layer.get_output(input) is input[layer]
def test_input_var_name(self, layer):
assert layer.input_var.name == "input"
def test_named_layer_input_var_name(self):
from lasagne.layers.input import InputLayer
layer = InputLayer((3, 2), name="foo")
assert layer.input_var.name == "foo.input"
diogo149/Lasagne | lasagne/tests/layers/test_input.py | Python | mit | 1,255 | 0.001594
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import struct
import dns.exception
import dns.immutable
import dns.rdata
import dns.tokenizer
@dns.immutable.immutable
class HINFO(dns.rdata.Rdata):
"""HINFO record"""
# see: RFC 1035
__slots__ = ['cpu', 'os']
def __init__(self, rdclass, rdtype, cpu, os):
super().__init__(rdclass, rdtype)
self.cpu = self._as_bytes(cpu, True, 255)
self.os = self._as_bytes(os, True, 255)
def to_text(self, origin=None, relativize=True, **kw):
return '"{}" "{}"'.format(dns.rdata._escapify(self.cpu),
dns.rdata._escapify(self.os))
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True,
relativize_to=None):
cpu = tok.get_string(max_length=255)
os = tok.get_string(max_length=255)
return cls(rdclass, rdtype, cpu, os)
def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
l = len(self.cpu)
assert l < 256
file.write(struct.pack('!B', l))
file.write(self.cpu)
l = len(self.os)
assert l < 256
file.write(struct.pack('!B', l))
file.write(self.os)
@classmethod
def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
cpu = parser.get_counted_bytes()
os = parser.get_counted_bytes()
return cls(rdclass, rdtype, cpu, os)
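# For reference, to_text() above renders an HINFO record as two quoted
# character-strings, e.g. (hypothetical values): "AMD64" "Linux"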
4shadoww/usploit | lib/dns/rdtypes/ANY/HINFO.py | Python | mit | 2,266 | 0.000883
# Bonding Topology of heavy atoms. This is a dict of a dict of sets
topology = {'PRO': {'N': {'C-1', 'CA', 'CD'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'CG', 'HB2', 'HB3'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'CD', 'HG2', 'HG3'},
'HG2': {'CG'},
'HG3': {'CG'},
'CD': {'CG', 'N', 'HD2', 'HD3'},
'HD2': {'CD'},
'HD3': {'CD'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'GLY': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'HA2', 'HA3'},
'HA2': {'CA'},
'HA3': {'CA'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'ALA': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB1', 'HB2', 'HB3'},
'HB1': {'CB'},
'HB2': {'CB'},
'HB3': {'CB'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'ARG': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'HG2', 'HG3', 'CD'},
'HG2': {'CG'},
'HG3': {'CG'},
'CD': {'CG', 'HD2', 'HD3', 'NE'},
'HD2': {'CD'},
'HD3': {'CD'},
'NE': {'CD', 'HE', 'CZ'},
'HE': {'NE'},
'CZ': {'NE', 'NH1', 'NH2',},
'NH1': {'CZ', 'HH11', 'HH12'},
'HH11': {'NH1'},
'HH12': {'NH1'},
'NH2': {'CZ', 'HH21', 'HH22'},
'HH21': {'NH2'},
'HH22': {'NH2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'ASN': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'OD1', 'ND2'},
'OD1': {'CG'},
'ND2': {'CG', 'HD21', 'HD22'},
'HD21': {'ND2'},
'HD22': {'ND2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'ASP': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'OD1', 'OD2'},
'OD1': {'CG','HD1'},
'HD1': {'OD1'},
'OD2': {'CG','HD2'},
'HD2': {'OD2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'CYS': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'SG', 'HB2', 'HB3'},
'HB2': {'CB'},
'HB3': {'CB'},
'SG': {'CB', 'HG'},
'HG': {'SG'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'GLN': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'HG2', 'HG3', 'CD'},
'HG2': {'CG'},
'HG3': {'CG'},
'CD': {'CG', 'OE1', 'NE2'},
'OE1': {'CD'},
'NE2': {'CD', 'HE21', 'HE22'},
'HE21': {'NE2'},
'HE22': {'NE2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'GLU': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'HG2', 'HG3', 'CD'},
'HG2': {'CG'},
'HG3': {'CG'},
'CD': {'CG', 'OE1', 'OE2'},
'OE1': {'CD', 'HE1'},
'HE1': {'OE1'},
'OE2': {'CD', 'HE2'},
'HE2': {'OE2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'HIS': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'ND1', 'CD2'},
'ND1': {'CG', 'CE1', 'HD1'},
'HD1': {'ND1'},
'CE1': {'ND1', 'NE2', 'HE1'},
'HE1': {'CE1'},
'NE2': {'CE1', 'CD2', 'HE2'},
'HE2': {'NE2'},
'CD2': {'CG', 'NE2', 'HD2'},
'HD2': {'CD2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'ILE': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB', 'CG1', 'CG2'},
'HB': {'CB'},
'CG1': {'CB', 'HG12', 'HG13', 'CD1'},
'HG12': {'CG1'},
'HG13': {'CG1'},
'CD1': {'CG1', 'HD11', 'HD12', 'HD13'},
'HD11': {'CD1'},
'HD12': {'CD1'},
'HD13': {'CD1'},
'CG2': {'CB', 'HG21', 'HG22', 'HG23'},
'HG21': {'CG2'},
'HG22': {'CG2'},
'HG23': {'CG2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'LEU': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'CD1', 'CD2', 'HG'},
'HG': {'CG'},
'CD1': {'CG', 'HD11', 'HD12', 'HD13'},
'HD11': {'CD1'},
'HD12': {'CD1'},
'HD13': {'CD1'},
'CD2': {'CG', 'HD21', 'HD22', 'HD23'},
'HD21': {'CD2'},
'HD22': {'CD2'},
'HD23': {'CD2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'LYS': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'HG2', 'HG3', 'CD'},
'HG2': {'CG'},
'HG3': {'CG'},
'CD': {'CG', 'HD2', 'HD3', 'CE'},
'HD2': {'CD'},
'HD3': {'CD'},
'CE': {'CD', 'HE2', 'HE3', 'NZ'},
'HE2': {'CE'},
'HE3': {'CE'},
'NZ': {'CE', 'HZ1', 'HZ2', 'HZ3'},
'HZ1': {'NZ'},
'HZ2': {'NZ'},
'HZ3': {'NZ'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'MET': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'HG2', 'HG3', 'SD'},
'HG2': {'CG'},
'HG3': {'CG'},
'SD': {'CG', 'CE'},
'CE': {'SD', 'HE1', 'HE2', 'HE3'},
'HE1': {'CE'},
'HE2': {'CE'},
'HE3': {'CE'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'PHE': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'CD1', 'CD2'},
'CD1': {'CG', 'HD1', 'CE1'},
'HD1': {'CD1'},
'CE1': {'CD1', 'HE1', 'CZ'},
'HE1': {'CE1'},
'CZ': {'CE1', 'HZ', 'CE2'},
'HZ': {'CZ'},
'CE2': {'CZ', 'HE2', 'CD2'},
'HE2': {'CE2'},
'CD2': {'CE2', 'HD2', 'CG'},
'HD2': {'CD2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'SER': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'OG'},
'HB2': {'CB'},
'HB3': {'CB'},
'OG': {'CB', 'HG'},
'HG': {'OG'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'THR': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB', 'CG2', 'OG1'},
'HB': {'CB'},
'OG1': {'CB', 'HG1'},
'HG1': {'OG1'},
'CG2': {'CB', 'HG21', 'HG22', 'HG23'},
'HG21': {'CG2'},
'HG22': {'CG2'},
'HG23': {'CG2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'TRP': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'CD1', 'CD2'},
'CD1': {'CG', 'HD1', 'NE1'},
'HD1': {'CD1'},
'NE1': {'CD1', 'HE1', 'CE2'},
'HE1': {'NE1'},
'CE2': {'NE1', 'CD2', 'CZ2'},
'CZ2': {'CE2', 'HZ2', 'CH2'},
'HZ2': {'CZ2'},
'CH2': {'CZ2', 'HH2', 'CZ3'},
'HH2': {'CH2'},
'CZ3': {'CH2', 'HZ3', 'CE3'},
'HZ3': {'CZ3'},
'CE3': {'CZ3', 'HE3', 'CD2'},
'HE3': {'CE3'},
'CD2': {'CE3', 'CE2', 'CG'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'TYR': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB2', 'HB3', 'CG'},
'HB2': {'CB'},
'HB3': {'CB'},
'CG': {'CB', 'CD1', 'CD2'},
'CD1': {'CG', 'HD1', 'CE1'},
'HD1': {'CD1'},
'CE1': {'CD1', 'HE1', 'CZ'},
'HE1': {'CE1'},
'CZ': {'CE1', 'CE2', 'OH'},
'OH': {'CZ', 'HH'},
'HH': {'OH'},
'CE2': {'CZ', 'HE2', 'CD2'},
'HE2': {'CE2'},
'CD2': {'CE2', 'HD2', 'CG'},
'HD2': {'CD2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
'VAL': {'N': {'C-1', 'CA', 'H'},
'H': {'N'},
'CA': {'N', 'C', 'CB', 'HA'},
'HA': {'CA'},
'CB': {'CA', 'HB', 'CG1', 'CG2'},
'HB': {'CB'},
'CG1': {'CB', 'HG11', 'HG12', 'HG13'},
'HG11': {'CG1'},
'HG12': {'CG1'},
'HG13': {'CG1'},
'CG2': {'CB', 'HG21', 'HG22', 'HG23'},
'HG21': {'CG2'},
'HG22': {'CG2'},
'HG23': {'CG2'},
'C': {'CA', 'O', 'N+1'},
'O': {'C'},
},
}
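# Example lookup: topology['ALA']['CB'] == {'CA', 'HB1', 'HB2', 'HB3'}, i.e. the atoms
# bonded to the alanine beta carbon. The 'C-1' and 'N+1' entries refer to the backbone
# atoms of the preceding and following residues, respectively.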
jlorieau/mollib | mollib/core/topology.py | Python | gpl-3.0 | 14,201 | 0.000282
# -*- coding: utf-8 -*-
import socket
from string import strip
__author__ = 'Vinícius da Silveira Serafim <vinicius@serafim.eti.br>'
# server address and port for the connection
server_addr = ("127.0.0.1", 9000)
def main():
"""
Main function.
"""
# (1) Create the client socket
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# (2) Connect the socket to the server
client_socket.connect(server_addr)
while True:
# (3) Read a line from the keyboard
data = strip(raw_input("> "))
# if the line is empty, disconnect
if not len(data):
break
# (4) Send the line to the server
sent_bytes = client_socket.send(data)
print "[>] Enviado: '%s' (%s bytes) ao servidor." % (data, sent_bytes)
# (5) Read the server's reply
resp = client_socket.recv(128)
# if the server sent nothing, the connection was closed
if not len(resp):
break
print "[<] Recebido: '%s' (%s bytes) do servidor %s:%s" %\
(resp, len(resp), server_addr[0], server_addr[1])
# (6) Disconnect
client_socket.close()
print "Conexão encerrada com o servidor %s:%s" % server_addr
if __name__ == '__main__':
main()
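# Quick test (assumes an echo-style server is listening on 127.0.0.1:9000, for
# instance the matching server script from this exercise or a netcat listener):
#   python client_01.py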
# eof
vsserafim/aula_sockets_echo | client_01.py | Python | gpl-2.0 | 1,291 | 0
from test_support import verify, verbose, TestFailed
from string import join
from random import random, randint
# SHIFT should match the value in longintrepr.h for best testing.
SHIFT = 15
BASE = 2 ** SHIFT
MASK = BASE - 1
# Max number of base BASE digits to use in test cases. Doubling
# this will at least quadruple the runtime.
MAXDIGITS = 10
# build some special values
special = map(long, [0, 1, 2, BASE, BASE >> 1])
special.append(0x5555555555555555L)
special.append(0xaaaaaaaaaaaaaaaaL)
# some solid strings of one bits
p2 = 4L # 0 and 1 already added
for i in range(2*SHIFT):
special.append(p2 - 1)
p2 = p2 << 1
del p2
# add complements & negations
special = special + map(lambda x: ~x, special) + \
map(lambda x: -x, special)
# ------------------------------------------------------------ utilities
# Use check instead of assert so the test still does something
# under -O.
def check(ok, *args):
if not ok:
raise TestFailed, join(map(str, args), " ")
# Get quasi-random long consisting of ndigits digits (in base BASE).
# quasi == the most-significant digit will not be 0, and the number
# is constructed to contain long strings of 0 and 1 bits. These are
# more likely than random bits to provoke digit-boundary errors.
# The sign of the number is also random.
def getran(ndigits):
verify(ndigits > 0)
nbits_hi = ndigits * SHIFT
nbits_lo = nbits_hi - SHIFT + 1
answer = 0L
nbits = 0
r = int(random() * (SHIFT * 2)) | 1 # force 1 bits to start
while nbits < nbits_lo:
bits = (r >> 1) + 1
bits = min(bits, nbits_hi - nbits)
verify(1 <= bits <= SHIFT)
nbits = nbits + bits
answer = answer << bits
if r & 1:
answer = answer | ((1 << bits) - 1)
r = int(random() * (SHIFT * 2))
verify(nbits_lo <= nbits <= nbits_hi)
if random() < 0.5:
answer = -answer
return answer
# Get random long consisting of ndigits random digits (relative to base
# BASE). The sign bit is also random.
def getran2(ndigits):
answer = 0L
for i in range(ndigits):
answer = (answer << SHIFT) | randint(0, MASK)
if random() < 0.5:
answer = -answer
return answer
# --------------------------------------------------------------- divmod
def test_division_2(x, y):
q, r = divmod(x, y)
q2, r2 = x/y, x%y
pab, pba = x*y, y*x
check(pab == pba, "multiplication does not commute for", x, y)
check(q == q2, "divmod returns different quotient than / for", x, y)
check(r == r2, "divmod returns different mod than % for", x, y)
check(x == q*y + r, "x != q*y + r after divmod on", x, y)
if y > 0:
check(0 <= r < y, "bad mod from divmod on", x, y)
else:
check(y < r <= 0, "bad mod from divmod on", x, y)
def test_division(maxdigits=MAXDIGITS):
print "long / * % divmod"
digits = range(1, maxdigits+1)
for lenx in digits:
x = getran(lenx)
for leny in digits:
y = getran(leny) or 1L
test_division_2(x, y)
# -------------------------------------------------------------- ~ & | ^
def test_bitop_identities_1(x):
check(x & 0 == 0, "x & 0 != 0 for", x)
check(x | 0 == x, "x | 0 != x for", x)
check(x ^ 0 == x, "x ^ 0 != x for", x)
check(x & -1 == x, "x & -1 != x for", x)
check(x | -1 == -1, "x | -1 != -1 for", x)
check(x ^ -1 == ~x, "x ^ -1 != ~x for", x)
check(x == ~~x, "x != ~~x for", x)
check(x & x == x, "x & x != x for", x)
check(x | x == x, "x | x != x for", x)
check(x ^ x == 0, "x ^ x != 0 for", x)
check(x & ~x == 0, "x & ~x != 0 for", x)
check(x | ~x == -1, "x | ~x != -1 for", x)
check(x ^ ~x == -1, "x ^ ~x != -1 for", x)
check(-x == 1 + ~x == ~(x-1), "not -x == 1 + ~x == ~(x-1) for", x)
for n in range(2*SHIFT):
p2 = 2L ** n
check(x << n >> n == x, "x << n >> n != x for", x, n)
check(x / p2 == x >> n, "x / p2 != x >> n for x n p2", x, n, p2)
check(x * p2 == x << n, "x * p2 != x << n for x n p2", x, n, p2)
check(x & -p2 == x >> n << n == x & ~(p2 - 1),
"not x & -p2 == x >> n << n == x & ~(p2 - 1) for x n p2",
x, n, p2)
def test_bitop_identities_2(x, y):
check(x & y == y & x, "x & y != y & x for", x, y)
check(x | y == y | x, "x | y != y | x for", x, y)
check(x ^ y == y ^ x, "x ^ y != y ^ x for", x, y)
check(x ^ y ^ x == y, "x ^ y ^ x != y for", x, y)
check(x & y == ~(~x | ~y), "x & y != ~(~x | ~y) for", x, y)
check(x | y == ~(~x & ~y), "x | y != ~(~x & ~y) for", x, y)
check(x ^ y == (x | y) & ~(x & y),
"x ^ y != (x | y) & ~(x & y) for", x, y)
check(x ^ y == (x & ~y) | (~x & y),
"x ^ y == (x & ~y) | (~x & y) for", x, y)
check(x ^ y == (x | y) & (~x | ~y),
"x ^ y == (x | y) & (~x | ~y) for", x, y)
def test_bitop_identities_3(x, y, z):
check((x & y) & z == x & (y & z),
"(x & y) & z != x & (y & z) for", x, y, z)
check((x | y) | z == x | (y | z),
"(x | y) | z != x | (y | z) for", x, y, z)
check((x ^ y) ^ z == x ^ (y ^ z),
"(x ^ y) ^ z != x ^ (y ^ z) for", x, y, z)
check(x & (y | z) == (x & y) | (x & z),
"x & (y | z) != (x & y) | (x & z) for", x, y, z)
check(x | (y & z) == (x | y) & (x | z),
"x | (y & z) != (x | y) & (x | z) for", x, y, z)
def test_bitop_identities(maxdigits=MAXDIGITS):
print "long bit-operation identities"
for x in special:
test_bitop_identities_1(x)
digits = range(1, maxdigits+1)
for lenx in digits:
x = getran(lenx)
test_bitop_identities_1(x)
for leny in digits:
y = getran(leny)
test_bitop_identities_2(x, y)
test_bitop_identities_3(x, y, getran((lenx + leny)/2))
# ------------------------------------------------- hex oct repr str atol
def slow_format(x, base):
if (x, base) == (0, 8):
# this is an oddball!
return "0L"
digits = []
sign = 0
if x < 0:
sign, x = 1, -x
while x:
x, r = divmod(x, base)
digits.append(int(r))
digits.reverse()
digits = digits or [0]
return '-'[:sign] + \
{8: '0', 10: '', 16: '0x'}[base] + \
join(map(lambda i: "0123456789ABCDEF"[i], digits), '') + \
"L"
def test_format_1(x):
from string import atol
for base, mapper in (8, oct), (10, repr), (16, hex):
got = mapper(x)
expected = slow_format(x, base)
check(got == expected, mapper.__name__, "returned",
got, "but expected", expected, "for", x)
check(atol(got, 0) == x, 'atol("%s", 0) !=' % got, x)
# str() has to be checked a little differently since there's no
# trailing "L"
got = str(x)
expected = slow_format(x, 10)[:-1]
check(got == expected, mapper.__name__, "returned",
got, "but expected", expected, "for", x)
def test_format(maxdigits=MAXDIGITS):
print "long str/hex/oct/atol"
for x in special:
test_format_1(x)
for i in range(10):
for lenx in range(1, maxdigits+1):
x = getran(lenx)
test_format_1(x)
# ----------------------------------------------------------------- misc
def test_misc(maxdigits=MAXDIGITS):
print "long miscellaneous operations"
import sys
# check the extremes in int<->long conversion
hugepos = sys.maxint
hugeneg = -hugepos - 1
hugepos_aslong = long(hugepos)
hugeneg_aslong = long(hugeneg)
check(hugepos == hugepos_aslong, "long(sys.maxint) != sys.maxint")
check(hugeneg == hugeneg_aslong,
"long(-sys.maxint-1) != -sys.maxint-1")
# long -> int should not fail for hugepos_aslong or hugeneg_aslong
try:
check(int(hugepos_aslong) == hugepos,
"converting sys.maxint to long and back to int fails")
except OverflowError:
raise TestFailed, "int(long(sys.maxint)) overflowed!"
try:
check(int(hugeneg_aslong) == hugeneg,
"converting -sys.maxint-1 to long and back to int fails")
except OverflowError:
raise TestFailed, "int(long(-sys.maxint-1)) overflowed!"
# but long -> int should overflow for hugepos+1 and hugeneg-1
x = hugepos_aslong + 1
try:
int(x)
raise ValueError
except OverflowError:
pass
except:
raise TestFailed, "int(long(sys.maxint) + 1) didn't overflow"
x = hugeneg_aslong - 1
try:
int(x)
raise ValueError
except OverflowError:
pass
except:
raise TestFailed, "int(long(-sys.maxint-1) - 1) didn't overflow"
# ---------------------------------------------------------------- do it
test_division()
test_bitop_identities()
test_format()
test_misc()
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.1/Lib/test/test_long.py | Python | mit | 8,805 | 0.003748
import cPickle, string, numpy, getopt, sys, random, time, re, pprint
import sys
import onlineldauser
import citydoc
import os
import subprocess
import MySQLdb
def main():
db=MySQLdb.Connect(host="localhost",
user="team06",
passwd="aiM7chah,d",
db="randomtrip")
cur=db.cursor()
batchsize = 1
D = 3.3e6
K = 100 # number of topics
documentstoanalyze=1; # number of batches
user_id='2'
cur.execute("select tags from UserProfile where id=%s",user_id)
user_tags=cur.fetchone()
user_tags=[user_tags[0]]
print user_tags
vocab = file('./vocaball.txt').readlines()
W = len(vocab)
olda=onlineldauser.OnlineLDA(vocab)
(gamma, bound) = olda.update_lambda(user_tags)
(wordids, wordcts) = onlineldauser.parse_doc_list(user_tags, olda._vocab)
perwordbound = bound * len(user_tags) / (D * sum(map(sum, wordcts)))
gamma=str(gamma[0]).strip(' []').replace('\n','')
gamma=gamma.split()
gamma_db=''
for index,value in enumerate(gamma):
if float(value)>0.01:
gamma_db+=(str(index)+':'+value+',')
cur.execute("update UserProfile set user_vector=%s where id=%s",(gamma_db,user_id))
db.commit()
if __name__ == '__main__':
main()
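# The stored user_vector is a sparse "topic_index:weight" string, for example
# (hypothetical weights) "3:0.52,17:0.21," -- only topics whose gamma weight
# exceeds 0.01 are kept.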
huyilin/TopicLda | src/onlineuser.py | Python | gpl-3.0 | 1,278 | 0.021127
import MySQLdb
import csv
import cStringIO
import codecs
import pprint
from datetime import datetime
from decimal import *
import argparse
import getpass
# Batch size for the executemany() inserts below; MAX_NUM is not defined in the
# snippet as given, so a plausible default is assumed here.
MAX_NUM = 1000
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [unicode(s, "utf-8") for s in row]
def __iter__(self):
return self
INSERT_STMT="""INSERT INTO `tweets_Oso`
(`created_at`,
`lang`,
`text`,
`uuid`,
`user_id`,
`geo_coordinates_0`,
`geo_coordinates_1`,
`user_screen_name`,
`user_description`,
`user_followers_count`,
`user_friends_count`,
`user_location`,
`entities_urls_0_expanded_url`,
`entities_urls_1_expanded_url`,
`entities_urls_2_expanded_url`,
`user_statuses_count`,
`entities_urls_0_display_url`,
`entities_urls_1_display_url`,
`entities_urls_2_display_url`,
`retweeted_status_id`,
`retweeted_status_user_screen_name`,
`retweeted_status_retweet_count`,
`retweeted_status_created_at`,
`retweeted_status_text`,
`retweeted_status_favorite_count`,
`retweeted_status_user_id`,
`retweeted_status_user_time_zone`,
`retweeted_status_user_friends_count`,
`retweeted_status_user_statuses_count`,
`retweeted_status_user_followers_count`,
`in_reply_to_screen_name`,
`in_reply_to_status_id`,
`in_reply_to_user_id`)
VALUES
(%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s,
%s)
"""
parser = argparse.ArgumentParser(description='whatevs')
parser.add_argument('host', help='host')
parser.add_argument('database', help='database name')
parser.add_argument('username', help="username")
parser.add_argument('-l', '--limit', help="limit", type=int, default=0)
#parser.add_argument('-o', '--output', help="outfile")
parser.add_argument('-f', '--filename', help="input file")
parser.add_argument('-e', '--encoding', default="utf-8", help="json file encoding (default is utf-8)")
parser.add_argument('--db_encoding', default="utf8mb4", help="database encoding")
#parser.add_argument('-b', '--batchsize', default=1000, type=int, help="batch insert size")
parser.add_argument('-c', '--check', dest="check", action="store_true", help="check if tweet exists before inserting")
parser.add_argument('-r', '--no_retweets', dest="no_retweets", action="store_true", help="do not add embedded retweets")
args = parser.parse_args()
# ask for password
password = getpass.getpass("Enter password for %s@%s (%s) : "%(args.username, args.host, args.database))
# connect to db
db=MySQLdb.connect(args.host, args.username, password, args.database, charset=args.db_encoding, use_unicode=True)
c=db.cursor()
with open(args.filename, 'r') as infile:
coder = codecs.iterencode(codecs.iterdecode(infile, "utf-8"), "utf-8")
csvreader = csv.DictReader(coder, delimiter=',', quotechar='"')
queue = []
total = 0
try:
last_item = {}
for row in csvreader:
#print "%d - %s,%s,%s,%s"%(total, row["created_ts"], row["text"], row["retweeted_status.created_ts"], row["retweeted_status.text"])
last_item = row
created_ts = datetime.strptime( row["created_ts"], "%Y-%m-%dT%H:%M:%SZ" )
retweet_created = datetime.strptime( row["retweeted_status.created_ts"], "%Y-%m-%dT%H:%M:%SZ" ) if row["retweeted_status.created_ts"] else None
geo_0 = Decimal(row["geo.coordinates.0"] if len(row["geo.coordinates.0"]) < 16 else row["geo.coordinates.0"][:16]) if row["geo.coordinates.0"] else None
geo_1 = Decimal(Decimal(row["geo.coordinates.1"] if len(row["geo.coordinates.1"]) < 16 else row["geo.coordinates.1"][:16])) if row["geo.coordinates.1"] else None
#if geo_0 is not None:
# print geo_0, geo_1
item = (
created_ts,
row["lang"],
row["text"],
int(row["id"]),
int(row["user.id"]),
geo_0,
geo_1,
row["user.screen_name"],
row["user.description"],
int(row["user.followers_count"]),
int(row["user.friends_count"]),
row["user.location"],
row["entities.urls.0.expanded_url"],
row["entities.urls.1.expanded_url"],
row["entities.urls.2.expanded_url"],
int(row["user.statuses_count"]),
row["entities.urls.0.display_url"],
row["entities.urls.1.display_url"],
row["entities.urls.2.display_url"],
int(row["retweeted_status.id"]) if row["retweeted_status.id"] else None,
row["retweeted_status.user.screen_name"] if row["retweeted_status.user.screen_name"] else None,
int(row["retweeted_status.retweet_count"]) if row["retweeted_status.retweet_count"] else None,
retweet_created,
row["retweeted_status.text"] if row["retweeted_status.text"] else None,
int(row["retweeted_status.favorite_count"]) if row["retweeted_status.favorite_count"] else None,
int(row["retweeted_status.user.id"]) if row["retweeted_status.user.id"] else None,
row["retweeted_status.user.time_zone"] if row["retweeted_status.user.time_zone"] else None,
int(row["retweeted_status.user.friends_count"]) if row["retweeted_status.user.friends_count"] else None,
int(row["retweeted_status.user.statuses_count"]) if row["retweeted_status.user.statuses_count"] else None,
int(row["retweeted_status.user.followers_count"]) if row["retweeted_status.user.followers_count"] else None,
row["in_reply_to_screen_name"] if row["in_reply_to_screen_name"] else None,
int(row["in_reply_to_status_id"]) if row["in_reply_to_status_id"] else None,
int(row["in_reply_to_user_id"]) if row["in_reply_to_user_id"] else None
)
queue.append(item)
total += 1
if len(queue) >= MAX_NUM:
#print
#print "---------------------"
#print
c.executemany(INSERT_STMT, queue)
queue = []
print total
#print "---------------------"
#print
# insert the last few
c.executemany(INSERT_STMT, queue)
print "%d total inserted"%(total)
c.close()
except Exception, e:
print "error ", e
print "last item: "
pprint.pprint(last_item)
c.close()
db.close()
raise e
finally:
c.close()
db.commit()
db.close()
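# Example invocation (host, database, user and CSV path are placeholders):
#   python simple_import.py localhost tweetdb dbuser -f oso_tweets.csv
# The database password is prompted for interactively via getpass.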
emCOMP/twitter-mysql | bin/simple_import.py | Python | bsd-3-clause | 6,719 | 0.01786
# Generated by Django 3.0.8 on 2020-07-11 03:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("places", "0007_auto_20200711_0104"),
]
operations = [
migrations.CreateModel(
name="Category",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=255)),
("places", models.ManyToManyField(to="places.Restaurant")),
],
options={"verbose_name_plural": "categories", "db_table": "category"},
),
]
huangsam/chowist | places/migrations/0008_category.py | Python | mit | 852 | 0.001174
# -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015 OpenCraft <xavier@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
OpenStack - Tests
"""
# Imports #####################################################################
import requests
from collections import namedtuple
from unittest.mock import Mock, call, patch
from instance import openstack
from instance.tests.base import TestCase
# Tests #######################################################################
class OpenStackTestCase(TestCase):
"""
Test cases for OpenStack helper functions
"""
def setUp(self):
super().setUp()
self.nova = Mock()
def test_create_server(self):
"""
Create a VM via nova
"""
self.nova.flavors.find.return_value = 'test-flavor'
self.nova.images.find.return_value = 'test-image'
openstack.create_server(self.nova, 'test-vm', {"ram": 4096, "disk": 40}, {"name": "Ubuntu 12.04"})
self.assertEqual(self.nova.mock_calls, [
call.flavors.find(disk=40, ram=4096),
call.images.find(name='Ubuntu 12.04'),
call.servers.create('test-vm', 'test-image', 'test-flavor', key_name=None)
])
def test_delete_servers_by_name(self):
"""
Delete all servers with a given name
"""
server_class = namedtuple('server_class', 'name pk')
self.nova.servers.list.return_value = [
server_class(name='server-a', pk=1),
server_class(name='server-a', pk=2),
server_class(name='server-b', pk=3),
]
openstack.delete_servers_by_name(self.nova, 'server-a')
self.assertEqual(self.nova.mock_calls, [
call.servers.list(),
call.servers.delete(server_class(name='server-a', pk=1)),
call.servers.delete(server_class(name='server-a', pk=2)),
])
def test_get_server_public_address_none(self):
"""
No public IP when none has been assigned yet
"""
server_class = namedtuple('Server', 'addresses')
server = server_class(addresses=[])
self.assertEqual(openstack.get_server_public_address(server), None)
@patch('requests.packages.urllib3.util.retry.Retry.sleep')
@patch('http.client.HTTPConnection.getresponse')
@patch('http.client.HTTPConnection.request')
def test_nova_client_connection_error(self, mock_request, mock_getresponse, mock_retry_sleep):
"""
Connection error during a request from the nova client
Ensure requests are retried before giving up, with a backoff sleep between attempts
"""
def getresponse_call(*args, **kwargs):
""" Invoked by the nova client when making a HTTP request (via requests/urllib3) """
raise ConnectionResetError('[Errno 104] Connection reset by peer')
mock_getresponse.side_effect = getresponse_call
nova = openstack.get_nova_client()
with self.assertRaises(requests.exceptions.ConnectionError):
nova.servers.get('test-id')
self.assertEqual(mock_getresponse.call_count, 11)
self.assertEqual(mock_retry_sleep.call_count, 10)
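# For orientation, a minimal create_server() consistent with the mock calls asserted
# in test_create_server above could look like the sketch below (illustrative only;
# the real helper in instance/openstack.py may differ):
#
#   def create_server(nova, name, flavor_selection, image_selection, key_name=None):
#       flavor = nova.flavors.find(**flavor_selection)
#       image = nova.images.find(**image_selection)
#       return nova.servers.create(name, image, flavor, key_name=key_name)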
omarkhan/opencraft | instance/tests/test_openstack.py | Python | agpl-3.0 | 3,877 | 0.00129
# This file is part of django-ca (https://github.com/mathiasertl/django-ca).
#
# django-ca is free software: you can redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# django-ca is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with django-ca. If not,
# see <http://www.gnu.org/licenses/>.
"""Test cases for :py:mod:`django_ca.extensions`."""
import doctest
import os
import sys
import typing
from unittest import TestLoader
from unittest import TestSuite
from cryptography import x509
from cryptography.x509 import TLSFeatureType
from cryptography.x509.oid import AuthorityInformationAccessOID
from cryptography.x509.oid import ExtendedKeyUsageOID
from cryptography.x509.oid import ExtensionOID
from cryptography.x509.oid import ObjectIdentifier
from django.conf import settings
from django.test import TestCase
from django.utils.functional import cached_property
from ..extensions import KEY_TO_EXTENSION
from ..extensions import OID_TO_EXTENSION
from ..extensions import AuthorityInformationAccess
from ..extensions import AuthorityKeyIdentifier
from ..extensions import BasicConstraints
from ..extensions import CertificatePolicies
from ..extensions import CRLDistributionPoints
from ..extensions import ExtendedKeyUsage
from ..extensions import Extension
from ..extensions import FreshestCRL
from ..extensions import InhibitAnyPolicy
from ..extensions import IssuerAlternativeName
from ..extensions import KeyUsage
from ..extensions import NameConstraints
from ..extensions import OCSPNoCheck
from ..extensions import PolicyConstraints
from ..extensions import PrecertificateSignedCertificateTimestamps
from ..extensions import PrecertPoison
from ..extensions import SubjectAlternativeName
from ..extensions import SubjectKeyIdentifier
from ..extensions import TLSFeature
from ..extensions.base import UnrecognizedExtension
from ..extensions.utils import PolicyInformation
from ..models import X509CertMixin
from ..typehints import ParsablePolicyInformation
from ..utils import GeneralNameList
from .base import certs
from .base import dns
from .base import uri
from .base.extensions import CRLDistributionPointsTestCaseBase
from .base.extensions import ExtensionTestMixin
from .base.extensions import ListExtensionTestMixin
from .base.extensions import NullExtensionTestMixin
from .base.extensions import OrderedSetExtensionTestMixin
from .base.extensions import TestValues
from .base.mixins import TestCaseMixin
def load_tests( # pylint: disable=unused-argument
loader: TestLoader, tests: TestSuite, ignore: typing.Optional[str] = None
) -> TestSuite:
"""Load doctests."""
if sys.version_info >= (3, 7):
# Older python versions return a different str for classes
docs_path = os.path.join(settings.DOC_DIR, "python", "extensions.rst")
tests.addTests(
doctest.DocFileSuite(
docs_path,
module_relative=False,
globs={
"KEY_TO_EXTENSION": KEY_TO_EXTENSION,
"OID_TO_EXTENSION": OID_TO_EXTENSION,
},
)
)
tests.addTests(
doctest.DocTestSuite(
"django_ca.extensions",
extraglobs={
"ExtensionOID": ExtensionOID,
},
)
)
tests.addTests(
doctest.DocTestSuite(
"django_ca.extensions.base",
extraglobs={
"ExtendedKeyUsage": ExtendedKeyUsage,
"ExtendedKeyUsageOID": ExtendedKeyUsageOID,
"ExtensionOID": ExtensionOID,
"KeyUsage": KeyUsage,
"OCSPNoCheck": OCSPNoCheck,
"SubjectAlternativeName": SubjectAlternativeName,
"SubjectKeyIdentifier": SubjectKeyIdentifier,
},
)
)
tests.addTests(doctest.DocTestSuite("django_ca.extensions.utils"))
return tests
class AuthorityInformationAccessTestCase(ExtensionTestMixin[AuthorityInformationAccess], TestCase):
"""Test AuthorityInformationAccess extension."""
ext_class = AuthorityInformationAccess
ext_class_key = "authority_information_access"
ext_class_name = "AuthorityInformationAccess"
uri1 = "https://example1.com"
uri2 = "https://example2.net"
uri3 = "https://example3.org"
uri4 = "https://example4.at"
test_values = {
"empty": {
"values": [{}],
"expected": {"issuers": [], "ocsp": []},
"expected_bool": False,
"expected_repr": "issuers=[], ocsp=[]",
"expected_serialized": {},
"expected_text": "",
"extension_type": x509.AuthorityInformationAccess(descriptions=[]),
},
"issuer": {
"values": [
{"issuers": [uri1]},
{"issuers": [uri(uri1)]},
],
"expected": {"issuers": [uri(uri1)], "ocsp": []},
"expected_repr": f"issuers=['URI:{uri1}'], ocsp=[]",
"expected_serialized": {"issuers": [f"URI:{uri1}"]},
"expected_text": f"CA Issuers:\n * URI:{uri1}",
"extension_type": x509.AuthorityInformationAccess(
descriptions=[x509.AccessDescription(AuthorityInformationAccessOID.CA_ISSUERS, uri(uri1))]
),
},
"ocsp": {
"values": [
{"ocsp": [uri2]},
{"ocsp": [uri(uri2)]},
],
"expected": {"ocsp": [uri(uri2)], "issuers": []},
"expected_repr": f"issuers=[], ocsp=['URI:{uri2}']",
"expected_serialized": {"ocsp": [f"URI:{uri2}"]},
"expected_text": f"OCSP:\n * URI:{uri2}",
"extension_type": x509.AuthorityInformationAccess(
descriptions=[x509.AccessDescription(AuthorityInformationAccessOID.OCSP, uri(uri2))]
),
},
"both": {
"values": [
{"ocsp": [uri1], "issuers": [uri2]},
{"ocsp": [uri(uri1)], "issuers": [uri(uri2)]},
],
"expected": {"ocsp": [uri(uri1)], "issuers": [uri(uri2)]},
"expected_repr": f"issuers=['URI:{uri2}'], ocsp=['URI:{uri1}']",
"expected_serialized": {"ocsp": [f"URI:{uri1}"], "issuers": [f"URI:{uri2}"]},
"expected_text": f"CA Issuers:\n * URI:{uri2}\nOCSP:\n * URI:{uri1}",
"extension_type": x509.AuthorityInformationAccess(
descriptions=[
x509.AccessDescription(AuthorityInformationAccessOID.CA_ISSUERS, uri(uri2)),
x509.AccessDescription(AuthorityInformationAccessOID.OCSP, uri(uri1)),
]
),
},
"multiple": {
"values": [
{"ocsp": [uri1, uri2], "issuers": [uri3, uri4]},
{"ocsp": [uri1, uri(uri2)], "issuers": [uri3, uri(uri4)]},
{"ocsp": [uri(uri1), uri(uri2)], "issuers": [uri(uri3), uri(uri4)]},
],
"expected": {"ocsp": [uri(uri1), uri(uri2)], "issuers": [uri(uri3), uri(uri4)]},
"expected_repr": f"issuers=['URI:{uri3}', 'URI:{uri4}'], ocsp=['URI:{uri1}', 'URI:{uri2}']",
"expected_serialized": {
"ocsp": [f"URI:{uri1}", f"URI:{uri2}"],
"issuers": [f"URI:{uri3}", f"URI:{uri4}"],
},
"expected_text": f"CA Issuers:\n * URI:{uri3}\n * URI:{uri4}\n"
f"OCSP:\n * URI:{uri1}\n * URI:{uri2}",
"extension_type": x509.AuthorityInformationAccess(
descriptions=[
x509.AccessDescription(AuthorityInformationAccessOID.CA_ISSUERS, uri(uri3)),
x509.AccessDescription(AuthorityInformationAccessOID.CA_ISSUERS, uri(uri4)),
x509.AccessDescription(AuthorityInformationAccessOID.OCSP, uri(uri1)),
x509.AccessDescription(AuthorityInformationAccessOID.OCSP, uri(uri2)),
]
),
},
}
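# Each test_values entry lists equivalent input "values", the parsed "expected"
# form, and the repr/serialized/text renderings plus the cryptography
# extension_type that the ExtensionTestMixin assertions check them against.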
def test_bool(self) -> None:
"""Test bool(ext)."""
for config in self.test_values.values():
ext = self.ext(config["expected"])
self.assertEqual(bool(ext), config.get("expected_bool", True))
def test_value(self) -> None:
"""Overwritten because extension has no value."""
return
def test_none_value(self) -> None:
"""Test that we can use and pass None as values for GeneralNamesList values."""
ext = self.ext_class({"value": {"issuers": None, "ocsp": None}})
self.assertEqual(ext.issuers, [])
self.assertEqual(ext.ocsp, [])
self.assertEqual(ext.extension_type, x509.AuthorityInformationAccess(descriptions=[]))
def test_properties(self) -> None:
"""Test issuers and ocsp properties"""
expected_issuers = GeneralNameList([self.uri1])
expected_ocsp = GeneralNameList([self.uri2])
expected = AuthorityInformationAccess({"value": {"issuers": [self.uri1], "ocsp": [self.uri2]}})
ext = AuthorityInformationAccess()
ext.issuers = [self.uri1]
ext.ocsp = [self.uri2]
self.assertEqual(ext.issuers, expected_issuers)
self.assertEqual(ext.ocsp, expected_ocsp)
self.assertIsInstance(ext.issuers, GeneralNameList)
self.assertIsInstance(ext.ocsp, GeneralNameList)
self.assertEqual(ext, expected)
ext = AuthorityInformationAccess()
ext.issuers = expected_issuers
ext.ocsp = expected_ocsp
self.assertEqual(ext.issuers, expected_issuers)
self.assertEqual(ext.ocsp, expected_ocsp)
self.assertIsInstance(ext.issuers, GeneralNameList)
self.assertIsInstance(ext.ocsp, GeneralNameList)
self.assertEqual(ext, expected)
class AuthorityKeyIdentifierTestCase(ExtensionTestMixin[AuthorityKeyIdentifier], TestCase):
"""Test AuthorityKeyIdentifier extension."""
ext_class = AuthorityKeyIdentifier
ext_class_key = "authority_key_identifier"
ext_class_name = "AuthorityKeyIdentifier"
b1 = b"333333"
b2 = b"DDDDDD"
b3 = b"UUUUUU"
hex1 = "33:33:33:33:33:33"
hex2 = "44:44:44:44:44:44"
hex3 = "55:55:55:55:55:55"
uri1 = "http://ca.example.com/crl"
dns1 = "example.org"
s1 = 0
s2 = 1
test_values = {
"one": {
"values": [
hex1,
],
"expected": b1,
"expected_repr": f"keyid: {hex1}",
"expected_serialized": {"key_identifier": hex1},
"expected_text": f"* KeyID: {hex1}",
"extension_type": x509.AuthorityKeyIdentifier(b1, None, None),
},
"two": {
"values": [
hex2,
],
"expected": b2,
"expected_repr": f"keyid: {hex2}",
"expected_serialized": {"key_identifier": hex2},
"expected_text": f"* KeyID: {hex2}",
"extension_type": x509.AuthorityKeyIdentifier(b2, None, None),
},
"three": {
"values": [
hex3,
],
"expected": b3,
"expected_repr": f"keyid: {hex3}",
"expected_serialized": {"key_identifier": hex3},
"expected_text": f"* KeyID: {hex3}",
"extension_type": x509.AuthorityKeyIdentifier(b3, None, None),
},
"issuer/serial": {
"expected": {"authority_cert_issuer": [dns1], "authority_cert_serial_number": s1},
"values": [{"authority_cert_issuer": [dns1], "authority_cert_serial_number": s1}],
"expected_repr": f"issuer: ['DNS:{dns1}'], serial: {s1}",
"expected_serialized": {
"authority_cert_issuer": [f"DNS:{dns1}"],
"authority_cert_serial_number": s1,
},
"expected_text": f"* Issuer:\n * DNS:{dns1}\n* Serial: {s1}",
"extension_type": x509.AuthorityKeyIdentifier(None, [dns(dns1)], s1),
},
}
def test_from_subject_key_identifier(self) -> None:
"""Test creating an extension from a subject key identifier."""
for config in self.test_values.values():
if not isinstance(config["expected"], bytes):
continue
ski = SubjectKeyIdentifier({"value": config["expected"]})
ext = self.ext_class(ski)
self.assertExtensionEqual(ext, self.ext_class({"value": config["expected"]}))
def test_none_value(self) -> None:
"""Test that we can use and pass None as values for GeneralNamesList values."""
ext = self.ext_class(
{
"value": {
"key_identifier": self.b1,
"authority_cert_issuer": None,
"authority_cert_serial_number": None,
}
}
)
self.assertEqual(
ext.extension_type,
x509.AuthorityKeyIdentifier(
key_identifier=self.b1, authority_cert_issuer=None, authority_cert_serial_number=None
),
)
def test_value(self) -> None:
"""Overwritten because extension has no value."""
return
class BasicConstraintsTestCase(ExtensionTestMixin[BasicConstraints], TestCase):
"""Test BasicConstraints extension."""
ext_class = BasicConstraints
ext_class_key = "basic_constraints"
ext_class_name = "BasicConstraints"
test_values = {
"no_ca": {
"values": [
{"ca": False},
{"ca": False, "pathlen": 3}, # ignored b/c ca=False
{"ca": False, "pathlen": None}, # ignored b/c ca=False
],
"expected": {"ca": False, "pathlen": None},
"expected_text": "CA:FALSE",
"expected_repr": "ca=False",
"expected_serialized": {"ca": False},
"extension_type": x509.BasicConstraints(ca=False, path_length=None),
},
"no_pathlen": {
"values": [
{"ca": True},
{"ca": True, "pathlen": None},
],
"expected": {"ca": True, "pathlen": None},
"expected_text": "CA:TRUE",
"expected_repr": "ca=True, pathlen=None",
"expected_serialized": {"ca": True, "pathlen": None},
"extension_type": x509.BasicConstraints(ca=True, path_length=None),
},
"pathlen_zero": {
"values": [
{"ca": True, "pathlen": 0},
],
"expected": {"ca": True, "pathlen": 0},
"expected_text": "CA:TRUE, pathlen:0",
"expected_repr": "ca=True, pathlen=0",
"expected_serialized": {"ca": True, "pathlen": 0},
"extension_type": x509.BasicConstraints(ca=True, path_length=0),
},
"pathlen_three": {
"values": [
{"ca": True, "pathlen": 3},
],
"expected": {"ca": True, "pathlen": 3},
"expected_text": "CA:TRUE, pathlen:3",
"expected_repr": "ca=True, pathlen=3",
"expected_serialized": {"ca": True, "pathlen": 3},
"extension_type": x509.BasicConstraints(ca=True, path_length=3),
},
}
def test_invalid_pathlen(self) -> None:
"""Test passing an invalid pathlen."""
with self.assertRaisesRegex(ValueError, r'^Could not parse pathlen: "foo"$'):
BasicConstraints({"value": {"ca": True, "pathlen": "foo"}})
with self.assertRaisesRegex(ValueError, r'^Could not parse pathlen: ""$'):
BasicConstraints({"value": {"ca": True, "pathlen": ""}})
with self.assertRaisesRegex(ValueError, r'^Could not parse pathlen: "foobar"$'):
BasicConstraints({"value": {"ca": True, "pathlen": "foobar"}})
def test_value(self) -> None:
"""Overwritten because extension has no value."""
return
class CRLDistributionPointsTestCase(
CRLDistributionPointsTestCaseBase[CRLDistributionPoints, x509.CRLDistributionPoints], TestCase
):
"""Test CRLDistributionPoints extension."""
ext_class = CRLDistributionPoints
ext_class_key = "crl_distribution_points"
ext_class_name = "CRLDistributionPoints"
ext_class_type = x509.CRLDistributionPoints
cg_dps1 = x509.CRLDistributionPoints([CRLDistributionPointsTestCaseBase.cg_dp1])
cg_dps2 = x509.CRLDistributionPoints([CRLDistributionPointsTestCaseBase.cg_dp2])
cg_dps3 = x509.CRLDistributionPoints([CRLDistributionPointsTestCaseBase.cg_dp3])
cg_dps4 = x509.CRLDistributionPoints([CRLDistributionPointsTestCaseBase.cg_dp4])
class CertificatePoliciesTestCase(
ListExtensionTestMixin[CertificatePolicies], ExtensionTestMixin[CertificatePolicies], TestCase
):
"""Test CertificatePolicies extension."""
ext_class = CertificatePolicies
ext_class_name = "CertificatePolicies"
ext_class_key = "certificate_policies"
oid = "2.5.29.32.0"
text1, text2, text3, text4, text5, text6 = [f"text{i}" for i in range(1, 7)]
un1: ParsablePolicyInformation = {
"policy_identifier": oid,
"policy_qualifiers": [text1],
}
un2: ParsablePolicyInformation = {
"policy_identifier": oid,
"policy_qualifiers": [
{
"explicit_text": text2,
}
],
}
un3: ParsablePolicyInformation = {
"policy_identifier": oid,
"policy_qualifiers": [
{
"notice_reference": {
"organization": text3,
"notice_numbers": [
1,
],
}
}
],
}
un4: ParsablePolicyInformation = {
"policy_identifier": oid,
"policy_qualifiers": [
text4,
{
"explicit_text": text5,
"notice_reference": {
"organization": text6,
"notice_numbers": [1, 2, 3],
},
},
],
}
p1 = PolicyInformation(un1)
p2 = PolicyInformation(un2)
p3 = PolicyInformation(un3)
p4 = PolicyInformation(un4)
xun1 = text1
xun2 = x509.UserNotice(explicit_text=text2, notice_reference=None)
xun3 = x509.UserNotice(
explicit_text=None, notice_reference=x509.NoticeReference(organization=text3, notice_numbers=[1])
)
xun4_1 = text4
xun4_2 = x509.UserNotice(
explicit_text=text5,
notice_reference=x509.NoticeReference(organization=text6, notice_numbers=[1, 2, 3]),
)
xpi1 = x509.PolicyInformation(policy_identifier=ObjectIdentifier(oid), policy_qualifiers=[xun1])
xpi2 = x509.PolicyInformation(policy_identifier=ObjectIdentifier(oid), policy_qualifiers=[xun2])
xpi3 = x509.PolicyInformation(policy_identifier=ObjectIdentifier(oid), policy_qualifiers=[xun3])
xpi4 = x509.PolicyInformation(policy_identifier=ObjectIdentifier(oid), policy_qualifiers=[xun4_1, xun4_2])
xcp1 = x509.CertificatePolicies(policies=[xpi1])
xcp2 = x509.CertificatePolicies(policies=[xpi2])
xcp3 = x509.CertificatePolicies(policies=[xpi3])
xcp4 = x509.CertificatePolicies(policies=[xpi4])
xcp5 = x509.CertificatePolicies(policies=[xpi1, xpi2, xpi4])
test_values = {
"one": {
"values": [[un1], [xpi1]],
"expected": [p1],
"expected_djca": [p1],
"expected_repr": "1 policy",
"expected_serialized": [un1],
"expected_text": f"* Policy Identifier: {oid}\n Policy Qualifiers:\n * {text1}",
"extension_type": xcp1,
},
"two": {
"values": [[un2], [xpi2]],
"expected": [p2],
"expected_djca": [p2],
"expected_repr": "1 policy",
"expected_serialized": [un2],
"expected_text": f"* Policy Identifier: {oid}\n Policy Qualifiers:\n * UserNotice:\n"
f" * Explicit text: {text2}",
"extension_type": xcp2,
},
"three": {
"values": [[un3], [xpi3]],
"expected": [p3],
"expected_djca": [p3],
"expected_repr": "1 policy",
"expected_serialized": [un3],
"expected_text": f"* Policy Identifier: {oid}\n Policy Qualifiers:\n * UserNotice:\n"
f" * Reference:\n * Organiziation: {text3}\n * Notice Numbers: [1]",
"extension_type": xcp3,
},
"four": {
"values": [[un4], [xpi4]],
"expected": [p4],
"expected_djca": [p4],
"expected_repr": "1 policy",
"expected_serialized": [un4],
"expected_text": f"""* Policy Identifier: {oid}\n Policy Qualifiers:\n * {text4}
* UserNotice:\n * Explicit text: {text5}\n * Reference:\n * Organiziation: {text6}
* Notice Numbers: [1, 2, 3]""",
"extension_type": xcp4,
},
"five": {
"values": [[un1, un2, un4], [xpi1, xpi2, xpi4], [un1, xpi2, un4]],
"expected": [p1, p2, p4],
"expected_djca": [p1, p2, p4],
"expected_repr": "3 policies",
"expected_serialized": [un1, un2, un4],
"expected_text": f"* Policy Identifier: {oid}\n Policy Qualifiers:\n * {text1}\n"
f"* Policy Identifier: {oid}\n Policy Qualifiers:\n * UserNotice:\n"
f" * Explicit text: {text2}\n"
f"* Policy Identifier: {oid}\n Policy Qualifiers:\n * {text4}\n * UserNotice:\n"
f" * Explicit text: {text5}\n * Reference:\n * Organiziation: {text6}\n"
" * Notice Numbers: [1, 2, 3]",
"extension_type": xcp5,
},
}
class FreshestCRLTestCase(CRLDistributionPointsTestCaseBase[FreshestCRL, x509.FreshestCRL], TestCase):
"""Test FreshestCRL extension."""
ext_class = FreshestCRL
ext_class_key = "freshest_crl"
ext_class_name = "FreshestCRL"
ext_class_type = x509.FreshestCRL
cg_dps1 = x509.FreshestCRL([CRLDistributionPointsTestCaseBase.cg_dp1])
cg_dps2 = x509.FreshestCRL([CRLDistributionPointsTestCaseBase.cg_dp2])
cg_dps3 = x509.FreshestCRL([CRLDistributionPointsTestCaseBase.cg_dp3])
cg_dps4 = x509.FreshestCRL([CRLDistributionPointsTestCaseBase.cg_dp4])
class InhibitAnyPolicyTestCase(ExtensionTestMixin[InhibitAnyPolicy], TestCase):
"""Test InhibitAnyPolicy extension."""
ext_class = InhibitAnyPolicy
ext_class_key = "inhibit_any_policy"
ext_class_name = "InhibitAnyPolicy"
test_values = {
"zero": {
"values": [
0,
],
"expected": 0,
"expected_repr": "0",
"expected_serialized": 0,
"expected_text": "0",
"extension_type": x509.InhibitAnyPolicy(0),
},
"one": {
"values": [
1,
],
"expected": 1,
"expected_repr": "1",
"expected_serialized": 1,
"expected_text": "1",
"extension_type": x509.InhibitAnyPolicy(1),
},
}
def test_int(self) -> None:
"""Test passing various int values."""
ext = InhibitAnyPolicy(0)
self.assertEqual(ext.skip_certs, 0)
ext = InhibitAnyPolicy(1)
self.assertEqual(ext.skip_certs, 1)
with self.assertRaisesRegex(ValueError, r"-1: must be a positive int$"):
InhibitAnyPolicy(-1)
with self.assertRaisesRegex(ValueError, r"-1: must be a positive int$"):
InhibitAnyPolicy({"value": -1})
def test_default(self) -> None:
"""Test the default value for the constructor."""
self.assertEqual(InhibitAnyPolicy().skip_certs, 0)
def test_no_int(self) -> None:
"""Test passing invalid values."""
with self.assertRaisesRegex(ValueError, r"^abc: must be an int$"):
InhibitAnyPolicy({"value": "abc"})
with self.assertRaisesRegex(ValueError, r"^Value is of unsupported type str$"):
InhibitAnyPolicy("abc") # type: ignore[arg-type]
def test_value(self) -> None:
"""Overwritten because extension has no value."""
return
class IssuerAlternativeNameTestCase(
ListExtensionTestMixin[IssuerAlternativeName], ExtensionTestMixin[IssuerAlternativeName], TestCase
):
"""Test IssuerAlternativeName extension."""
ext_class = IssuerAlternativeName
ext_class_key = "issuer_alternative_name"
ext_class_name = "IssuerAlternativeName"
ext_class_type = x509.IssuerAlternativeName
uri1 = value1 = "https://example.com"
uri2 = value2 = "https://example.net"
dns1 = value3 = "example.com"
dns2 = value4 = "example.net"
et1 = x509.IssuerAlternativeName([uri(value1)])
invalid_values = ["DNS:https://example.com", True, None]
test_values = {
"empty": {
"values": [[]],
"expected": [],
"expected_repr": "[]",
"expected_serialized": [],
"expected_text": "",
"extension_type": ext_class_type([]),
},
"uri": {
"values": [[uri1], [uri(uri1)]],
"expected": [uri(uri1)],
"expected_repr": f"['URI:{uri1}']",
"expected_serialized": [f"URI:{uri1}"],
"expected_text": f"* URI:{uri1}",
"extension_type": ext_class_type([uri(uri1)]),
},
"dns": {
"values": [[dns1], [dns(dns1)]],
"expected": [dns(dns1)],
"expected_repr": f"['DNS:{dns1}']",
"expected_serialized": [f"DNS:{dns1}"],
"expected_text": f"* DNS:{dns1}",
"extension_type": ext_class_type([dns(dns1)]),
},
"both": {
"values": [[uri1, dns1], [uri(uri1), dns(dns1)], [uri1, dns(dns1)], [uri(uri1), dns1]],
"expected": [uri(uri1), dns(dns1)],
"expected_repr": f"['URI:{uri1}', 'DNS:{dns1}']",
"expected_serialized": [f"URI:{uri1}", f"DNS:{dns1}"],
"expected_text": f"* URI:{uri1}\n* DNS:{dns1}",
"extension_type": ext_class_type([uri(uri1), dns(dns1)]),
},
"all": {
"values": [
[uri1, uri2, dns1, dns2],
[uri(uri1), uri(uri2), dns1, dns2],
[uri1, uri2, dns(dns1), dns(dns2)],
[uri(uri1), uri(uri2), dns(dns1), dns(dns2)],
],
"expected": [uri(uri1), uri(uri2), dns(dns1), dns(dns2)],
"expected_repr": f"['URI:{uri1}', 'URI:{uri2}', 'DNS:{dns1}', 'DNS:{dns2}']",
"expected_serialized": [f"URI:{uri1}", f"URI:{uri2}", f"DNS:{dns1}", f"DNS:{dns2}"],
"expected_text": f"* URI:{uri1}\n* URI:{uri2}\n* DNS:{dns1}\n* DNS:{dns2}",
"extension_type": ext_class_type([uri(uri1), uri(uri2), dns(dns1), dns(dns2)]),
},
"order": { # same as "all" above but other order
"values": [
[dns2, dns1, uri2, uri1],
[dns(dns2), dns(dns1), uri2, uri1],
[dns2, dns1, uri(uri2), uri(uri1)],
[dns(dns2), dns(dns1), uri(uri2), uri(uri1)],
],
"expected": [dns(dns2), dns(dns1), uri(uri2), uri(uri1)],
"expected_repr": f"['DNS:{dns2}', 'DNS:{dns1}', 'URI:{uri2}', 'URI:{uri1}']",
"expected_serialized": [f"DNS:{dns2}", f"DNS:{dns1}", f"URI:{uri2}", f"URI:{uri1}"],
"expected_text": f"* DNS:{dns2}\n* DNS:{dns1}\n* URI:{uri2}\n* URI:{uri1}",
"extension_type": ext_class_type([dns(dns2), dns(dns1), uri(uri2), uri(uri1)]),
},
}
def test_none_value(self) -> None:
"""Test that we can pass a None value for GeneralNameList items."""
empty = self.ext_class({"value": None})
self.assertEqual(empty.extension_type, self.ext_class_type([]))
self.assertEqual(empty, self.ext_class({"value": []}))
empty.insert(0, self.value1)
self.assertEqual(empty.extension_type, self.et1)
class PolicyConstraintsTestCase(ExtensionTestMixin[PolicyConstraints], TestCase):
"""Test PolicyConstraints extension."""
ext_class = PolicyConstraints
ext_class_key = "policy_constraints"
ext_class_name = "PolicyConstraints"
test_values = {
"rep_zero": {
"values": [
{"require_explicit_policy": 0},
],
"expected": {"require_explicit_policy": 0},
"expected_repr": "require_explicit_policy=0",
"expected_serialized": {"require_explicit_policy": 0},
"expected_text": "* RequireExplicitPolicy: 0",
"extension_type": x509.PolicyConstraints(require_explicit_policy=0, inhibit_policy_mapping=None),
},
"rep_one": {
"values": [
{"require_explicit_policy": 1},
],
"expected": {"require_explicit_policy": 1},
"expected_repr": "require_explicit_policy=1",
"expected_serialized": {"require_explicit_policy": 1},
"expected_text": "* RequireExplicitPolicy: 1",
"extension_type": x509.PolicyConstraints(require_explicit_policy=1, inhibit_policy_mapping=None),
},
"rep_none": {
"values": [
{"require_explicit_policy": None},
],
"expected": {},
"expected_repr": "-",
"expected_serialized": {},
"expected_text": "",
"extension_type": None,
},
"iap_zero": {
"values": [
{"inhibit_policy_mapping": 0},
],
"expected": {"inhibit_policy_mapping": 0},
"expected_repr": "inhibit_policy_mapping=0",
"expected_serialized": {"inhibit_policy_mapping": 0},
"expected_text": "* InhibitPolicyMapping: 0",
"extension_type": x509.PolicyConstraints(require_explicit_policy=None, inhibit_policy_mapping=0),
},
"iap_one": {
"values": [
{"inhibit_policy_mapping": 1},
],
"expected": {"inhibit_policy_mapping": 1},
"expected_repr": "inhibit_policy_mapping=1",
"expected_serialized": {"inhibit_policy_mapping": 1},
"expected_text": "* InhibitPolicyMapping: 1",
"extension_type": x509.PolicyConstraints(require_explicit_policy=None, inhibit_policy_mapping=1),
},
"iap_none": {
"values": [
{"inhibit_policy_mapping": None},
],
"expected": {},
"expected_repr": "-",
"expected_serialized": {},
"expected_text": "",
"extension_type": None,
},
"both": {
"values": [
{"inhibit_policy_mapping": 2, "require_explicit_policy": 3},
],
"expected": {"inhibit_policy_mapping": 2, "require_explicit_policy": 3},
"expected_repr": "inhibit_policy_mapping=2, require_explicit_policy=3",
"expected_serialized": {"inhibit_policy_mapping": 2, "require_explicit_policy": 3},
"expected_text": "* InhibitPolicyMapping: 2\n* RequireExplicitPolicy: 3",
"extension_type": x509.PolicyConstraints(require_explicit_policy=3, inhibit_policy_mapping=2),
},
}
def test_init_error(self) -> None:
"""Test constructor errors."""
with self.assertRaisesRegex(ValueError, r"^abc: inhibit_policy_mapping must be int or None$"):
PolicyConstraints({"value": {"inhibit_policy_mapping": "abc"}})
with self.assertRaisesRegex(ValueError, r"^-1: inhibit_policy_mapping must be a positive int$"):
PolicyConstraints({"value": {"inhibit_policy_mapping": -1}})
with self.assertRaisesRegex(ValueError, r"^abc: require_explicit_policy must be int or None$"):
PolicyConstraints({"value": {"require_explicit_policy": "abc"}})
with self.assertRaisesRegex(ValueError, r"^-1: require_explicit_policy must be a positive int$"):
PolicyConstraints({"value": {"require_explicit_policy": -1}})
def test_properties(self) -> None:
"""Test properties"""
pconst = PolicyConstraints()
self.assertIsNone(pconst.inhibit_policy_mapping)
self.assertIsNone(pconst.require_explicit_policy)
pconst = PolicyConstraints({"value": {"inhibit_policy_mapping": 1, "require_explicit_policy": 2}})
self.assertEqual(pconst.inhibit_policy_mapping, 1)
self.assertEqual(pconst.require_explicit_policy, 2)
pconst.inhibit_policy_mapping = 3
pconst.require_explicit_policy = 4
self.assertEqual(pconst.inhibit_policy_mapping, 3)
self.assertEqual(pconst.require_explicit_policy, 4)
pconst.inhibit_policy_mapping = None
pconst.require_explicit_policy = None
self.assertIsNone(pconst.inhibit_policy_mapping)
self.assertIsNone(pconst.require_explicit_policy)
def test_value(self) -> None:
"""Overwritten because extension has no value."""
return
class KeyUsageTestCase(OrderedSetExtensionTestMixin[KeyUsage], ExtensionTestMixin[KeyUsage], TestCase):
"""Test KeyUsage extension."""
ext_class = KeyUsage
ext_class_key = "key_usage"
ext_class_name = "KeyUsage"
test_values = {
"one": {
"values": [
{
"key_agreement",
},
[
"keyAgreement",
],
],
"expected": frozenset(["key_agreement"]),
"expected_repr": "['keyAgreement']",
"expected_text": "* keyAgreement",
"expected_serialized": ["keyAgreement"],
"extension_type": x509.KeyUsage(
digital_signature=False,
content_commitment=False,
key_encipherment=False,
data_encipherment=False,
key_agreement=True,
key_cert_sign=False,
crl_sign=False,
encipher_only=False,
decipher_only=False,
),
},
"two": {
"values": [
{
"key_agreement",
"key_encipherment",
},
["keyAgreement", "keyEncipherment"],
["keyEncipherment", "keyAgreement"],
["keyEncipherment", "key_agreement"],
],
"expected": frozenset(["key_agreement", "key_encipherment"]),
"expected_repr": "['keyAgreement', 'keyEncipherment']",
"expected_text": "* keyAgreement\n* keyEncipherment",
"expected_serialized": ["keyAgreement", "keyEncipherment"],
"extension_type": x509.KeyUsage(
digital_signature=False,
content_commitment=False,
key_encipherment=True,
data_encipherment=False,
key_agreement=True,
key_cert_sign=False,
crl_sign=False,
encipher_only=False,
decipher_only=False,
),
},
"three": {
"values": [
{
"key_agreement",
"key_encipherment",
"content_commitment",
},
[
"keyAgreement",
"keyEncipherment",
"nonRepudiation",
],
[
"nonRepudiation",
"keyAgreement",
"keyEncipherment",
],
[
"nonRepudiation",
"keyAgreement",
"keyEncipherment",
],
[
"content_commitment",
"key_agreement",
"key_encipherment",
],
],
"expected": frozenset(
[
"key_agreement",
"key_encipherment",
"content_commitment",
]
),
"expected_repr": "['keyAgreement', 'keyEncipherment', 'nonRepudiation']",
"expected_text": "* keyAgreement\n* keyEncipherment\n* nonRepudiation",
"expected_serialized": ["keyAgreement", "keyEncipherment", "nonRepudiation"],
"extension_type": x509.KeyUsage(
digital_signature=False,
content_commitment=True,
key_encipherment=True,
data_encipherment=False,
key_agreement=True,
key_cert_sign=False,
crl_sign=False,
encipher_only=False,
decipher_only=False,
),
},
}
def test_completeness(self) -> None:
"""Test that we support all key usages."""
self.assertEqual(set(KeyUsage.CRYPTOGRAPHY_MAPPING.keys()), {e[0] for e in KeyUsage.CHOICES})
def test_auto_add(self) -> None:
"""Test that ``decipher_only`` and ``encipher_only`` automatically add ``key_agreement``."""
self.assertEqual(
KeyUsage({"value": ["decipher_only"]}), KeyUsage({"value": ["decipher_only", "key_agreement"]})
)
self.assertEqual(
KeyUsage({"value": ["encipher_only"]}), KeyUsage({"value": ["encipher_only", "key_agreement"]})
)
def test_unknown_values(self) -> None:
"""Test passing unknown values."""
with self.assertRaisesRegex(ValueError, r"^Unknown value: foo$"):
KeyUsage({"value": ["foo"]})
with self.assertRaisesRegex(ValueError, r"^Unknown value: True$"):
KeyUsage({"value": [True]})
class ExtendedKeyUsageTestCase(
OrderedSetExtensionTestMixin[ExtendedKeyUsage], ExtensionTestMixin[ExtendedKeyUsage], TestCase
):
"""Test ExtendedKeyUsage extension."""
ext_class = ExtendedKeyUsage
ext_class_key = "extended_key_usage"
ext_class_name = "ExtendedKeyUsage"
test_values = {
"one": {
"values": [
{"serverAuth"},
{ExtendedKeyUsageOID.SERVER_AUTH},
[ExtendedKeyUsageOID.SERVER_AUTH],
],
"extension_type": x509.ExtendedKeyUsage([ExtendedKeyUsageOID.SERVER_AUTH]),
"expected": frozenset([ExtendedKeyUsageOID.SERVER_AUTH]),
"expected_repr": "['serverAuth']",
"expected_serialized": ["serverAuth"],
"expected_text": "* serverAuth",
},
"two": {
"values": [
{
"serverAuth",
"clientAuth",
},
{ExtendedKeyUsageOID.CLIENT_AUTH, ExtendedKeyUsageOID.SERVER_AUTH},
[ExtendedKeyUsageOID.SERVER_AUTH, ExtendedKeyUsageOID.CLIENT_AUTH],
[ExtendedKeyUsageOID.SERVER_AUTH, "clientAuth"],
],
"extension_type": x509.ExtendedKeyUsage(
[ExtendedKeyUsageOID.CLIENT_AUTH, ExtendedKeyUsageOID.SERVER_AUTH]
),
"expected": frozenset([ExtendedKeyUsageOID.SERVER_AUTH, ExtendedKeyUsageOID.CLIENT_AUTH]),
"expected_repr": "['clientAuth', 'serverAuth']",
"expected_serialized": ["clientAuth", "serverAuth"],
"expected_text": "* clientAuth\n* serverAuth",
},
"three": {
"values": [
{
"serverAuth",
"clientAuth",
"timeStamping",
},
{
ExtendedKeyUsageOID.CLIENT_AUTH,
ExtendedKeyUsageOID.SERVER_AUTH,
ExtendedKeyUsageOID.TIME_STAMPING,
},
{
ExtendedKeyUsageOID.CLIENT_AUTH,
"serverAuth",
ExtendedKeyUsageOID.TIME_STAMPING,
},
[
ExtendedKeyUsageOID.SERVER_AUTH,
ExtendedKeyUsageOID.CLIENT_AUTH,
ExtendedKeyUsageOID.TIME_STAMPING,
],
[
ExtendedKeyUsageOID.TIME_STAMPING,
ExtendedKeyUsageOID.SERVER_AUTH,
ExtendedKeyUsageOID.CLIENT_AUTH,
],
],
"extension_type": x509.ExtendedKeyUsage(
[
ExtendedKeyUsageOID.CLIENT_AUTH,
ExtendedKeyUsageOID.SERVER_AUTH,
ExtendedKeyUsageOID.TIME_STAMPING,
]
),
"expected": frozenset(
[
ExtendedKeyUsageOID.SERVER_AUTH,
ExtendedKeyUsageOID.CLIENT_AUTH,
ExtendedKeyUsageOID.TIME_STAMPING,
]
),
"expected_repr": "['clientAuth', 'serverAuth', 'timeStamping']",
"expected_serialized": ["clientAuth", "serverAuth", "timeStamping"],
"expected_text": "* clientAuth\n* serverAuth\n* timeStamping",
},
}
def test_unknown_values(self) -> None:
"""Test passing unknown values."""
with self.assertRaisesRegex(ValueError, r"^Unknown value: foo$"):
ExtendedKeyUsage({"value": ["foo"]})
with self.assertRaisesRegex(ValueError, r"^Unknown value: True$"):
ExtendedKeyUsage({"value": [True]})
def test_completeness(self) -> None:
"""Test that we support all ExtendedKeyUsageOIDs."""
for attr in [getattr(ExtendedKeyUsageOID, a) for a in dir(ExtendedKeyUsageOID) if a[0] != "_"]:
if isinstance(attr, ObjectIdentifier):
# pylint: disable=protected-access; ok for a test case
self.assertIn(attr, ExtendedKeyUsage._CRYPTOGRAPHY_MAPPING_REVERSED)
        # make sure we haven't forgotten any keys in the form selection
self.assertEqual(
set(ExtendedKeyUsage.CRYPTOGRAPHY_MAPPING.keys()), {e[0] for e in ExtendedKeyUsage.CHOICES}
)
class NameConstraintsTestCase(ExtensionTestMixin[NameConstraints], TestCase):
"""Test NameConstraints extension."""
ext_class = NameConstraints
ext_class_key = "name_constraints"
ext_class_name = "NameConstraints"
d1 = "example.com"
d2 = "example.net"
test_values = {
"empty": {
"values": [
{"excluded": [], "permitted": []},
{"excluded": None, "permitted": None},
],
"expected": x509.NameConstraints(permitted_subtrees=[], excluded_subtrees=[]),
"expected_repr": "permitted=[], excluded=[]",
"expected_serialized": {"excluded": [], "permitted": []},
"expected_text": "",
"extension_type": x509.NameConstraints(permitted_subtrees=[], excluded_subtrees=[]),
},
"permitted": {
"values": [
{"permitted": [d1]},
{"permitted": [f"DNS:{d1}"]},
{"permitted": [dns(d1)]},
{"permitted": [dns(d1)], "excluded": []},
],
"expected": x509.NameConstraints(permitted_subtrees=[dns(d1)], excluded_subtrees=[]),
"expected_repr": f"permitted=['DNS:{d1}'], excluded=[]",
"expected_serialized": {"excluded": [], "permitted": [f"DNS:{d1}"]},
"expected_text": f"Permitted:\n * DNS:{d1}",
"extension_type": x509.NameConstraints(permitted_subtrees=[dns(d1)], excluded_subtrees=[]),
},
"excluded": {
"values": [
{"excluded": [d1]},
{"excluded": [f"DNS:{d1}"]},
{"excluded": [dns(d1)]},
{"excluded": [dns(d1)], "permitted": []},
],
"expected": x509.NameConstraints(permitted_subtrees=[], excluded_subtrees=[dns(d1)]),
"expected_repr": f"permitted=[], excluded=['DNS:{d1}']",
"expected_serialized": {"excluded": [f"DNS:{d1}"], "permitted": []},
"expected_text": f"Excluded:\n * DNS:{d1}",
"extension_type": x509.NameConstraints(permitted_subtrees=[], excluded_subtrees=[dns(d1)]),
},
"both": {
"values": [
{"permitted": [d1], "excluded": [d2]},
{"permitted": [f"DNS:{d1}"], "excluded": [f"DNS:{d2}"]},
{"permitted": [dns(d1)], "excluded": [dns(d2)]},
{"permitted": [dns(d1)], "excluded": [d2]},
],
"expected": x509.NameConstraints(permitted_subtrees=[dns(d1)], excluded_subtrees=[dns(d2)]),
"expected_repr": f"permitted=['DNS:{d1}'], excluded=['DNS:{d2}']",
"expected_serialized": {"excluded": [f"DNS:{d2}"], "permitted": [f"DNS:{d1}"]},
"expected_text": f"Permitted:\n * DNS:{d1}\nExcluded:\n * DNS:{d2}",
"extension_type": x509.NameConstraints(permitted_subtrees=[dns(d1)], excluded_subtrees=[dns(d2)]),
},
}
def test_bool(self) -> None:
"""Test bool(ext)."""
self.assertFalse(bool(NameConstraints()))
self.assertTrue(bool(NameConstraints({"value": {"permitted": ["example.com"]}})))
self.assertTrue(bool(NameConstraints({"value": {"excluded": ["example.com"]}})))
    def test_setters(self) -> None:
        """Test item setters."""
expected = NameConstraints(
{
"value": {
"permitted": ["example.com"],
"excluded": ["example.net"],
}
}
)
ext = NameConstraints()
ext.permitted = ["example.com"]
ext.excluded = ["example.net"]
self.assertEqual(ext, expected)
ext = NameConstraints()
ext.permitted = GeneralNameList(["example.com"])
ext.excluded = GeneralNameList(["example.net"])
self.assertEqual(ext, expected)
ext = NameConstraints()
ext.permitted += ["example.com"]
ext.excluded += ["example.net"]
self.assertExtensionEqual(ext, expected)
    def test_none_value(self) -> None:
        """Test that we can pass None as values for GeneralNameList values."""
ext = self.ext_class({"value": {}})
self.assertEqual(
ext.extension_type, x509.NameConstraints(permitted_subtrees=[], excluded_subtrees=[])
)
self.assertEqual(ext.excluded, [])
self.assertEqual(ext.permitted, [])
ext = self.ext_class({"value": {"permitted": None, "excluded": None}})
self.assertEqual(
ext.extension_type, x509.NameConstraints(permitted_subtrees=[], excluded_subtrees=[])
)
self.assertEqual(ext.excluded, [])
self.assertEqual(ext.permitted, [])
def test_value(self) -> None:
"""Overwritten because extension has no value."""
return
class OCSPNoCheckTestCase(NullExtensionTestMixin[OCSPNoCheck], TestCase):
"""Test OCSPNoCheck extension."""
ext_class = OCSPNoCheck
ext_class_key = "ocsp_no_check"
ext_class_name = "OCSPNoCheck"
test_values: TestValues = {
"empty": {
"values": [{}, None],
"expected": None,
"expected_repr": "",
"expected_serialized": None,
"expected_text": "OCSPNoCheck",
"extension_type": x509.OCSPNoCheck(),
},
}
class PrecertPoisonTestCase(NullExtensionTestMixin[PrecertPoison], TestCase):
"""Test PrecertPoison extension."""
ext_class = PrecertPoison
ext_class_key = "precert_poison"
ext_class_name = "PrecertPoison"
force_critical = True
test_values: TestValues = {
"empty": {
"values": [{}, None],
"expected": None,
"expected_repr": "",
"expected_serialized": None,
"expected_text": "PrecertPoison",
"extension_type": x509.PrecertPoison(),
},
}
def test_eq(self) -> None:
"""Test for equality."""
for values in self.test_values.values():
ext = self.ext(values["expected"])
self.assertEqual(ext, ext)
ext_critical = self.ext(values["expected"], critical=True)
self.assertEqual(ext_critical, ext_critical)
for value in values["values"]:
ext_1 = self.ext(value)
self.assertEqual(ext, ext_1)
ext_2 = self.ext(value, critical=True)
self.assertEqual(ext_critical, ext_2)
def test_hash(self) -> None:
"""Test hash()."""
for config in self.test_values.values():
ext = self.ext(config["expected"])
ext_critical = self.ext(config["expected"], critical=True)
self.assertEqual(hash(ext), hash(ext_critical))
for other_config in self.test_values.values():
other_ext = self.ext(other_config["expected"])
other_ext_critical = self.ext(other_config["expected"], critical=True)
if config["expected"] == other_config["expected"]:
self.assertEqual(hash(ext), hash(other_ext))
self.assertEqual(hash(ext_critical), hash(other_ext_critical))
else:
self.assertNotEqual(hash(ext), hash(other_ext))
self.assertNotEqual(hash(ext_critical), hash(other_ext_critical))
def test_critical(self) -> None:
"""Test the critical property."""
with self.assertRaisesRegex(ValueError, r"^PrecertPoison must always be marked as critical$"):
PrecertPoison({"critical": False}) # type: ignore[arg-type]
class PrecertificateSignedCertificateTimestampsTestCase(TestCaseMixin, TestCase):
"""Test PrecertificateSignedCertificateTimestamps extension."""
# pylint: disable=too-many-public-methods; RO-extension requires implementing everything again
# pylint: disable=too-many-instance-attributes; RO-extension requires implementing everything again
default_ca = "letsencrypt_x3"
default_cert = "letsencrypt_x3-cert"
load_cas = ("letsencrypt_x3", "comodo_ev")
load_certs = ("letsencrypt_x3-cert", "comodo_ev-cert")
ext_class = PrecertificateSignedCertificateTimestamps
ext_class_key = "precertificate_signed_certificate_timestamps"
ext_class_name = "PrecertificateSignedCertificateTimestamps"
def setUp(self) -> None:
super().setUp()
self.name1 = "letsencrypt_x3-cert"
self.name2 = "comodo_ev-cert"
cert1 = self.certs[self.name1]
cert2 = self.certs[self.name2]
self.cgx1 = cert1.pub.loaded.extensions.get_extension_for_class(
x509.PrecertificateSignedCertificateTimestamps
)
self.cgx2 = cert2.pub.loaded.extensions.get_extension_for_class(
x509.PrecertificateSignedCertificateTimestamps
)
self.ext1 = PrecertificateSignedCertificateTimestamps(self.cgx1)
self.ext2 = PrecertificateSignedCertificateTimestamps(self.cgx2)
self.exts = [self.ext1, self.ext2]
self.data1 = certs[self.name1]["precertificate_signed_certificate_timestamps_serialized"]
self.data2 = certs[self.name2]["precertificate_signed_certificate_timestamps_serialized"]
def test_config(self) -> None:
"""Test basic configuration."""
self.assertTrue(issubclass(self.ext_class, Extension))
self.assertEqual(self.ext_class.key, self.ext_class_key)
self.assertEqual(self.ext_class.name, self.ext_class_name)
# Test mapping dicts
self.assertEqual(KEY_TO_EXTENSION[self.ext_class.key], self.ext_class)
self.assertEqual(OID_TO_EXTENSION[self.ext_class.oid], self.ext_class)
# test that the model matches
self.assertTrue(hasattr(X509CertMixin, self.ext_class.key))
self.assertIsInstance(getattr(X509CertMixin, self.ext_class.key), cached_property)
def test_as_text(self) -> None:
"""Test as_text()."""
self.assertEqual(
self.ext1.as_text(),
"""* Precertificate ({v[0][version]}):
Timestamp: {v[0][timestamp]}
Log ID: {v[0][log_id]}
* Precertificate ({v[1][version]}):
Timestamp: {v[1][timestamp]}
Log ID: {v[1][log_id]}""".format(
v=self.data1["value"]
),
)
self.assertEqual(
self.ext2.as_text(),
"""* Precertificate ({v[0][version]}):
Timestamp: {v[0][timestamp]}
Log ID: {v[0][log_id]}
* Precertificate ({v[1][version]}):
Timestamp: {v[1][timestamp]}
Log ID: {v[1][log_id]}
* Precertificate ({v[2][version]}):
Timestamp: {v[2][timestamp]}
Log ID: {v[2][log_id]}""".format(
v=self.data2["value"]
),
)
def test_count(self) -> None:
"""Test ext.count()."""
self.assertEqual(self.ext1.count(self.data1["value"][0]), 1)
self.assertEqual(self.ext1.count(self.data2["value"][0]), 0)
self.assertEqual(self.ext1.count(self.cgx1.value[0]), 1)
self.assertEqual(self.ext1.count(self.cgx2.value[0]), 0)
self.assertEqual(self.ext2.count(self.data1["value"][0]), 0)
self.assertEqual(self.ext2.count(self.data2["value"][0]), 1)
self.assertEqual(self.ext2.count(self.cgx1.value[0]), 0)
self.assertEqual(self.ext2.count(self.cgx2.value[0]), 1)
def test_del(self) -> None:
"""Test item deletion (e.g. ``del ext[0]``, not supported here)."""
with self.assertRaises(NotImplementedError):
del self.ext1[0] # type: ignore[no-untyped-call]
with self.assertRaises(NotImplementedError):
del self.ext2[0] # type: ignore[no-untyped-call]
def test_extend(self) -> None:
"""Test ext.extend() (not supported here)."""
with self.assertRaises(NotImplementedError):
self.ext1.extend([]) # type: ignore[no-untyped-call]
with self.assertRaises(NotImplementedError):
self.ext2.extend([]) # type: ignore[no-untyped-call]
def test_extension_type(self) -> None:
"""Test extension_type property."""
self.assertEqual(self.ext1.extension_type, self.cgx1.value)
self.assertEqual(self.ext2.extension_type, self.cgx2.value)
def test_getitem(self) -> None:
"""Test item getter (e.g. ``x = ext[0]``)."""
self.assertEqual(self.ext1[0], self.data1["value"][0])
self.assertEqual(self.ext1[1], self.data1["value"][1])
with self.assertRaises(IndexError):
self.ext1[2] # pylint: disable=pointless-statement
self.assertEqual(self.ext2[0], self.data2["value"][0])
self.assertEqual(self.ext2[1], self.data2["value"][1])
self.assertEqual(self.ext2[2], self.data2["value"][2])
with self.assertRaises(IndexError):
self.ext2[3] # pylint: disable=pointless-statement
def test_getitem_slices(self) -> None:
"""Test getting slices (e.g. ``x = ext[0:1]``)."""
self.assertEqual(self.ext1[:1], self.data1["value"][:1])
self.assertEqual(self.ext2[:2], self.data2["value"][:2])
self.assertEqual(self.ext2[:], self.data2["value"][:])
def test_hash(self) -> None:
"""Test hash()."""
self.assertEqual(hash(self.ext1), hash(self.ext1))
self.assertEqual(hash(self.ext2), hash(self.ext2))
self.assertNotEqual(hash(self.ext1), hash(self.ext2))
def test_in(self) -> None:
"""Test the ``in`` operator."""
for val in self.data1["value"]:
self.assertIn(val, self.ext1)
for val in self.cgx1.value:
self.assertIn(val, self.ext1)
for val in self.data2["value"]:
self.assertIn(val, self.ext2)
for val in self.cgx2.value:
self.assertIn(val, self.ext2)
def test_insert(self) -> None:
"""Test ext.insert() (Not supported here)."""
with self.assertRaises(NotImplementedError):
self.ext1.insert(0, self.data1["value"][0]) # type: ignore[no-untyped-call]
with self.assertRaises(NotImplementedError):
self.ext2.insert(0, self.data2["value"][0]) # type: ignore[no-untyped-call]
    def test_len(self) -> None:
        """Test len(ext)."""
self.assertEqual(len(self.ext1), 2)
self.assertEqual(len(self.ext2), 3)
def test_ne(self) -> None:
"""Test ``!=`` (not-equal) operator."""
self.assertNotEqual(self.ext1, self.ext2)
def test_not_in(self) -> None:
"""Test the ``not in`` operator."""
self.assertNotIn(self.data1["value"][0], self.ext2)
self.assertNotIn(self.data2["value"][0], self.ext1)
self.assertNotIn(self.cgx1.value[0], self.ext2)
self.assertNotIn(self.cgx2.value[0], self.ext1)
def test_pop(self) -> None:
"""Test ext.pop() (Not supported here)."""
with self.assertRaises(NotImplementedError):
self.ext1.pop(self.data1["value"][0]) # type: ignore[no-untyped-call]
with self.assertRaises(NotImplementedError):
self.ext2.pop(self.data2["value"][0]) # type: ignore[no-untyped-call]
def test_remove(self) -> None:
"""Test ext.remove() (Not supported here)."""
with self.assertRaises(NotImplementedError):
self.ext1.remove(self.data1["value"][0]) # type: ignore[no-untyped-call]
with self.assertRaises(NotImplementedError):
self.ext2.remove(self.data2["value"][0]) # type: ignore[no-untyped-call]
def test_repr(self) -> None:
"""Test repr()."""
self.assertEqual(
repr(self.ext1), "<PrecertificateSignedCertificateTimestamps: 2 timestamps, critical=False>"
)
self.assertEqual(
repr(self.ext2), "<PrecertificateSignedCertificateTimestamps: 3 timestamps, critical=False>"
)
with self.patch_object(self.ext2, "critical", True):
self.assertEqual(
repr(self.ext2), "<PrecertificateSignedCertificateTimestamps: 3 timestamps, critical=True>"
)
def test_serialize(self) -> None:
"""Test serialization of extension."""
self.assertEqual(self.ext1.serialize(), self.data1)
self.assertEqual(self.ext2.serialize(), self.data2)
def test_setitem(self) -> None:
"""Test setting items (e.g. ``ext[0] = ...``)."""
with self.assertRaises(NotImplementedError):
self.ext1[0] = self.data2["value"][0]
with self.assertRaises(NotImplementedError):
self.ext2[0] = self.data1["value"][0]
def test_setitem_slices(self) -> None:
"""Test setting slices (not supported here)."""
with self.assertRaises(NotImplementedError):
self.ext1[:] = self.data2
with self.assertRaises(NotImplementedError):
self.ext2[:] = self.data1
def test_str(self) -> None:
"""Test str()."""
self.assertEqual(
str(self.ext1), "<PrecertificateSignedCertificateTimestamps: 2 timestamps, critical=False>"
)
self.assertEqual(
str(self.ext2), "<PrecertificateSignedCertificateTimestamps: 3 timestamps, critical=False>"
)
with self.patch_object(self.ext2, "critical", True):
self.assertEqual(
str(self.ext2), "<PrecertificateSignedCertificateTimestamps: 3 timestamps, critical=True>"
)
class UnknownExtensionTestCase(TestCase):
"""Test UnrecognizedExtension extension."""
def test_basic(self) -> None:
"""Only test basic functionality."""
oid = x509.ObjectIdentifier("1.2.1")
cgext = x509.Extension(
oid=oid, value=x509.UnrecognizedExtension(oid=oid, value=b"unrecognized"), critical=True
)
ext = UnrecognizedExtension(cgext)
self.assertEqual(ext.name, f"Unsupported extension (OID {oid.dotted_string})")
self.assertEqual(ext.as_text(), "Could not parse extension")
self.assertEqual(ext.as_extension(), cgext)
self.assertEqual(
str(ext), f"<Unsupported extension (OID {oid.dotted_string}): <unprintable>, critical=True>"
)
with self.assertRaisesRegex(ValueError, r"^Cannot serialize an unrecognized extension$"):
ext.serialize_value()
name = "my name"
error = "my error"
ext = UnrecognizedExtension(cgext, name=name, error=error)
self.assertEqual(ext.name, name)
self.assertEqual(ext.as_text(), f"Could not parse extension ({error})")
def test_invalid_extension(self) -> None:
"""Test creating from an actually recognized extension."""
value = x509.Extension(
oid=SubjectAlternativeName.oid,
critical=True,
value=x509.SubjectAlternativeName([uri("example.com")]),
)
with self.assertRaisesRegex(TypeError, r"^Extension value must be a x509\.UnrecognizedExtension$"):
UnrecognizedExtension(value) # type: ignore[arg-type]
def test_from_dict(self) -> None:
"""Test that you cannot instantiate this extension from a dict."""
with self.assertRaisesRegex(TypeError, r"Value must be a x509\.Extension instance$"):
UnrecognizedExtension({"value": "foo"}) # type: ignore[arg-type]
def test_abstract_methods(self) -> None:
"""Test overwritten abstract methods that are of no use in this class."""
oid = x509.ObjectIdentifier("1.2.1")
cgext = x509.Extension(
oid=oid, value=x509.UnrecognizedExtension(oid=oid, value=b"unrecognized"), critical=True
)
ext = UnrecognizedExtension(cgext)
with self.assertRaises(NotImplementedError):
ext.from_dict("foo")
with self.assertRaises(NotImplementedError):
ext.from_extension("foo")
class SubjectAlternativeNameTestCase(IssuerAlternativeNameTestCase):
"""Test SubjectAlternativeName extension."""
ext_class = SubjectAlternativeName # type: ignore[assignment]
ext_class_key = "subject_alternative_name"
ext_class_name = "SubjectAlternativeName"
ext_class_type = x509.SubjectAlternativeName # type: ignore[assignment]
uri1 = value1 = "https://example.com"
uri2 = "https://example.net"
dns1 = "example.com"
dns2 = "example.net"
et1 = x509.SubjectAlternativeName([uri(value1)]) # type: ignore[assignment]
test_values = {
"empty": {
"values": [[]],
"expected": [],
"expected_repr": "[]",
"expected_serialized": [],
"expected_text": "",
"extension_type": x509.SubjectAlternativeName([]),
},
"uri": {
"values": [[uri1], [uri(uri1)]],
"expected": [uri(uri1)],
"expected_repr": f"['URI:{uri1}']",
"expected_serialized": [f"URI:{uri1}"],
"expected_text": f"* URI:{uri1}",
"extension_type": x509.SubjectAlternativeName([uri(uri1)]),
},
"dns": {
"values": [[dns1], [dns(dns1)]],
"expected": [dns(dns1)],
"expected_repr": f"['DNS:{dns1}']",
"expected_serialized": [f"DNS:{dns1}"],
"expected_text": f"* DNS:{dns1}",
"extension_type": x509.SubjectAlternativeName([dns(dns1)]),
},
"both": {
"values": [[uri1, dns1], [uri(uri1), dns(dns1)], [uri1, dns(dns1)], [uri(uri1), dns1]],
"expected": [uri(uri1), dns(dns1)],
"expected_repr": f"['URI:{uri1}', 'DNS:{dns1}']",
"expected_serialized": [f"URI:{uri1}", f"DNS:{dns1}"],
"expected_text": f"* URI:{uri1}\n* DNS:{dns1}",
"extension_type": x509.SubjectAlternativeName([uri(uri1), dns(dns1)]),
},
"all": {
"values": [
[uri1, uri2, dns1, dns2],
[uri(uri1), uri(uri2), dns1, dns2],
[uri1, uri2, dns(dns1), dns(dns2)],
[uri(uri1), uri(uri2), dns(dns1), dns(dns2)],
],
"expected": [uri(uri1), uri(uri2), dns(dns1), dns(dns2)],
"expected_repr": f"['URI:{uri1}', 'URI:{uri2}', 'DNS:{dns1}', 'DNS:{dns2}']",
"expected_serialized": [f"URI:{uri1}", f"URI:{uri2}", f"DNS:{dns1}", f"DNS:{dns2}"],
"expected_text": f"* URI:{uri1}\n* URI:{uri2}\n* DNS:{dns1}\n* DNS:{dns2}",
"extension_type": x509.SubjectAlternativeName([uri(uri1), uri(uri2), dns(dns1), dns(dns2)]),
},
"order": { # same as "all" above but other order
"values": [
[dns2, dns1, uri2, uri1],
[dns(dns2), dns(dns1), uri2, uri1],
[dns2, dns1, uri(uri2), uri(uri1)],
[dns(dns2), dns(dns1), uri(uri2), uri(uri1)],
],
"expected": [dns(dns2), dns(dns1), uri(uri2), uri(uri1)],
"expected_repr": f"['DNS:{dns2}', 'DNS:{dns1}', 'URI:{uri2}', 'URI:{uri1}']",
"expected_serialized": [f"DNS:{dns2}", f"DNS:{dns1}", f"URI:{uri2}", f"URI:{uri1}"],
"expected_text": f"* DNS:{dns2}\n* DNS:{dns1}\n* URI:{uri2}\n* URI:{uri1}",
"extension_type": x509.SubjectAlternativeName([dns(dns2), dns(dns1), uri(uri2), uri(uri1)]),
},
}
def test_get_common_name(self) -> None:
"""Test the get_common_name() function."""
common_name = "example.com"
dirname = "dirname:/CN=example.net"
san = SubjectAlternativeName({"value": [common_name]})
self.assertEqual(san.get_common_name(), common_name)
san = SubjectAlternativeName({"value": [common_name, dirname]})
self.assertEqual(san.get_common_name(), common_name)
san = SubjectAlternativeName({"value": [dirname, common_name]})
self.assertEqual(san.get_common_name(), "example.com")
san = SubjectAlternativeName({"value": [dirname]})
self.assertIsNone(san.get_common_name())
class SubjectKeyIdentifierTestCase(ExtensionTestMixin[SubjectKeyIdentifier], TestCase):
"""Test SubjectKeyIdentifier extension."""
ext_class = SubjectKeyIdentifier
ext_class_key = "subject_key_identifier"
ext_class_name = "SubjectKeyIdentifier"
hex1 = "33:33:33:33:33:33"
hex2 = "44:44:44:44:44:44"
hex3 = "55:55:55:55:55:55"
b1 = b"333333"
b2 = b"DDDDDD"
b3 = b"UUUUUU"
test_values = {
"one": {
"values": [
x509.SubjectKeyIdentifier(b1),
hex1,
],
"expected": b1,
"expected_repr": hex1,
"expected_serialized": hex1,
"expected_text": hex1,
"extension_type": x509.SubjectKeyIdentifier(b1),
},
"two": {
"values": [
x509.SubjectKeyIdentifier(b2),
hex2,
],
"expected": b2,
"expected_repr": hex2,
"expected_serialized": hex2,
"expected_text": hex2,
"extension_type": x509.SubjectKeyIdentifier(b2),
},
"three": {
"values": [
x509.SubjectKeyIdentifier(b3),
hex3,
],
"expected": b3,
"expected_repr": hex3,
"expected_serialized": hex3,
"expected_text": hex3,
"extension_type": x509.SubjectKeyIdentifier(b3),
},
}
def test_ski_constructor(self) -> None:
"""Test passing x509.SubjectKeyIdentifier."""
self.assertEqual(
SubjectKeyIdentifier(x509.SubjectKeyIdentifier(self.b1)),
SubjectKeyIdentifier({"value": self.hex1}),
)
self.assertEqual(
SubjectKeyIdentifier(x509.SubjectKeyIdentifier(self.b2)),
SubjectKeyIdentifier({"value": self.hex2}),
)
self.assertEqual(
SubjectKeyIdentifier(x509.SubjectKeyIdentifier(self.b3)),
SubjectKeyIdentifier({"value": self.hex3}),
)
# dict also accepts SKI
self.assertEqual(
SubjectKeyIdentifier(x509.SubjectKeyIdentifier(self.b1)),
SubjectKeyIdentifier({"value": x509.SubjectKeyIdentifier(self.b1)}),
)
self.assertEqual(
SubjectKeyIdentifier(x509.SubjectKeyIdentifier(self.b2)),
SubjectKeyIdentifier({"value": x509.SubjectKeyIdentifier(self.b2)}),
)
self.assertEqual(
SubjectKeyIdentifier(x509.SubjectKeyIdentifier(self.b3)),
SubjectKeyIdentifier({"value": x509.SubjectKeyIdentifier(self.b3)}),
)
class TLSFeatureTestCase(OrderedSetExtensionTestMixin[TLSFeature], ExtensionTestMixin[TLSFeature], TestCase):
"""Test TLSFeature extension."""
ext_class = TLSFeature
ext_class_key = "tls_feature"
ext_class_name = "TLSFeature"
test_values = {
"one": {
"values": [
{
TLSFeatureType.status_request,
},
{
"OCSPMustStaple",
},
],
"extension_type": x509.TLSFeature(features=[TLSFeatureType.status_request]),
"expected": frozenset([TLSFeatureType.status_request]),
"expected_repr": "['OCSPMustStaple']",
"expected_serialized": ["OCSPMustStaple"],
"expected_text": "* OCSPMustStaple",
},
"two": {
"values": [
{TLSFeatureType.status_request, TLSFeatureType.status_request_v2},
{"OCSPMustStaple", "MultipleCertStatusRequest"},
[TLSFeatureType.status_request, TLSFeatureType.status_request_v2],
[TLSFeatureType.status_request_v2, TLSFeatureType.status_request],
["OCSPMustStaple", "MultipleCertStatusRequest"],
["MultipleCertStatusRequest", "OCSPMustStaple"],
],
"extension_type": x509.TLSFeature(
features=[
TLSFeatureType.status_request_v2,
TLSFeatureType.status_request,
]
),
"expected": frozenset([TLSFeatureType.status_request, TLSFeatureType.status_request_v2]),
"expected_repr": "['MultipleCertStatusRequest', 'OCSPMustStaple']",
"expected_serialized": ["MultipleCertStatusRequest", "OCSPMustStaple"],
"expected_text": "* MultipleCertStatusRequest\n* OCSPMustStaple",
},
"three": {
"values": [
{TLSFeatureType.status_request_v2},
{"MultipleCertStatusRequest"},
],
"extension_type": x509.TLSFeature(features=[TLSFeatureType.status_request_v2]),
"expected": frozenset([TLSFeatureType.status_request_v2]),
"expected_repr": "['MultipleCertStatusRequest']",
"expected_serialized": ["MultipleCertStatusRequest"],
"expected_text": "* MultipleCertStatusRequest",
},
}
def test_unknown_values(self) -> None:
"""Test passing unknown values."""
with self.assertRaisesRegex(ValueError, r"^Unknown value: foo$"):
TLSFeature({"value": ["foo"]})
with self.assertRaisesRegex(ValueError, r"^Unknown value: True$"):
TLSFeature({"value": [True]})
|
mathiasertl/django-ca
|
ca/django_ca/tests/tests_extensions.py
|
Python
|
gpl-3.0
| 71,893 | 0.002406 |
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.core.power import Pow
from sympy.core.function import Function
from sympy.core.symbol import Symbol, Wild
from sympy.core.basic import S, C, Atom, sympify
from sympy.core.numbers import Integer, Rational
from sympy.functions import exp, sin , cos , tan , cot , asin
from sympy.functions import log, sinh, cosh, tanh, coth, asinh
from sympy.functions import sqrt, erf
from sympy.solvers import solve
from sympy.simplify import simplify, together
from sympy.polys import Poly, quo, gcd, lcm, root_factors, \
monomials, factor, PolynomialError
from sympy.utilities.iterables import make_list
def components(f, x):
"""Returns a set of all functional components of the given expression
which includes symbols, function applications and compositions and
    non-integer powers. Fractional powers are collected with
minimal, positive exponents.
>>> from sympy import *
>>> x, y = symbols('xy')
>>> components(sin(x)*cos(x)**2, x)
set([x, cos(x), sin(x)])
"""
result = set()
if f.has(x):
if f.is_Symbol:
result.add(f)
elif f.is_Function or f.is_Derivative:
for g in f.args:
result |= components(g, x)
result.add(f)
elif f.is_Pow:
result |= components(f.base, x)
if not f.exp.is_Integer:
if f.exp.is_Rational:
result.add(f.base**Rational(1, f.exp.q))
else:
result |= components(f.exp, x) | set([f])
else:
for g in f.args:
result |= components(g, x)
return result
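# A minimal illustration of the fractional-power handling described above
# (hedged sketch, not a doctest; with x = Symbol('x'), and the printed set
# ordering may differ):
#     components(x**Rational(3, 2) + exp(x), x)
#     # -> set([x, x**(1/2), exp(x)])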
# name -> [] of symbols
_symbols_cache = {}
# NB @cacheit is not convenient here
def _symbols(name, n):
"""get vector of symbols local to this module"""
try:
lsyms = _symbols_cache[name]
except KeyError:
lsyms = []
_symbols_cache[name] = lsyms
while len(lsyms) < n:
lsyms.append( Symbol('%s%i' % (name, len(lsyms)), dummy=True) )
return lsyms[:n]
def heurisch(f, x, **kwargs):
"""Compute indefinite integral using heuristic Risch algorithm.
    This is a heuristic approach to indefinite integration in finite
    terms using the extended heuristic (parallel) Risch algorithm, based
    on Manuel Bronstein's "Poor Man's Integrator".
The algorithm supports various classes of functions including
transcendental elementary or special functions like Airy,
Bessel, Whittaker and Lambert.
    Note that this algorithm is not a decision procedure. If it isn't
    able to compute an antiderivative for a given function, then this is
    not a proof that such a function does not exist. One should use the
    recursive Risch algorithm in such a case. It's an open question if
    this algorithm can be made a full decision procedure.
    This is an internal integrator procedure. You should use the toplevel
    'integrate' function in most cases, as this procedure needs some
    preprocessing steps and otherwise may fail.
    Specification
    =============
heurisch(f, x, rewrite=False, hints=None)
where
f : expression
x : symbol
rewrite -> force rewrite 'f' in terms of 'tan' and 'tanh'
      hints -> a list of functions that may appear in the antiderivative
- hints = None --> no suggestions at all
- hints = [ ] --> try to figure out
- hints = [f1, ..., fn] --> we know better
Examples
========
>>> from sympy import *
>>> x,y = symbols('xy')
>>> heurisch(y*tan(x), x)
y*log(1 + tan(x)**2)/2
See Manuel Bronstein's "Poor Man's Integrator":
[1] http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/index.html
For more information on the implemented algorithm refer to:
[2] K. Geddes, L.Stefanus, On the Risch-Norman Integration
Method and its Implementation in Maple, Proceedings of
ISSAC'89, ACM Press, 212-217.
[3] J. H. Davenport, On the Parallel Risch Algorithm (I),
Proceedings of EUROCAM'82, LNCS 144, Springer, 144-157.
[4] J. H. Davenport, On the Parallel Risch Algorithm (III):
Use of Tangents, SIGSAM Bulletin 16 (1982), 3-6.
[5] J. H. Davenport, B. M. Trager, On the Parallel Risch
Algorithm (II), ACM Transactions on Mathematical
Software 11 (1985), 356-362.
"""
f = sympify(f)
if not f.is_Add:
indep, f = f.as_independent(x)
else:
indep = S.One
if not f.has(x):
return indep * f * x
rewritables = {
(sin, cos, cot) : tan,
(sinh, cosh, coth) : tanh,
}
rewrite = kwargs.pop('rewrite', False)
if rewrite:
for candidates, rule in rewritables.iteritems():
f = f.rewrite(candidates, rule)
else:
for candidates in rewritables.iterkeys():
if f.has(*candidates):
break
else:
rewrite = True
terms = components(f, x)
hints = kwargs.get('hints', None)
if hints is not None:
if not hints:
a = Wild('a', exclude=[x])
b = Wild('b', exclude=[x])
for g in set(terms):
if g.is_Function:
if g.func is exp:
M = g.args[0].match(a*x**2)
if M is not None:
terms.add(erf(sqrt(-M[a])*x))
elif g.is_Pow:
if g.exp.is_Rational and g.exp.q == 2:
M = g.base.match(a*x**2 + b)
if M is not None and M[b].is_positive:
if M[a].is_positive:
terms.add(asinh(sqrt(M[a]/M[b])*x))
elif M[a].is_negative:
terms.add(asin(sqrt(-M[a]/M[b])*x))
else:
terms |= set(hints)
for g in set(terms):
terms |= components(g.diff(x), x)
V = _symbols('x', len(terms))
mapping = dict(zip(terms, V))
rev_mapping = {}
for k, v in mapping.iteritems():
rev_mapping[v] = k
def substitute(expr):
return expr.subs(mapping)
diffs = [ substitute(simplify(g.diff(x))) for g in terms ]
denoms = [ g.as_numer_denom()[1] for g in diffs ]
denom = reduce(lambda p, q: lcm(p, q, V), denoms)
numers = [ Poly.cancel(denom * g, *V) for g in diffs ]
def derivation(h):
return Add(*[ d * h.diff(v) for d, v in zip(numers, V) ])
def deflation(p):
for y in V:
if not p.has_any_symbols(y):
continue
if derivation(p) is not S.Zero:
c, q = p.as_poly(y).as_primitive()
return deflation(c)*gcd(q, q.diff(y))
else:
return p
def splitter(p):
for y in V:
if not p.has_any_symbols(y):
continue
if derivation(y) is not S.Zero:
c, q = p.as_poly(y).as_primitive()
q = q.as_basic()
h = gcd(q, derivation(q), y)
s = quo(h, gcd(q, q.diff(y), y), y)
c_split = splitter(c)
if s.as_poly(y).degree == 0:
return (c_split[0], q * c_split[1])
q_split = splitter(Poly.cancel((q, s), *V))
return (c_split[0]*q_split[0]*s, c_split[1]*q_split[1])
else:
return (S.One, p)
special = {}
for term in terms:
if term.is_Function:
if term.func is tan:
special[1 + substitute(term)**2] = False
elif term.func is tanh:
special[1 + substitute(term)] = False
special[1 - substitute(term)] = False
elif term.func is C.LambertW:
special[substitute(term)] = True
F = substitute(f)
P, Q = F.as_numer_denom()
u_split = splitter(denom)
v_split = splitter(Q)
polys = list(v_split) + [ u_split[0] ] + special.keys()
s = u_split[0] * Mul(*[ k for k, v in special.iteritems() if v ])
a, b, c = [ p.as_poly(*V).degree for p in [s, P, Q] ]
poly_denom = s * v_split[0] * deflation(v_split[1])
def exponent(g):
if g.is_Pow:
if g.exp.is_Rational and g.exp.q != 1:
if g.exp.p > 0:
return g.exp.p + g.exp.q - 1
else:
return abs(g.exp.p + g.exp.q)
else:
return 1
elif not g.is_Atom:
return max([ exponent(h) for h in g.args ])
else:
return 1
A, B = exponent(f), a + max(b, c)
if A > 1 and B > 1:
monoms = monomials(V, A + B - 1)
else:
monoms = monomials(V, A + B)
poly_coeffs = _symbols('A', len(monoms))
poly_part = Add(*[ poly_coeffs[i]*monomial
for i, monomial in enumerate(monoms) ])
reducibles = set()
for poly in polys:
if poly.has(*V):
try:
factorization = factor(poly, *V)
except PolynomialError:
factorization = poly
if factorization.is_Mul:
reducibles |= set(factorization.args)
else:
reducibles.add(factorization)
def integrate(field=None):
irreducibles = set()
for poly in reducibles:
for z in poly.atoms(Symbol):
if z in V:
break
else:
continue
irreducibles |= set(root_factors(poly, z, domain=field))
log_coeffs, log_part = [], []
B = _symbols('B', len(irreducibles))
for i, poly in enumerate(irreducibles):
if poly.has(*V):
log_coeffs.append(B[i])
log_part.append(log_coeffs[-1] * log(poly))
coeffs = poly_coeffs + log_coeffs
candidate = poly_part/poly_denom + Add(*log_part)
h = together(F - derivation(candidate) / denom)
numer = h.as_numer_denom()[0].expand()
equations = {}
for term in make_list(numer, Add):
coeff, dependent = term.as_independent(*V)
if dependent in equations:
equations[dependent] += coeff
else:
equations[dependent] = coeff
solution = solve(equations.values(), *coeffs)
if solution is not None:
return (solution, candidate, coeffs)
else:
return None
if not (F.atoms(Symbol) - set(V)):
result = integrate('Q')
if result is None:
result = integrate()
else:
result = integrate()
if result is not None:
(solution, candidate, coeffs) = result
antideriv = candidate.subs(solution)
for coeff in coeffs:
if coeff not in solution:
antideriv = antideriv.subs(coeff, S.Zero)
antideriv = antideriv.subs(rev_mapping)
antideriv = simplify(antideriv).expand()
if antideriv.is_Add:
antideriv = antideriv.as_independent(x)[1]
return indep * antideriv
else:
if not rewrite:
result = heurisch(f, x, rewrite=True, **kwargs)
if result is not None:
return indep * result
return None
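# A hedged usage sketch of the `hints` behaviour documented above (illustrative
# only; the exact output formatting depends on the printer):
#     heurisch(exp(-x**2), x, hints=[])   # expected to yield sqrt(pi)*erf(x)/2
#     heurisch(exp(-x**2), x)             # hints=None: no guessing, expected to return None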
|
jbaayen/sympy
|
sympy/integrals/risch.py
|
Python
|
bsd-3-clause
| 11,561 | 0.002681 |
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the main global admin page."""
import copy
import config
import view_tests_base
class AdminGlobalIndexViewTests(view_tests_base.ViewTestsBase):
"""Tests the global admin index view."""
_PRIOR_CONFIG = {
'sms_number_to_repo': '{"+15551234567": "haiti"}',
'repo_aliases': '{"h": "haiti"}',
'brand': 'none',
'privacy_policy_url': 'www.example.com/privacy',
'tos_url': 'www.example.com/tos',
'feedback_url': 'www.example.com/feedback',
'captcha_site_key': 'captcha-key',
'captcha_secret_key': 'captcha-secret-key',
'analytics_id': 'analytics-id',
'amp_gtm_id': 'amp-gtm-id',
'maps_api_key': 'maps-api-key',
'translate_api_key': 'translate-api-key',
'notification_email': 'notifications@example.com',
'unreviewed_notes_threshold': 12,
}
_BASE_POST_PARAMS = {
'sms_number_to_repo': '{"+15551234567": "haiti"}',
'repo_aliases': '{"h": "haiti"}',
'brand': 'none',
'privacy_policy_url': 'www.example.com/privacy',
'tos_url': 'www.example.com/tos',
'feedback_url': 'www.example.com/feedback',
'captcha_site_key': 'captcha-key',
'captcha_secret_key': 'captcha-secret-key',
'analytics_id': 'analytics-id',
'amp_gtm_id': 'amp-gtm-id',
'maps_api_key': 'maps-api-key',
'translate_api_key': 'translate-api-key',
'notification_email': 'notifications@example.com',
'unreviewed_notes_threshold': '12',
}
def setUp(self):
super(AdminGlobalIndexViewTests, self).setUp()
self.data_generator.repo()
config.set_for_repo('*', **AdminGlobalIndexViewTests._PRIOR_CONFIG)
self.login_as_superadmin()
def test_get(self):
"""Tests GET requests."""
resp = self.client.get('/global/admin/', secure=True)
self.assertEqual(
resp.context.get('sms_config'), {
'sms_number_to_repo': '"{\\"+15551234567\\": \\"haiti\\"}"',
})
self.assertEqual(
resp.context.get('repo_alias_config'), {
'repo_aliases': '"{\\"h\\": \\"haiti\\"}"',
})
self.assertEqual(
resp.context.get('site_info_config'), {
'brand': 'none',
'privacy_policy_url': 'www.example.com/privacy',
'tos_url': 'www.example.com/tos',
'feedback_url': 'www.example.com/feedback',
})
self.assertEqual(
resp.context.get('recaptcha_config'), {
'captcha_site_key': 'captcha-key',
'captcha_secret_key': 'captcha-secret-key',
})
self.assertEqual(
resp.context.get('ganalytics_config'), {
'analytics_id': 'analytics-id',
'amp_gtm_id': 'amp-gtm-id',
})
self.assertEqual(
resp.context.get('gmaps_config'), {
'maps_api_key': 'maps-api-key',
})
self.assertEqual(
resp.context.get('gtranslate_config'), {
'translate_api_key': 'translate-api-key',
})
self.assertEqual(
resp.context.get('notification_config'), {
'notification_email': 'notifications@example.com',
'unreviewed_notes_threshold': '12',
})
def test_edit_sms_config(self):
self._post_with_params(sms_number_to_repo='{"+1800pfhaiti": "haiti"}')
conf = config.Configuration('*')
self.assertEqual(conf.sms_number_to_repo, {'+1800pfhaiti': 'haiti'})
def test_edit_repo_alias_config(self):
self._post_with_params(repo_aliases='{"e": "ecuador"}')
conf = config.Configuration('*')
self.assertEqual(conf.repo_aliases, {'e': 'ecuador'})
def test_edit_site_info_config(self):
self._post_with_params(
brand='google',
privacy_policy_url='othersite.org/privacy',
tos_url='othersite.org/tos',
feedback_url='othersite.org/feedback')
conf = config.Configuration('*')
self.assertEqual(conf.brand, 'google')
self.assertEqual(conf.privacy_policy_url, 'othersite.org/privacy')
self.assertEqual(conf.tos_url, 'othersite.org/tos')
self.assertEqual(conf.feedback_url, 'othersite.org/feedback')
def test_edit_recaptcha_config(self):
self._post_with_params(
captcha_site_key='NEW-captcha-key',
captcha_secret_key='NEW-captcha-secret-key')
conf = config.Configuration('*')
self.assertEqual(conf.captcha_site_key, 'NEW-captcha-key')
self.assertEqual(conf.captcha_secret_key, 'NEW-captcha-secret-key')
def test_edit_ganalytics_config(self):
self._post_with_params(
analytics_id='NEW-analytics-id',
amp_gtm_id='NEW-amp-gtm-id')
conf = config.Configuration('*')
self.assertEqual(conf.analytics_id, 'NEW-analytics-id')
self.assertEqual(conf.amp_gtm_id, 'NEW-amp-gtm-id')
def test_edit_gmaps_config(self):
self._post_with_params(maps_api_key='NEW-maps-api-key')
conf = config.Configuration('*')
self.assertEqual(conf.maps_api_key, 'NEW-maps-api-key')
def test_edit_gtranslate_config(self):
self._post_with_params(translate_api_key='NEW-translate-api-key')
conf = config.Configuration('*')
self.assertEqual(conf.translate_api_key, 'NEW-translate-api-key')
def test_edit_notification_config(self):
self._post_with_params(
notification_email='notifications@othersite.org',
unreviewed_notes_threshold='86')
conf = config.Configuration('*')
self.assertEqual(conf.notification_email, 'notifications@othersite.org')
self.assertEqual(conf.unreviewed_notes_threshold, 86)
def _post_with_params(self, **kwargs):
get_doc = self.to_doc(self.client.get('/global/admin', secure=True))
xsrf_token = get_doc.cssselect_one('input[name="xsrf_token"]').get(
'value')
post_params = copy.deepcopy(AdminGlobalIndexViewTests._BASE_POST_PARAMS)
post_params['xsrf_token'] = xsrf_token
post_params.update(kwargs)
return self.client.post('/global/admin/', post_params, secure=True)
|
google/personfinder
|
tests/views/test_admin_global_index.py
|
Python
|
apache-2.0
| 6,931 | 0.000289 |
from .. import TestCase
# NOTE: this test is not run by the tester because the name of this file
# does not match the testpattern regex in TestLoader.discover
class TestBar(TestCase):
def test_print(self):
self.assertTrue(3+4 > 2)
|
thisch/python-falafel
|
examples/project2/mypackage/modb/bar.py
|
Python
|
bsd-2-clause
| 246 | 0 |
from supervisorerrormiddleware import SupervisorErrorMiddleware
import os
import sys
import paste.fixture
class DummyOutput:
def __init__(self):
self._buffer = []
def write(self, data):
self._buffer.append(data)
def flush(self):
self._buffer = []
def bad_app(environ, start_response):
if environ['PATH_INFO'] != '/good':
raise Exception("Bad Kitty")
else:
start_response("200 OK", [('Content-type', 'text/html')])
return ["Good Kitty"]
def test_without_supervisor():
old_stdout = sys.stdout
try:
sys.stdout = DummyOutput()
app = bad_app
app = SupervisorErrorMiddleware(app)
app = paste.fixture.TestApp(app)
failed = False
try:
app.get("/")
except:
failed = True
assert failed
output = "".join(sys.stdout._buffer)
sys.stdout.flush()
assert not "Bad Kitty" in output
assert not "GET" in output
response = app.get("/good")
output = "".join(sys.stdout._buffer)
sys.stdout.flush()
response.mustcontain("Good Kitty")
assert not "Bad Kitty" in output
assert not "GET" in output
finally:
sys.stdout = old_stdout
def test_with_supervisor():
#Why is there output when stdout is redirected? Because
#paste.fixture.TestApp gets around the redirection.
old_stdout = sys.stdout
try:
os.environ['SUPERVISOR_ENABLED'] = "1" #fake supervisor
sys.stdout = DummyOutput()
app = bad_app
app = SupervisorErrorMiddleware(app)
app = paste.fixture.TestApp(app)
failed = False
try:
app.get("/")
except:
failed = True
assert failed
output = "".join(sys.stdout._buffer)
sys.stdout.flush()
assert "Bad Kitty" in output
assert "GET" in output
response = app.get("/good")
output = "".join(sys.stdout._buffer)
sys.stdout.flush()
response.mustcontain("Good Kitty")
assert not "Bad Kitty" in output
assert not "GET" in output
finally:
sys.stdout = old_stdout
del os.environ['SUPERVISOR_ENABLED']
|
socialplanning/SupervisorErrorMiddleware
|
supervisorerrormiddleware/tests/test.py
|
Python
|
gpl-2.0
| 2,244 | 0.008021 |
# coding=utf-8
# ---------------------------------------------------------------
# Developer: Arannã Sousa Santos
# Month: 12
# Year: 2015
# Project: pagseguro_xml
# e-mail: asousas@live.com
# ---------------------------------------------------------------
from .test_detalhes_v3 import ClasseTransacaoDetalhesTest
from .test_historico_v2 import ClasseTransacaoHistoricoTest
from .test_abandonadas_v2 import ClasseTransacaoAbandonadasTest
|
arannasousa/pagseguro_xml
|
pagseguro_xml/tests/test_classes_consultas/__init__.py
|
Python
|
gpl-2.0
| 490 | 0.002049 |
"""
Class for storing the static results of a Tarjan ordering.
"""
from scipy.sparse.csc import csc_matrix
from scipy.sparse.csr import csr_matrix
from scipy.sparse.linalg import inv, factorized, spsolve
from scipy.sparse import eye
from scipy.io import savemat, loadmat
import os
from collections import namedtuple
from ..engine import BackgroundEngine
from lcatools.interfaces import CONTEXT_STATUS_
from lcatools import from_json, to_json, comp_dir
SUPPORTED_FILETYPES = ('.mat', )
_FLATTEN_AF = False
class NoLciDatabase(Exception):
pass
class TermRef(object):
def __init__(self, flow_ref, direction, term_ref, scc_id=None):
"""
:param flow_ref:
:param direction: direction w.r.t. term
:param term_ref:
:param scc_id: None or 0 for singleton /emission; external_ref of a contained process for SCC
"""
self._f = flow_ref
self._d = {'Input': 0, 'Output': 1, 0: 0, 1: 1}[direction]
self._t = term_ref
self._s = 0
self.scc_id = scc_id
@property
def term_ref(self):
return self._t
@property
def flow_ref(self):
return self._f
@property
def direction(self):
return ('Input', 'Output')[self._d]
@property
def scc_id(self):
if self._s == 0:
return []
return self._s
@scc_id.setter
def scc_id(self, item):
if item is None:
self._s = 0
else:
self._s = item
def __array__(self):
return self.flow_ref, self._d, self.term_ref, self._s
def __iter__(self):
return iter(self.__array__())
ExchDef = namedtuple('ExchDef', ('process', 'flow', 'direction', 'term', 'value'))
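# Illustrative sketch (not part of the original module; the identifiers below
# are invented placeholders).  TermRef normalizes a flow/direction/term triple,
# and ExchDef is the 5-tuple yielded by the exchange generators further down:
#
#   tr = TermRef('water', 'Input', 'proc_a')     # singleton: scc_id stays 0
#   tuple(tr)      -> ('water', 0, 'proc_a', 0)  # via __array__ / __iter__
#   tr.direction   -> 'Input'
#   ExchDef(process='proc_a', flow='water', direction='Input',
#           term=None, value=1.0)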
def _iterate_a_matrix(a, y, threshold=1e-8, count=100, quiet=False, solver=None):
if solver == 'spsolve':
ima = eye(a.shape[0]) - a
x = spsolve(ima, y)
return csr_matrix(x).T
y = csr_matrix(y) # tested this with ecoinvent: convert to sparse: 280 ms; keep full: 4.5 sec
total = csr_matrix(y.shape)
if a is None:
return total
mycount = 0
sumtotal = 0.0
while mycount < count:
total += y
y = a.dot(y)
inc = sum(abs(y).data)
if inc == 0:
if not quiet:
print('exact result')
break
sumtotal += inc
if inc / sumtotal < threshold:
break
mycount += 1
if not quiet:
print('completed %d iterations' % mycount)
return total
def _unit_column_vector(dim, inx):
return csr_matrix(((1,), ((inx,), (0,))), shape=(dim, 1))
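# Minimal usage sketch for the two helpers above (illustrative only, not from
# the original sources; the matrix is a random stand-in).  The iteration sums
# the series y + A y + A^2 y + ... and so approximates column j of (I - A)^-1,
# provided the spectral radius of A is below 1:
#
#   from scipy.sparse import random as sparse_random
#   a = sparse_random(50, 50, density=0.02, format='csr') * 0.1
#   y = _unit_column_vector(50, 7)
#   x_series = _iterate_a_matrix(a, y, quiet=True)        # truncated series
#   x_direct = _iterate_a_matrix(a, y, solver='spsolve')  # direct sparse solve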
def split_af(_af, _inds):
"""
splits the input matrix into diagonal and off-diagonal portions, with the split being determined by _inds
:param _af:
:param _inds:
:return:
"""
_af = _af.tocoo()
_r = _af.row
_c = _af.col
_d = _af.data
_d_non = []
_d_scc = []
_shape = _af.shape
for i in range(len(_d)):
if _r[i] in _inds and _c[i] in _inds:
_d_non.append(0)
_d_scc.append(_d[i])
else:
_d_non.append(_d[i])
_d_scc.append(0)
_af_non = csc_matrix((_d_non, (_r, _c)), shape=_shape)
_af_scc = csc_matrix((_d_scc, (_r, _c)), shape=_shape)
assert (_af_non + _af_scc - _af).nnz == 0
return _af_non, _af_scc
def _determine_scc_inds(ts):
scc_inds = set()
for _s in ts.nontrivial_sccs():
if ts.is_background_scc(_s):
continue
for k in ts.scc(_s):
scc_inds.add(ts.fg_dict(k.index))
return scc_inds
def flatten(af, ad, bf, ts):
"""
Accepts a fully populated background engine as argument
:param af:
:param ad:
:param bf:
:param ts:
:return: af_flat, ad_flat, bf_flat
"""
scc_inds = _determine_scc_inds(ts)
non, scc = split_af(af, scc_inds)
scc_inv = inv(eye(ts.pdim).tocsc() - scc)
return non * scc_inv, ad * scc_inv, bf * scc_inv
class FlatBackground(object):
"""
Static, ordered background stored in an easily serializable way
"""
@classmethod
def from_index(cls, index, **kwargs):
"""
:param index: an index interface with operable processes() and terminate()
:param kwargs: origin, quiet
:return:
"""
be = BackgroundEngine(index)
be.add_all_ref_products()
return cls.from_background_engine(be, **kwargs)
@classmethod
def from_background_engine(cls, be, **kwargs):
af, ad, bf = be.make_foreground()
if _FLATTEN_AF:
af, ad, bf = flatten(af, ad, bf, be.tstack)
_map_nontrivial_sccs = {k: be.product_flow(k).process.external_ref for k in be.tstack.nontrivial_sccs()}
def _make_term_ref(pf):
try:
_scc_id = _map_nontrivial_sccs[be.tstack.scc_id(pf)]
except KeyError:
_scc_id = 0
return pf.flow.external_ref, pf.direction, pf.process.external_ref, _scc_id
def _make_term_ext(em):
try:
comp = em.compartment[-1]
except IndexError:
comp = None
return em.flow.external_ref, comp_dir(em.direction), comp, 0
return cls([_make_term_ref(x) for x in be.foreground_flows(outputs=False)],
[_make_term_ref(x) for x in be.background_flows()],
[_make_term_ext(x) for x in be.emissions],
af, ad, bf,
lci_db=be.lci_db,
**kwargs)
@classmethod
def from_file(cls, file, **kwargs):
ext = os.path.splitext(file)[1]
if ext == '.mat':
return cls.from_matfile(file, **kwargs)
elif ext == '.hdf':
return cls.from_hdf5(file, **kwargs)
else:
raise ValueError('Unsupported file type %s' % ext)
@classmethod
def from_hdf5(cls, fle, quiet=True):
raise NotImplementedError
@classmethod
def from_matfile(cls, file, quiet=True):
d = loadmat(file)
if 'A' in d:
lci_db = (d['A'].tocsr(), d['B'].tocsr())
else:
lci_db = None
ix = from_json(file + '.index.json.gz')
'''
def _unpack_term_ref(arr):
_xt = arr[3][0]
if len(_xt) == 1:
_xt = _xt[0]
return arr[0][0], arr[1][0][0], arr[2][0], _xt
return cls((_unpack_term_ref(f) for f in d['foreground']),
(_unpack_term_ref(f) for f in d['background']),
(_unpack_term_ref(f) for f in d['exterior']),
d['Af'].tocsr(), d['Ad'].tocsr(), d['Bf'].tocsr(),
lci_db=lci_db,
quiet=quiet)
'''
return cls(ix['foreground'], ix['background'], ix['exterior'],
d['Af'].tocsr(), d['Ad'].tocsr(), d['Bf'].tocsr(),
lci_db=lci_db,
quiet=quiet)
def __init__(self, foreground, background, exterior, af, ad, bf, lci_db=None, quiet=True):
"""
:param foreground: iterable of foreground Product Flows as TermRef params
:param background: iterable of background Product Flows as TermRef params
:param exterior: iterable of Exterior flows as TermRef params
:param af: sparse, flattened Af
:param ad: sparse, flattened Ad
:param bf: sparse, flattened Bf
:param lci_db: [None] optional (A, B) 2-tuple
:param quiet: [True] does nothing for now
"""
self._fg = tuple([TermRef(*f) for f in foreground])
self._bg = tuple([TermRef(*x) for x in background])
self._ex = tuple([TermRef(*x) for x in exterior])
self._af = af
self._ad = ad
self._bf = bf
if lci_db is None:
self._A = None
self._B = None
else:
self._A = lci_db[0].tocsr()
self._B = lci_db[1].tocsr()
self._lu = None # store LU decomposition
self._fg_index = {(k.term_ref, k.flow_ref): i for i, k in enumerate(self._fg)}
self._bg_index = {(k.term_ref, k.flow_ref): i for i, k in enumerate(self._bg)}
self._ex_index = {(k.term_ref, k.flow_ref): i for i, k in enumerate(self._ex)}
self._quiet = quiet
def index_of(self, term_ref, flow_ref):
key = (term_ref, flow_ref)
if key in self._fg_index:
return self._fg_index[key]
elif key in self._bg_index:
return self._bg_index[key]
elif key in self._ex_index:
return self._ex_index[key]
else:
raise KeyError('Unknown termination %s, %s' % key)
@property
def _complete(self):
return self._A is not None and self._B is not None
@property
def ndim(self):
return len(self._bg)
@property
def pdim(self):
return len(self._fg)
@property
def fg(self):
return self._fg
@property
def bg(self):
return self._bg
@property
def ex(self):
return self._ex
def is_in_scc(self, process, ref_flow):
if self.is_in_background(process, ref_flow):
tr = self._bg[self._bg_index[(process, ref_flow)]]
else:
tr = self._fg[self._fg_index[(process, ref_flow)]]
return len(tr.scc_id) > 0
def is_in_background(self, process, ref_flow):
return (process, ref_flow) in self._bg_index
def foreground(self, process, ref_flow, traverse=False):
"""
        Most of the way toward making exchanges. Yields a sequence of 5-tuples defining terminated exchanges.
NOTE: traverse=True differs from the prior implementation because the old BackgroundEngine returned an Af
matrix and the foreground routine generated one exchange per matrix entry.
In contrast, the current implementation traverses the foreground and creates one exchange per traversal link.
If a fragment references the same subfragment multiple times, this will result in redundant entries for the
same fragment. At the moment this is by design but it may be undesirable.
An easy solution would be to keep a log of nonzero Af indices and 'continue' if one is encountered.
:param process:
:param ref_flow:
:param traverse: [False] if True, generate one exchange for every traversal link. Default is to create one
exchange for every matrix entry. traverse=True will produce duplicate exchanges in cases where sub-fragments
are traversed multiple times.
:return:
"""
if _FLATTEN_AF is False and traverse is True:
print('Warning: traversal of foreground SCC will never terminate')
index = self._fg_index[process, ref_flow]
yield ExchDef(process, ref_flow, self._fg[index].direction, None, 1.0)
cols_seen = set()
cols_seen.add(index)
q = [index]
while len(q) > 0:
current = q.pop(0)
node = self._fg[current]
fg_deps = self._af[:, current]
rows, cols = fg_deps.nonzero()
for i in range(len(rows)):
assert cols[i] == 0 # 1-column slice
if _FLATTEN_AF:
assert rows[i] > current # well-ordered and flattened
if rows[i] in cols_seen:
if traverse:
q.append(rows[i]) # allow fragment to be traversed multiple times
else:
cols_seen.add(rows[i])
q.append(rows[i])
term = self._fg[rows[i]]
dat = fg_deps.data[i]
if dat < 0:
dat *= -1
dirn = term.direction # comp directions w.r.t. parent node
else:
dirn = comp_dir(term.direction) # comp directions w.r.t. parent node
yield ExchDef(node.term_ref, term.flow_ref, dirn, term.term_ref, dat)
@staticmethod
def _generate_exch_defs(node_ref, data_vec, enumeration):
rows, cols = data_vec.nonzero()
assert all(cols == 0)
for i in range(len(rows)):
term = enumeration[rows[i]]
dat = data_vec.data[i]
if dat < 0:
dat *= -1
dirn = term.direction
else:
dirn = comp_dir(term.direction)
yield ExchDef(node_ref, term.flow_ref, dirn, term.term_ref, dat)
@staticmethod
def _generate_em_defs(node_ref, data_vec, enumeration):
"""
Emissions have a natural direction which should not be changed.
:param node_ref:
:param data_vec:
:param enumeration:
:return:
"""
rows, cols = data_vec.nonzero()
assert all(cols == 0)
for i in range(len(rows)):
term = enumeration[rows[i]]
dat = data_vec.data[i]
dirn = comp_dir(term.direction)
if CONTEXT_STATUS_ == 'compat':
_term = None
else:
_term = term.term_ref
yield ExchDef(node_ref, term.flow_ref, dirn, _term, dat)
def consumers(self, process, ref_flow):
idx = self.index_of(process, ref_flow)
if self.is_in_background(process, ref_flow):
for i in self._ad[idx, :].nonzero()[1]:
yield self.fg[i]
for i in self._A[idx, :].nonzero()[1]:
yield self.bg[i]
else:
for i in self._af[idx, :].nonzero()[1]:
yield self.fg[i]
def dependencies(self, process, ref_flow):
if self.is_in_background(process, ref_flow):
index = self._bg_index[process, ref_flow]
fg_deps = []
bg_deps = self._A[:, index]
else:
index = self._fg_index[process, ref_flow]
fg_deps = self._af[:, index]
bg_deps = self._ad[:, index]
for x in self._generate_exch_defs(process, fg_deps, self._fg):
yield x
for x in self._generate_exch_defs(process, bg_deps, self._bg):
yield x
def emissions(self, process, ref_flow):
if self.is_in_background(process, ref_flow):
index = self._bg_index[process, ref_flow]
ems = self._B[:, index]
else:
index = self._fg_index[process, ref_flow]
ems = self._bf[:, index]
for x in self._generate_em_defs(process, ems, self._ex):
yield x
def _x_tilde(self, process, ref_flow, quiet=True, **kwargs):
index = self._fg_index[process, ref_flow]
return _iterate_a_matrix(self._af, _unit_column_vector(self.pdim, index), quiet=quiet, **kwargs)
def ad(self, process, ref_flow, **kwargs):
if self.is_in_background(process, ref_flow):
for x in self.dependencies(process, ref_flow):
yield x
else:
ad_tilde = self._ad.dot(self._x_tilde(process, ref_flow, **kwargs))
for x in self._generate_exch_defs(process, ad_tilde, self._bg):
yield x
def bf(self, process, ref_flow, **kwargs):
if self.is_in_background(process, ref_flow):
for x in self.emissions(process, ref_flow):
yield x
else:
bf_tilde = self._bf.dot(self._x_tilde(process, ref_flow, **kwargs))
for x in self._generate_em_defs(process, bf_tilde, self._ex):
yield x
def _compute_bg_lci(self, ad, solver=None, **kwargs):
if solver == 'factorize':
if self._lu is None:
ima = eye(self._A.shape[0]) - self._A
self._lu = factorized(ima.tocsc())
if self._lu is None:
bx = _iterate_a_matrix(self._A, ad, solver=solver, **kwargs)
else:
bx = csr_matrix(self._lu(ad.toarray().flatten())).T
return self._B.dot(bx)
def _compute_lci(self, process, ref_flow, **kwargs):
if self.is_in_background(process, ref_flow):
if not self._complete:
raise NoLciDatabase
ad = _unit_column_vector(self.ndim, self._bg_index[process, ref_flow])
bx = self._compute_bg_lci(ad, **kwargs)
return bx
else:
x_tilde = self._x_tilde(process, ref_flow, **kwargs)
ad_tilde = self._ad.dot(x_tilde)
bf_tilde = self._bf.dot(x_tilde)
if self._complete:
bx = self._compute_bg_lci(ad_tilde, **kwargs)
return bx + bf_tilde
else:
return bf_tilde
def lci(self, process, ref_flow, **kwargs):
for x in self._generate_em_defs(process,
self._compute_lci(process, ref_flow, **kwargs),
self._ex):
yield x
def _write_index(self, ix_filename):
ix = {'foreground': [tuple(f) for f in self._fg],
'background': [tuple(f) for f in self._bg],
'exterior': [tuple(f) for f in self._ex]}
to_json(ix, ix_filename, gzip=True)
def _write_mat(self, filename, complete=True):
d = {'Af': csr_matrix((0, 0)) if self._af is None else self._af,
'Ad': csr_matrix((0, 0)) if self._ad is None else self._ad,
'Bf': csr_matrix((0, 0)) if self._bf is None else self._bf}
if complete and self._complete:
d['A'] = self._A
d['B'] = self._B
savemat(filename, d)
def write_to_file(self, filename, complete=True):
filetype = os.path.splitext(filename)[1]
if filetype not in SUPPORTED_FILETYPES:
raise ValueError('Unsupported file type %s' % filetype)
if filetype == '.mat':
self._write_mat(filename, complete=complete)
else:
raise ValueError('Unsupported file type %s' % filetype)
self._write_index(filename + '.index.json.gz')
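# Usage sketch (illustrative; the file names are placeholders).  A background
# serialized with write_to_file() can be reloaded with from_file(), which reads
# both the .mat payload and the companion .index.json.gz written above:
#
#   fb = FlatBackground.from_file('background.mat')
#   fb.write_to_file('background_copy.mat', complete=True)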
|
bkuczenski/lca-tools
|
antelope_background/background/flat_background.py
|
Python
|
gpl-2.0
| 18,042 | 0.001718 |
#!/usr/bin/env python
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import ast
import jinja2
import logging
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.append(SCRIPT_DIR + "/../tests")
from utils import platform
# the log format for the logging module
LOG_FORMAT = "%(levelname)s [Line %(lineno)d]: %(message)s"
# Read all implementation templates
TEMPLATES = {}
# Temporary reserved column names
RESERVED = ["n", "index"]
# Set the platform in osquery-language
PLATFORM = platform()
# Supported SQL types for spec
class DataType(object):
def __init__(self, affinity, cpp_type="std::string"):
        '''A column datatype is a pair of a SQL affinity and a C++ type.'''
self.affinity = affinity
self.type = cpp_type
def __repr__(self):
return self.affinity
# Define column-type MACROs for the table specs
TEXT = DataType("TEXT")
DATE = DataType("TEXT")
DATETIME = DataType("TEXT")
INTEGER = DataType("INTEGER", "int")
BIGINT = DataType("BIGINT", "long long int")
UNSIGNED_BIGINT = DataType("UNSIGNED_BIGINT", "long long unsigned int")
DOUBLE = DataType("DOUBLE", "double")
# Define table-category MACROS from the table specs
UNKNOWN = "UNKNOWN"
UTILITY = "UTILITY"
SYSTEM = "SYSTEM"
NETWORK = "NETWORK"
EVENTS = "EVENTS"
APPLICATION = "APPLICATION"
def usage():
""" print program usage """
print(
"Usage: {0!s} <spec.table> <file.cpp> [disable_blacklist]".format(sys.argv[0]))
def to_camel_case(snake_case):
""" convert a snake_case string to camelCase """
components = snake_case.split('_')
return components[0] + "".join(x.title() for x in components[1:])
def lightred(msg):
return "\033[1;31m {0!s} \033[0m".format(str(msg))
def is_blacklisted(table_name, path=None, blacklist=None):
"""Allow blacklisting by tablename."""
if blacklist is None:
specs_path = os.path.dirname(path)
if os.path.basename(specs_path) != "specs":
specs_path = os.path.basename(specs_path)
blacklist_path = os.path.join(specs_path, "blacklist")
if not os.path.exists(blacklist_path):
return False
try:
with open(blacklist_path, "r") as fh:
blacklist = [
line.strip() for line in fh.read().split("\n")
if len(line.strip()) > 0 and line.strip()[0] != "#"
]
except:
# Blacklist is not readable.
return False
if not blacklist:
return False
# table_name based blacklisting!
for item in blacklist:
item = item.split(":")
# If this item is restricted to a platform and the platform
# and table name match
if len(item) > 1 and PLATFORM == item[0] and table_name == item[1]:
return True
elif len(item) == 1 and table_name == item[0]:
return True
return False
def setup_templates(templates_path):
if not os.path.exists(templates_path):
templates_path = os.path.join(os.path.dirname(tables_path), "templates")
if not os.path.exists(templates_path):
print ("Cannot read templates path: {0!s}".format((templates_path)))
exit(1)
for template in os.listdir(templates_path):
template_name = template.split(".", 1)[0]
with open(os.path.join(templates_path, template), "rb") as fh:
TEMPLATES[template_name] = fh.read().replace("\\\n", "")
class Singleton(object):
"""
Make sure that anything that subclasses Singleton can only be instantiated
once
"""
_instance = None
def __new__(self, *args, **kwargs):
if not self._instance:
self._instance = super(Singleton, self).__new__(
self, *args, **kwargs)
return self._instance
class TableState(Singleton):
"""
    Maintain the state of the table commands during the execution of
the config file
"""
def __init__(self):
self.table_name = ""
self.schema = []
self.header = ""
self.impl = ""
self.function = ""
self.class_name = ""
self.description = ""
self.attributes = {}
self.examples = []
def columns(self):
return [i for i in self.schema if isinstance(i, Column)]
def foreign_keys(self):
return [i for i in self.schema if isinstance(i, ForeignKey)]
def generate(self, path, template="default"):
"""Generate the virtual table files"""
logging.debug("TableState.generate")
self.impl_content = jinja2.Template(TEMPLATES[template]).render(
table_name=self.table_name,
table_name_cc=to_camel_case(self.table_name),
schema=self.columns(),
header=self.header,
impl=self.impl,
function=self.function,
class_name=self.class_name,
attributes=self.attributes,
examples=self.examples,
)
if self.table_name == "" or self.function == "":
print (lightred("Invalid table spec: {0!s}".format((path))))
exit(1)
# Check for reserved column names
for column in self.columns():
if column.name in RESERVED:
print (lightred(("Cannot use column name: %s in table: %s "
"(the column name is reserved)" % (
column.name, self.table_name))))
exit(1)
path_bits = path.split("/")
for i in range(1, len(path_bits)):
dir_path = ""
for j in range(i):
dir_path += "{0!s}/".format(path_bits[j])
if not os.path.exists(dir_path):
try:
os.mkdir(dir_path)
except:
# May encounter a race when using a make jobserver.
pass
logging.debug("generating {0!s}".format(path))
with open(path, "w+") as file_h:
file_h.write(self.impl_content)
def blacklist(self, path):
print (lightred("Blacklisting generated {0!s}".format(path)))
logging.debug("blacklisting {0!s}".format(path))
self.generate(path, template="blacklist")
table = TableState()
class Column(object):
"""
Part of an osquery table schema.
Define a column by name and type with an optional description to assist
documentation generation and reference.
"""
def __init__(self, name, col_type, description="", **kwargs):
self.name = name
self.type = col_type
self.description = description
self.options = kwargs
class ForeignKey(object):
"""
Part of an osquery table schema.
Loosely define a column in a table spec as a Foreign key in another table.
"""
def __init__(self, **kwargs):
self.column = kwargs.get("column", "")
self.table = kwargs.get("table", "")
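# Illustrative only (not one of osquery's shipped specs): spec files build
# their schema from these classes together with the type MACROs defined above,
# e.g.
#
#   Column("name", TEXT, "Example name"),
#   Column("value", BIGINT, "Example value"),
#   ForeignKey(column="value", table="other_table"),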
def table_name(name):
"""define the virtual table name"""
logging.debug("- table_name")
logging.debug(" - called with: {0!s}".format(name))
table.table_name = name
table.description = ""
table.attributes = {}
table.examples = []
def schema(schema_list):
"""
define a list of Column object which represent the columns of your virtual
table
"""
logging.debug("- schema")
for it in schema_list:
if isinstance(it, Column):
logging.debug(" - column: {0!s} ({1!s})".format(it.name, it.type))
if isinstance(it, ForeignKey):
logging.debug(" - foreign_key: {0!s} ({1!s})".format(it.column, it.table))
table.schema = schema_list
def description(text):
table.description = text
def select_all(name=None):
if name is None:
name = table.table_name
return "select count(*) from {0!s};".format((name))
def examples(example_queries):
table.examples = example_queries
def attributes(**kwargs):
for attr in kwargs:
table.attributes[attr] = kwargs[attr]
def implementation(impl_string):
"""
define the path to the implementation file and the function which
implements the virtual table. You should use the following format:
# the path is "osquery/table/implementations/foo.cpp"
# the function is "QueryData genFoo();"
implementation("foo@genFoo")
"""
logging.debug("- implementation")
filename, function = impl_string.split("@")
class_parts = function.split("::")[::-1]
function = class_parts[0]
class_name = class_parts[1] if len(class_parts) > 1 else ""
impl = "{0!s}.cpp".format(filename)
logging.debug(" - impl => {0!s}".format(impl))
logging.debug(" - function => {0!s}".format(function))
logging.debug(" - class_name => {0!s}".format(class_name))
table.impl = impl
table.function = function
table.class_name = class_name
'''Check if the table has a subscriber attribute, if so, enforce time.'''
if "event_subscriber" in table.attributes:
columns = {}
# There is no dictionary comprehension on all supported platforms.
for column in table.schema:
if isinstance(column, Column):
columns[column.name] = column.type
if "time" not in columns:
print(lightred("Event subscriber: {0!s} needs a 'time' column.".format((
table.table_name))))
sys.exit(1)
if columns["time"] is not BIGINT:
print(lightred(
"Event subscriber: {0!s}, 'time' column must be a {1!s} type".format(
table.table_name, BIGINT)))
sys.exit(1)
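# A minimal spec sketch (illustrative, not a real osquery .table file).  Spec
# files are exec'd against this module, so they simply call the functions
# defined above:
#
#   table_name("example")
#   description("An example table.")
#   schema([
#       Column("name", TEXT, "Example name"),
#       Column("value", INTEGER, "Example value"),
#   ])
#   implementation("example@genExample")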
def main(argc, argv):
parser = argparse.ArgumentParser("Generate C++ Table Plugin from specfile.")
parser.add_argument(
"--debug", default=False, action="store_true",
help="Output debug messages (when developing)"
)
parser.add_argument("--templates", default=SCRIPT_DIR + "/templates",
help="Path to codegen output .cpp.in templates")
parser.add_argument("spec_file", help="Path to input .table spec file")
parser.add_argument("output", help="Path to output .cpp file")
args = parser.parse_args()
if args.debug:
logging.basicConfig(format=LOG_FORMAT, level=logging.DEBUG)
else:
logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
if argc < 3:
usage()
sys.exit(1)
filename = args.spec_file
output = args.output
if filename.endswith(".table"):
        # Adding a 3rd parameter will disable the blacklist
disable_blacklist = argc > 3
setup_templates(args.templates)
with open(filename, "rU") as file_handle:
tree = ast.parse(file_handle.read())
exec(compile(tree, "<string>", "exec"))
blacklisted = is_blacklisted(table.table_name, path=filename)
if not disable_blacklist and blacklisted:
table.blacklist(output)
else:
table.generate(output)
if __name__ == "__main__":
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
main(len(sys.argv), sys.argv)
|
runt18/osquery
|
tools/codegen/gentable.py
|
Python
|
bsd-3-clause
| 11,592 | 0.001725 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
texastribune/tt_streams
|
example/manage.py
|
Python
|
apache-2.0
| 320 | 0 |
"""Handle processing and setting web content into Readability/cleaned
"""
import httplib
import logging
import lxml
import socket
import urllib2
from BaseHTTPServer import BaseHTTPRequestHandler as HTTPH
from breadability.readable import Article
from urlparse import urlparse
LOG = logging.getLogger(__name__)
class DictObj(dict):
def __getattr__(self, name):
try:
return self.__getitem__(name)
except KeyError:
return super(DictObj, self).__getattr__(name)
USER_AGENT = 'bookie / ({url})'.format(
url="https://github.com/bookieio/bookie",
)
STATUS_CODES = DictObj({
    '1': 1, # used for manually parsed content
'200': 200,
'404': 404,
'403': 403,
    '429': 429, # 429 Too Many Requests; not listed in httplib's response table
    # internal 9xx error codes
'900': 900, # used for unparseable
'901': 901, # url is not parseable/usable
'902': 902, # socket.error during download
'903': 903, # httplib.IncompleteRead error
    '904': 904, # lxml error about an empty document
'905': 905, # httplib.BadStatusLine
})
IMAGE_TYPES = DictObj({
'png': 'image/png',
'jpeg': 'image/jpeg',
'jpg': 'image/jpg',
'gif': 'image/gif',
})
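# Quick illustration (not from the original sources): DictObj just layers
# attribute access over a plain dict, so the lookup tables above can be used
# either way:
#
#   STATUS_CODES['900']  -> 900
#   IMAGE_TYPES.png      -> 'image/png'   (via DictObj.__getattr__)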
class Readable(object):
"""Understand the base concept of making readable"""
is_error = False
content = None
content_type = None
headers = None
status_message = None
status = None
url = None
def error(self, code, msg):
"""This readable request was an error, assign it so"""
self.status = code
self.status_message = str(msg)
def is_error(self):
"""Check if this is indeed an error or not"""
if self.status not in [STATUS_CODES['200'], ]:
return True
else:
return False
def is_image(self):
"""Check if the current object is an image"""
# we can only get this if we have headers
LOG.debug('content type')
LOG.debug(self.content_type)
if (self.content_type is not None and
self.content_type.lower() in IMAGE_TYPES.values()):
return True
else:
return False
def set_content(self, content, content_type=None):
"""assign the content and potentially content type header"""
self.content = content
if content_type:
self.content_type = content_type
class ReadContent(object):
"""Handle some given content and parse the readable out of it"""
@staticmethod
def parse(content, content_type=None, url=None):
"""Handle the parsing out of the html content given"""
read = Readable()
document = Article(content.read(), url=url)
if not document.readable:
read.error(STATUS_CODES['900'], "Could not parse content.")
else:
read.set_content(document.readable,
content_type=content_type)
read.status = STATUS_CODES['1']
return read
class ReadUrl(object):
"""Fetch a url and read some content out of it"""
@staticmethod
def parse(url):
"""Fetch the given url and parse out a Readable Obj for the content"""
read = Readable()
if not isinstance(url, unicode):
url = url.decode('utf-8')
# first check if we have a special url with the #! content in it
if u'#!' in url:
# rewrite it with _escaped_fragment_=xxx
# we should be doing with this some regex, but cheating for now
idx = url.index(u'#')
fragment = url[idx:]
clean_url = u"{0}?_escaped_fragment_={1}".format(url[0:idx],
fragment)
else:
# we need to clean up the url first, we can't have any anchor tag
# on the url or urllib2 gets cranky
parsed = urlparse(url)
# We cannot parse urls that aren't http, https, or ftp://
if (parsed.scheme not in (u'http', u'https', u'ftp')):
read.error(
STATUS_CODES['901'],
'Invalid url scheme for readable content')
return read
if parsed.query is not None and parsed.query != '':
query = u'?'
else:
query = u''
clean_url = u"{0}://{1}{2}{query}{3}".format(
parsed[0],
parsed[1],
parsed[2],
parsed[4],
query=query)
try:
LOG.debug('Readable Parsed: ' + clean_url)
request = urllib2.Request(clean_url.encode('utf-8'))
request.add_header('User-Agent', USER_AGENT)
opener = urllib2.build_opener()
fh = opener.open(request)
# if it works, then we default to a 200 request
# it's ok, promise :)
read.status = 200
read.headers = fh.info()
read.content_type = read.headers.gettype()
except urllib2.HTTPError, exc:
# for some reason getting a code 429 from a server
if exc.code not in [429]:
read.error(exc.code, HTTPH.responses[exc.code])
else:
read.error(exc.code, unicode(exc.code) + ': ' + clean_url)
except httplib.InvalidURL, exc:
read.error(STATUS_CODES['901'], str(exc))
except urllib2.URLError, exc:
read.error(STATUS_CODES['901'], str(exc))
except httplib.BadStatusLine, exc:
read.error(STATUS_CODES['905'], str(exc))
except socket.error, exc:
read.error(STATUS_CODES['902'], str(exc))
LOG.debug('is error?')
LOG.debug(read.status)
# let's check to make sure we should be parsing this
# for example: don't parse images
if not read.is_error() and not read.is_image():
try:
document = Article(fh.read(), url=clean_url)
if not document.readable:
read.error(STATUS_CODES['900'],
"Could not parse document.")
else:
read.set_content(document.readable)
except socket.error, exc:
read.error(STATUS_CODES['902'], str(exc))
except httplib.IncompleteRead, exc:
read.error(STATUS_CODES['903'], str(exc))
except lxml.etree.ParserError, exc:
read.error(STATUS_CODES['904'], str(exc))
return read
|
GreenLunar/Bookie
|
bookie/lib/readable.py
|
Python
|
agpl-3.0
| 6,558 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-01 08:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('API', '0002_auto_20170201_0840'),
]
operations = [
migrations.AddField(
model_name='card',
name='back',
field=models.CharField(default='', max_length=1000),
),
migrations.AddField(
model_name='card',
name='front',
field=models.CharField(default='', max_length=1000),
),
migrations.AddField(
model_name='card',
name='review_count',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='card',
name='type',
field=models.IntegerField(default=0),
),
]
|
rearmlkp/Smart_Flash
|
API/migrations/0003_auto_20170201_0842.py
|
Python
|
gpl-3.0
| 917 | 0 |
# Generated by Django 2.2.7 on 2019-12-02 02:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('terminal', '0017_auto_20191125_0931'),
]
operations = [
migrations.RemoveField(
model_name='session',
name='date_last_active',
),
migrations.AlterField(
model_name='session',
name='remote_addr',
field=models.CharField(blank=True, max_length=128, null=True,
verbose_name='Remote addr'),
),
migrations.AddField(
model_name='session',
name='asset_id',
field=models.CharField(blank=True, db_index=True, default='',
max_length=36),
),
migrations.AddField(
model_name='session',
name='system_user_id',
field=models.CharField(blank=True, db_index=True, default='',
max_length=36),
),
migrations.AddField(
model_name='session',
name='user_id',
field=models.CharField(blank=True, db_index=True, default='',
max_length=36),
),
migrations.AlterField(
model_name='session',
name='asset',
field=models.CharField(db_index=True, max_length=128,
verbose_name='Asset'),
),
migrations.AlterField(
model_name='session',
name='protocol',
field=models.CharField(
choices=[('ssh', 'ssh'), ('rdp', 'rdp'), ('vnc', 'vnc'),
('telnet', 'telnet')], db_index=True, default='ssh',
max_length=8),
),
migrations.AlterField(
model_name='session',
name='system_user',
field=models.CharField(db_index=True, max_length=128,
verbose_name='System user'),
),
migrations.AlterField(
model_name='session',
name='user',
field=models.CharField(db_index=True, max_length=128,
verbose_name='User'),
),
]
|
skyoo/jumpserver
|
apps/terminal/migrations/0018_auto_20191202_1010.py
|
Python
|
gpl-2.0
| 2,294 | 0 |
# download images from last.fm
# PyQuery is a very powerful module for parsing HTML pages, but it is not distributed with Python by default
# if you want to install it you first need to install the lxml module
# Some features of this script work only with pyquery, but the most important ones (downloading cover and artist images) work without installing it
try:
from pyquery import PyQuery as pq
pyquery = True
except ImportError:
pyquery = False
# Create an instance of FancyURLopener to avoid being banned from certain sites that reject non-browser user agents
from urllib import FancyURLopener, quote_plus
class MyOpener(FancyURLopener):
version = "Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.9.2.4) Gecko/20100513 Firefox/3.6.4"
import sys
def wget(url, name=""):
""" http://stackoverflow.com/questions/22676/how-do-i-download-a-file-over-http-using-python """
import urllib2
if name=="":
file_name = url.split('/')[-1]
else:
file_name = name
u = urllib2.urlopen(url)
f = open(file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print "Downloading: %s Bytes: %s" % (file_name, file_size)
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print status,
f.close()
def openURL(url):
""" Open a URL using the Firefox user agent to avoid to be banned from getting the page content """
myopener = MyOpener()
u = myopener.open(url)
s = u.read()
u.close()
return s
def downloadURL(url, f):
myopener = MyOpener()
myopener.retrieve(url, filename = f)
def decodeArgs(s,encoding = sys.getfilesystemencoding()):
""" Encode arguments to pass as GET request to lastfm """
return quote_plus(s.decode(encoding).encode("utf-8"))
def findArtistImage_npq(s):
""" Return a dictionary of art images
This funtion not use pyquery to parse HTML and it is very rough, improove it if you wish """
import re
regex = re.compile('<img[^>]*>')
images=regex.findall(s)
img=[i for i in images if i.find("catalogueImage")!=-1]
regex=re.compile('src\b*=\b*"([^"]*)"')
try:
link=regex.findall(img[0])
return link[0]
except IndexError: return None
def findAlbumImage_npq(s):
""" Returns album cover without using pyquery, code it is very rough """
import re
try:
s = s.split('<span id="albumCover" class="albumCover coverMega">')[1].split('</span>')[0]
regex=re.compile('src\b*=\b*"([^"]*)"')
img = regex.findall(s)[0]
return img
except IndexError: return None
def findArtistImage_pq(s):
d = pq(s)
img=d('.resource-images img[itemprop="image"]').eq(0)
return img.attr("src")
def findAlbumImage_pq(s):
d=pq(s)
return d('.g.album-cover-wrapper img').eq(0).attr('src')
def getImages(artist, album=None):
if album:
s= openURL(getUrl(artist, album))
name="%s - %s" %(prettyName(artist), prettyName(album))
else:
s = openURL(getUrl(artist))
name=prettyName(artist)
if pyquery:
if album:r = findAlbumImage_pq(s)
else: r = findArtistImage_pq(s)
else:
if album:r = findAlbumImage_npq(s)
else: r = findArtistImage_npq(s)
# Check for some invalid arguments
    # This part of the code needs to be improved by raising exceptions to distinguish between different types of errors
if r=="http://cdn.last.fm/flatness/catalogue/noimage/2/default_album_mega.png": r ="Not found"
return {"url" : r, "name" : name}
def getUrl(artist, album = None):
url="http://www.lastfm.it/music/"
url +=decodeArgs(artist)
if (album): url +="/" + decodeArgs(album)
return url
def prettyName(s):
return " ".join(word.capitalize() for word in s.split())
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="Download artist's and album's images from Last.fm.")
group = parser.add_mutually_exclusive_group()
parser.add_argument('artist',
help="Artist name")
parser.add_argument("-a","--album", dest="album", default = None,
help="Album title")
group.add_argument("-d", "--download", action="store_true",
help="Download the detected image")
group.add_argument("-f","--file",
help="Name of the downloaded file")
args = parser.parse_args()
img=getImages(args.artist, args.album)
print img["url"]
if args.download:
args.file ="%s.%s" %(img["name"], img["url"].split('.')[-1])
args.file=args.file.decode(sys.getfilesystemencoding())
if args.file:
wget(img["url"], args.file)
print "Image as been downloaded successfully as %s" %args.file
|
gialloporpora/yellowpy
|
lastfm.py
|
Python
|
gpl-2.0
| 4,732 | 0.042054 |
#!/bin/python
# gofed-ng - Golang system
# Copyright (C) 2016 Fridolin Pokorny, fpokorny@redhat.com
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# ####################################################################
import sys
import os
def service_path2service_name(service_path):
basename = os.path.basename(service_path)
return basename[:-len('.py')]
if __name__ == "__main__":
sys.exit(1)
|
gofed/gofed-ng
|
testsuite/helpers/utils.py
|
Python
|
gpl-3.0
| 1,073 | 0.001864 |
# -*- coding: utf-8 -*-
""" Module for converting various mesh formats."""
# Copyright (C) 2006 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Garth N. Wells (gmsh function)
# Modified by Alexander H. Jarosch (gmsh fix)
# Modified by Angelo Simone (Gmsh and Medit fix)
# Modified by Andy R. Terrel (gmsh fix and triangle function)
# Modified by Magnus Vikstrom (metis and scotch function)
# Modified by Bartosz Sawicki (diffpack function)
# Modified by Gideon Simpson (Exodus II function)
# Modified by Kent-Andre Mardal (Star-CD function)
# Modified by Nuno Lopes (fix for emc2 mesh format (medit version 0))
# Modified by Neilen Marais (add gmsh support for reading physical region)
# Modified by Evan Lezar (add support for reading gmsh physical regions on facets)
# Modified by Jan Blechta (add triangle support for marker on edges and attributes on triangles)
#
# Last changed: 2014-02-06
# NOTE: This module does not depend on (py)dolfin being installed.
# NOTE: If future additions need that please import dolfin in a try: except:
# NOTE: clause and tell the user to install dolfin if it is not installed.
from __future__ import print_function
import getopt
import sys
from instant import get_status_output
import re
import warnings
import os.path
import numpy
import six
from . import abaqus
from . import xml_writer
def format_from_suffix(suffix):
"Return format for given suffix"
if suffix == "xml":
return "xml"
elif suffix == "mesh":
return "mesh"
elif suffix == "gmsh":
return "gmsh"
elif suffix == "msh":
return "gmsh"
elif suffix == "gra":
return "metis"
elif suffix == "grf":
return "scotch"
elif suffix == "grid":
return "diffpack"
elif suffix == "inp":
return "abaqus"
elif suffix == "ncdf":
return "NetCDF"
elif suffix =="exo":
return "ExodusII"
elif suffix =="e":
return "ExodusII"
elif suffix == "vrt" or suffix == "cel":
return "StarCD"
elif suffix == "ele" or suffix == "node":
return "Triangle"
else:
_error("Sorry, unknown suffix %s." % suffix)
def mesh2xml(ifilename, ofilename):
"""Convert between .mesh and .xml, parser implemented as a
state machine:
0 = read 'Vertices'
1 = read number of vertices
2 = read next vertex
3 = read 'Triangles' or 'Tetrahedra'
4 = read number of cells
5 = read next cell
6 = done
"""
print("Converting from Medit format (.mesh) to DOLFIN XML format")
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Scan file for cell type
cell_type = None
dim = 0
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Remove newline
line = line.strip(" \n\r").split(" ")
# Read dimension either on same line or following line
if line[0] == "Dimension":
if (len(line) == 2):
line = line[1]
else:
line = ifile.readline()
num_dims = int(line)
if num_dims == 2:
cell_type = "triangle"
dim = 2
elif num_dims == 3:
cell_type = "tetrahedron"
dim = 3
break
# Check that we got the cell type
if cell_type == None:
_error("Unable to find cell type.")
# Step to beginning of file
ifile.seek(0)
# Write header
xml_writer.write_header_mesh(ofile, cell_type, dim)
# Current state
state = 0
# Write data
num_vertices_read = 0
num_cells_read = 0
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Skip comments
if line[0] == '#':
continue
# Remove newline
line = line.rstrip("\n\r")
if state == 0:
if line == "Vertices" or line == " Vertices":
state += 1
elif state == 1:
num_vertices = int(line)
xml_writer.write_header_vertices(ofile, num_vertices)
state +=1
elif state == 2:
if num_dims == 2:
(x, y, tmp) = line.split()
x = float(x)
y = float(y)
z = 0.0
elif num_dims == 3:
(x, y, z, tmp) = line.split()
x = float(x)
y = float(y)
z = float(z)
xml_writer.write_vertex(ofile, num_vertices_read, x, y, z)
num_vertices_read +=1
if num_vertices == num_vertices_read:
xml_writer.write_footer_vertices(ofile)
state += 1
elif state == 3:
if (line == "Triangles" or line == " Triangles") and num_dims == 2:
state += 1
if line == "Tetrahedra" and num_dims == 3:
state += 1
elif state == 4:
num_cells = int(line)
xml_writer.write_header_cells(ofile, num_cells)
state +=1
elif state == 5:
if num_dims == 2:
(n0, n1, n2, tmp) = line.split()
n0 = int(n0) - 1
n1 = int(n1) - 1
n2 = int(n2) - 1
xml_writer.write_cell_triangle(ofile, num_cells_read, n0, n1, n2)
elif num_dims == 3:
(n0, n1, n2, n3, tmp) = line.split()
n0 = int(n0) - 1
n1 = int(n1) - 1
n2 = int(n2) - 1
n3 = int(n3) - 1
xml_writer.write_cell_tetrahedron(ofile, num_cells_read, n0, n1, n2, n3)
num_cells_read +=1
if num_cells == num_cells_read:
xml_writer.write_footer_cells(ofile)
state += 1
elif state == 6:
break
# Check that we got all data
if state == 6:
print("Conversion done")
else:
_error("Missing data, unable to convert")
# Write footer
xml_writer.write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
def gmsh2xml(ifilename, handler):
"""Convert between .gmsh v2.0 format (http://www.geuz.org/gmsh/) and .xml,
parser implemented as a state machine:
0 = read 'MeshFormat'
1 = read mesh format data
2 = read 'EndMeshFormat'
3 = read 'Nodes'
4 = read number of vertices
5 = read vertices
6 = read 'EndNodes'
7 = read 'Elements'
8 = read number of cells
9 = read cells
10 = done
Afterwards, extract physical region numbers if they are defined in
the mesh file as a mesh function.
"""
print("Converting from Gmsh format (.msh, .gmsh) to DOLFIN XML format")
# The dimension of the gmsh element types supported here as well as the dolfin cell types for each dimension
gmsh_dim = {15: 0, 1: 1, 2: 2, 4: 3}
cell_type_for_dim = {1: "interval", 2: "triangle", 3: "tetrahedron" }
# the gmsh element types supported for conversion
supported_gmsh_element_types = [1, 2, 4, 15]
# Open files
ifile = open(ifilename, "r")
# Scan file for cell type
cell_type = None
highest_dim = 0
line = ifile.readline()
while line:
# Remove newline
line = line.rstrip("\n\r")
# Read dimension
if line.find("$Elements") == 0:
line = ifile.readline()
num_elements = int(line)
if num_elements == 0:
_error("No elements found in gmsh file.")
line = ifile.readline()
# Now iterate through elements to find largest dimension. Gmsh
# format might include elements of lower dimensions in the element list.
# We also need to count number of elements of correct dimensions.
# Also determine which vertices are not used.
dim_count = {0: 0, 1: 0, 2: 0, 3: 0}
vertices_used_for_dim = {0: [], 1: [], 2: [], 3: []}
# Array used to store gmsh tags for 1D (type 1/line), 2D (type 2/triangular) elements and 3D (type 4/tet) elements
tags_for_dim = {0: [], 1: [], 2: [], 3: []}
while line.find("$EndElements") == -1:
element = line.split()
elem_type = int(element[1])
num_tags = int(element[2])
if elem_type in supported_gmsh_element_types:
dim = gmsh_dim[elem_type]
if highest_dim < dim:
highest_dim = dim
node_num_list = [int(node) for node in element[3 + num_tags:]]
vertices_used_for_dim[dim].extend(node_num_list)
if num_tags > 0:
tags_for_dim[dim].append(tuple(int(tag) for tag in element[3:3+num_tags]))
dim_count[dim] += 1
else:
#TODO: output a warning here. "gmsh element type %d not supported" % elem_type
pass
line = ifile.readline()
else:
# Read next line
line = ifile.readline()
# Check that we got the cell type and set num_cells_counted
if highest_dim == 0:
_error("Unable to find cells of supported type.")
num_cells_counted = dim_count[highest_dim]
vertex_set = set(vertices_used_for_dim[highest_dim])
vertices_used_for_dim[highest_dim] = None
vertex_dict = {}
for n,v in enumerate(vertex_set):
vertex_dict[v] = n
# Step to beginning of file
ifile.seek(0)
# Set mesh type
handler.set_mesh_type(cell_type_for_dim[highest_dim], highest_dim)
# Initialise node list (gmsh does not export all vertexes in order)
nodelist = {}
# Current state
state = 0
# Write data
num_vertices_read = 0
num_cells_read = 0
# Only import the dolfin objects if facet markings exist
process_facets = False
if len(tags_for_dim[highest_dim-1]) > 0:
# first construct the mesh
try:
from dolfin import MeshEditor, Mesh
except ImportError:
_error("DOLFIN must be installed to handle Gmsh boundary regions")
mesh = Mesh()
mesh_editor = MeshEditor ()
mesh_editor.open( mesh, highest_dim, highest_dim )
process_facets = True
else:
# TODO: Output a warning or an error here
me = None
while state != 10:
# Read next line
line = ifile.readline()
if not line: break
# Skip comments
if line[0] == '#':
continue
# Remove newline
line = line.rstrip("\n\r")
if state == 0:
if line == "$MeshFormat":
state = 1
elif state == 1:
(version, file_type, data_size) = line.split()
state = 2
elif state == 2:
if line == "$EndMeshFormat":
state = 3
elif state == 3:
if line == "$Nodes":
state = 4
elif state == 4:
num_vertices = len(vertex_dict)
handler.start_vertices(num_vertices)
if process_facets:
mesh_editor.init_vertices_global(num_vertices, num_vertices)
state = 5
elif state == 5:
(node_no, x, y, z) = line.split()
node_no = int(node_no)
x,y,z = [float(xx) for xx in (x,y,z)]
if node_no in vertex_dict:
node_no = vertex_dict[node_no]
else:
continue
nodelist[int(node_no)] = num_vertices_read
handler.add_vertex(num_vertices_read, [x, y, z])
if process_facets:
if highest_dim == 1:
coords = numpy.array([x])
elif highest_dim == 2:
coords = numpy.array([x, y])
elif highest_dim == 3:
coords = numpy.array([x, y, z])
mesh_editor.add_vertex(num_vertices_read, coords)
num_vertices_read +=1
if num_vertices == num_vertices_read:
handler.end_vertices()
state = 6
elif state == 6:
if line == "$EndNodes":
state = 7
elif state == 7:
if line == "$Elements":
state = 8
elif state == 8:
handler.start_cells(num_cells_counted)
if process_facets:
mesh_editor.init_cells_global(num_cells_counted, num_cells_counted)
state = 9
elif state == 9:
element = line.split()
elem_type = int(element[1])
num_tags = int(element[2])
if elem_type in supported_gmsh_element_types:
dim = gmsh_dim[elem_type]
else:
dim = 0
if dim == highest_dim:
node_num_list = [vertex_dict[int(node)] for node in element[3 + num_tags:]]
for node in node_num_list:
if not node in nodelist:
_error("Vertex %d of %s %d not previously defined." %
(node, cell_type_for_dim[dim], num_cells_read))
cell_nodes = [nodelist[n] for n in node_num_list]
handler.add_cell(num_cells_read, cell_nodes)
if process_facets:
cell_nodes = numpy.array([nodelist[n] for n in node_num_list], dtype=numpy.uintp)
mesh_editor.add_cell(num_cells_read, cell_nodes)
num_cells_read +=1
if num_cells_counted == num_cells_read:
handler.end_cells()
if process_facets:
mesh_editor.close()
state = 10
elif state == 10:
break
# Write mesh function based on the Physical Regions defined by
# gmsh, but only if they are not all zero. All zero physical
# regions indicate that no physical regions were defined.
if highest_dim not in [1,2,3]:
_error("Gmsh tags not supported for dimension %i. Probably a bug" % dim)
tags = tags_for_dim[highest_dim]
physical_regions = tuple(tag[0] for tag in tags)
if not all(tag == 0 for tag in physical_regions):
handler.start_meshfunction("physical_region", dim, num_cells_counted)
for i, physical_region in enumerate(physical_regions):
handler.add_entity_meshfunction(i, physical_region)
handler.end_meshfunction()
# Now process the facet markers
tags = tags_for_dim[highest_dim-1]
if (len(tags) > 0) and (mesh is not None):
physical_regions = tuple(tag[0] for tag in tags)
if not all(tag == 0 for tag in physical_regions):
mesh.init(highest_dim-1,0)
# Get the facet-node connectivity information (reshape as a row of node indices per facet)
if highest_dim==1:
# for 1d meshes the mesh topology returns the vertex to vertex map, which isn't what we want
# as facets are vertices
facets_as_nodes = numpy.array([[i] for i in range(mesh.num_facets())])
else:
facets_as_nodes = mesh.topology()(highest_dim-1,0)().reshape ( mesh.num_facets(), highest_dim )
# Build the reverse map
nodes_as_facets = {}
for facet in range(mesh.num_facets()):
nodes_as_facets[tuple(facets_as_nodes[facet,:])] = facet
data = [int(0*k) for k in range(mesh.num_facets()) ]
for i, physical_region in enumerate(physical_regions):
nodes = [n-1 for n in vertices_used_for_dim[highest_dim-1][highest_dim*i:(highest_dim*i+highest_dim)]]
nodes.sort()
if physical_region != 0:
try:
index = nodes_as_facets[tuple(nodes)]
data[index] = physical_region
except IndexError:
raise Exception ( "The facet (%d) was not found to mark: %s" % (i, nodes) )
# Create and initialise the mesh function
handler.start_meshfunction("facet_region", highest_dim-1, mesh.num_facets() )
for index, physical_region in enumerate ( data ):
handler.add_entity_meshfunction(index, physical_region)
handler.end_meshfunction()
# Check that we got all data
if state == 10:
print("Conversion done")
else:
_error("Missing data, unable to convert \n\ Did you use version 2.0 of the gmsh file format?")
# Close files
ifile.close()
def triangle2xml(ifilename, ofilename):
"""Convert between triangle format
(http://www.cs.cmu.edu/~quake/triangle.html) and .xml. The
given ifilename should be the prefix for the corresponding
.node, and .ele files.
"""
def get_next_line (fp):
"""Helper function for skipping comments and blank lines"""
line = fp.readline()
if line == '':
_error("Hit end of file prematurely.")
line = line.strip()
if not (line.startswith('#') or line == ''):
return line
return get_next_line(fp)
print("Converting from Triangle format {.node, .ele} to DOLFIN XML format")
# Open files
for suffix in [".node", ".ele"]:
if suffix in ifilename and ifilename[-len(suffix):] == suffix:
ifilename = ifilename.replace(suffix, "")
node_file = open(ifilename+".node", "r")
ele_file = open(ifilename+".ele", "r")
ofile = open(ofilename, "w")
try:
edge_file = open(ifilename+".edge", "r")
print("Found .edge file")
except IOError:
edge_file = None
# Read all the nodes
nodes = {}
num_nodes, dim, attr, bound = list(map(int, get_next_line(node_file).split()))
while len(nodes) < num_nodes:
node, x, y = get_next_line(node_file).split()[:3]
nodes[int(node)] = (float(x), float(y))
# Read all the triangles
tris = {}
tri_attrs = {}
num_tris, n_per_tri, attrs = list(map(int, get_next_line(ele_file).split()))
while len(tris) < num_tris:
line = get_next_line(ele_file).split()
tri, n1, n2, n3 = list(map(int, line[:4]))
# vertices are ordered according to current UFC ordering scheme -
# - may change in future!
tris[tri] = tuple(sorted((n1, n2, n3)))
tri_attrs[tri] = tuple(map(float, line[4:4+attrs]))
# Read all the boundary markers from edges
edge_markers_global = {}
edge_markers_local = []
got_negative_edge_markers = False
if edge_file is not None:
num_edges, num_edge_markers = list(map(int, get_next_line(edge_file).split()))
if num_edge_markers == 1:
while len(edge_markers_global) < num_edges:
edge, v1, v2, marker = list(map(int, get_next_line(edge_file).split()))
if marker < 0: got_negative_edge_markers = True
edge_markers_global[tuple(sorted((v1, v2)))] = marker
if got_negative_edge_markers:
print("Some edge markers are negative! dolfin will increase "\
"them by probably 2**32 when loading xml. "\
"Consider using non-negative edge markers only.")
for tri, vertices in six.iteritems(tris):
v0, v1, v2 = sorted((vertices[0:3]))
try:
edge_markers_local.append((tri, 0, \
edge_markers_global[(v1, v2)]))
edge_markers_local.append((tri, 1, \
edge_markers_global[(v0, v2)]))
edge_markers_local.append((tri, 2, \
edge_markers_global[(v0, v1)]))
except IndexError:
raise Exception("meshconvert.py: The facet was not found.")
elif num_edge_markers == 0:
print("...but no markers in it. Ignoring it")
else:
print("...but %d markers specified in it. It won't be processed."\
%num_edge_markers)
# Write everything out
xml_writer.write_header_mesh(ofile, "triangle", 2)
xml_writer.write_header_vertices(ofile, num_nodes)
node_off = 0 if 0 in nodes else -1
for node, node_t in six.iteritems(nodes):
xml_writer.write_vertex(ofile, node+node_off, node_t[0], node_t[1], 0.0)
xml_writer.write_footer_vertices(ofile)
xml_writer.write_header_cells(ofile, num_tris)
tri_off = 0 if 0 in tris else -1
for tri, tri_t in six.iteritems(tris):
xml_writer.write_cell_triangle(ofile, tri+tri_off, tri_t[0] + node_off,
tri_t[1] + node_off, tri_t[2] + node_off)
xml_writer.write_footer_cells(ofile)
if len(edge_markers_local) > 0:
xml_writer.write_header_domains(ofile)
xml_writer.write_header_meshvaluecollection(ofile, \
"edge markers", 1, len(edge_markers_local), "uint")
for tri, local_edge, marker in edge_markers_local:
xml_writer.write_entity_meshvaluecollection(ofile, \
1, tri+tri_off, marker, local_edge)
xml_writer.write_footer_meshvaluecollection(ofile)
xml_writer.write_footer_domains(ofile)
xml_writer.write_footer_mesh(ofile)
for i in range(attrs):
afilename = ofilename.replace(".xml", ".attr"+str(i)+".xml")
afile = open(afilename, "w")
xml_writer.write_header_meshfunction2(afile)
xml_writer.write_header_meshvaluecollection(afile, \
"triangle attribs "+str(i), 2, num_tris, "double")
for tri, tri_a in six.iteritems(tri_attrs):
xml_writer.write_entity_meshvaluecollection(afile, \
2, tri+tri_off, tri_a[i], 0)
xml_writer.write_footer_meshvaluecollection(afile)
xml_writer.write_footer_meshfunction(afile)
print("triangle attributes from .ele file written to "+afilename)
afile.close()
# Close files
node_file.close()
ele_file.close()
if edge_file is not None:
edge_file.close()
ofile.close()
def xml_old2xml(ifilename, ofilename):
"Convert from old DOLFIN XML format to new."
print("Converting from old (pre DOLFIN 0.6.2) to new DOLFIN XML format...")
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Scan file for cell type (assuming there is just one)
cell_type = None
dim = 0
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Read dimension
if "<triangle" in line:
cell_type = "triangle"
dim = 2
break
elif "<tetrahedron" in line:
cell_type = "tetrahedron"
dim = 3
break
# Step to beginning of file
ifile.seek(0)
# Read lines and make changes
while 1:
# Read next line
line = ifile.readline()
if not line: break
# Modify line
if "xmlns" in line:
line = "<dolfin xmlns:dolfin=\"http://fenicsproject.org\">\n"
if "<mesh>" in line:
line = " <mesh celltype=\"%s\" dim=\"%d\">\n" % (cell_type, dim)
if dim == 2 and " z=\"0.0\"" in line:
line = line.replace(" z=\"0.0\"", "")
if " name=" in line:
line = line.replace(" name=", " index=")
if " name =" in line:
line = line.replace(" name =", " index=")
if "n0" in line:
line = line.replace("n0", "v0")
if "n1" in line:
line = line.replace("n1", "v1")
if "n2" in line:
line = line.replace("n2", "v2")
if "n3" in line:
line = line.replace("n3", "v3")
# Write line
ofile.write(line)
# Close files
ifile.close();
ofile.close();
print("Conversion done")
def metis_graph2graph_xml(ifilename, ofilename):
"Convert from Metis graph format to DOLFIN Graph XML."
print("Converting from Metis graph format to DOLFIN Graph XML.")
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Read number of vertices and edges
line = ifile.readline()
if not line:
_error("Empty file")
(num_vertices, num_edges) = line.split()
xml_writer.write_header_graph(ofile, "directed")
xml_writer.write_header_vertices(ofile, int(num_vertices))
for i in range(int(num_vertices)):
line = ifile.readline()
edges = line.split()
xml_writer.write_graph_vertex(ofile, i, len(edges))
xml_writer.write_footer_vertices(ofile)
xml_writer.write_header_edges(ofile, 2*int(num_edges))
# Step to beginning of file and skip header info
ifile.seek(0)
ifile.readline()
for i in range(int(num_vertices)):
print("vertex %g", i)
line = ifile.readline()
edges = line.split()
for e in edges:
xml_writer.write_graph_edge(ofile, i, int(e))
xml_writer.write_footer_edges(ofile)
xml_writer.write_footer_graph(ofile)
# Close files
ifile.close();
ofile.close();
def scotch_graph2graph_xml(ifilename, ofilename):
"Convert from Scotch graph format to DOLFIN Graph XML."
print("Converting from Scotch graph format to DOLFIN Graph XML.")
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Skip graph file version number
ifile.readline()
# Read number of vertices and edges
line = ifile.readline()
if not line:
_error("Empty file")
(num_vertices, num_edges) = line.split()
# Read start index and numeric flag
# Start index is 0 or 1 (C/Fortran)
# Numeric flag is 3 bits where bit 1 enables vertex labels
# bit 2 enables edge weights and bit 3 enables vertex weights
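    # e.g. a header line "0  000" means C-style (0-based) numbering with no
    # vertex labels or edge/vertex weights, which is the only case handled below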
line = ifile.readline()
(start_index, numeric_flag) = line.split()
    # Handling of labels and weights is not implemented
    if numeric_flag != "000":
        _error("Handling of scotch vertex labels, edge- and vertex weights not implemented")
xml_writer.write_header_graph(ofile, "undirected")
xml_writer.write_header_vertices(ofile, int(num_vertices))
# Read vertices and edges, first number gives number of edges from this vertex (not used)
for i in range(int(num_vertices)):
line = ifile.readline()
edges = line.split()
xml_writer.write_graph_vertex(ofile, i, len(edges)-1)
xml_writer.write_footer_vertices(ofile)
xml_writer.write_header_edges(ofile, int(num_edges))
# Step to beginning of file and skip header info
ifile.seek(0)
ifile.readline()
ifile.readline()
ifile.readline()
for i in range(int(num_vertices)):
line = ifile.readline()
edges = line.split()
for j in range(1, len(edges)):
xml_writer.write_graph_edge(ofile, i, int(edges[j]))
xml_writer.write_footer_edges(ofile)
xml_writer.write_footer_graph(ofile)
# Close files
    ifile.close()
    ofile.close()
def diffpack2xml(ifilename, ofilename):
"Convert from Diffpack tetrahedral/triangle grid format to DOLFIN XML."
print(diffpack2xml.__doc__)
# Format strings for MeshFunction XML files
meshfunction_header = """\
<?xml version="1.0" encoding="UTF-8"?>\n
<dolfin xmlns:dolfin="http://www.fenics.org/dolfin/">
<mesh_function type="uint" dim="%d" size="%d">\n"""
meshfunction_entity = " <entity index=\"%d\" value=\"%d\"/>\n"
meshfunction_footer = " </mesh_function>\n</dolfin>"
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
# Read and analyze header
while 1:
line = ifile.readline()
if not line:
_error("Empty file")
if line[0] == "#":
break
if re.search(r"Number of elements", line):
num_cells = int(re.match(r".*\s(\d+).*", line).group(1))
if re.search(r"Number of nodes", line):
num_vertices = int(re.match(r".*\s(\d+).*", line).group(1))
if re.search(r"Number of space dim.", line):
num_dims = int(re.match(r".*\s(\d+).*", line).group(1))
if num_dims == 3:
xml_writer.write_header_mesh(ofile, "tetrahedron", 3)
elem_type = "ElmT4n3D"
write_cell_func = xml_writer.write_cell_tetrahedron
else:
xml_writer.write_header_mesh(ofile, "triangle", 2)
elem_type = "ElmT3n2D"
write_cell_func = xml_writer.write_cell_triangle
xml_writer.write_header_vertices(ofile, num_vertices)
# Read & write vertices and collect markers for vertices
vertex_markers = []
unique_vertex_markers = set()
for i in range(num_vertices):
line = ifile.readline()
m = re.match(r"^.*\(\s*(.*)\s*\).*\](.*)$", line)
        x = list(map(float, re.split(r"[\s,]+", m.group(1))))
xml_writer.write_vertex(ofile, i, *x)
markers = list(map(int, m.group(2).split()))
vertex_markers.append(markers)
unique_vertex_markers.update(markers)
xml_writer.write_footer_vertices(ofile)
xml_writer.write_header_cells(ofile, num_cells)
# Output unique vertex markers as individual VertexFunctions
unique_vertex_markers.difference_update([0])
for unique_marker in unique_vertex_markers:
ofile_marker = open(ofilename.replace(".xml", "") + \
"_marker_" + str(unique_marker)+".xml", "w")
xml_writer.write_header_meshfunction(ofile_marker, 0, num_vertices)
for ind, markers in enumerate(vertex_markers):
if unique_marker in markers:
xml_writer.write_entity_meshfunction(ofile_marker, ind, unique_marker)
else:
xml_writer.write_entity_meshfunction(ofile_marker, ind, 0)
xml_writer.write_footer_meshfunction(ofile_marker)
# Ignore comment lines
while 1:
line = ifile.readline()
if not line:
_error("Empty file")
if line[0] == "#":
break
# Read & write cells and collect cell and face markers
cell_markers = []
facet_markers = []
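    # facet_to_vert[i] lists the local vertices of the tetrahedron facet
    # opposite local vertex i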
facet_to_vert = [[1,2,3], [0,2,3], [0,1,3], [0,1,2]]
vert_to_facet = facet_to_vert # The same!
cell_ind = 0
while cell_ind < num_cells:
line = ifile.readline()
v = line.split()
if not v:
continue
if v[1] != elem_type:
_error("Only tetrahedral (ElmT4n3D) and triangular (ElmT3n2D) elements are implemented.")
# Store Cell markers
cell_markers.append(int(v[2]))
# Sort vertex indices
cell_indices = sorted([int(x)-1 for x in v[3:]])
write_cell_func(ofile, cell_ind, *cell_indices)
if num_dims == 2:
cell_ind += 1
continue
# Check Facet info
process_facet = set(range(4))
for local_vert_ind, global_vert_ind in enumerate(cell_indices):
# If no marker is included for vertex skip corresponding facet
if not vertex_markers[global_vert_ind]:
process_facet.difference_update(facet_to_vert[local_vert_ind])
# Process facets
for local_facet in process_facet:
# Start with markers from first vertex
global_first_vertex = cell_indices[facet_to_vert[local_facet][0]]
marker_intersection = set(vertex_markers[global_first_vertex])
# Process the other vertices
for local_vert in facet_to_vert[local_facet][1:]:
marker_intersection.intersection_update(\
vertex_markers[cell_indices[local_vert]])
if not marker_intersection:
break
            # for/else: no break means every vertex of local_facet shares a marker
else:
assert(len(marker_intersection)==1)
facet_markers.append((cell_ind, local_facet, \
marker_intersection.pop()))
# Bump cell_ind
cell_ind += 1
xml_writer.write_footer_cells(ofile)
xml_writer.write_header_domains(ofile)
# Write facet markers if any
if facet_markers:
xml_writer.write_header_meshvaluecollection(ofile, "m", 2, \
len(facet_markers), "uint")
for cell, local_facet, marker in facet_markers:
xml_writer.write_entity_meshvaluecollection(ofile, 2, cell, \
marker, local_facet)
xml_writer.write_footer_meshvaluecollection(ofile)
xml_writer.write_header_meshvaluecollection(ofile, "m", num_dims, \
len(cell_markers), "uint")
for cell, marker in enumerate(cell_markers):
xml_writer.write_entity_meshvaluecollection(ofile, num_dims, cell, \
marker)
xml_writer.write_footer_meshvaluecollection(ofile)
xml_writer.write_footer_domains(ofile)
xml_writer.write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
class ParseError(Exception):
""" Error encountered in source file.
"""
class DataHandler(object):
""" Baseclass for handlers of mesh data.
The actual handling of mesh data encountered in the source file is
delegated to a polymorfic object. Typically, the delegate will write the
data to XML.
@ivar _state: the state which the handler is in, one of State_*.
@ivar _cell_type: cell type in mesh. One of CellType_*.
@ivar _dim: mesh dimensions.
"""
State_Invalid, State_Init, State_Vertices, State_Cells, \
State_MeshFunction, State_MeshValueCollection = list(range(6))
CellType_Tetrahedron, CellType_Triangle, CellType_Interval = list(range(3))
def __init__(self):
self._state = self.State_Invalid
def set_mesh_type(self, cell_type, dim):
assert self._state == self.State_Invalid
self._state = self.State_Init
if cell_type == "tetrahedron":
self._cell_type = self.CellType_Tetrahedron
elif cell_type == "triangle":
self._cell_type = self.CellType_Triangle
elif cell_type == "interval":
self._cell_type = self.CellType_Interval
self._dim = dim
def start_vertices(self, num_vertices):
assert self._state == self.State_Init
self._state = self.State_Vertices
def add_vertex(self, vertex, coords):
assert self._state == self.State_Vertices
def end_vertices(self):
assert self._state == self.State_Vertices
self._state = self.State_Init
def start_cells(self, num_cells):
assert self._state == self.State_Init
self._state = self.State_Cells
def add_cell(self, cell, nodes):
assert self._state == self.State_Cells
def end_cells(self):
assert self._state == self.State_Cells
self._state = self.State_Init
def start_domains(self):
assert self._state == self.State_Init
def end_domains(self):
self._state = self.State_Init
def start_meshfunction(self, name, dim, size):
assert self._state == self.State_Init
self._state = self.State_MeshFunction
def add_entity_meshfunction(self, index, value):
assert self._state == self.State_MeshFunction
def end_meshfunction(self):
assert self._state == self.State_MeshFunction
self._state = self.State_Init
def start_mesh_value_collection(self, name, dim, size, etype):
assert self._state == self.State_Init
self._state = self.State_MeshValueCollection
def add_entity_mesh_value_collection(self, dim, index, value, local_entity=0):
assert self._state == self.State_MeshValueCollection
def end_mesh_value_collection(self):
assert self._state == self.State_MeshValueCollection
self._state = self.State_Init
def warn(self, msg):
""" Issue warning during parse.
"""
warnings.warn(msg)
def error(self, msg):
""" Raise error during parse.
This method is expected to raise ParseError.
"""
raise ParseError(msg)
def close(self):
self._state = self.State_Invalid
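# Illustrative sketch (not part of the original module): a minimal DataHandler
# subclass that only counts entities. It shows the callback protocol a
# handler-based converter (e.g. gmsh2xml or abaqus.convert) drives:
# set_mesh_type(), then paired start_*/add_*/end_* calls, then close().
class _CountingHandler(DataHandler):
    def __init__(self):
        DataHandler.__init__(self)
        self.num_vertices = 0
        self.num_cells = 0
    def add_vertex(self, vertex, coords):
        DataHandler.add_vertex(self, vertex, coords)
        self.num_vertices += 1
    def add_cell(self, cell, nodes):
        DataHandler.add_cell(self, cell, nodes)
        self.num_cells += 1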
class XmlHandler(DataHandler):
""" Data handler class which writes to Dolfin XML.
"""
def __init__(self, ofilename):
DataHandler.__init__(self)
self._ofilename = ofilename
self.__ofile = open(ofilename, "w")
self.__ofile_meshfunc = None
def ofile(self):
return self.__ofile
def set_mesh_type(self, cell_type, dim):
DataHandler.set_mesh_type(self, cell_type, dim)
xml_writer.write_header_mesh(self.__ofile, cell_type, dim)
def start_vertices(self, num_vertices):
DataHandler.start_vertices(self, num_vertices)
xml_writer.write_header_vertices(self.__ofile, num_vertices)
def add_vertex(self, vertex, coords):
DataHandler.add_vertex(self, vertex, coords)
xml_writer.write_vertex(self.__ofile, vertex, *coords)
def end_vertices(self):
DataHandler.end_vertices(self)
xml_writer.write_footer_vertices(self.__ofile)
def start_cells(self, num_cells):
DataHandler.start_cells(self, num_cells)
xml_writer.write_header_cells(self.__ofile, num_cells)
def add_cell(self, cell, nodes):
DataHandler.add_cell(self, cell, nodes)
if self._cell_type == self.CellType_Tetrahedron:
func = xml_writer.write_cell_tetrahedron
elif self._cell_type == self.CellType_Triangle:
func = xml_writer.write_cell_triangle
elif self._cell_type == self.CellType_Interval:
func = xml_writer.write_cell_interval
func(self.__ofile, cell, *nodes)
def end_cells(self):
DataHandler.end_cells(self)
xml_writer.write_footer_cells(self.__ofile)
def start_meshfunction(self, name, dim, size):
DataHandler.start_meshfunction(self, name, dim, size)
fname = os.path.splitext(self.__ofile.name)[0]
self.__ofile_meshfunc = open("%s_%s.xml" % (fname, name), "w")
xml_writer.write_header_meshfunction(self.__ofile_meshfunc, dim, size)
def add_entity_meshfunction(self, index, value):
DataHandler.add_entity_meshfunction(self, index, value)
xml_writer.write_entity_meshfunction(self.__ofile_meshfunc, index, value)
def end_meshfunction(self):
DataHandler.end_meshfunction(self)
xml_writer.write_footer_meshfunction(self.__ofile_meshfunc)
self.__ofile_meshfunc.close()
self.__ofile_meshfunc = None
def start_domains(self):
#DataHandler.start_domains(self)
xml_writer.write_header_domains(self.__ofile)
def end_domains(self):
#DataHandler.end_domains(self)
xml_writer.write_footer_domains(self.__ofile)
def start_mesh_value_collection(self, name, dim, size, etype):
DataHandler.start_mesh_value_collection(self, name, dim, size, etype)
xml_writer.write_header_meshvaluecollection(self.__ofile, name, dim, size, etype)
def add_entity_mesh_value_collection(self, dim, index, value, local_entity=0):
DataHandler.add_entity_mesh_value_collection(self, dim, index, value)
xml_writer.write_entity_meshvaluecollection(self.__ofile, dim, index, value, local_entity=local_entity)
def end_mesh_value_collection(self):
DataHandler.end_mesh_value_collection(self)
xml_writer.write_footer_meshvaluecollection(self.__ofile)
def close(self):
DataHandler.close(self)
if self.__ofile.closed:
return
xml_writer.write_footer_mesh(self.__ofile)
self.__ofile.close()
if self.__ofile_meshfunc is not None:
self.__ofile_meshfunc.close()
def netcdf2xml(ifilename,ofilename):
"Convert from NetCDF format to DOLFIN XML."
print("Converting from NetCDF format (.ncdf) to DOLFIN XML format")
# Open files
ifile = open(ifilename, "r")
ofile = open(ofilename, "w")
cell_type = None
dim = 0
# Scan file for dimension, number of nodes, number of elements
while 1:
line = ifile.readline()
if not line:
_error("Empty file")
if re.search(r"num_dim.*=", line):
dim = int(re.match(".*\s=\s(\d+)\s;",line).group(1))
if re.search(r"num_nodes.*=", line):
num_vertices = int(re.match(".*\s=\s(\d+)\s;",line).group(1))
if re.search(r"num_elem.*=", line):
num_cells = int(re.match(".*\s=\s(\d+)\s;",line).group(1))
if re.search(r"connect1 =",line):
break
num_dims=dim
# Set cell type
if dim == 2:
cell_type = "triangle"
if dim == 3:
cell_type = "tetrahedron"
# Check that we got the cell type
    if cell_type is None:
_error("Unable to find cell type.")
# Write header
xml_writer.write_header_mesh(ofile, cell_type, dim)
xml_writer.write_header_cells(ofile, num_cells)
num_cells_read = 0
# Read and write cells
while 1:
# Read next line
line = ifile.readline()
if not line:
break
connect=re.split("[,;]",line)
if num_dims == 2:
n0 = int(connect[0])-1
n1 = int(connect[1])-1
n2 = int(connect[2])-1
xml_writer.write_cell_triangle(ofile, num_cells_read, n0, n1, n2)
elif num_dims == 3:
n0 = int(connect[0])-1
n1 = int(connect[1])-1
n2 = int(connect[2])-1
n3 = int(connect[3])-1
xml_writer.write_cell_tetrahedron(ofile, num_cells_read, n0, n1, n2, n3)
num_cells_read +=1
if num_cells == num_cells_read:
xml_writer.write_footer_cells(ofile)
xml_writer.write_header_vertices(ofile, num_vertices)
break
num_vertices_read = 0
coords = [[],[],[]]
coord = -1
while 1:
line = ifile.readline()
if not line:
_error("Missing data")
if re.search(r"coord =",line):
break
# Read vertices
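    # A line matching "  <token>," (two leading spaces, a token, a comma) is
    # taken to start the next coordinate component (x, then y, then z in 3D)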
while 1:
line = ifile.readline()
if not line:
break
if re.search(r"\A\s\s\S+,",line):
coord+=1
print("Found x_"+str(coord)+" coordinates")
coords[coord] += line.split()
if re.search(r";",line):
break
# Write vertices
for i in range(num_vertices):
if num_dims == 2:
x = float(re.split(",",coords[0].pop(0))[0])
y = float(re.split(",",coords[1].pop(0))[0])
z = 0
if num_dims == 3:
x = float(re.split(",",coords[0].pop(0))[0])
y = float(re.split(",",coords[1].pop(0))[0])
z = float(re.split(",",coords[2].pop(0))[0])
xml_writer.write_vertex(ofile, i, x, y, z)
# Write footer
xml_writer.write_footer_vertices(ofile)
xml_writer.write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
def exodus2xml(ifilename,ofilename):
"Convert from Exodus II format to DOLFIN XML."
print("Converting from Exodus II format to NetCDF format")
name = ifilename.split(".")[0]
netcdffilename = name +".ncdf"
status, output = get_status_output('ncdump '+ifilename + ' > '+netcdffilename)
if status != 0:
raise IOError("Something wrong while executing ncdump. Is ncdump "\
"installed on the system?")
netcdf2xml(netcdffilename, ofilename)
def _error(message):
"Write an error message"
for line in message.split("\n"):
print("*** %s" % line)
sys.exit(2)
def convert2xml(ifilename, ofilename, iformat=None):
""" Convert a file to the DOLFIN XML format.
"""
convert(ifilename, XmlHandler(ofilename), iformat=iformat)
def convert(ifilename, handler, iformat=None):
""" Convert a file using a provided data handler.
Note that handler.close is called when this function finishes.
@param ifilename: Name of input file.
@param handler: The data handler (instance of L{DataHandler}).
@param iformat: Format of input file.
"""
if iformat is None:
iformat = format_from_suffix(os.path.splitext(ifilename)[1][1:])
# XXX: Backwards-compat
if hasattr(handler, "_ofilename"):
ofilename = handler._ofilename
# Choose conversion
if iformat == "mesh":
# Convert from mesh to xml format
mesh2xml(ifilename, ofilename)
elif iformat == "gmsh":
# Convert from gmsh to xml format
gmsh2xml(ifilename, handler)
elif iformat == "Triangle":
# Convert from Triangle to xml format
triangle2xml(ifilename, ofilename)
elif iformat == "xml-old":
# Convert from old to new xml format
xml_old2xml(ifilename, ofilename)
elif iformat == "metis":
# Convert from metis graph to dolfin graph xml format
metis_graph2graph_xml(ifilename, ofilename)
elif iformat == "scotch":
# Convert from scotch graph to dolfin graph xml format
scotch_graph2graph_xml(ifilename, ofilename)
elif iformat == "diffpack":
# Convert from Diffpack tetrahedral grid format to xml format
diffpack2xml(ifilename, ofilename)
elif iformat == "abaqus":
# Convert from abaqus to xml format
abaqus.convert(ifilename, handler)
elif iformat == "NetCDF":
# Convert from NetCDF generated from ExodusII format to xml format
netcdf2xml(ifilename, ofilename)
elif iformat =="ExodusII":
# Convert from ExodusII format to xml format via NetCDF
exodus2xml(ifilename, ofilename)
elif iformat == "StarCD":
# Convert from Star-CD tetrahedral grid format to xml format
starcd2xml(ifilename, ofilename)
else:
_error("Sorry, cannot convert between %s and DOLFIN xml file formats." % iformat)
    # XXX: handler.close messes things up for input formats other than abaqus or gmsh
if iformat in ("abaqus", "gmsh"):
handler.close()
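# Usage sketch (placeholder file names; assumes format_from_suffix maps the
# "msh" suffix to "gmsh"):
#   convert2xml("mesh.msh", "mesh.xml")
# is equivalent to
#   convert("mesh.msh", XmlHandler("mesh.xml"), iformat="gmsh")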
def starcd2xml(ifilename, ofilename):
"Convert from Star-CD tetrahedral grid format to DOLFIN XML."
print(starcd2xml.__doc__)
if not os.path.isfile(ifilename[:-3] + "vrt") or not os.path.isfile(ifilename[:-3] + "cel"):
print("StarCD format requires one .vrt file and one .cel file")
sys.exit(2)
# open output file
ofile = open(ofilename, "w")
# Open file, the vertices are in a .vrt file
ifile = open(ifilename[:-3] + "vrt", "r")
write_header_mesh(ofile, "tetrahedron", 3)
# Read & write vertices
    # first, read all lines (we need two passes over the file)
lines = ifile.readlines()
# second, find the number of vertices
num_vertices = -1
counter = 0
    # nodenr_map is needed because StarCD supports node numbering like 1,2,4 (i.e. 3 is missing)
nodenr_map = {}
for line in lines:
nodenr = int(line[0:15])
nodenr_map[nodenr] = counter
counter += 1
num_vertices = counter
# third, run over all vertices
xml_writer.write_header_vertices(ofile, num_vertices)
for line in lines:
nodenr = int(line[0:15])
vertex0 = float(line[15:31])
vertex1 = float(line[31:47])
vertex2 = float(line[47:63])
xml_writer.write_vertex(ofile, nodenr_map[nodenr], float(vertex0), float(vertex1), float(vertex2))
xml_writer.write_footer_vertices(ofile)
# Open file, the cells are in a .cel file
ifile = open(ifilename[:-3] + "cel", "r")
# Read & write cells
    # first, read all lines (we need two passes over the file)
lines = ifile.readlines()
# second, find the number of cells
num_cells = -1
counter = 0
for line in lines:
l = [int(a) for a in line.split()]
cellnr, node0, node1, node2, node3, node4, node5, node6, node7, tmp1, tmp2 = l
if node4 > 0:
if node2 == node3 and node4 == node5 and node5 == node6 and node6 == node7: # these nodes should be equal
counter += 1
else:
print("The file does contain cells that are not tetraheders. The cell number is ", cellnr, " the line read was ", line)
else:
# triangles on the surface
# print "The file does contain cells that are not tetraheders node4==0. The cell number is ", cellnr, " the line read was ", line
#sys.exit(2)
pass
num_cells = counter
# third, run over all cells
xml_writer.write_header_cells(ofile, num_cells)
counter = 0
for line in lines:
l = [int(a) for a in line.split()]
cellnr, node0, node1, node2, node3, node4, node5, node6, node7, tmp1, tmp2 = l
if (node4 > 0):
if node2 == node3 and node4 == node5 and node5 == node6 and node6 == node7: # these nodes should be equal
xml_writer.write_cell_tetrahedron(ofile, counter, nodenr_map[node0], nodenr_map[node1], nodenr_map[node2], nodenr_map[node4])
counter += 1
xml_writer.write_footer_cells(ofile)
xml_writer.write_footer_mesh(ofile)
# Close files
ifile.close()
ofile.close()
|
FEniCS/dolfin
|
site-packages/dolfin_utils/meshconvert/meshconvert.py
|
Python
|
lgpl-3.0
| 50,178 | 0.004604 |
""" Tests for Dynamo3 """
import sys
import unittest
from decimal import Decimal
from pickle import dumps, loads
from urllib.parse import urlparse
from botocore.exceptions import ClientError
from mock import ANY, MagicMock, patch
from dynamo3 import (
Binary,
Dynamizer,
DynamoDBConnection,
DynamoDBError,
DynamoKey,
GlobalIndex,
Limit,
Table,
ThroughputException,
)
from dynamo3.constants import STRING
from dynamo3.result import Capacity, ConsumedCapacity, Count, ResultSet, add_dicts
class BaseSystemTest(unittest.TestCase):
"""Base class for system tests"""
dynamo: DynamoDBConnection = None # type: ignore
def setUp(self):
super(BaseSystemTest, self).setUp()
# Clear out any pre-existing tables
for tablename in self.dynamo.list_tables():
self.dynamo.delete_table(tablename)
def tearDown(self):
super(BaseSystemTest, self).tearDown()
for tablename in self.dynamo.list_tables():
self.dynamo.delete_table(tablename)
self.dynamo.clear_hooks()
class TestMisc(BaseSystemTest):
"""Tests that don't fit anywhere else"""
def tearDown(self):
super(TestMisc, self).tearDown()
self.dynamo.default_return_capacity = False
def test_connection_host(self):
"""Connection can access host of endpoint"""
urlparse(self.dynamo.host)
def test_connection_region(self):
"""Connection can access name of connected region"""
self.assertTrue(isinstance(self.dynamo.region, str))
def test_connect_to_region(self):
"""Can connect to a dynamo region"""
conn = DynamoDBConnection.connect("us-west-1")
self.assertIsNotNone(conn.host)
def test_connect_to_region_creds(self):
"""Can connect to a dynamo region with credentials"""
conn = DynamoDBConnection.connect(
"us-west-1", access_key="abc", secret_key="12345"
)
self.assertIsNotNone(conn.host)
def test_connect_to_host_without_session(self):
"""Can connect to a dynamo host without passing in a session"""
conn = DynamoDBConnection.connect("us-west-1", host="localhost")
self.assertIsNotNone(conn.host)
@patch("dynamo3.connection.time")
def test_retry_on_throughput_error(self, time):
"""Throughput exceptions trigger a retry of the request"""
def call(*_, **__):
"""Dummy service call"""
response = {
"ResponseMetadata": {
"HTTPStatusCode": 400,
},
"Error": {
"Code": "ProvisionedThroughputExceededException",
"Message": "Does not matter",
},
}
raise ClientError(response, "list_tables")
with patch.object(self.dynamo, "client") as client:
client.list_tables.side_effect = call
with self.assertRaises(ThroughputException):
self.dynamo.call("list_tables")
self.assertEqual(len(time.sleep.mock_calls), self.dynamo.request_retries - 1)
self.assertTrue(time.sleep.called)
def test_describe_missing(self):
"""Describing a missing table returns None"""
ret = self.dynamo.describe_table("foobar")
self.assertIsNone(ret)
def test_magic_table_props(self):
"""Table can look up properties on response object"""
hash_key = DynamoKey("id")
self.dynamo.create_table("foobar", hash_key=hash_key)
ret = self.dynamo.describe_table("foobar")
assert ret is not None
self.assertEqual(ret.item_count, ret["ItemCount"])
with self.assertRaises(KeyError):
self.assertIsNotNone(ret["Missing"])
def test_magic_index_props(self):
"""Index can look up properties on response object"""
index = GlobalIndex.all("idx-name", DynamoKey("id"))
index.response = {"FooBar": 2}
self.assertEqual(index["FooBar"], 2)
with self.assertRaises(KeyError):
self.assertIsNotNone(index["Missing"])
def test_describe_during_delete(self):
"""Describing a table during a delete operation should not crash"""
response = {
"ItemCount": 0,
"ProvisionedThroughput": {
"NumberOfDecreasesToday": 0,
"ReadCapacityUnits": 5,
"WriteCapacityUnits": 5,
},
"TableName": "myTableName",
"TableSizeBytes": 0,
"TableStatus": "DELETING",
}
table = Table.from_response(response)
self.assertEqual(table.status, "DELETING")
def test_delete_missing(self):
"""Deleting a missing table returns False"""
ret = self.dynamo.delete_table("foobar")
self.assertTrue(not ret)
def test_re_raise_passthrough(self):
"""DynamoDBError can re-raise itself if missing original exception"""
err = DynamoDBError(400, Code="ErrCode", Message="Ouch", args={})
caught = False
try:
err.re_raise()
except DynamoDBError as e:
caught = True
self.assertEqual(err, e)
self.assertTrue(caught)
def test_re_raise(self):
"""DynamoDBError can re-raise itself with stacktrace of original exc"""
caught = False
try:
try:
raise Exception("Hello")
except Exception as e1:
err = DynamoDBError(
400,
Code="ErrCode",
Message="Ouch",
args={},
exc_info=sys.exc_info(),
)
err.re_raise()
except DynamoDBError as e:
caught = True
import traceback
tb = traceback.format_tb(e.__traceback__)
self.assertIn("Hello", tb[-1])
self.assertEqual(e.status_code, 400)
self.assertTrue(caught)
def test_default_return_capacity(self):
"""When default_return_capacity=True, always return capacity"""
self.dynamo.default_return_capacity = True
with patch.object(self.dynamo, "call") as call:
call().get.return_value = None
rs = self.dynamo.scan("foobar")
list(rs)
call.assert_called_with(
"scan",
TableName="foobar",
ReturnConsumedCapacity="INDEXES",
ConsistentRead=False,
)
def test_list_tables_page(self):
"""Call to ListTables should page results"""
hash_key = DynamoKey("id")
for i in range(120):
self.dynamo.create_table("table%d" % i, hash_key=hash_key)
tables = list(self.dynamo.list_tables(110))
self.assertEqual(len(tables), 110)
def test_limit_complete(self):
"""A limit with item_capacity = 0 is 'complete'"""
limit = Limit(item_limit=0)
self.assertTrue(limit.complete)
def test_wait_create_table(self):
"""Create table shall wait for the table to come online."""
tablename = "foobar_wait"
hash_key = DynamoKey("id")
self.dynamo.create_table(tablename, hash_key=hash_key, wait=True)
self.assertIsNotNone(self.dynamo.describe_table(tablename))
def test_wait_delete_table(self):
"""Delete table shall wait for the table to go offline."""
tablename = "foobar_wait"
hash_key = DynamoKey("id")
self.dynamo.create_table(tablename, hash_key=hash_key, wait=True)
result = self.dynamo.delete_table(tablename, wait=True)
self.assertTrue(result)
class TestDataTypes(BaseSystemTest):
"""Tests for Dynamo data types"""
def make_table(self):
"""Convenience method for making a table"""
hash_key = DynamoKey("id")
self.dynamo.create_table("foobar", hash_key=hash_key)
def test_string(self):
"""Store and retrieve a string"""
self.make_table()
self.dynamo.put_item("foobar", {"id": "abc"})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(item["id"], "abc")
self.assertTrue(isinstance(item["id"], str))
def test_int(self):
"""Store and retrieve an int"""
self.make_table()
self.dynamo.put_item("foobar", {"id": "a", "num": 1})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(item["num"], 1)
def test_float(self):
"""Store and retrieve a float"""
self.make_table()
self.dynamo.put_item("foobar", {"id": "a", "num": 1.1})
item = list(self.dynamo.scan("foobar"))[0]
self.assertAlmostEqual(float(item["num"]), 1.1)
def test_decimal(self):
"""Store and retrieve a Decimal"""
self.make_table()
self.dynamo.put_item("foobar", {"id": "a", "num": Decimal("1.1")})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(item["num"], Decimal("1.1"))
def test_binary(self):
"""Store and retrieve a binary"""
self.make_table()
self.dynamo.put_item("foobar", {"id": "a", "data": Binary("abc")})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(item["data"].value, b"abc")
def test_binary_bytes(self):
"""Store and retrieve bytes as a binary"""
self.make_table()
data = {"a": 1, "b": 2}
self.dynamo.put_item("foobar", {"id": "a", "data": Binary(dumps(data))})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(loads(item["data"].value), data)
def test_string_set(self):
"""Store and retrieve a string set"""
self.make_table()
item = {
"id": "a",
"datas": set(["a", "b"]),
}
self.dynamo.put_item("foobar", item)
ret = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(ret, item)
def test_number_set(self):
"""Store and retrieve a number set"""
self.make_table()
item = {
"id": "a",
"datas": set([1, 2, 3]),
}
self.dynamo.put_item("foobar", item)
ret = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(ret, item)
def test_binary_set(self):
"""Store and retrieve a binary set"""
self.make_table()
item = {
"id": "a",
"datas": set([Binary("a"), Binary("b")]),
}
self.dynamo.put_item("foobar", item)
ret = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(ret, item)
def test_binary_equal(self):
"""Binary should eq other Binaries and also raw bytestrings"""
self.assertEqual(Binary("a"), Binary("a"))
self.assertEqual(Binary("a"), b"a")
self.assertFalse(Binary("a") != Binary("a"))
def test_binary_repr(self):
"""Binary repr should wrap the contained value"""
self.assertEqual(repr(Binary("a")), "Binary(%r)" % b"a")
def test_binary_converts_unicode(self):
"""Binary will convert unicode to bytes"""
b = Binary("a")
self.assertTrue(isinstance(b.value, bytes))
def test_binary_force_string(self):
"""Binary must wrap a string type"""
with self.assertRaises(TypeError):
Binary(2) # type: ignore
def test_bool(self):
"""Store and retrieve a boolean"""
self.make_table()
self.dynamo.put_item("foobar", {"id": "abc", "b": True})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(item["b"], True)
self.assertTrue(isinstance(item["b"], bool))
def test_list(self):
"""Store and retrieve a list"""
self.make_table()
self.dynamo.put_item("foobar", {"id": "abc", "l": ["a", 1, False]})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(item["l"], ["a", 1, False])
def test_dict(self):
"""Store and retrieve a dict"""
self.make_table()
data = {
"i": 1,
"s": "abc",
"n": None,
"l": ["a", 1, True],
"b": False,
}
self.dynamo.put_item("foobar", {"id": "abc", "d": data})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(item["d"], data)
def test_nested_dict(self):
"""Store and retrieve a nested dict"""
self.make_table()
data = {
"s": "abc",
"d": {
"i": 42,
},
}
self.dynamo.put_item("foobar", {"id": "abc", "d": data})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(item["d"], data)
def test_nested_list(self):
"""Store and retrieve a nested list"""
self.make_table()
data = [
1,
[
True,
None,
"abc",
],
]
self.dynamo.put_item("foobar", {"id": "abc", "l": data})
item = list(self.dynamo.scan("foobar"))[0]
self.assertEqual(item["l"], data)
def test_unrecognized_type(self):
"""Dynamizer throws error on unrecognized type"""
value = {
"ASDF": "abc",
}
with self.assertRaises(TypeError):
self.dynamo.dynamizer.decode(value)
class TestDynamizer(unittest.TestCase):
"""Tests for the Dynamizer"""
def test_register_encoder(self):
"""Can register a custom encoder"""
from datetime import datetime
dynamizer = Dynamizer()
dynamizer.register_encoder(datetime, lambda d, v: (STRING, v.isoformat()))
now = datetime.utcnow()
self.assertEqual(dynamizer.raw_encode(now), (STRING, now.isoformat()))
def test_encoder_missing(self):
"""If no encoder is found, raise ValueError"""
from datetime import datetime
dynamizer = Dynamizer()
with self.assertRaises(ValueError):
dynamizer.encode(datetime.utcnow())
class TestResultModels(unittest.TestCase):
"""Tests for the model classes in results.py"""
def test_add_dicts_base_case(self):
"""add_dict where one argument is None returns the other"""
f = object()
self.assertEqual(add_dicts(f, None), f)
self.assertEqual(add_dicts(None, f), f)
def test_add_dicts(self):
"""Merge two dicts of values together"""
a = {
"a": 1,
"b": 2,
}
b = {
"a": 3,
"c": 4,
}
ret = add_dicts(a, b)
self.assertEqual(
ret,
{
"a": 4,
"b": 2,
"c": 4,
},
)
def test_count_repr(self):
"""Count repr"""
count = Count(0, 0)
self.assertEqual(repr(count), "Count(0)")
def test_count_addition(self):
"""Count addition"""
count = Count(4, 2)
self.assertEqual(count + 5, 9)
def test_count_subtraction(self):
"""Count subtraction"""
count = Count(4, 2)
self.assertEqual(count - 2, 2)
def test_count_multiplication(self):
"""Count multiplication"""
count = Count(4, 2)
self.assertEqual(2 * count, 8)
def test_count_division(self):
"""Count division"""
count = Count(4, 2)
self.assertEqual(count / 2, 2)
def test_count_add_none_capacity(self):
"""Count addition with one None consumed_capacity"""
cap = Capacity(3, 0)
count = Count(4, 2)
count2 = Count(5, 3, cap)
ret = count + count2
self.assertEqual(ret, 9)
self.assertEqual(ret.scanned_count, 5)
self.assertEqual(ret.consumed_capacity, cap)
def test_count_add_capacity(self):
"""Count addition with consumed_capacity"""
count = Count(4, 2, Capacity(3, 0))
count2 = Count(5, 3, Capacity(2, 0))
ret = count + count2
self.assertEqual(ret, 9)
self.assertEqual(ret.scanned_count, 5)
self.assertEqual(ret.consumed_capacity.read, 5)
def test_capacity_math(self):
"""Capacity addition and equality"""
cap = Capacity(2, 4)
s = set([cap])
self.assertIn(Capacity(2, 4), s)
self.assertNotEqual(Capacity(1, 4), cap)
self.assertEqual(Capacity(1, 1) + Capacity(2, 2), Capacity(3, 3))
def test_capacity_format(self):
"""String formatting for Capacity"""
c = Capacity(1, 3)
self.assertEqual(str(c), "R:1.0 W:3.0")
c = Capacity(0, 0)
self.assertEqual(str(c), "0")
def test_total_consumed_capacity(self):
"""ConsumedCapacity can parse results with only Total"""
response = {
"TableName": "foobar",
"ReadCapacityUnits": 4,
"WriteCapacityUnits": 5,
}
cap = ConsumedCapacity.from_response(response)
self.assertEqual(cap.total, (4, 5))
self.assertIsNone(cap.table_capacity)
def test_consumed_capacity_equality(self):
"""ConsumedCapacity addition and equality"""
cap = ConsumedCapacity(
"foobar",
Capacity(0, 10),
Capacity(0, 2),
{
"l-index": Capacity(0, 4),
},
{
"g-index": Capacity(0, 3),
},
)
c2 = ConsumedCapacity(
"foobar",
Capacity(0, 10),
Capacity(0, 2),
{
"l-index": Capacity(0, 4),
"l-index2": Capacity(0, 7),
},
)
self.assertNotEqual(cap, c2)
c3 = ConsumedCapacity(
"foobar",
Capacity(0, 10),
Capacity(0, 2),
{
"l-index": Capacity(0, 4),
},
{
"g-index": Capacity(0, 3),
},
)
self.assertIn(cap, set([c3]))
combined = cap + c2
self.assertEqual(
cap + c2,
ConsumedCapacity(
"foobar",
Capacity(0, 20),
Capacity(0, 4),
{
"l-index": Capacity(0, 8),
"l-index2": Capacity(0, 7),
},
{
"g-index": Capacity(0, 3),
},
),
)
self.assertIn(str(Capacity(0, 3)), str(combined))
def test_add_different_tables(self):
"""Cannot add ConsumedCapacity of two different tables"""
c1 = ConsumedCapacity("foobar", Capacity(1, 28))
c2 = ConsumedCapacity("boofar", Capacity(3, 0))
with self.assertRaises(TypeError):
c1 += c2
def test_always_continue_query(self):
"""Regression test.
If result has no items but does have LastEvaluatedKey, keep querying.
"""
conn = MagicMock()
conn.dynamizer.decode_keys.side_effect = lambda x: x
items = ["a", "b"]
results = [
{"Items": [], "LastEvaluatedKey": {"foo": 1, "bar": 2}},
{"Items": [], "LastEvaluatedKey": {"foo": 1, "bar": 2}},
{"Items": items},
]
conn.call.side_effect = lambda *_, **__: results.pop(0)
rs = ResultSet(conn, Limit())
results = list(rs)
self.assertEqual(results, items)
class TestHooks(BaseSystemTest):
"""Tests for connection callback hooks"""
def tearDown(self):
super(TestHooks, self).tearDown()
for hooks in self.dynamo._hooks.values():
while hooks:
hooks.pop()
def test_precall(self):
"""precall hooks are called before an API call"""
hook = MagicMock()
self.dynamo.subscribe("precall", hook)
def throw(**_):
"""Throw an exception to terminate the request"""
raise Exception()
with patch.object(self.dynamo, "client") as client:
client.describe_table.side_effect = throw
with self.assertRaises(Exception):
self.dynamo.describe_table("foobar")
hook.assert_called_with(self.dynamo, "describe_table", {"TableName": "foobar"})
def test_postcall(self):
"""postcall hooks are called after API call"""
hash_key = DynamoKey("id")
self.dynamo.create_table("foobar", hash_key=hash_key)
calls = []
def hook(*args):
"""Log the call into a list"""
calls.append(args)
self.dynamo.subscribe("postcall", hook)
self.dynamo.describe_table("foobar")
self.assertEqual(len(calls), 1)
args = calls[0]
self.assertEqual(len(args), 4)
conn, command, kwargs, response = args
self.assertEqual(conn, self.dynamo)
self.assertEqual(command, "describe_table")
self.assertEqual(kwargs["TableName"], "foobar")
self.assertEqual(response["Table"]["TableName"], "foobar")
def test_capacity(self):
"""capacity hooks are called whenever response has ConsumedCapacity"""
hash_key = DynamoKey("id")
self.dynamo.create_table("foobar", hash_key=hash_key)
hook = MagicMock()
self.dynamo.subscribe("capacity", hook)
with patch.object(self.dynamo, "client") as client:
client.scan.return_value = {
"Items": [],
"ConsumedCapacity": {
"TableName": "foobar",
"ReadCapacityUnits": 4,
},
}
rs = self.dynamo.scan("foobar")
list(rs)
cap = ConsumedCapacity("foobar", Capacity(4, 0))
hook.assert_called_with(self.dynamo, "scan", ANY, ANY, cap)
def test_subscribe(self):
"""Can subscribe and unsubscribe from hooks"""
hook = lambda: None
self.dynamo.subscribe("precall", hook)
self.assertEqual(len(self.dynamo._hooks["precall"]), 1)
self.dynamo.unsubscribe("precall", hook)
self.assertEqual(len(self.dynamo._hooks["precall"]), 0)
|
stevearc/dynamo3
|
tests/__init__.py
|
Python
|
mit
| 22,100 | 0.000271 |
from itertools import chain
import gzip
import multiprocessing
import time
import numpy as np
from enum import Enum
from uncertainties import unumpy
from io import BytesIO
from flask import abort
import pymongo
try:
import matplotlib
import matplotlib.figure
import matplotlib.pyplot
except ImportError:
matplotlib = None
from .. import utils
from bgmodelbuilder import units
from bgmodelbuilder.simulationsdb.histogram import Histogram
from bgmodelbuilder.common import try_reduce
import logging
log = logging.getLogger(__name__)
# todo: take component, spec, groupname, groupval? with class?
class ModelEvaluator(object):
""" Utiilty class to generate data tables and spectra for non-temp models """
class StatusCodes(Enum):
NoEntryInCache = "Cache query returned 0 hits"
def __init__(self, model, modeldb=None, simsdbview=None,
bypasscache=False, writecache=True, cacheimages=True):
""" Constructor
Args:
model (BgModel): model object to evaluate
modeldb (ModelDB): database with models
simsdbview (SimsDbView): defines vals and spectra
bypasscache (bool): If True, do not search for cached value
writecache (bool): If False, do not write calculation results to cache
cacheimages (bool): If False, don't cache image generation
"""
self.model = model
self.cache = None
if not modeldb:
modeldb = utils.get_modeldb()
if modeldb and not modeldb.is_model_temp(model.id):
self.cache = modeldb.getevalcache()
self.bypasscache = bypasscache
self.writecache = writecache
self.cacheimages = cacheimages
self.simsdbview = simsdbview
if simsdbview is None:
self.simsdbview = utils.get_simsdbview(model=model)
self.simsdb = self.simsdbview.simsdb
def _valtostr(self, valname, val, match):
# convert to unit if provided
unit = self.simsdbview.values_units.get(valname, None)
if unit:
try:
val = val.to(unit).m
except AttributeError: # not a Quantity...
pass
except units.errors.DimensionalityError as e:
if val != 0:
log.warning(e)
val = getattr(val, 'm', 0)
# convert to string
val = "{:.3g}".format(val)
if match.spec.islimit:
val = '<'+val
return val
def _applyspecunit(self, specname, spec):
unit = self.simsdbview.spectra_units.get(specname, None)
if unit is not None:
try:
spec.hist.ito(unit)
except AttributeError: # not a quantity
pass
return spec
def _evalmatch(self, match, dovals=True, dogroups=True, dospectra=False):
""" Evaluate SimDocEvals and grous for a match
Returns:
dict
"""
toeval = []
if dovals:
toeval.extend(self.simsdbview.values.values())
if dospectra:
toeval.extend(self.simsdbview.spectra.values())
result = self.simsdb.evaluate(toeval, match)
doc = dict()
if dovals:
doc['values'] = [self._valtostr(name, val, match) for name, val in
zip(self.simsdbview.values.keys(), result)]
result = result[len(self.simsdbview.values):]
if dospectra:
doc['spectra'] = [self._applyspecunit(name, spec) for name, spec in
zip(self.simsdbview.spectra.keys(), result)]
if dogroups:
doc['groups'] = self.simsdbview.evalgroups(match).values()
return doc
def datatable(self, doallcache=False):
""" Generate the datatable with line for each sim data match,
return the result as a gzip compressed blob
Args:
            doallcache (bool): If True, also generate and cache spectra while
                               evaluating the values. This slows down datatable
                               generation but speeds up filling the cache overall
"""
cached = self.readfromcache("datatable")
if cached is not self.StatusCodes.NoEntryInCache:
return cached
start = time.monotonic()
log.info(f"Generating datatable for model {self.model.id}")
# define some useful helper functions
def _valhead(val):
suffix = ''
if val in self.simsdbview.values_units:
suffix = f' [{self.simsdbview.values_units[val]}]'
return f'V_{val}{suffix}'
# prepare output buffer
buf = BytesIO()
datatable = gzip.open(buf, mode='wt', newline='\n')
# write the header
header = '\t'.join(chain(['ID'],
(f'G_{g}' for g in self.simsdbview.groups),
(_valhead(v)
for v in self.simsdbview.values.keys())
))
datatable.write(header)
datatable.write('\n')
for match in self.model.simdata.values():
doc = self._evalmatch(match, dovals=True, dogroups=True,
dospectra=doallcache)
dtline = '\t'.join(chain([match.id],
[str(g) for g in doc['groups']],
doc['values']))
datatable.write(dtline)
datatable.write('\n')
if doallcache:
for name, spec in zip(self.simsdbview.spectra, doc['spectra']):
self.writetocache(name, spec, match=match, fmt='hist')
datatable.flush()
result = buf.getvalue()
self.writetocache('datatable', result)
log.info("Finished evaluation of data for model %s in %s seconds",
self.model.id, time.monotonic()-start)
return result
def spectrum(self, specname, component=None, spec=None, match=None,
matches=None):
return self._spectrum_impl(specname, component, spec, match, matches,
fmt="hist")
def spectrum_image(self, specname, component=None, spec=None, match=None,
matches=None):
return self._spectrum_impl(specname, component, spec, match, matches,
fmt="png")
def fillallcache(self, genimages=False):
""" Loop over all matches, components, and spectra in the model and
create cache entries for all spectra
Args:
genimages (bool): If True, also generate PNG images
"""
if not self.cacheimages:
genimages = False
start = time.monotonic()
log.info(f"Generating full cache for model {self.model.id}")
self.datatable(doallcache=True)
specfunc = self.spectrum_image if genimages else self.spectrum
for specname in self.simsdbview.spectra:
for match in self.model.getsimdata():
specfunc(specname, match=match)
for comp in self.model.getcomponents():
specfunc(specname, component=comp)
for spec in self.model.getspecs(rootonly=True):
specfunc(specname, spec=spec)
# also gen the top-level model hists
specfunc(specname)
log.info("Finished caching data for model %s in %s seconds",
self.model.id, time.monotonic()-start)
def _spectrum_impl(self, specname, component=None, spec=None, match=None,
matches=None, fmt="hist"):
if match and matches:
raise ValueError("Only one of `match` and `matches` can be provided")
# see if `matches` is a single match
try:
if len(matches) == 1:
match = matches[0]
matches = None
except TypeError:
# matches has no len and may be a generator
pass
# if 'match' or 'matches' is defined, we ignore component and spec
if match or matches:
component = None
spec = None
cacheable = (not matches)
result = None
fmt = fmt.lower()
if cacheable:
cached = self.readfromcache(specname, component=component,
spec=spec, match=match, fmt=fmt)
if cached is not self.StatusCodes.NoEntryInCache:
return cached
if specname not in self.simsdbview.spectra:
raise KeyError(f"Unknown spectrum generator {specname}")
if fmt == 'png':
result = self._spectrum_impl(specname, component, spec, match,
matches, fmt="hist")
titlesuffix = ''
if component is not None:
titlesuffix += f', Component={component.name}'
if spec is not None:
titlesuffix += f', Source={spec.name}'
result = self._spectrum_image(specname, result, titlesuffix)
elif match is not None:
result = self._spectrum_hist(specname, match)
else:
if not matches:
matches = self.model.getsimdata(rootcomponent=component,
rootspec=spec)
result = None
reducer = self.simsdbview.spectra[specname].reduce
for amatch in matches:
result1 = self._spectrum_impl(specname, match=amatch, fmt=fmt)
result = try_reduce(reducer, result, result1)
if cacheable:
self.writetocache(specname, result, component=component, spec=spec,
match=match, fmt=fmt)
return result
def _spectrum_hist(self, specname, match):
specgen = self.simsdbview.spectra[specname]
result = self.simsdb.evaluate(specgen, match)[0]
result = self._applyspecunit(specname, result)
return result
def _spectrum_image(self, specname, spectrum, titlesuffix="",
logx=True, logy=True):
if not hasattr(spectrum, 'hist') or not hasattr(spectrum, 'bin_edges'):
# this is not a Histogram, don't know what to do with it
return None
if matplotlib is None:
abort(500, "Matplotlib is not available")
# unit should already be applied ...
#spectrum = self._applyspecunit(specname, spectrum)
#log.debug("Generating spectrum image")
# apparently this aborts sometimes?
fig = matplotlib.figure.Figure()
ax = fig.subplots()
ax.errorbar(x=unumpy.nominal_values(spectrum.bin_edges[:-1]),
y=unumpy.nominal_values(spectrum.hist),
yerr=unumpy.std_devs(spectrum.hist),
drawstyle='steps-post',
elinewidth=0.6,
)
ax.set_title(' '.join((specname, titlesuffix)))
if logx:
ax.set_xscale('log')
if logy:
ax.set_yscale('log')
if hasattr(spectrum.bin_edges, 'units'):
ax.set_xlabel(f'Bin [{spectrum.bin_edges.units}]')
if hasattr(spectrum.hist, 'units'):
ax.set_ylabel(f"Value [{spectrum.hist.units}]")
"""
# limit to at most N decades...
maxrange = 100000
ymin, ymax = plt.ylim()
ymax = 10**ceil(log10(ymax))
ymin = max(ymin, ymax/maxrange)
plt.ylim(ymin, ymax)
plt.tick_params(which='major',length=6, width=1)
plt.tick_params(which='minor',length=4,width=1)
iplt.gcf().set_size_inches(9,6)
plt.gca().set_position((0.08,0.1,0.7,0.8))
"""
out = BytesIO()
fig.savefig(out, format='png')
matplotlib.pyplot.close(fig)
#log.debug("Done generating image")
return out.getvalue()
@staticmethod
def pack_quantity(q):
val = q
err = 0
unit = None
try:
val = q.m
unit = '{:~P}'.format(q.u)
except AttributeError:
pass
try:
            # read the std dev before replacing val with its nominal value
            err = val.s
            val = val.n
except AttributeError:
pass
return (val, err, unit)
@staticmethod
def unpack_quantity(data):
val = data[0]
if data[2] or data[1]:
val = units.Quantity(data[0], data[2])
if data[1]:
val = val.plus_minus(data[1])
return val
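    # Round-trip sketch (assumes a pint-style quantity; '~P' yields the short
    # unit form): pack_quantity(units.Quantity(1.5, 'kg')) -> (1.5, 0, 'kg'),
    # and unpack_quantity((1.5, 0, 'kg')) restores the quantity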
@staticmethod
def pack_histogram(hist):
try:
vals = hist.hist
bins = hist.bin_edges
except AttributeError:
# this is not a histogram, no idea what to do with it
return 0
valunit = None
binunit = None
try:
valunit = vals.u
vals = vals.m
except AttributeError:
pass
try:
binunit = bins.u
bins = bins.m
except AttributeError:
pass
valerrs = unumpy.std_devs(vals)
vals = unumpy.nominal_values(vals)
# convert to npz
args = dict(hist=vals, bins=bins, errs=valerrs)
if not np.any(valerrs):
del args['errs']
buf = BytesIO()
np.savez_compressed(buf, **args)
doc = dict(hist=buf.getvalue())
if valunit is not None:
doc['hist_unit'] = str(valunit)
if binunit is not None:
doc['bins_unit'] = str(binunit)
return doc
@staticmethod
def unpack_histogram(doc):
if not isinstance(doc, dict):
return doc
data = np.load(BytesIO(doc['hist']))
hist = data['hist']
bins = data['bins']
if 'errs' in data:
hist = unumpy.uarray(hist, data['errs'])
if 'hist_unit' in doc:
hist = hist * units(doc['hist_unit'])
if 'bins_unit' in doc:
bins = bins * units(doc['bins_unit'])
return Histogram(hist, bins)
def writetocache(self, dataname, result, component=None, spec=None,
match=None, fmt=None):
""" write an evaluated data dictionary to the cache """
# TODO: currently the most granular level of caching is a single
# match, which means if you only want to calculate a single spectrum,
# you're out of luck. We should set it so you can do just one at a time
if (not self.writecache) or (self.cache is None):
return
if fmt == 'png' and not self.cacheimages:
return
if dataname != 'datatable' and fmt not in ('hist', 'png'):
raise ValueError("Only 'hist' and 'png' fmt supported for spectra")
if fmt == 'hist':
result = self.pack_histogram(result)
entry = dict(modelid=self.model.id, dataname=dataname, fmt=fmt,
data=result)
if dataname != 'datatable':
if component is not None:
entry['componentid'] = component.id
if spec is not None:
entry['specid'] = spec.id
if match is not None:
entry['matchid'] = match.id
query = {k: str(v) for k, v in entry.items()
if k not in ('data', 'time')}
log.debug(f"Caching {query}")
# write to db
try:
self.cache.insert_one(entry)
except pymongo.errors.DuplicateKeyError:
del entry['data']
log.warning(f"Existing entry for cache {query}")
def readfromcache(self, dataname, component=None, spec=None,
match=None, fmt=None):
""" Test if there is an entry in cache for the supplied values
Returns:
obj if in cache, None otherwise
"""
if (self.bypasscache) or (self.cache is None):
return self.StatusCodes.NoEntryInCache
query = dict(modelid=self.model.id, dataname=dataname, fmt=fmt,
componentid=None, specid=None, matchid=None)
if component is not None:
query['componentid'] = component.id
if spec is not None:
query['specid'] = spec.id
if match is not None:
query['matchid'] = match.id
doc = self.cache.find_one(query, projection={'data': True})
log.debug(f"Cached entry for {query}: {bool(doc)}")
if not doc:
return self.StatusCodes.NoEntryInCache
try:
data = doc['data']
except KeyError:
            log.warning("No 'data' key in cached doc for query %s", query)
return self.StatusCodes.NoEntryInCache
if fmt == 'hist':
data = self.unpack_histogram(data)
return data
def getcachestatus(self, includetotal=True, includetime=True):
if not self.cache:
return None
result = dict()
# First find how many are in the cache db
query = dict(modelid=self.model.id, dataname='datatable')
result['datatable'] = self.cache.count(query)
query = dict(modelid=self.model.id, fmt='hist')
result['spectra'] = self.cache.count(query)
if includetotal:
nspecs = len(self.simsdbview.spectra)
ncache = (1 # for the overall model
+ len(self.model.getsimdata())
+ len(self.model.getcomponents())
+ sum(1 for _ in self.model.getspecs(rootonly=True))
)
result['totalspectra'] = nspecs * ncache
if result['spectra'] > 0 and includetime:
            # find the time of the first cache entry
query = dict(modelid=self.model.id)
projection = dict(_id=True)
firstentry = self.cache.find_one(query, projection=projection,
sort=(('_id', 1),))
lastentry = self.cache.find_one(query, projection=projection,
sort=(('_id',-1),))
query['dataname'] = 'datatable'
dtentry = self.cache.find_one(query, projection=projection)
if firstentry:
t0 = firstentry['_id'].generation_time.timestamp()
if dtentry:
t1 = dtentry['_id'].generation_time.timestamp()
result['datatabletime'] = t1 - t0
t1 = lastentry['_id'].generation_time.timestamp()
result['spectratime'] = t1 - t0
return result
def genevalcache(model, spawnprocess=True):
""" Generate cache entries for eaach match, component, and spec in model
Args:
model (BgModel): the model to evaluate
spawnprocess (bool): if true, launch a separate process
Returns:
multiprocessing.Process: if spawnprocess is true
dict: evuated values for model otherwise
"""
evaluator = ModelEvaluator(model)
response = None
if spawnprocess:
proc = multiprocessing.Process(name='bgexplorer.genevalcache',
target=lambda ev: ev.fillallcache(),
args=(evaluator,),
daemon=True,
)
proc.start()
response = proc
else:
response = evaluator.fillallcache()
return response
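# Usage sketch: fill the evaluation cache without blocking the caller
#   proc = genevalcache(model)   # returns a daemon multiprocessing.Process
#   # ... handle the web request; the cache fills in the background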
def get_datatable(model):
""" Get only the datatable for a model """
return ModelEvaluator(model).datatable()
def get_spectrum(model, specname, image=True, component=None, spec=None,
matches=None):
""" Get only a single spectrum, ideally from cache """
evaluator = ModelEvaluator(model)
if image:
result = evaluator.spectrum_image(specname, component, spec,
matches=matches)
else:
result = evaluator.spectrum(specname, component, spec, matches=matches)
return result
def getcachestatus(model, *args, **kwargs):
return ModelEvaluator(model).getcachestatus(*args, **kwargs)
|
bloer/bgexplorer
|
bgexplorer/modelviewer/evaldata.py
|
Python
|
bsd-2-clause
| 20,163 | 0.000446 |
# -*- coding: utf-8 -*-
import socket
from paramiko import SSHClient, AutoAddPolicy, AuthenticationException
from bssh.utils import env
from bssh.auth import get_pkey
from bssh.logger import logger
def connect(
hostname=None,
port=22,
username=None,
password=None,
pkey=None,
pkey_pwd=None,
sock=None,
timeout=env.timeout,
**kwargs
):
"""Connect the remote ssh server"""
passauth = True if password else False
pkey = pkey if passauth else get_pkey(pkey, pkey_pwd)
client = SSHClient()
client.set_missing_host_key_policy(AutoAddPolicy())
try:
client.connect(hostname=hostname,
port=int(port),
username=username,
password=password,
pkey=pkey,
sock=sock,
timeout=timeout)
logger.login.debug('%s connect successfully.' % hostname)
return client
except AuthenticationException:
logger.login.error('%s Validation failed.' % hostname)
except socket.error:
logger.login.error('%s Network Error' % hostname)
except Exception as e:
logger.login.error('%s %s' % (hostname, str(e)))
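# Usage sketch (illustrative host and credentials):
#   client = connect(hostname='192.0.2.10', username='deploy', password='secret')
#   if client:
#       stdin, stdout, stderr = client.exec_command('uptime')
#       client.close()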
|
liwanggui/bssh
|
bssh/network.py
|
Python
|
lgpl-2.1
| 1,271 | 0 |
"""
Contains manager class and exceptions for operations for recording and retrieving
consumer history events.
"""
import datetime
import isodate
import pymongo
from pulp.common import dateutils
from pulp.server import config
from pulp.server.db.model.consumer import Consumer, ConsumerHistoryEvent
from pulp.server.exceptions import InvalidValue, MissingResource
from pulp.server.managers import factory as managers_factory
# Event Types
TYPE_CONSUMER_REGISTERED = 'consumer_registered'
TYPE_CONSUMER_UNREGISTERED = 'consumer_unregistered'
TYPE_REPO_BOUND = 'repo_bound'
TYPE_REPO_UNBOUND = 'repo_unbound'
TYPE_CONTENT_UNIT_INSTALLED = 'content_unit_installed'
TYPE_CONTENT_UNIT_UNINSTALLED = 'content_unit_uninstalled'
TYPE_UNIT_PROFILE_CHANGED = 'unit_profile_changed'
TYPE_ADDED_TO_GROUP = 'added_to_group'
TYPE_REMOVED_FROM_GROUP = 'removed_from_group'
TYPES = (TYPE_CONSUMER_REGISTERED, TYPE_CONSUMER_UNREGISTERED, TYPE_REPO_BOUND,
TYPE_REPO_UNBOUND, TYPE_CONTENT_UNIT_INSTALLED, TYPE_CONTENT_UNIT_UNINSTALLED,
TYPE_UNIT_PROFILE_CHANGED, TYPE_ADDED_TO_GROUP, TYPE_REMOVED_FROM_GROUP)
# Maps user entered query sort parameters to the pymongo representation
SORT_ASCENDING = 'ascending'
SORT_DESCENDING = 'descending'
SORT_DIRECTION = {
SORT_ASCENDING: pymongo.ASCENDING,
SORT_DESCENDING: pymongo.DESCENDING,
}
class ConsumerHistoryManager(object):
"""
Performs consumer related CRUD operations
"""
# -- internal ----------------------------------------
def _originator(self):
'''
Returns the value to use as the originator of the consumer event (either the
consumer itself or an admin user).
@return: login of the originator value to use in the event
@rtype: string
'''
return managers_factory.principal_manager().get_principal().login
def record_event(self, consumer_id, event_type, event_details=None):
"""
@ivar consumer_id: identifies the consumer
@type id: str
@param type: event type
@type type: str
@param details: event details
@type details: dict
@raises MissingResource: if the given consumer does not exist
@raises InvalidValue: if any of the fields is unacceptable
"""
# Check that consumer exists for all except registration event
existing_consumer = Consumer.get_collection().find_one({'id': consumer_id})
if not existing_consumer and event_type != TYPE_CONSUMER_UNREGISTERED:
raise MissingResource(consumer=consumer_id)
invalid_values = []
if event_type not in TYPES:
invalid_values.append('event_type')
if event_details is not None and not isinstance(event_details, dict):
invalid_values.append('event_details')
if invalid_values:
raise InvalidValue(invalid_values)
event = ConsumerHistoryEvent(consumer_id, self._originator(), event_type, event_details)
ConsumerHistoryEvent.get_collection().save(event)
def query(self, consumer_id=None, event_type=None, limit=None, sort='descending',
start_date=None, end_date=None):
'''
Queries the consumer history storage.
@param consumer_id: if specified, events will only be returned for the the
consumer referenced
@type consumer_id: string or number
@param event_type: if specified, only events of the given type are returned
@type event_type: string (enumeration found in TYPES)
@param limit: if specified, the query will only return up to this amount of
entries; default is to not limit the entries returned
@type limit: number greater than zero
@param sort: indicates the sort direction of the results; results are sorted
by timestamp
@type sort: string; valid values are 'ascending' and 'descending'
@param start_date: if specified, no events prior to this date will be returned
@type start_date: datetime.datetime
@param end_date: if specified, no events after this date will be returned
@type end_date: datetime.datetime
@return: list of consumer history entries that match the given parameters;
empty list (not None) if no matching entries are found
@rtype: list of ConsumerHistoryEvent instances
@raises MissingResource: if the given consumer does not exist
@raises InvalidValue: if any of the fields is unacceptable
'''
invalid_values = []
if event_type is not None and event_type not in TYPES:
invalid_values.append('event_type')
# Verify the limit makes sense
if limit is not None and limit < 1:
invalid_values.append('limit')
# Verify the sort direction is valid
if sort not in SORT_DIRECTION:
invalid_values.append('sort')
        # Verify that start_date and end_date are valid
if start_date is not None:
try:
dateutils.parse_iso8601_date(start_date)
except (ValueError, isodate.ISO8601Error):
invalid_values.append('start_date')
if end_date is not None:
try:
dateutils.parse_iso8601_date(end_date)
except (ValueError, isodate.ISO8601Error):
invalid_values.append('end_date')
if invalid_values:
raise InvalidValue(invalid_values)
# Assemble the mongo search parameters
search_params = {}
if consumer_id:
search_params['consumer_id'] = consumer_id
if event_type:
search_params['type'] = event_type
# Add in date range limits if specified
date_range = {}
if start_date:
date_range['$gte'] = start_date
if end_date:
date_range['$lte'] = end_date
if len(date_range) > 0:
search_params['timestamp'] = date_range
# Determine the correct mongo cursor to retrieve
if len(search_params) == 0:
cursor = ConsumerHistoryEvent.get_collection().find()
else:
cursor = ConsumerHistoryEvent.get_collection().find(search_params)
# Sort by most recent entry first
cursor.sort('timestamp', direction=SORT_DIRECTION[sort])
# If a limit was specified, add it to the cursor
if limit:
cursor.limit(limit)
# Finally convert to a list before returning
return list(cursor)
def event_types(self):
return TYPES
def cull_history(self, lifetime):
'''
Deletes all consumer history entries that are older than the given lifetime.
@param lifetime: length in days; history entries older than this many days old
are deleted in this call
@type lifetime: L{datetime.timedelta}
'''
now = datetime.datetime.now(dateutils.local_tz())
limit = dateutils.format_iso8601_datetime(now - lifetime)
spec = {'timestamp': {'$lt': limit}}
        ConsumerHistoryEvent.get_collection().remove(spec, safe=False)
def _get_lifetime(self):
'''
Returns the configured maximum lifetime for consumer history entries.
@return: time in days
@rtype: L{datetime.timedelta}
'''
days = config.config.getint('consumer_history', 'lifetime')
return datetime.timedelta(days=days)
# -- functions ----------------------------------------------------------------
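# Minimal usage sketch for the manager above, assuming an initialized database
# connection; the consumer id and limit are illustrative values only.
def example_recent_registrations():
    manager = ConsumerHistoryManager()
    # Return the ten most recent registration events for one consumer
    return manager.query(consumer_id='consumer-1',
                         event_type=TYPE_CONSUMER_REGISTERED,
                         limit=10, sort=SORT_DESCENDING)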
|
ulif/pulp
|
server/pulp/server/managers/consumer/history.py
|
Python
|
gpl-2.0
| 7,603 | 0.002104 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common / shared code for handling authentication against OpenStack identity
service (Keystone).
"""
import sys
import datetime
from libcloud.utils.py3 import httplib
from libcloud.utils.iso8601 import parse_date
from libcloud.common.base import ConnectionUserAndKey, Response
from libcloud.compute.types import (LibcloudError, InvalidCredsError,
MalformedResponseError)
try:
import simplejson as json
except ImportError:
import json
AUTH_API_VERSION = '1.1'
# Auth versions which contain token expiration information.
AUTH_VERSIONS_WITH_EXPIRES = [
'1.1',
'2.0',
'2.0_apikey',
'2.0_password',
'3.0',
'3.x_password'
]
# How many seconds to subtract from the auth token expiration time before
# testing if the token is still valid.
# The time is subtracted to account for the HTTP request latency and prevent
# user from getting "InvalidCredsError" if token is about to expire.
AUTH_TOKEN_EXPIRES_GRACE_SECONDS = 5
__all__ = [
'OpenStackIdentityVersion',
'OpenStackIdentityDomain',
'OpenStackIdentityProject',
'OpenStackIdentityUser',
'OpenStackIdentityRole',
'OpenStackServiceCatalog',
'OpenStackServiceCatalogEntry',
'OpenStackServiceCatalogEntryEndpoint',
'OpenStackIdentityEndpointType',
'OpenStackIdentityConnection',
'OpenStackIdentity_1_0_Connection',
'OpenStackIdentity_1_1_Connection',
'OpenStackIdentity_2_0_Connection',
'OpenStackIdentity_3_0_Connection',
'get_class_for_auth_version'
]
class OpenStackIdentityEndpointType(object):
"""
Enum class for openstack identity endpoint type.
"""
INTERNAL = 'internal'
EXTERNAL = 'external'
ADMIN = 'admin'
class OpenStackIdentityTokenScope(object):
"""
Enum class for openstack identity token scope.
"""
PROJECT = 'project'
DOMAIN = 'domain'
UNSCOPED = 'unscoped'
class OpenStackIdentityVersion(object):
def __init__(self, version, status, updated, url):
self.version = version
self.status = status
self.updated = updated
self.url = url
def __repr__(self):
return (('<OpenStackIdentityVersion version=%s, status=%s, '
'updated=%s, url=%s>' %
(self.version, self.status, self.updated, self.url)))
class OpenStackIdentityDomain(object):
def __init__(self, id, name, enabled):
self.id = id
self.name = name
self.enabled = enabled
def __repr__(self):
return (('<OpenStackIdentityDomain id=%s, name=%s, enabled=%s>' %
(self.id, self.name, self.enabled)))
class OpenStackIdentityProject(object):
def __init__(self, id, name, description, enabled, domain_id=None):
self.id = id
self.name = name
self.description = description
self.enabled = enabled
self.domain_id = domain_id
def __repr__(self):
return (('<OpenStackIdentityProject id=%s, domain_id=%s, name=%s, '
'enabled=%s>' %
(self.id, self.domain_id, self.name, self.enabled)))
class OpenStackIdentityRole(object):
def __init__(self, id, name, description, enabled):
self.id = id
self.name = name
self.description = description
self.enabled = enabled
def __repr__(self):
return (('<OpenStackIdentityRole id=%s, name=%s, description=%s, '
'enabled=%s>' % (self.id, self.name, self.description,
self.enabled)))
class OpenStackIdentityUser(object):
def __init__(self, id, domain_id, name, email, description, enabled):
self.id = id
self.domain_id = domain_id
self.name = name
self.email = email
self.description = description
self.enabled = enabled
def __repr__(self):
return (('<OpenStackIdentityUser id=%s, domain_id=%s, name=%s, '
'email=%s, enabled=%s>' % (self.id, self.domain_id, self.name,
self.email, self.enabled)))
class OpenStackServiceCatalog(object):
"""
http://docs.openstack.org/api/openstack-identity-service/2.0/content/
This class should be instantiated with the contents of the
'serviceCatalog' in the auth response. This will do the work of figuring
out which services actually exist in the catalog as well as split them up
by type, name, and region if available
"""
_auth_version = None
_service_catalog = None
def __init__(self, service_catalog, auth_version=AUTH_API_VERSION):
self._auth_version = auth_version
# Check this way because there are a couple of different 2.0_*
# auth types.
if '3.x' in self._auth_version:
entries = self._parse_service_catalog_auth_v3(
service_catalog=service_catalog)
elif '2.0' in self._auth_version:
entries = self._parse_service_catalog_auth_v2(
service_catalog=service_catalog)
elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version):
entries = self._parse_service_catalog_auth_v1(
service_catalog=service_catalog)
else:
raise LibcloudError('auth version "%s" not supported'
% (self._auth_version))
# Force consistent ordering by sorting the entries
entries = sorted(entries,
key=lambda x: x.service_type + (x.service_name or ''))
        self._entries = entries  # stores all the service catalog entries
def get_entries(self):
"""
Return all the entries for this service catalog.
:rtype: ``list`` of :class:`.OpenStackServiceCatalogEntry`
"""
return self._entries
def get_catalog(self):
"""
Deprecated in the favor of ``get_entries`` method.
"""
return self.get_entries()
def get_public_urls(self, service_type=None, name=None):
"""
Retrieve all the available public (external) URLs for the provided
service type and name.
"""
endpoints = self.get_endpoints(service_type=service_type,
name=name)
result = []
for endpoint in endpoints:
endpoint_type = endpoint.endpoint_type
if endpoint_type == OpenStackIdentityEndpointType.EXTERNAL:
result.append(endpoint.url)
return result
def get_endpoints(self, service_type=None, name=None):
"""
Retrieve all the endpoints for the provided service type and name.
:rtype: ``list`` of :class:`.OpenStackServiceCatalogEntryEndpoint`
"""
endpoints = []
for entry in self._entries:
# Note: "if XXX and YYY != XXX" comparison is used to support
# partial lookups.
# This allows user to pass in only one argument to the method (only
# service_type or name), both of them or neither.
if service_type and entry.service_type != service_type:
continue
if name and entry.service_name != name:
continue
for endpoint in entry.endpoints:
endpoints.append(endpoint)
return endpoints
def get_endpoint(self, service_type=None, name=None, region=None,
endpoint_type=OpenStackIdentityEndpointType.EXTERNAL):
"""
Retrieve a single endpoint using the provided criteria.
Note: If no or more than one matching endpoint is found, an exception
is thrown.
"""
endpoints = []
for entry in self._entries:
if service_type and entry.service_type != service_type:
continue
if name and entry.service_name != name:
continue
for endpoint in entry.endpoints:
if region and endpoint.region != region:
continue
if endpoint_type and endpoint.endpoint_type != endpoint_type:
continue
endpoints.append(endpoint)
if len(endpoints) == 1:
return endpoints[0]
elif len(endpoints) > 1:
raise ValueError('Found more than 1 matching endpoint')
else:
raise LibcloudError('Could not find specified endpoint')
def get_regions(self, service_type=None):
"""
Retrieve a list of all the available regions.
:param service_type: If specified, only return regions for this
service type.
:type service_type: ``str``
:rtype: ``list`` of ``str``
"""
regions = set()
for entry in self._entries:
if service_type and entry.service_type != service_type:
continue
for endpoint in entry.endpoints:
if endpoint.region:
regions.add(endpoint.region)
return sorted(list(regions))
def get_service_types(self, region=None):
"""
Retrieve all the available service types.
:param region: Optional region to retrieve service types for.
:type region: ``str``
:rtype: ``list`` of ``str``
"""
service_types = set()
for entry in self._entries:
include = True
for endpoint in entry.endpoints:
if region and endpoint.region != region:
include = False
break
if include:
service_types.add(entry.service_type)
return sorted(list(service_types))
def get_service_names(self, service_type=None, region=None):
"""
Retrieve list of service names that match service type and region.
:type service_type: ``str``
:type region: ``str``
:rtype: ``list`` of ``str``
"""
names = set()
if '2.0' not in self._auth_version:
raise ValueError('Unsupported version: %s' % (self._auth_version))
for entry in self._entries:
if service_type and entry.service_type != service_type:
continue
include = True
for endpoint in entry.endpoints:
if region and endpoint.region != region:
include = False
break
if include and entry.service_name:
names.add(entry.service_name)
return sorted(list(names))
def _parse_service_catalog_auth_v1(self, service_catalog):
entries = []
for service, endpoints in service_catalog.items():
entry_endpoints = []
for endpoint in endpoints:
region = endpoint.get('region', None)
public_url = endpoint.get('publicURL', None)
private_url = endpoint.get('internalURL', None)
if public_url:
entry_endpoint = OpenStackServiceCatalogEntryEndpoint(
region=region, url=public_url,
endpoint_type=OpenStackIdentityEndpointType.EXTERNAL)
entry_endpoints.append(entry_endpoint)
if private_url:
entry_endpoint = OpenStackServiceCatalogEntryEndpoint(
region=region, url=private_url,
endpoint_type=OpenStackIdentityEndpointType.INTERNAL)
entry_endpoints.append(entry_endpoint)
entry = OpenStackServiceCatalogEntry(service_type=service,
endpoints=entry_endpoints)
entries.append(entry)
return entries
def _parse_service_catalog_auth_v2(self, service_catalog):
entries = []
for service in service_catalog:
service_type = service['type']
service_name = service.get('name', None)
entry_endpoints = []
for endpoint in service.get('endpoints', []):
region = endpoint.get('region', None)
public_url = endpoint.get('publicURL', None)
private_url = endpoint.get('internalURL', None)
if public_url:
entry_endpoint = OpenStackServiceCatalogEntryEndpoint(
region=region, url=public_url,
endpoint_type=OpenStackIdentityEndpointType.EXTERNAL)
entry_endpoints.append(entry_endpoint)
if private_url:
entry_endpoint = OpenStackServiceCatalogEntryEndpoint(
region=region, url=private_url,
endpoint_type=OpenStackIdentityEndpointType.INTERNAL)
entry_endpoints.append(entry_endpoint)
entry = OpenStackServiceCatalogEntry(service_type=service_type,
endpoints=entry_endpoints,
service_name=service_name)
entries.append(entry)
return entries
def _parse_service_catalog_auth_v3(self, service_catalog):
entries = []
for item in service_catalog:
service_type = item['type']
service_name = item.get('name', None)
entry_endpoints = []
for endpoint in item['endpoints']:
region = endpoint.get('region', None)
url = endpoint['url']
endpoint_type = endpoint['interface']
if endpoint_type == 'internal':
endpoint_type = OpenStackIdentityEndpointType.INTERNAL
elif endpoint_type == 'public':
endpoint_type = OpenStackIdentityEndpointType.EXTERNAL
elif endpoint_type == 'admin':
endpoint_type = OpenStackIdentityEndpointType.ADMIN
entry_endpoint = OpenStackServiceCatalogEntryEndpoint(
region=region, url=url, endpoint_type=endpoint_type)
entry_endpoints.append(entry_endpoint)
entry = OpenStackServiceCatalogEntry(service_type=service_type,
service_name=service_name,
endpoints=entry_endpoints)
entries.append(entry)
return entries
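# Minimal sketch of how the catalog class above can be consumed, assuming a
# 2.0-style 'serviceCatalog' payload; the URLs and service names below are
# illustrative placeholders.
def _example_catalog_lookup():
    payload = [
        {'type': 'compute', 'name': 'nova',
         'endpoints': [{'region': 'RegionOne',
                        'publicURL': 'https://compute.example.com/v2',
                        'internalURL': 'https://compute.internal.example.com/v2'}]},
    ]
    catalog = OpenStackServiceCatalog(payload, auth_version='2.0')
    # Returns only the public (external) URLs for the matching service
    return catalog.get_public_urls(service_type='compute', name='nova')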
class OpenStackServiceCatalogEntry(object):
def __init__(self, service_type, endpoints=None, service_name=None):
"""
:param service_type: Service type.
:type service_type: ``str``
:param endpoints: Endpoints belonging to this entry.
:type endpoints: ``list``
:param service_name: Optional service name.
:type service_name: ``str``
"""
self.service_type = service_type
self.endpoints = endpoints or []
self.service_name = service_name
# For consistency, sort the endpoints
self.endpoints = sorted(self.endpoints, key=lambda x: x.url or '')
def __eq__(self, other):
return (self.service_type == other.service_type and
self.endpoints == other.endpoints and
other.service_name == self.service_name)
def __ne__(self, other):
return not self.__eq__(other=other)
def __repr__(self):
return (('<OpenStackServiceCatalogEntry service_type=%s, '
'service_name=%s, endpoints=%s' %
(self.service_type, self.service_name, repr(self.endpoints))))
class OpenStackServiceCatalogEntryEndpoint(object):
VALID_ENDPOINT_TYPES = [
OpenStackIdentityEndpointType.INTERNAL,
OpenStackIdentityEndpointType.EXTERNAL,
OpenStackIdentityEndpointType.ADMIN,
]
def __init__(self, region, url, endpoint_type='external'):
"""
:param region: Endpoint region.
:type region: ``str``
:param url: Endpoint URL.
:type url: ``str``
:param endpoint_type: Endpoint type (external / internal / admin).
:type endpoint_type: ``str``
"""
if endpoint_type not in self.VALID_ENDPOINT_TYPES:
raise ValueError('Invalid type: %s' % (endpoint_type))
# TODO: Normalize / lowercase all the region names
self.region = region
self.url = url
self.endpoint_type = endpoint_type
def __eq__(self, other):
return (self.region == other.region and self.url == other.url and
self.endpoint_type == other.endpoint_type)
def __ne__(self, other):
return not self.__eq__(other=other)
def __repr__(self):
return (('<OpenStackServiceCatalogEntryEndpoint region=%s, url=%s, '
'type=%s' % (self.region, self.url, self.endpoint_type)))
class OpenStackAuthResponse(Response):
def success(self):
return self.status in [httplib.OK, httplib.CREATED,
httplib.ACCEPTED, httplib.NO_CONTENT,
httplib.MULTIPLE_CHOICES,
httplib.UNAUTHORIZED,
httplib.INTERNAL_SERVER_ERROR]
def parse_body(self):
if not self.body:
return None
if 'content-type' in self.headers:
key = 'content-type'
elif 'Content-Type' in self.headers:
key = 'Content-Type'
else:
raise LibcloudError('Missing content-type header',
driver=OpenStackIdentityConnection)
content_type = self.headers[key]
if content_type.find(';') != -1:
content_type = content_type.split(';')[0]
if content_type == 'application/json':
try:
data = json.loads(self.body)
except:
driver = OpenStackIdentityConnection
raise MalformedResponseError('Failed to parse JSON',
body=self.body,
driver=driver)
elif content_type == 'text/plain':
data = self.body
else:
data = self.body
return data
class OpenStackIdentityConnection(ConnectionUserAndKey):
"""
Base identity connection class which contains common / shared logic.
Note: This class shouldn't be instantiated directly.
"""
responseCls = OpenStackAuthResponse
timeout = None
auth_version = None
def __init__(self, auth_url, user_id, key, tenant_name=None,
domain_name='Default',
token_scope=OpenStackIdentityTokenScope.PROJECT,
timeout=None, parent_conn=None):
super(OpenStackIdentityConnection, self).__init__(user_id=user_id,
key=key,
url=auth_url,
timeout=timeout)
self.parent_conn = parent_conn
# enable tests to use the same mock connection classes.
if parent_conn:
self.conn_classes = parent_conn.conn_classes
self.driver = parent_conn.driver
else:
self.driver = None
self.auth_url = auth_url
self.tenant_name = tenant_name
self.domain_name = domain_name
self.token_scope = token_scope
self.timeout = timeout
self.urls = {}
self.auth_token = None
self.auth_token_expires = None
self.auth_user_info = None
def authenticated_request(self, action, params=None, data=None,
headers=None, method='GET', raw=False):
"""
Perform an authenticated request against the identity API.
"""
if not self.auth_token:
            raise ValueError('Need to be authenticated to perform this '
                             'request')
headers = headers or {}
headers['X-Auth-Token'] = self.auth_token
return self.request(action=action, params=params, data=data,
headers=headers, method=method, raw=raw)
def morph_action_hook(self, action):
(_, _, _, request_path) = self._tuple_from_url(self.auth_url)
if request_path == '':
# No path is provided in the auth_url, use action passed to this
# method.
return action
return request_path
def add_default_headers(self, headers):
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json; charset=UTF-8'
return headers
def is_token_valid(self):
"""
Return True if the current auth token is already cached and hasn't
expired yet.
:return: ``True`` if the token is still valid, ``False`` otherwise.
:rtype: ``bool``
"""
if not self.auth_token:
return False
if not self.auth_token_expires:
return False
expires = self.auth_token_expires - \
datetime.timedelta(seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS)
time_tuple_expires = expires.utctimetuple()
time_tuple_now = datetime.datetime.utcnow().utctimetuple()
if time_tuple_now < time_tuple_expires:
return True
return False
def authenticate(self, force=False):
"""
Authenticate against the identity API.
:param force: Forcefully update the token even if it's already cached
and still valid.
:type force: ``bool``
"""
raise NotImplementedError('authenticate not implemented')
def list_supported_versions(self):
"""
Retrieve a list of all the identity versions which are supported by
this installation.
:rtype: ``list`` of :class:`.OpenStackIdentityVersion`
"""
response = self.request('/', method='GET')
result = self._to_versions(data=response.object['versions']['values'])
result = sorted(result, key=lambda x: x.version)
return result
def _to_versions(self, data):
result = []
for item in data:
version = self._to_version(data=item)
result.append(version)
return result
def _to_version(self, data):
try:
updated = parse_date(data['updated'])
except Exception:
updated = None
try:
url = data['links'][0]['href']
except IndexError:
url = None
version = OpenStackIdentityVersion(version=data['id'],
status=data['status'],
updated=updated,
url=url)
return version
def _is_authentication_needed(self, force=False):
"""
Determine if the authentication is needed or if the existing token (if
any exists) is still valid.
"""
if force:
return True
if self.auth_version not in AUTH_VERSIONS_WITH_EXPIRES:
return True
if self.is_token_valid():
return False
return True
def _to_projects(self, data):
result = []
for item in data:
project = self._to_project(data=item)
result.append(project)
return result
def _to_project(self, data):
project = OpenStackIdentityProject(id=data['id'],
name=data['name'],
description=data['description'],
enabled=data['enabled'],
domain_id=data.get('domain_id',
None))
return project
class OpenStackIdentity_1_0_Connection(OpenStackIdentityConnection):
"""
Connection class for Keystone API v1.0.
"""
responseCls = OpenStackAuthResponse
name = 'OpenStack Identity API v1.0'
auth_version = '1.0'
def authenticate(self, force=False):
if not self._is_authentication_needed(force=force):
return self
headers = {
'X-Auth-User': self.user_id,
'X-Auth-Key': self.key,
}
resp = self.request('/v1.0', headers=headers, method='GET')
if resp.status == httplib.UNAUTHORIZED:
# HTTP UNAUTHORIZED (401): auth failed
raise InvalidCredsError()
elif resp.status not in [httplib.NO_CONTENT, httplib.OK]:
body = 'code: %s body:%s headers:%s' % (resp.status,
resp.body,
resp.headers)
raise MalformedResponseError('Malformed response', body=body,
driver=self.driver)
else:
headers = resp.headers
# emulate the auth 1.1 URL list
self.urls = {}
self.urls['cloudServers'] = \
[{'publicURL': headers.get('x-server-management-url', None)}]
self.urls['cloudFilesCDN'] = \
[{'publicURL': headers.get('x-cdn-management-url', None)}]
self.urls['cloudFiles'] = \
[{'publicURL': headers.get('x-storage-url', None)}]
self.auth_token = headers.get('x-auth-token', None)
self.auth_user_info = None
if not self.auth_token:
raise MalformedResponseError('Missing X-Auth-Token in \
response headers')
return self
class OpenStackIdentity_1_1_Connection(OpenStackIdentityConnection):
"""
Connection class for Keystone API v1.1.
"""
responseCls = OpenStackAuthResponse
name = 'OpenStack Identity API v1.1'
auth_version = '1.1'
def authenticate(self, force=False):
if not self._is_authentication_needed(force=force):
return self
reqbody = json.dumps({'credentials': {'username': self.user_id,
'key': self.key}})
resp = self.request('/v1.1/auth', data=reqbody, headers={},
method='POST')
if resp.status == httplib.UNAUTHORIZED:
# HTTP UNAUTHORIZED (401): auth failed
raise InvalidCredsError()
elif resp.status != httplib.OK:
body = 'code: %s body:%s' % (resp.status, resp.body)
raise MalformedResponseError('Malformed response', body=body,
driver=self.driver)
else:
try:
body = json.loads(resp.body)
except Exception:
e = sys.exc_info()[1]
raise MalformedResponseError('Failed to parse JSON', e)
try:
expires = body['auth']['token']['expires']
self.auth_token = body['auth']['token']['id']
self.auth_token_expires = parse_date(expires)
self.urls = body['auth']['serviceCatalog']
self.auth_user_info = None
except KeyError:
e = sys.exc_info()[1]
raise MalformedResponseError('Auth JSON response is \
missing required elements', e)
return self
class OpenStackIdentity_2_0_Connection(OpenStackIdentityConnection):
"""
Connection class for Keystone API v2.0.
"""
responseCls = OpenStackAuthResponse
    name = 'OpenStack Identity API v2.0'
auth_version = '2.0'
def authenticate(self, auth_type='api_key', force=False):
if not self._is_authentication_needed(force=force):
return self
if auth_type == 'api_key':
return self._authenticate_2_0_with_api_key()
elif auth_type == 'password':
return self._authenticate_2_0_with_password()
else:
raise ValueError('Invalid value for auth_type argument')
def _authenticate_2_0_with_api_key(self):
# API Key based authentication uses the RAX-KSKEY extension.
# http://s.apache.org/oAi
data = {'auth':
{'RAX-KSKEY:apiKeyCredentials':
{'username': self.user_id, 'apiKey': self.key}}}
if self.tenant_name:
data['auth']['tenantName'] = self.tenant_name
reqbody = json.dumps(data)
return self._authenticate_2_0_with_body(reqbody)
def _authenticate_2_0_with_password(self):
# Password based authentication is the only 'core' authentication
# method in Keystone at this time.
# 'keystone' - http://s.apache.org/e8h
data = {'auth':
{'passwordCredentials':
{'username': self.user_id, 'password': self.key}}}
if self.tenant_name:
data['auth']['tenantName'] = self.tenant_name
reqbody = json.dumps(data)
return self._authenticate_2_0_with_body(reqbody)
def _authenticate_2_0_with_body(self, reqbody):
resp = self.request('/v2.0/tokens', data=reqbody,
headers={'Content-Type': 'application/json'},
method='POST')
if resp.status == httplib.UNAUTHORIZED:
raise InvalidCredsError()
elif resp.status not in [httplib.OK,
httplib.NON_AUTHORITATIVE_INFORMATION]:
body = 'code: %s body: %s' % (resp.status, resp.body)
raise MalformedResponseError('Malformed response', body=body,
driver=self.driver)
else:
body = resp.object
try:
access = body['access']
expires = access['token']['expires']
self.auth_token = access['token']['id']
self.auth_token_expires = parse_date(expires)
self.urls = access['serviceCatalog']
self.auth_user_info = access.get('user', {})
except KeyError:
e = sys.exc_info()[1]
raise MalformedResponseError('Auth JSON response is \
missing required elements', e)
return self
def list_projects(self):
response = self.authenticated_request('/v2.0/tenants', method='GET')
result = self._to_projects(data=response.object['tenants'])
return result
def list_tenants(self):
return self.list_projects()
class OpenStackIdentity_3_0_Connection(OpenStackIdentityConnection):
"""
Connection class for Keystone API v3.x.
"""
responseCls = OpenStackAuthResponse
name = 'OpenStack Identity API v3.x'
auth_version = '3.0'
VALID_TOKEN_SCOPES = [
OpenStackIdentityTokenScope.PROJECT,
OpenStackIdentityTokenScope.DOMAIN,
OpenStackIdentityTokenScope.UNSCOPED
]
def __init__(self, auth_url, user_id, key, tenant_name=None,
domain_name='Default',
token_scope=OpenStackIdentityTokenScope.PROJECT,
timeout=None, parent_conn=None):
"""
        :param tenant_name: Name of the project this user belongs to. Note:
                            When token_scope is set to project, this argument
                            controls which project the token is scoped to.
        :type tenant_name: ``str``
        :param domain_name: Domain the user belongs to. Note: When token_scope
                            is set to domain, this argument controls which
                            domain the token is scoped to.
:type domain_name: ``str``
:param token_scope: Whether to scope a token to a "project", a
"domain" or "unscoped"
:type token_scope: ``str``
"""
super(OpenStackIdentity_3_0_Connection,
self).__init__(auth_url=auth_url,
user_id=user_id,
key=key,
tenant_name=tenant_name,
domain_name=domain_name,
token_scope=token_scope,
timeout=timeout,
parent_conn=parent_conn)
if self.token_scope not in self.VALID_TOKEN_SCOPES:
raise ValueError('Invalid value for "token_scope" argument: %s' %
(self.token_scope))
if (self.token_scope == OpenStackIdentityTokenScope.PROJECT and
(not self.tenant_name or not self.domain_name)):
raise ValueError('Must provide tenant_name and domain_name '
'argument')
elif (self.token_scope == OpenStackIdentityTokenScope.DOMAIN and
not self.domain_name):
raise ValueError('Must provide domain_name argument')
self.auth_user_roles = None
def authenticate(self, force=False):
"""
Perform authentication.
"""
if not self._is_authentication_needed(force=force):
return self
data = {
'auth': {
'identity': {
'methods': ['password'],
'password': {
'user': {
'domain': {
'name': self.domain_name
},
'name': self.user_id,
'password': self.key
}
}
}
}
}
if self.token_scope == OpenStackIdentityTokenScope.PROJECT:
# Scope token to project (tenant)
data['auth']['scope'] = {
'project': {
'domain': {
'name': self.domain_name
},
'name': self.tenant_name
}
}
elif self.token_scope == OpenStackIdentityTokenScope.DOMAIN:
# Scope token to domain
data['auth']['scope'] = {
'domain': {
'name': self.domain_name
}
}
elif self.token_scope == OpenStackIdentityTokenScope.UNSCOPED:
pass
else:
raise ValueError('Token needs to be scoped either to project or '
'a domain')
data = json.dumps(data)
response = self.request('/v3/auth/tokens', data=data,
headers={'Content-Type': 'application/json'},
method='POST')
if response.status == httplib.UNAUTHORIZED:
# Invalid credentials
raise InvalidCredsError()
elif response.status in [httplib.OK, httplib.CREATED]:
headers = response.headers
try:
body = json.loads(response.body)
except Exception:
e = sys.exc_info()[1]
raise MalformedResponseError('Failed to parse JSON', e)
try:
roles = self._to_roles(body['token']['roles'])
except Exception:
e = sys.exc_info()[1]
roles = []
try:
expires = body['token']['expires_at']
self.auth_token = headers['x-subject-token']
self.auth_token_expires = parse_date(expires)
# Note: catalog is not returned for unscoped tokens
self.urls = body['token'].get('catalog', None)
self.auth_user_info = None
self.auth_user_roles = roles
except KeyError:
e = sys.exc_info()[1]
raise MalformedResponseError('Auth JSON response is \
missing required elements', e)
        else:
            body = 'code: %s body:%s' % (response.status, response.body)
            raise MalformedResponseError('Malformed response', body=body,
                                         driver=self.driver)
return self
def list_domains(self):
"""
List the available domains.
:rtype: ``list`` of :class:`OpenStackIdentityDomain`
"""
response = self.authenticated_request('/v3/domains', method='GET')
result = self._to_domains(data=response.object['domains'])
return result
def list_projects(self):
"""
List the available projects.
        Note: To perform this action, the user you are currently authenticated
        as needs to be an admin.
:rtype: ``list`` of :class:`OpenStackIdentityProject`
"""
response = self.authenticated_request('/v3/projects', method='GET')
result = self._to_projects(data=response.object['projects'])
return result
def list_users(self):
"""
List the available users.
:rtype: ``list`` of :class:`.OpenStackIdentityUser`
"""
response = self.authenticated_request('/v3/users', method='GET')
result = self._to_users(data=response.object['users'])
return result
def list_roles(self):
"""
List the available roles.
:rtype: ``list`` of :class:`.OpenStackIdentityRole`
"""
response = self.authenticated_request('/v3/roles', method='GET')
result = self._to_roles(data=response.object['roles'])
return result
def get_domain(self, domain_id):
"""
Retrieve information about a single domain.
:param domain_id: ID of domain to retrieve information for.
:type domain_id: ``str``
:rtype: :class:`.OpenStackIdentityDomain`
"""
response = self.authenticated_request('/v3/domains/%s' % (domain_id),
method='GET')
result = self._to_domain(data=response.object['domain'])
return result
def list_user_projects(self, user):
"""
Retrieve all the projects user belongs to.
:rtype: ``list`` of :class:`.OpenStackIdentityProject`
"""
path = '/v3/users/%s/projects' % (user.id)
response = self.authenticated_request(path, method='GET')
result = self._to_projects(data=response.object['projects'])
return result
def list_user_domain_roles(self, domain, user):
"""
Retrieve all the roles for a particular user on a domain.
:rtype: ``list`` of :class:`.OpenStackIdentityRole`
"""
        # TODO: Also add "get users roles" and "get assignments" which are
# available in 3.1 and 3.3
path = '/v3/domains/%s/users/%s/roles' % (domain.id, user.id)
response = self.authenticated_request(path, method='GET')
result = self._to_roles(data=response.object['roles'])
return result
def grant_domain_role_to_user(self, domain, role, user):
"""
Grant domain role to a user.
Note: This function appears to be idempotent.
:param domain: Domain to grant the role to.
:type domain: :class:`.OpenStackIdentityDomain`
:param role: Role to grant.
:type role: :class:`.OpenStackIdentityRole`
:param user: User to grant the role to.
:type user: :class:`.OpenStackIdentityUser`
:return: ``True`` on success.
:rtype: ``bool``
"""
path = ('/v3/domains/%s/users/%s/roles/%s' %
(domain.id, user.id, role.id))
response = self.authenticated_request(path, method='PUT')
return response.status == httplib.NO_CONTENT
def revoke_domain_role_from_user(self, domain, user, role):
"""
Revoke domain role from a user.
:param domain: Domain to revoke the role from.
:type domain: :class:`.OpenStackIdentityDomain`
:param role: Role to revoke.
:type role: :class:`.OpenStackIdentityRole`
:param user: User to revoke the role from.
:type user: :class:`.OpenStackIdentityUser`
:return: ``True`` on success.
:rtype: ``bool``
"""
path = ('/v3/domains/%s/users/%s/roles/%s' %
(domain.id, user.id, role.id))
response = self.authenticated_request(path, method='DELETE')
return response.status == httplib.NO_CONTENT
def grant_project_role_to_user(self, project, role, user):
"""
Grant project role to a user.
Note: This function appears to be idempotent.
:param project: Project to grant the role to.
:type project: :class:`.OpenStackIdentityDomain`
:param role: Role to grant.
:type role: :class:`.OpenStackIdentityRole`
:param user: User to grant the role to.
:type user: :class:`.OpenStackIdentityUser`
:return: ``True`` on success.
:rtype: ``bool``
"""
path = ('/v3/projects/%s/users/%s/roles/%s' %
(project.id, user.id, role.id))
response = self.authenticated_request(path, method='PUT')
return response.status == httplib.NO_CONTENT
def revoke_project_role_from_user(self, project, role, user):
"""
Revoke project role from a user.
:param project: Project to revoke the role from.
:type project: :class:`.OpenStackIdentityDomain`
:param role: Role to revoke.
:type role: :class:`.OpenStackIdentityRole`
:param user: User to revoke the role from.
:type user: :class:`.OpenStackIdentityUser`
:return: ``True`` on success.
:rtype: ``bool``
"""
path = ('/v3/projects/%s/users/%s/roles/%s' %
(project.id, user.id, role.id))
response = self.authenticated_request(path, method='DELETE')
return response.status == httplib.NO_CONTENT
def create_user(self, email, password, name, description=None,
domain_id=None, default_project_id=None, enabled=True):
"""
Create a new user account.
:param email: User's mail address.
:type email: ``str``
:param password: User's password.
:type password: ``str``
:param name: User's name.
:type name: ``str``
:param description: Optional description.
:type description: ``str``
:param domain_id: ID of the domain to add the user to (optional).
:type domain_id: ``str``
:param default_project_id: ID of the default user project (optional).
:type default_project_id: ``str``
:param enabled: True to enable user after creation.
:type enabled: ``bool``
:return: Created user.
:rtype: :class:`.OpenStackIdentityUser`
"""
data = {
'email': email,
'password': password,
'name': name,
'enabled': enabled
}
if description:
data['description'] = description
if domain_id:
data['domain_id'] = domain_id
if default_project_id:
data['default_project_id'] = default_project_id
data = json.dumps({'user': data})
response = self.authenticated_request('/v3/users', data=data,
method='POST')
user = self._to_user(data=response.object['user'])
return user
def enable_user(self, user):
"""
Enable user account.
Note: This operation appears to be idempotent.
:param user: User to enable.
:type user: :class:`.OpenStackIdentityUser`
:return: User account which has been enabled.
:rtype: :class:`.OpenStackIdentityUser`
"""
data = {
'enabled': True
}
data = json.dumps({'user': data})
response = self.authenticated_request('/v3/users/%s' % (user.id),
data=data,
method='PATCH')
user = self._to_user(data=response.object['user'])
return user
def disable_user(self, user):
"""
Disable user account.
Note: This operation appears to be idempotent.
:param user: User to disable.
:type user: :class:`.OpenStackIdentityUser`
:return: User account which has been disabled.
:rtype: :class:`.OpenStackIdentityUser`
"""
data = {
'enabled': False
}
data = json.dumps({'user': data})
response = self.authenticated_request('/v3/users/%s' % (user.id),
data=data,
method='PATCH')
user = self._to_user(data=response.object['user'])
return user
def _to_domains(self, data):
result = []
for item in data:
domain = self._to_domain(data=item)
result.append(domain)
return result
def _to_domain(self, data):
domain = OpenStackIdentityDomain(id=data['id'],
name=data['name'],
enabled=data['enabled'])
return domain
def _to_users(self, data):
result = []
for item in data:
user = self._to_user(data=item)
result.append(user)
return result
def _to_user(self, data):
user = OpenStackIdentityUser(id=data['id'],
domain_id=data['domain_id'],
name=data['name'],
email=data['email'],
description=data.get('description',
None),
enabled=data['enabled'])
return user
def _to_roles(self, data):
result = []
for item in data:
user = self._to_role(data=item)
result.append(user)
return result
def _to_role(self, data):
role = OpenStackIdentityRole(id=data['id'],
name=data['name'],
description=data.get('description',
None),
enabled=data.get('enabled', True))
return role
def get_class_for_auth_version(auth_version):
"""
Retrieve class for the provided auth version.
"""
if auth_version == '1.0':
cls = OpenStackIdentity_1_0_Connection
elif auth_version == '1.1':
cls = OpenStackIdentity_1_1_Connection
elif auth_version == '2.0' or auth_version == '2.0_apikey':
cls = OpenStackIdentity_2_0_Connection
elif auth_version == '2.0_password':
cls = OpenStackIdentity_2_0_Connection
elif auth_version == '3.x_password':
cls = OpenStackIdentity_3_0_Connection
else:
raise LibcloudError('Unsupported Auth Version requested')
return cls
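# Minimal sketch of the factory above in use, assuming a reachable Keystone
# endpoint; the URL, credentials and tenant below are placeholders.
def _example_authenticate():
    cls = get_class_for_auth_version('2.0_password')
    conn = cls(auth_url='https://keystone.example.com:5000',
               user_id='demo', key='secret', tenant_name='demo')
    # Performs the POST to /v2.0/tokens and caches the resulting token
    conn.authenticate(auth_type='password')
    return conn.auth_token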
|
wrigri/libcloud
|
libcloud/common/openstack_identity.py
|
Python
|
apache-2.0
| 48,080 | 0.000021 |
# This is a helper for the win32trace module
# If imported from a normal Python program, it sets up sys.stdout and sys.stderr
# so output goes to the collector.
# If run from the command line, it creates a collector loop.
# Eg:
# C:>start win32traceutil.py (or python.exe win32traceutil.py)
# will start a process with a (pretty much) blank screen.
#
# then, switch to a DOS prompt, and type:
# C:>python.exe
# Python 1.4 etc...
# >>> import win32traceutil
# Redirecting output to win32trace remote collector
# >>> print "Hello"
# >>>
# And the output will appear in the first collector process.
# Note - the client or the collector can be started first.
# There is a 64k buffer. If this gets full, it is reset, and new
# output appended from the start.
import win32trace
def RunAsCollector():
import sys
try:
import win32api
win32api.SetConsoleTitle("Python Trace Collector")
except:
pass # Oh well!
win32trace.InitRead()
print "Collecting Python Trace Output..."
# import win32api;win32api.DebugBreak()
while 1:
# print win32trace.blockingread()
sys.stdout.write(win32trace.blockingread())
def SetupForPrint():
win32trace.InitWrite()
try: # Under certain servers, sys.stdout may be invalid.
print "Redirecting output to win32trace remote collector"
except:
pass
win32trace.setprint() # this works in an rexec environment.
if __name__=='__main__':
RunAsCollector()
else:
SetupForPrint()
|
leighpauls/k2cro4
|
third_party/python_26/Lib/site-packages/win32/lib/win32traceutil.py
|
Python
|
bsd-3-clause
| 1,423 | 0.021082 |
from pymongo import MongoClient
from dalmongo import configuration
# get the instance of MongoDB client
client = MongoClient(configuration.MONGODB_HOST, configuration.MONGODB_PORT)
# get the main application database
db = getattr(client, configuration.MONGODB_NAME)
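# Minimal usage sketch, assuming a 'users' collection exists in the configured
# database; the collection name and filter are illustrative only.
def example_find_active_user():
    return db.users.find_one({"active": True})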
|
RobertoPrevato/flask-three-template
|
dalmongo/__init__.py
|
Python
|
mit
| 267 | 0.003745 |
#!/usr/bin/env python
"""
Lookup and Store Tweets utility.
Lookup tweets on Twitter by the GUID and then stores the profile and tweet
data in the local db.
TODO: Use the system category and campaign as set in app.conf file.
"""
import argparse
import os
import sys
# Allow imports to be done when executing this file directly.
sys.path.insert(
0,
os.path.abspath(
os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
),
)
from lib import tweets
from lib.twitter_api import authentication
def main():
"""
Command-line interface for Lookup and Store Tweets utility.
"""
parser = argparse.ArgumentParser(
description="""Lookup and Store Tweets utility. Fetches a tweet from
the Twitter API given its GUID. Stores or updates the author
Profile and Tweet in the db."""
)
parser.add_argument(
"tweetGUIDs",
metavar="TWEET_GUID",
nargs="+",
help="""List of one or more Tweet GUIDs to lookup, separated by spaces.
The Tweet 'GUID' in the local db is equivalent to the Tweet 'ID'
on the Twitter API.""",
)
parser.add_argument(
"-u",
"--update-all-fields",
action="store_true",
help="""If supplied, update all fields when updating an existing
local Tweet record. Otherwise, the default behavior is to
only update the favorite and retweet counts of the record.""",
)
args = parser.parse_args()
APIConn = authentication.getAppOnlyConnection()
tweets.lookupTweetGuids(
APIConn, args.tweetGUIDs, onlyUpdateEngagements=not (args.update_all_fields)
)
if __name__ == "__main__":
main()
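# Example invocations of the utility above (the GUIDs are illustrative):
#   python lookup_and_store_tweets.py 1098765432109876543
#   python lookup_and_store_tweets.py 1098765432109876543 1098765432109876544 -u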
|
MichaelCurrin/twitterverse
|
app/utils/insert/lookup_and_store_tweets.py
|
Python
|
mit
| 1,724 | 0.00174 |
"""Sensor platform for nodered."""
import json
import logging
import voluptuous as vol
from homeassistant.components.websocket_api import event_message
from homeassistant.const import (
CONF_ENTITY_ID,
CONF_ICON,
CONF_ID,
CONF_STATE,
CONF_TYPE,
EVENT_STATE_CHANGED,
)
from homeassistant.core import callback
from homeassistant.helpers import entity_platform, trigger
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import ToggleEntity
from . import NodeRedEntity
from .const import (
CONF_CONFIG,
CONF_DATA,
CONF_DEVICE_TRIGGER,
CONF_OUTPUT_PATH,
CONF_PAYLOAD,
CONF_REMOVE,
CONF_SKIP_CONDITION,
CONF_SUB_TYPE,
CONF_SWITCH,
CONF_TRIGGER_ENTITY_ID,
DOMAIN,
NODERED_DISCOVERY_NEW,
SERVICE_TRIGGER,
SWITCH_ICON,
)
from .utils import NodeRedJSONEncoder
_LOGGER = logging.getLogger(__name__)
SERVICE_TRIGGER_SCHEMA = vol.Schema(
{
vol.Required(CONF_ENTITY_ID): cv.entity_ids,
vol.Optional(CONF_TRIGGER_ENTITY_ID): cv.entity_id,
vol.Optional(CONF_SKIP_CONDITION): cv.boolean,
vol.Optional(CONF_OUTPUT_PATH): cv.boolean,
vol.Optional(CONF_PAYLOAD): vol.Extra,
}
)
EVENT_TRIGGER_NODE = "automation_triggered"
EVENT_DEVICE_TRIGGER = "device_trigger"
TYPE_SWITCH = "switch"
TYPE_DEVICE_TRIGGER = "device_trigger"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Switch platform."""
async def async_discover(config, connection):
await _async_setup_entity(hass, config, async_add_entities, connection)
async_dispatcher_connect(
hass,
NODERED_DISCOVERY_NEW.format(CONF_SWITCH),
async_discover,
)
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_TRIGGER, SERVICE_TRIGGER_SCHEMA, "async_trigger_node"
)
async def _async_setup_entity(hass, config, async_add_entities, connection):
"""Set up the Node-RED Switch."""
switch_type = config.get(CONF_SUB_TYPE, TYPE_SWITCH)
switch_class = (
NodeRedDeviceTrigger if switch_type == TYPE_DEVICE_TRIGGER else NodeRedSwitch
)
async_add_entities([switch_class(hass, config, connection)])
class NodeRedSwitch(ToggleEntity, NodeRedEntity):
"""Node-RED Switch class."""
def __init__(self, hass, config, connection):
"""Initialize the switch."""
super().__init__(hass, config)
self._message_id = config[CONF_ID]
self._connection = connection
self._state = config.get(CONF_STATE, True)
self._component = CONF_SWITCH
self._available = True
@property
def is_on(self) -> bool:
"""Return the state of the switch."""
return self._state
@property
def icon(self):
"""Return the icon of the sensor."""
return self._config.get(CONF_ICON, SWITCH_ICON)
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
async def async_turn_off(self, **kwargs) -> None:
"""Turn off the switch."""
self._update_node_red(False)
async def async_turn_on(self, **kwargs) -> None:
"""Turn on the switch."""
self._update_node_red(True)
async def async_trigger_node(self, **kwargs) -> None:
"""Trigger node in Node-RED."""
data = {}
data[CONF_ENTITY_ID] = kwargs.get(CONF_TRIGGER_ENTITY_ID)
data[CONF_SKIP_CONDITION] = kwargs.get(CONF_SKIP_CONDITION, False)
data[CONF_OUTPUT_PATH] = kwargs.get(CONF_OUTPUT_PATH, True)
if kwargs.get(CONF_PAYLOAD) is not None:
data[CONF_PAYLOAD] = kwargs[CONF_PAYLOAD]
self._connection.send_message(
event_message(
self._message_id,
{CONF_TYPE: EVENT_TRIGGER_NODE, CONF_DATA: data},
)
)
def _update_node_red(self, state):
self._connection.send_message(
event_message(
self._message_id, {CONF_TYPE: EVENT_STATE_CHANGED, CONF_STATE: state}
)
)
@callback
def handle_lost_connection(self):
"""Set availability to False when disconnected."""
self._available = False
self.async_write_ha_state()
@callback
def handle_discovery_update(self, msg, connection):
"""Update entity config."""
if CONF_REMOVE in msg:
# Remove entity
self.hass.async_create_task(self.async_remove())
else:
self._available = True
self._state = msg[CONF_STATE]
self._config = msg[CONF_CONFIG]
self._message_id = msg[CONF_ID]
self._connection = connection
self._connection.subscriptions[msg[CONF_ID]] = self.handle_lost_connection
self.async_write_ha_state()
async def async_added_to_hass(self) -> None:
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
self._connection.subscriptions[self._message_id] = self.handle_lost_connection
class NodeRedDeviceTrigger(NodeRedSwitch):
"""Node-RED Device Trigger class."""
def __init__(self, hass, config, connection):
"""Initialize the switch."""
super().__init__(hass, config, connection)
self._trigger_config = config[CONF_DEVICE_TRIGGER]
self._unsubscribe_device_trigger = None
@callback
def handle_lost_connection(self):
"""Set remove device trigger when disconnected."""
super().handle_lost_connection()
self.remove_device_trigger()
async def add_device_trigger(self):
"""Validate device trigger."""
@callback
def forward_trigger(event, context=None):
"""Forward events to websocket."""
message = event_message(
self._message_id,
{"type": EVENT_DEVICE_TRIGGER, "data": event["trigger"]},
)
self._connection.send_message(
json.dumps(message, cls=NodeRedJSONEncoder, allow_nan=False)
)
try:
trigger_config = await trigger.async_validate_trigger_config(
self.hass, [self._trigger_config]
)
self._unsubscribe_device_trigger = await trigger.async_initialize_triggers(
self.hass,
trigger_config,
forward_trigger,
DOMAIN,
DOMAIN,
_LOGGER.log,
)
except vol.MultipleInvalid as ex:
_LOGGER.error(
f"Error initializing device trigger '{self._node_id}': {str(ex)}",
)
def remove_device_trigger(self):
"""Remove device trigger."""
self._trigger_config = None
if self._unsubscribe_device_trigger is not None:
_LOGGER.info(f"removed device triger - {self._server_id} {self._node_id}")
self._unsubscribe_device_trigger()
self._unsubscribe_device_trigger = None
@callback
async def handle_discovery_update(self, msg, connection):
"""Update entity config."""
if CONF_REMOVE not in msg and self._trigger_config != msg[CONF_DEVICE_TRIGGER]:
self.remove_device_trigger()
self._trigger_config = msg[CONF_DEVICE_TRIGGER]
await self.add_device_trigger()
super().handle_discovery_update(msg, connection)
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
await self.add_device_trigger()
async def async_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass."""
self.remove_device_trigger()
await super().async_will_remove_from_hass()
|
shortbloke/home_assistant_config
|
custom_components/nodered/switch.py
|
Python
|
mit
| 7,962 | 0.001005 |
def extractSpearpointtranslationsHomeBlog(item):
'''
Parser for 'spearpointtranslations.home.blog'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Record of the Missing Sect Master', 'Record of the Missing Sect Master', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
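# Minimal sketch of the feed item shape the parser above expects; the title
# and tag values are illustrative only.
EXAMPLE_ITEM = {
    'title': 'Record of the Missing Sect Master - Chapter 12',
    'tags': ['Record of the Missing Sect Master'],
}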
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractSpearpointtranslationsHomeBlog.py
|
Python
|
bsd-3-clause
| 692 | 0.027457 |
"""Setup script for NodeTrie"""
from setuptools import setup, find_packages, Extension
import versioneer
try:
from Cython.Build import cythonize
except ImportError:
USING_CYTHON = False
else:
USING_CYTHON = True
ext = 'pyx' if USING_CYTHON else 'c'
extensions = [Extension("nodetrie.nodetrie",
["nodetrie/nodetrie.%s" % (ext,),
"nodetrie_c/src/node.c",],
depends=["nodetrie_c/src/node.h"],
include_dirs=["nodetrie_c/src"],
extra_compile_args=["-std=c99", "-O3"],
),
]
if USING_CYTHON:
extensions = cythonize(
extensions,
compiler_directives={'embedsignature': True,}
)
cmdclass = versioneer.get_cmdclass()
setup(
name='nodetrie',
version=versioneer.get_version(),
cmdclass=cmdclass,
url='https://github.com/NodeTrie/NodeTrie_Py',
license='LGPLv2',
author='Panos Kittenis',
author_email='22e889d8@opayq.com',
description=('Python bindings for NodeTrie, a trie data structure library'),
long_description=open('README.rst').read(),
packages=find_packages('.'),
zip_safe=False,
include_package_data=True,
platforms='any',
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)',
],
ext_modules=extensions,
)
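# Typical commands for building the extension defined above (illustrative):
#   python setup.py build_ext --inplace
#   pip install .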
|
NodeTrie/NodeTrie_Py
|
setup.py
|
Python
|
lgpl-2.1
| 2,127 | 0.004701 |
import abc
import unittest
class FinderTests(metaclass=abc.ABCMeta):
"""Basic tests for a finder to pass."""
@abc.abstractmethod
def test_module(self):
# Test importing a top-level module.
pass
@abc.abstractmethod
def test_package(self):
# Test importing a package.
pass
@abc.abstractmethod
def test_module_in_package(self):
# Test importing a module contained within a package.
# A value for 'path' should be used if for a meta_path finder.
pass
@abc.abstractmethod
def test_package_in_package(self):
# Test importing a subpackage.
        # A value for 'path' should be used if this is for a meta_path finder.
pass
@abc.abstractmethod
def test_package_over_module(self):
# Test that packages are chosen over modules.
pass
@abc.abstractmethod
def test_failure(self):
# Test trying to find a module that cannot be handled.
pass
class LoaderTests(metaclass=abc.ABCMeta):
@abc.abstractmethod
def test_module(self):
"""A module should load without issue.
After the loader returns the module should be in sys.modules.
Attributes to verify:
* __file__
* __loader__
* __name__
* No __path__
"""
pass
@abc.abstractmethod
def test_package(self):
"""Loading a package should work.
After the loader returns the module should be in sys.modules.
Attributes to verify:
* __name__
* __file__
* __package__
* __path__
* __loader__
"""
pass
@abc.abstractmethod
def test_lacking_parent(self):
"""A loader should not be dependent on it's parent package being
imported."""
pass
@abc.abstractmethod
def test_state_after_failure(self):
"""If a module is already in sys.modules and a reload fails
(e.g. a SyntaxError), the module should be in the state it was before
the reload began."""
pass
@abc.abstractmethod
def test_unloadable(self):
"""Test ImportError is raised when the loader is asked to load a module
it can't."""
pass
|
Orav/kbengine
|
kbe/src/lib/python/Lib/test/test_importlib/abc.py
|
Python
|
lgpl-3.0
| 2,382 | 0 |
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 31 16:04:18 2017
@author: adelpret
"""
import pinocchio as se3
import numpy as np
from pinocchio import RobotWrapper
from conversion_utils import config_sot_to_urdf, joints_sot_to_urdf, velocity_sot_to_urdf
from dynamic_graph.sot.torque_control.inverse_dynamics_balance_controller import InverseDynamicsBalanceController
from dynamic_graph.sot.torque_control.create_entities_utils import create_ctrl_manager
import dynamic_graph.sot.torque_control.hrp2.balance_ctrl_sim_conf as balance_ctrl_conf
import dynamic_graph.sot.torque_control.hrp2.control_manager_sim_conf as control_manager_conf
from dynamic_graph.sot.torque_control.tests.robot_data_test import initRobotData
np.set_printoptions(precision=3, suppress=True, linewidth=100);
def create_balance_controller(dt, q, conf, robot_name='robot'):
ctrl = InverseDynamicsBalanceController("invDynBalCtrl");
ctrl.q.value = tuple(q);
ctrl.v.value = (NJ+6)*(0.0,);
ctrl.wrench_right_foot.value = 6*(0.0,);
ctrl.wrench_left_foot.value = 6*(0.0,);
ctrl.posture_ref_pos.value = tuple(q[6:]);
ctrl.posture_ref_vel.value = NJ*(0.0,);
ctrl.posture_ref_acc.value = NJ*(0.0,);
ctrl.com_ref_pos.value = (0., 0., 0.8);
ctrl.com_ref_vel.value = 3*(0.0,);
ctrl.com_ref_acc.value = 3*(0.0,);
# ctrl.rotor_inertias.value = np.array(conf.ROTOR_INERTIAS);
# ctrl.gear_ratios.value = conf.GEAR_RATIOS;
ctrl.rotor_inertias.value = tuple([g*g*r for (g,r) in zip(conf.GEAR_RATIOS, conf.ROTOR_INERTIAS)])
ctrl.gear_ratios.value = NJ*(1.0,);
ctrl.contact_normal.value = conf.FOOT_CONTACT_NORMAL;
ctrl.contact_points.value = conf.RIGHT_FOOT_CONTACT_POINTS;
ctrl.f_min.value = conf.fMin;
ctrl.f_max_right_foot.value = conf.fMax;
ctrl.f_max_left_foot.value = conf.fMax;
ctrl.mu.value = conf.mu[0];
ctrl.weight_contact_forces.value = (1e2, 1e2, 1e0, 1e3, 1e3, 1e3);
ctrl.kp_com.value = 3*(conf.kp_com,);
ctrl.kd_com.value = 3*(conf.kd_com,);
ctrl.kp_constraints.value = 6*(conf.kp_constr,);
ctrl.kd_constraints.value = 6*(conf.kd_constr,);
ctrl.kp_feet.value = 6*(conf.kp_feet,);
ctrl.kd_feet.value = 6*(conf.kd_feet,);
ctrl.kp_posture.value = conf.kp_posture;
ctrl.kd_posture.value = conf.kd_posture;
ctrl.kp_pos.value = conf.kp_pos;
ctrl.kd_pos.value = conf.kd_pos;
ctrl.w_com.value = conf.w_com;
ctrl.w_feet.value = conf.w_feet;
ctrl.w_forces.value = conf.w_forces;
ctrl.w_posture.value = conf.w_posture;
ctrl.w_base_orientation.value = conf.w_base_orientation;
ctrl.w_torques.value = conf.w_torques;
ctrl.active_joints.value = NJ*(1,);
ctrl.init(dt, robot_name);
return ctrl;
print "*** UNIT TEST FOR INVERSE-DYNAMICS-BALANCE-CONTROLLER (IDBC) ***"
print "This test computes the torques using the IDBC and compares them with"
print "the torques computed using the desired joint accelerations and contact"
print "wrenches computed by the IDBC. The two values should be identical."
print "Some small differences are expected due to the precision loss when"
print "Passing the parameters from python to c++."
print "However, none of the following values should be larger than 1e-3.\n"
N_TESTS = 100
dt = 0.001;
NJ = initRobotData.nbJoints
# robot configuration
q_sot = np.array([-0.0027421149619457344, -0.0013842807952574399, 0.6421082804660067,
-0.0005693871512031474, -0.0013094048521806974, 0.0028568508070167,
-0.0006369040657361668, 0.002710094953239396, -0.48241992906618536, 0.9224570746372157, -0.43872624301275104, -0.0021586727954009096,
-0.0023395862060549863, 0.0031045906573987617, -0.48278188636903313, 0.9218508861779927, -0.4380058166724791, -0.0025558837738616047,
-0.012985322450541008, 0.04430420221275542, 0.37027327677517635, 1.4795064165303056,
0.20855551221055582, -0.13188842278441873, 0.005487207370709895, -0.2586657542648506, 2.6374918629921953, -0.004223605878088189, 0.17118034021053144, 0.24171737354070008, 0.11594430024547904, -0.05264225067057105, -0.4691871937149223, 0.0031522040623960016, 0.011836097472447007, 0.18425595002313025]);
ctrl_manager = create_ctrl_manager(control_manager_conf, dt);
ctrl = create_balance_controller(dt, q_sot, balance_ctrl_conf);
robot = RobotWrapper(initRobotData.testRobotPath, [], se3.JointModelFreeFlyer())
index_rf = robot.index('RLEG_JOINT5');
index_lf = robot.index('LLEG_JOINT5');
Md = np.matrix(np.zeros((NJ+6,NJ+6)));
gr = joints_sot_to_urdf(balance_ctrl_conf.GEAR_RATIOS);
ri = joints_sot_to_urdf(balance_ctrl_conf.ROTOR_INERTIAS);
for i in range(NJ):
Md[6+i,6+i] = ri[i] * gr[i] * gr[i];
for i in range(N_TESTS):
    q_sot += 0.001*np.random.random(NJ+6);
    v_sot = np.random.random(NJ+6);
    q_pin = np.matrix(config_sot_to_urdf(q_sot));
    v_pin = np.matrix(velocity_sot_to_urdf(v_sot));
    ctrl.q.value = tuple(q_sot);
    ctrl.v.value = tuple(v_sot);
    ctrl.tau_des.recompute(i);
    tau_ctrl = joints_sot_to_urdf(np.array(ctrl.tau_des.value));
    ctrl.dv_des.recompute(i);
    dv = velocity_sot_to_urdf(np.array(ctrl.dv_des.value));
    M = Md + robot.mass(q_pin);
    h = robot.bias(q_pin, v_pin);
    ctrl.f_des_right_foot.recompute(i);
    ctrl.f_des_left_foot.recompute(i);
    f_rf = np.matrix(ctrl.f_des_right_foot.value).T;
    f_lf = np.matrix(ctrl.f_des_left_foot.value).T;
    J_rf = robot.jacobian(q_pin, index_rf);
    J_lf = robot.jacobian(q_pin, index_lf);
    tau_pin = M*np.matrix(dv).T + h - J_rf.T * f_rf - J_lf.T * f_lf;
    # ctrl.M.recompute(i);
    # M_ctrl = np.array(ctrl.M.value);
    print "norm(tau_ctrl-tau_pin) = %.4f"% np.linalg.norm(tau_ctrl - tau_pin[6:,0].T);
    print "norm(tau_pin[:6]) = %.4f"% np.linalg.norm(tau_pin[:6]);
    # print "q_pin:\n", q_pin;
    # print "tau_pin:\n", tau_pin[6:,0].T, "\n";
    # print "tau ctrl:\n", tau_ctrl.T, "\n";
    # print "dv = ", np.linalg.norm(dv);
    # print "f_rf:", f_rf.T, "\n";
    # print "f_lf:", f_lf.T, "\n";
    # print "h:", h.T, "\n";
    # M_err = M-M_ctrl
    # print "M-M_ctrl = ", M_err.diagonal(), "\n"
    # for j in range(NJ+6):
    # print M_err[j,:];
|
proyan/sot-torque-control
|
unitTesting/unit_test_inverse_dynamics_balance_controller.py
|
Python
|
gpl-3.0
| 6,246 | 0.018092 |
from sqlalchemy.orm import create_session, relationship, mapper, \
contains_eager, joinedload, subqueryload, subqueryload_all,\
Session, aliased, with_polymorphic
from sqlalchemy import Integer, String, ForeignKey
from sqlalchemy.engine import default
from sqlalchemy.testing import AssertsCompiledSQL, fixtures
from sqlalchemy import testing
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.testing import assert_raises, eq_, is_
class Company(fixtures.ComparableEntity):
pass
class Person(fixtures.ComparableEntity):
pass
class Engineer(Person):
pass
class Manager(Person):
pass
class Boss(Manager):
pass
class Machine(fixtures.ComparableEntity):
pass
class Paperwork(fixtures.ComparableEntity):
pass
class SelfReferentialTestJoinedToBase(fixtures.MappedTest):
run_setup_mappers = 'once'
@classmethod
def define_tables(cls, metadata):
Table('people', metadata,
Column('person_id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
Column('type', String(30)))
Table('engineers', metadata,
Column('person_id', Integer,
ForeignKey('people.person_id'),
primary_key=True),
Column('primary_language', String(50)),
Column('reports_to_id', Integer,
ForeignKey('people.person_id')))
@classmethod
def setup_mappers(cls):
engineers, people = cls.tables.engineers, cls.tables.people
mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person')
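        # Note (an inference from the schema above, not an original comment):
        # engineers has two foreign keys into people (person_id and
        # reports_to_id), so both the joined-inheritance join
        # (inherit_condition) and the relationship join (primaryjoin) must be
        # spelled out explicitly to disambiguate which FK each one uses.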
mapper(Engineer, engineers,
inherits=Person,
inherit_condition=engineers.c.person_id == people.c.person_id,
polymorphic_identity='engineer',
properties={
'reports_to':relationship(
Person,
primaryjoin=
people.c.person_id == engineers.c.reports_to_id)})
def test_has(self):
p1 = Person(name='dogbert')
e1 = Engineer(name='dilbert', primary_language='java', reports_to=p1)
sess = create_session()
sess.add(p1)
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(sess.query(Engineer)
.filter(Engineer.reports_to.has(Person.name == 'dogbert'))
.first(),
Engineer(name='dilbert'))
def test_oftype_aliases_in_exists(self):
e1 = Engineer(name='dilbert', primary_language='java')
e2 = Engineer(name='wally', primary_language='c++', reports_to=e1)
sess = create_session()
sess.add_all([e1, e2])
sess.flush()
eq_(sess.query(Engineer)
.filter(Engineer.reports_to
.of_type(Engineer)
.has(Engineer.name == 'dilbert'))
.first(),
e2)
def test_join(self):
p1 = Person(name='dogbert')
e1 = Engineer(name='dilbert', primary_language='java', reports_to=p1)
sess = create_session()
sess.add(p1)
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(sess.query(Engineer)
.join('reports_to', aliased=True)
.filter(Person.name == 'dogbert').first(),
Engineer(name='dilbert'))
class SelfReferentialJ2JTest(fixtures.MappedTest):
run_setup_mappers = 'once'
@classmethod
def define_tables(cls, metadata):
people = Table('people', metadata,
Column('person_id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
Column('type', String(30)))
engineers = Table('engineers', metadata,
Column('person_id', Integer,
ForeignKey('people.person_id'),
primary_key=True),
Column('primary_language', String(50)),
Column('reports_to_id', Integer,
ForeignKey('managers.person_id'))
)
managers = Table('managers', metadata,
Column('person_id', Integer, ForeignKey('people.person_id'),
primary_key=True),
)
@classmethod
def setup_mappers(cls):
engineers = cls.tables.engineers
managers = cls.tables.managers
people = cls.tables.people
mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person')
mapper(Manager, managers,
inherits=Person,
polymorphic_identity='manager')
mapper(Engineer, engineers,
inherits=Person,
polymorphic_identity='engineer',
properties={
'reports_to':relationship(
Manager,
primaryjoin=
managers.c.person_id == engineers.c.reports_to_id,
backref='engineers')})
def test_has(self):
m1 = Manager(name='dogbert')
e1 = Engineer(name='dilbert', primary_language='java', reports_to=m1)
sess = create_session()
sess.add(m1)
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(sess.query(Engineer)
.filter(Engineer.reports_to.has(Manager.name == 'dogbert'))
.first(),
Engineer(name='dilbert'))
def test_join(self):
m1 = Manager(name='dogbert')
e1 = Engineer(name='dilbert', primary_language='java', reports_to=m1)
sess = create_session()
sess.add(m1)
sess.add(e1)
sess.flush()
sess.expunge_all()
eq_(sess.query(Engineer)
.join('reports_to', aliased=True)
.filter(Manager.name == 'dogbert').first(),
Engineer(name='dilbert'))
def test_filter_aliasing(self):
m1 = Manager(name='dogbert')
m2 = Manager(name='foo')
e1 = Engineer(name='wally', primary_language='java', reports_to=m1)
e2 = Engineer(name='dilbert', primary_language='c++', reports_to=m2)
e3 = Engineer(name='etc', primary_language='c++')
sess = create_session()
sess.add_all([m1, m2, e1, e2, e3])
sess.flush()
sess.expunge_all()
# filter aliasing applied to Engineer doesn't whack Manager
eq_(sess.query(Manager)
.join(Manager.engineers)
.filter(Manager.name == 'dogbert').all(),
[m1])
eq_(sess.query(Manager)
.join(Manager.engineers)
.filter(Engineer.name == 'dilbert').all(),
[m2])
eq_(sess.query(Manager, Engineer)
.join(Manager.engineers)
.order_by(Manager.name.desc()).all(),
[(m2, e2), (m1, e1)])
def test_relationship_compare(self):
m1 = Manager(name='dogbert')
m2 = Manager(name='foo')
e1 = Engineer(name='dilbert', primary_language='java', reports_to=m1)
e2 = Engineer(name='wally', primary_language='c++', reports_to=m2)
e3 = Engineer(name='etc', primary_language='c++')
sess = create_session()
sess.add(m1)
sess.add(m2)
sess.add(e1)
sess.add(e2)
sess.add(e3)
sess.flush()
sess.expunge_all()
eq_(sess.query(Manager)
.join(Manager.engineers)
.filter(Engineer.reports_to == None).all(),
[])
eq_(sess.query(Manager)
.join(Manager.engineers)
.filter(Engineer.reports_to == m1).all(),
[m1])
class SelfReferentialJ2JSelfTest(fixtures.MappedTest):
run_setup_mappers = 'once'
@classmethod
def define_tables(cls, metadata):
people = Table('people', metadata,
Column('person_id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
Column('type', String(30)))
engineers = Table('engineers', metadata,
Column('person_id', Integer,
ForeignKey('people.person_id'),
primary_key=True),
Column('reports_to_id', Integer,
ForeignKey('engineers.person_id')))
@classmethod
def setup_mappers(cls):
engineers = cls.tables.engineers
people = cls.tables.people
mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person')
mapper(Engineer, engineers,
inherits=Person,
polymorphic_identity='engineer',
properties={
'reports_to':relationship(
Engineer,
primaryjoin=
engineers.c.person_id == engineers.c.reports_to_id,
backref='engineers',
remote_side=engineers.c.person_id)})
def _two_obj_fixture(self):
e1 = Engineer(name='wally')
e2 = Engineer(name='dilbert', reports_to=e1)
sess = Session()
sess.add_all([e1, e2])
sess.commit()
return sess
def _five_obj_fixture(self):
sess = Session()
e1, e2, e3, e4, e5 = [
Engineer(name='e%d' % (i + 1)) for i in range(5)
]
e3.reports_to = e1
e4.reports_to = e2
sess.add_all([e1, e2, e3, e4, e5])
sess.commit()
return sess
def test_has(self):
sess = self._two_obj_fixture()
eq_(sess.query(Engineer)
.filter(Engineer.reports_to.has(Engineer.name == 'wally'))
.first(),
Engineer(name='dilbert'))
def test_join_explicit_alias(self):
sess = self._five_obj_fixture()
ea = aliased(Engineer)
eq_(sess.query(Engineer)
.join(ea, Engineer.engineers)
.filter(Engineer.name == 'e1').all(),
[Engineer(name='e1')])
def test_join_aliased_flag_one(self):
sess = self._two_obj_fixture()
eq_(sess.query(Engineer)
.join('reports_to', aliased=True)
.filter(Engineer.name == 'wally').first(),
Engineer(name='dilbert'))
def test_join_aliased_flag_two(self):
sess = self._five_obj_fixture()
eq_(sess.query(Engineer)
.join(Engineer.engineers, aliased=True)
.filter(Engineer.name == 'e4').all(),
[Engineer(name='e2')])
def test_relationship_compare(self):
sess = self._five_obj_fixture()
e1 = sess.query(Engineer).filter_by(name='e1').one()
eq_(sess.query(Engineer)
.join(Engineer.engineers, aliased=True)
.filter(Engineer.reports_to == None).all(),
[])
eq_(sess.query(Engineer)
.join(Engineer.engineers, aliased=True)
.filter(Engineer.reports_to == e1).all(),
[e1])
class M2MFilterTest(fixtures.MappedTest):
run_setup_mappers = 'once'
run_inserts = 'once'
run_deletes = None
@classmethod
def define_tables(cls, metadata):
organizations = Table('organizations', metadata,
Column('id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)))
engineers_to_org = Table('engineers_to_org', metadata,
Column('org_id', Integer,
ForeignKey('organizations.id')),
Column('engineer_id', Integer,
ForeignKey('engineers.person_id')))
people = Table('people', metadata,
Column('person_id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('name', String(50)),
Column('type', String(30)))
engineers = Table('engineers', metadata,
Column('person_id', Integer,
ForeignKey('people.person_id'),
primary_key=True),
Column('primary_language', String(50)))
@classmethod
def setup_mappers(cls):
organizations = cls.tables.organizations
people = cls.tables.people
engineers = cls.tables.engineers
engineers_to_org = cls.tables.engineers_to_org
class Organization(cls.Comparable):
pass
mapper(Organization, organizations,
properties={
'engineers':relationship(
Engineer,
secondary=engineers_to_org,
backref='organizations')})
mapper(Person, people,
polymorphic_on=people.c.type,
polymorphic_identity='person')
mapper(Engineer, engineers,
inherits=Person,
polymorphic_identity='engineer')
@classmethod
def insert_data(cls):
Organization = cls.classes.Organization
e1 = Engineer(name='e1')
e2 = Engineer(name='e2')
e3 = Engineer(name='e3')
e4 = Engineer(name='e4')
org1 = Organization(name='org1', engineers=[e1, e2])
org2 = Organization(name='org2', engineers=[e3, e4])
sess = create_session()
sess.add(org1)
sess.add(org2)
sess.flush()
def test_not_contains(self):
Organization = self.classes.Organization
sess = create_session()
e1 = sess.query(Person).filter(Engineer.name == 'e1').one()
eq_(sess.query(Organization)
.filter(~Organization.engineers
.of_type(Engineer)
.contains(e1))
.all(),
[Organization(name='org2')])
        # this form (without of_type()) previously had a bug
eq_(sess.query(Organization)
.filter(~Organization.engineers
.contains(e1))
.all(),
[Organization(name='org2')])
def test_any(self):
sess = create_session()
Organization = self.classes.Organization
eq_(sess.query(Organization)
.filter(Organization.engineers
.of_type(Engineer)
.any(Engineer.name == 'e1'))
.all(),
[Organization(name='org1')])
eq_(sess.query(Organization)
.filter(Organization.engineers
.any(Engineer.name == 'e1'))
.all(),
[Organization(name='org1')])
class SelfReferentialM2MTest(fixtures.MappedTest, AssertsCompiledSQL):
__dialect__ = "default"
@classmethod
def define_tables(cls, metadata):
Table('secondary', metadata,
Column('left_id', Integer,
ForeignKey('parent.id'),
nullable=False),
Column('right_id', Integer,
ForeignKey('parent.id'),
nullable=False))
Table('parent', metadata,
Column('id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('cls', String(50)))
Table('child1', metadata,
Column('id', Integer,
ForeignKey('parent.id'),
primary_key=True))
Table('child2', metadata,
Column('id', Integer,
ForeignKey('parent.id'),
primary_key=True))
@classmethod
def setup_classes(cls):
class Parent(cls.Basic):
pass
class Child1(Parent):
pass
class Child2(Parent):
pass
@classmethod
def setup_mappers(cls):
child1 = cls.tables.child1
child2 = cls.tables.child2
Parent = cls.classes.Parent
parent = cls.tables.parent
Child1 = cls.classes.Child1
Child2 = cls.classes.Child2
secondary = cls.tables.secondary
mapper(Parent, parent,
polymorphic_on=parent.c.cls)
mapper(Child1, child1,
inherits=Parent,
polymorphic_identity='child1',
properties={
'left_child2':relationship(
Child2,
secondary=secondary,
primaryjoin=parent.c.id == secondary.c.right_id,
secondaryjoin=parent.c.id == secondary.c.left_id,
uselist=False,
backref="right_children")})
mapper(Child2, child2,
inherits=Parent,
polymorphic_identity='child2')
def test_query_crit(self):
Child1, Child2 = self.classes.Child1, self.classes.Child2
sess = create_session()
c11, c12, c13 = Child1(), Child1(), Child1()
c21, c22, c23 = Child2(), Child2(), Child2()
c11.left_child2 = c22
c12.left_child2 = c22
c13.left_child2 = c23
sess.add_all([c11, c12, c13, c21, c22, c23])
sess.flush()
# test that the join to Child2 doesn't alias Child1 in the select
eq_(set(sess.query(Child1).join(Child1.left_child2)),
set([c11, c12, c13]))
eq_(set(sess.query(Child1, Child2).join(Child1.left_child2)),
set([(c11, c22), (c12, c22), (c13, c23)]))
# test __eq__() on property is annotating correctly
eq_(set(sess.query(Child2)
.join(Child2.right_children)
.filter(Child1.left_child2 == c22)),
set([c22]))
        # test the same query again, this time asserting the compiled SQL
self.assert_compile(
sess.query(Child2)
.join(Child2.right_children)
.filter(Child1.left_child2 == c22)
.with_labels().statement,
"SELECT child2.id AS child2_id, parent.id AS parent_id, "
"parent.cls AS parent_cls FROM secondary AS secondary_1, "
"parent JOIN child2 ON parent.id = child2.id JOIN secondary AS "
"secondary_2 ON parent.id = secondary_2.left_id JOIN "
"(parent AS parent_1 JOIN child1 AS child1_1 ON parent_1.id = child1_1.id) "
"ON parent_1.id = secondary_2.right_id WHERE "
"parent_1.id = secondary_1.right_id AND :param_1 = "
"secondary_1.left_id"
)
def test_eager_join(self):
Child1, Child2 = self.classes.Child1, self.classes.Child2
sess = create_session()
c1 = Child1()
c1.left_child2 = Child2()
sess.add(c1)
sess.flush()
# test that the splicing of the join works here, doesn't break in
# the middle of "parent join child1"
q = sess.query(Child1).options(joinedload('left_child2'))
self.assert_compile(q.limit(1).with_labels().statement,
"SELECT anon_1.child1_id AS anon_1_child1_id, anon_1.parent_id "
"AS anon_1_parent_id, anon_1.parent_cls AS anon_1_parent_cls, "
"child2_1.id AS child2_1_id, parent_1.id AS "
"parent_1_id, parent_1.cls AS parent_1_cls FROM "
"(SELECT child1.id AS child1_id, parent.id AS parent_id, "
"parent.cls AS parent_cls "
"FROM parent JOIN child1 ON parent.id = child1.id "
"LIMIT :param_1) AS anon_1 LEFT OUTER JOIN "
"(secondary AS secondary_1 JOIN "
"(parent AS parent_1 JOIN child2 AS child2_1 "
"ON parent_1.id = child2_1.id) ON parent_1.id = secondary_1.left_id) "
"ON anon_1.parent_id = secondary_1.right_id",
{'param_1':1})
        # another way to check: even with the joined eager load, the LIMIT 1 query yields exactly one row
assert q.limit(1).with_labels().subquery().count().scalar() == 1
assert q.first() is c1
def test_subquery_load(self):
Child1, Child2 = self.classes.Child1, self.classes.Child2
sess = create_session()
c1 = Child1()
c1.left_child2 = Child2()
sess.add(c1)
sess.flush()
sess.expunge_all()
query_ = sess.query(Child1).options(subqueryload('left_child2'))
for row in query_.all():
assert row.left_child2
class EagerToSubclassTest(fixtures.MappedTest):
"""Test eager loads to subclass mappers"""
run_setup_classes = 'once'
run_setup_mappers = 'once'
run_inserts = 'once'
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table('parent', metadata,
Column('id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('data', String(10)))
Table('base', metadata,
Column('id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('type', String(10)),
Column('related_id', Integer,
ForeignKey('related.id')))
Table('sub', metadata,
Column('id', Integer,
ForeignKey('base.id'),
primary_key=True),
Column('data', String(10)),
Column('parent_id', Integer,
ForeignKey('parent.id'),
nullable=False))
Table('related', metadata,
Column('id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('data', String(10)))
@classmethod
def setup_classes(cls):
class Parent(cls.Comparable):
pass
class Base(cls.Comparable):
pass
class Sub(Base):
pass
class Related(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
sub = cls.tables.sub
Sub = cls.classes.Sub
base = cls.tables.base
Base = cls.classes.Base
parent = cls.tables.parent
Parent = cls.classes.Parent
related = cls.tables.related
Related = cls.classes.Related
mapper(Parent, parent,
properties={'children':relationship(Sub, order_by=sub.c.data)})
mapper(Base, base,
polymorphic_on=base.c.type,
polymorphic_identity='b',
properties={'related':relationship(Related)})
mapper(Sub, sub,
inherits=Base,
polymorphic_identity='s')
mapper(Related, related)
@classmethod
def insert_data(cls):
global p1, p2
Parent = cls.classes.Parent
Sub = cls.classes.Sub
Related = cls.classes.Related
sess = Session()
r1, r2 = Related(data='r1'), Related(data='r2')
s1 = Sub(data='s1', related=r1)
s2 = Sub(data='s2', related=r2)
s3 = Sub(data='s3')
s4 = Sub(data='s4', related=r2)
s5 = Sub(data='s5')
p1 = Parent(data='p1', children=[s1, s2, s3])
p2 = Parent(data='p2', children=[s4, s5])
sess.add(p1)
sess.add(p2)
sess.commit()
def test_joinedload(self):
Parent = self.classes.Parent
sess = Session()
def go():
eq_(sess.query(Parent)
.options(joinedload(Parent.children)).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager(self):
Parent = self.classes.Parent
Sub = self.classes.Sub
sess = Session()
def go():
eq_(sess.query(Parent)
.join(Parent.children)
.options(contains_eager(Parent.children))
.order_by(Parent.data, Sub.data).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 1)
def test_subq_through_related(self):
Parent = self.classes.Parent
Base = self.classes.Base
sess = Session()
def go():
eq_(sess.query(Parent)
.options(subqueryload_all(Parent.children, Base.related))
.order_by(Parent.data).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 3)
def test_subq_through_related_aliased(self):
Parent = self.classes.Parent
Base = self.classes.Base
pa = aliased(Parent)
sess = Session()
def go():
eq_(sess.query(pa)
.options(subqueryload_all(pa.children, Base.related))
.order_by(pa.data).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 3)
class SubClassEagerToSubClassTest(fixtures.MappedTest):
"""Test joinedloads from subclass to subclass mappers"""
run_setup_classes = 'once'
run_setup_mappers = 'once'
run_inserts = 'once'
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table('parent', metadata,
Column('id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('type', String(10)),
)
Table('subparent', metadata,
Column('id', Integer,
ForeignKey('parent.id'),
primary_key=True),
Column('data', String(10)),
)
Table('base', metadata,
Column('id', Integer,
primary_key=True,
test_needs_autoincrement=True),
Column('type', String(10)),
)
Table('sub', metadata,
Column('id', Integer,
ForeignKey('base.id'),
primary_key=True),
Column('data', String(10)),
Column('subparent_id', Integer,
ForeignKey('subparent.id'),
nullable=False)
)
@classmethod
def setup_classes(cls):
class Parent(cls.Comparable):
pass
class Subparent(Parent):
pass
class Base(cls.Comparable):
pass
class Sub(Base):
pass
@classmethod
def setup_mappers(cls):
sub = cls.tables.sub
Sub = cls.classes.Sub
base = cls.tables.base
Base = cls.classes.Base
parent = cls.tables.parent
Parent = cls.classes.Parent
subparent = cls.tables.subparent
Subparent = cls.classes.Subparent
mapper(Parent, parent,
polymorphic_on=parent.c.type,
polymorphic_identity='b')
mapper(Subparent, subparent,
inherits=Parent,
polymorphic_identity='s',
properties={
'children':relationship(Sub, order_by=base.c.id)})
mapper(Base, base,
polymorphic_on=base.c.type,
polymorphic_identity='b')
mapper(Sub, sub,
inherits=Base,
polymorphic_identity='s')
@classmethod
def insert_data(cls):
global p1, p2
Sub, Subparent = cls.classes.Sub, cls.classes.Subparent
sess = create_session()
p1 = Subparent(
data='p1',
children=[Sub(data='s1'), Sub(data='s2'), Sub(data='s3')])
p2 = Subparent(
data='p2',
children=[Sub(data='s4'), Sub(data='s5')])
sess.add(p1)
sess.add(p2)
sess.flush()
def test_joinedload(self):
Subparent = self.classes.Subparent
sess = create_session()
def go():
eq_(sess.query(Subparent)
.options(joinedload(Subparent.children)).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
eq_(sess.query(Subparent)
.options(joinedload("children")).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager(self):
Subparent = self.classes.Subparent
sess = create_session()
def go():
eq_(sess.query(Subparent)
.join(Subparent.children)
.options(contains_eager(Subparent.children)).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
eq_(sess.query(Subparent)
.join(Subparent.children)
.options(contains_eager("children")).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 1)
def test_subqueryload(self):
Subparent = self.classes.Subparent
sess = create_session()
def go():
eq_(sess.query(Subparent)
.options(subqueryload(Subparent.children)).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 2)
sess.expunge_all()
def go():
eq_(sess.query(Subparent)
.options(subqueryload("children")).all(),
[p1, p2])
self.assert_sql_count(testing.db, go, 2)
class SameNamedPropTwoPolymorphicSubClassesTest(fixtures.MappedTest):
"""test pathing when two subclasses contain a different property
for the same name, and polymorphic loading is used.
#2614
"""
run_setup_classes = 'once'
run_setup_mappers = 'once'
run_inserts = 'once'
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table('a', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('type', String(10))
)
Table('b', metadata,
Column('id', Integer, ForeignKey('a.id'), primary_key=True)
)
Table('btod', metadata,
Column('bid', Integer, ForeignKey('b.id'), nullable=False),
Column('did', Integer, ForeignKey('d.id'), nullable=False)
)
Table('c', metadata,
Column('id', Integer, ForeignKey('a.id'), primary_key=True)
)
Table('ctod', metadata,
Column('cid', Integer, ForeignKey('c.id'), nullable=False),
Column('did', Integer, ForeignKey('d.id'), nullable=False)
)
Table('d', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True)
)
@classmethod
def setup_classes(cls):
class A(cls.Comparable):
pass
class B(A):
pass
class C(A):
pass
class D(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
A = cls.classes.A
B = cls.classes.B
C = cls.classes.C
D = cls.classes.D
mapper(A, cls.tables.a, polymorphic_on=cls.tables.a.c.type)
mapper(B, cls.tables.b, inherits=A, polymorphic_identity='b',
properties={
'related': relationship(D, secondary=cls.tables.btod)
})
mapper(C, cls.tables.c, inherits=A, polymorphic_identity='c',
properties={
'related': relationship(D, secondary=cls.tables.ctod)
})
mapper(D, cls.tables.d)
@classmethod
def insert_data(cls):
B = cls.classes.B
C = cls.classes.C
D = cls.classes.D
session = Session()
d = D()
session.add_all([
B(related=[d]),
C(related=[d])
])
session.commit()
def test_free_w_poly_subquery(self):
A = self.classes.A
B = self.classes.B
C = self.classes.C
D = self.classes.D
session = Session()
d = session.query(D).one()
a_poly = with_polymorphic(A, [B, C])
def go():
for a in session.query(a_poly).\
options(
subqueryload(a_poly.B.related),
subqueryload(a_poly.C.related)):
eq_(a.related, [d])
self.assert_sql_count(testing.db, go, 3)
def test_fixed_w_poly_subquery(self):
A = self.classes.A
B = self.classes.B
C = self.classes.C
D = self.classes.D
session = Session()
d = session.query(D).one()
def go():
for a in session.query(A).with_polymorphic([B, C]).\
options(subqueryload(B.related), subqueryload(C.related)):
eq_(a.related, [d])
self.assert_sql_count(testing.db, go, 3)
def test_free_w_poly_joined(self):
A = self.classes.A
B = self.classes.B
C = self.classes.C
D = self.classes.D
session = Session()
d = session.query(D).one()
a_poly = with_polymorphic(A, [B, C])
def go():
for a in session.query(a_poly).\
options(
joinedload(a_poly.B.related),
joinedload(a_poly.C.related)):
eq_(a.related, [d])
self.assert_sql_count(testing.db, go, 1)
def test_fixed_w_poly_joined(self):
A = self.classes.A
B = self.classes.B
C = self.classes.C
D = self.classes.D
session = Session()
d = session.query(D).one()
def go():
for a in session.query(A).with_polymorphic([B, C]).\
options(joinedload(B.related), joinedload(C.related)):
eq_(a.related, [d])
self.assert_sql_count(testing.db, go, 1)
class SubClassToSubClassFromParentTest(fixtures.MappedTest):
"""test #2617
"""
run_setup_classes = 'once'
run_setup_mappers = 'once'
run_inserts = 'once'
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table('z', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True)
)
Table('a', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('type', String(10)),
Column('z_id', Integer, ForeignKey('z.id'))
)
Table('b', metadata,
Column('id', Integer, ForeignKey('a.id'), primary_key=True)
)
Table('d', metadata,
Column('id', Integer, ForeignKey('a.id'), primary_key=True),
Column('b_id', Integer, ForeignKey('b.id'))
)
@classmethod
def setup_classes(cls):
class Z(cls.Comparable):
pass
class A(cls.Comparable):
pass
class B(A):
pass
class D(A):
pass
@classmethod
def setup_mappers(cls):
Z = cls.classes.Z
A = cls.classes.A
B = cls.classes.B
D = cls.classes.D
mapper(Z, cls.tables.z)
mapper(A, cls.tables.a, polymorphic_on=cls.tables.a.c.type,
with_polymorphic='*',
properties={
'zs': relationship(Z, lazy="subquery")
})
mapper(B, cls.tables.b, inherits=A, polymorphic_identity='b',
properties={
'related': relationship(D, lazy="subquery",
primaryjoin=cls.tables.d.c.b_id ==
cls.tables.b.c.id)
})
mapper(D, cls.tables.d, inherits=A, polymorphic_identity='d')
@classmethod
def insert_data(cls):
B = cls.classes.B
session = Session()
session.add(B())
session.commit()
def test_2617(self):
A = self.classes.A
session = Session()
def go():
a1 = session.query(A).first()
eq_(a1.related, [])
self.assert_sql_count(testing.db, go, 3)
class SubClassToSubClassMultiTest(AssertsCompiledSQL, fixtures.MappedTest):
"""
Two different joined-inh subclasses, led by a
parent, with two distinct endpoints:
parent -> subcl1 -> subcl2 -> (ep1, ep2)
the join to ep2 indicates we need to join
from the middle of the joinpoint, skipping ep1
"""
run_create_tables = None
run_deletes = None
__dialect__ = 'default'
@classmethod
def define_tables(cls, metadata):
Table('parent', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(30))
)
Table('base1', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', String(30))
)
Table('sub1', metadata,
Column('id', Integer, ForeignKey('base1.id'), primary_key=True),
Column('parent_id', ForeignKey('parent.id')),
Column('subdata', String(30))
)
Table('base2', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('base1_id', ForeignKey('base1.id')),
Column('data', String(30))
)
Table('sub2', metadata,
Column('id', Integer, ForeignKey('base2.id'), primary_key=True),
Column('subdata', String(30))
)
Table('ep1', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('base2_id', Integer, ForeignKey('base2.id')),
Column('data', String(30))
)
Table('ep2', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('base2_id', Integer, ForeignKey('base2.id')),
Column('data', String(30))
)
@classmethod
def setup_classes(cls):
class Parent(cls.Comparable):
pass
class Base1(cls.Comparable):
pass
class Sub1(Base1):
pass
class Base2(cls.Comparable):
pass
class Sub2(Base2):
pass
class EP1(cls.Comparable):
pass
class EP2(cls.Comparable):
pass
@classmethod
def _classes(cls):
return cls.classes.Parent, cls.classes.Base1,\
cls.classes.Base2, cls.classes.Sub1,\
cls.classes.Sub2, cls.classes.EP1,\
cls.classes.EP2
@classmethod
def setup_mappers(cls):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = cls._classes()
mapper(Parent, cls.tables.parent, properties={
'sub1': relationship(Sub1)
})
mapper(Base1, cls.tables.base1, properties={
'sub2': relationship(Sub2)
})
mapper(Sub1, cls.tables.sub1, inherits=Base1)
mapper(Base2, cls.tables.base2, properties={
'ep1': relationship(EP1),
'ep2': relationship(EP2)
})
mapper(Sub2, cls.tables.sub2, inherits=Base2)
mapper(EP1, cls.tables.ep1)
mapper(EP2, cls.tables.ep2)
def test_one(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = Session()
self.assert_compile(
s.query(Parent).join(Parent.sub1, Sub1.sub2).
join(Sub2.ep1).
join(Sub2.ep2),
"SELECT parent.id AS parent_id, parent.data AS parent_data "
"FROM parent JOIN (base1 JOIN sub1 ON base1.id = sub1.id) "
"ON parent.id = sub1.parent_id JOIN "
"(base2 JOIN sub2 "
"ON base2.id = sub2.id) "
"ON base1.id = base2.base1_id "
"JOIN ep1 ON base2.id = ep1.base2_id "
"JOIN ep2 ON base2.id = ep2.base2_id"
)
def test_two(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s2a = aliased(Sub2, flat=True)
s = Session()
self.assert_compile(
s.query(Parent).join(Parent.sub1).
join(s2a, Sub1.sub2),
"SELECT parent.id AS parent_id, parent.data AS parent_data "
"FROM parent JOIN (base1 JOIN sub1 ON base1.id = sub1.id) "
"ON parent.id = sub1.parent_id JOIN "
"(base2 AS base2_1 JOIN sub2 AS sub2_1 "
"ON base2_1.id = sub2_1.id) "
"ON base1.id = base2_1.base1_id"
)
def test_three(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = Session()
self.assert_compile(
s.query(Base1).join(Base1.sub2).
join(Sub2.ep1).\
join(Sub2.ep2),
"SELECT base1.id AS base1_id, base1.data AS base1_data "
"FROM base1 JOIN (base2 JOIN sub2 "
"ON base2.id = sub2.id) ON base1.id = "
"base2.base1_id "
"JOIN ep1 ON base2.id = ep1.base2_id "
"JOIN ep2 ON base2.id = ep2.base2_id"
)
def test_four(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = Session()
self.assert_compile(
s.query(Sub2).join(Base1, Base1.id == Sub2.base1_id).
join(Sub2.ep1).\
join(Sub2.ep2),
"SELECT sub2.id AS sub2_id, base2.id AS base2_id, "
"base2.base1_id AS base2_base1_id, base2.data AS base2_data, "
"sub2.subdata AS sub2_subdata "
"FROM base2 JOIN sub2 ON base2.id = sub2.id "
"JOIN base1 ON base1.id = base2.base1_id "
"JOIN ep1 ON base2.id = ep1.base2_id "
"JOIN ep2 ON base2.id = ep2.base2_id"
)
def test_five(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = Session()
self.assert_compile(
s.query(Sub2).join(Sub1, Sub1.id == Sub2.base1_id).
join(Sub2.ep1).\
join(Sub2.ep2),
"SELECT sub2.id AS sub2_id, base2.id AS base2_id, "
"base2.base1_id AS base2_base1_id, base2.data AS base2_data, "
"sub2.subdata AS sub2_subdata "
"FROM base2 JOIN sub2 ON base2.id = sub2.id "
"JOIN "
"(base1 JOIN sub1 ON base1.id = sub1.id) "
"ON sub1.id = base2.base1_id "
"JOIN ep1 ON base2.id = ep1.base2_id "
"JOIN ep2 ON base2.id = ep2.base2_id"
)
def test_six(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = Session()
self.assert_compile(
s.query(Sub2).from_self().\
join(Sub2.ep1).
join(Sub2.ep2),
"SELECT anon_1.sub2_id AS anon_1_sub2_id, "
"anon_1.base2_id AS anon_1_base2_id, "
"anon_1.base2_base1_id AS anon_1_base2_base1_id, "
"anon_1.base2_data AS anon_1_base2_data, "
"anon_1.sub2_subdata AS anon_1_sub2_subdata "
"FROM (SELECT sub2.id AS sub2_id, base2.id AS base2_id, "
"base2.base1_id AS base2_base1_id, base2.data AS base2_data, "
"sub2.subdata AS sub2_subdata "
"FROM base2 JOIN sub2 ON base2.id = sub2.id) AS anon_1 "
"JOIN ep1 ON anon_1.base2_id = ep1.base2_id "
"JOIN ep2 ON anon_1.base2_id = ep2.base2_id"
)
def test_seven(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = Session()
self.assert_compile(
# adding Sub2 to the entities list helps it,
# otherwise the joins for Sub2.ep1/ep2 don't have columns
# to latch onto. Can't really make it better than this
s.query(Parent, Sub2).join(Parent.sub1).\
join(Sub1.sub2).from_self().\
join(Sub2.ep1).
join(Sub2.ep2),
"SELECT anon_1.parent_id AS anon_1_parent_id, "
"anon_1.parent_data AS anon_1_parent_data, "
"anon_1.sub2_id AS anon_1_sub2_id, "
"anon_1.base2_id AS anon_1_base2_id, "
"anon_1.base2_base1_id AS anon_1_base2_base1_id, "
"anon_1.base2_data AS anon_1_base2_data, "
"anon_1.sub2_subdata AS anon_1_sub2_subdata "
"FROM (SELECT parent.id AS parent_id, parent.data AS parent_data, "
"sub2.id AS sub2_id, "
"base2.id AS base2_id, "
"base2.base1_id AS base2_base1_id, "
"base2.data AS base2_data, "
"sub2.subdata AS sub2_subdata "
"FROM parent JOIN (base1 JOIN sub1 ON base1.id = sub1.id) "
"ON parent.id = sub1.parent_id JOIN "
"(base2 JOIN sub2 ON base2.id = sub2.id) "
"ON base1.id = base2.base1_id) AS anon_1 "
"JOIN ep1 ON anon_1.base2_id = ep1.base2_id "
"JOIN ep2 ON anon_1.base2_id = ep2.base2_id"
)
class JoinAcrossJoinedInhMultiPath(fixtures.DeclarativeMappedTest,
testing.AssertsCompiledSQL):
"""test long join paths with a joined-inh in the middle, where we go multiple
times across the same joined-inh to the same target but with other classes
in the middle. E.g. test [ticket:2908]
"""
run_setup_mappers = 'once'
__dialect__ = 'default'
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Root(Base):
__tablename__ = 'root'
id = Column(Integer, primary_key=True)
sub1_id = Column(Integer, ForeignKey('sub1.id'))
intermediate = relationship("Intermediate")
sub1 = relationship("Sub1")
class Intermediate(Base):
__tablename__ = 'intermediate'
id = Column(Integer, primary_key=True)
sub1_id = Column(Integer, ForeignKey('sub1.id'))
root_id = Column(Integer, ForeignKey('root.id'))
sub1 = relationship("Sub1")
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
class Sub1(Parent):
__tablename__ = 'sub1'
id = Column(Integer, ForeignKey('parent.id'),
primary_key=True)
target = relationship("Target")
class Target(Base):
__tablename__ = 'target'
id = Column(Integer, primary_key=True)
sub1_id = Column(Integer, ForeignKey('sub1.id'))
def test_join(self):
Root, Intermediate, Sub1, Target = \
self.classes.Root, self.classes.Intermediate, \
self.classes.Sub1, self.classes.Target
s1_alias = aliased(Sub1)
s2_alias = aliased(Sub1)
t1_alias = aliased(Target)
t2_alias = aliased(Target)
sess = Session()
q = sess.query(Root).\
join(s1_alias, Root.sub1).join(t1_alias, s1_alias.target).\
join(Root.intermediate).join(s2_alias, Intermediate.sub1).\
join(t2_alias, s2_alias.target)
self.assert_compile(q,
"SELECT root.id AS root_id, root.sub1_id AS root_sub1_id "
"FROM root "
"JOIN (SELECT parent.id AS parent_id, sub1.id AS sub1_id "
"FROM parent JOIN sub1 ON parent.id = sub1.id) AS anon_1 "
"ON anon_1.sub1_id = root.sub1_id "
"JOIN target AS target_1 ON anon_1.sub1_id = target_1.sub1_id "
"JOIN intermediate ON root.id = intermediate.root_id "
"JOIN (SELECT parent.id AS parent_id, sub1.id AS sub1_id "
"FROM parent JOIN sub1 ON parent.id = sub1.id) AS anon_2 "
"ON anon_2.sub1_id = intermediate.sub1_id "
"JOIN target AS target_2 ON anon_2.sub1_id = target_2.sub1_id")
def test_join_flat(self):
Root, Intermediate, Sub1, Target = \
self.classes.Root, self.classes.Intermediate, \
self.classes.Sub1, self.classes.Target
s1_alias = aliased(Sub1, flat=True)
s2_alias = aliased(Sub1, flat=True)
t1_alias = aliased(Target)
t2_alias = aliased(Target)
sess = Session()
q = sess.query(Root).\
join(s1_alias, Root.sub1).join(t1_alias, s1_alias.target).\
join(Root.intermediate).join(s2_alias, Intermediate.sub1).\
join(t2_alias, s2_alias.target)
self.assert_compile(q,
"SELECT root.id AS root_id, root.sub1_id AS root_sub1_id "
"FROM root "
"JOIN (parent AS parent_1 JOIN sub1 AS sub1_1 ON parent_1.id = sub1_1.id) "
"ON sub1_1.id = root.sub1_id "
"JOIN target AS target_1 ON sub1_1.id = target_1.sub1_id "
"JOIN intermediate ON root.id = intermediate.root_id "
"JOIN (parent AS parent_2 JOIN sub1 AS sub1_2 ON parent_2.id = sub1_2.id) "
"ON sub1_2.id = intermediate.sub1_id "
"JOIN target AS target_2 ON sub1_2.id = target_2.sub1_id"
)
def test_joinedload(self):
Root, Intermediate, Sub1, Target = \
self.classes.Root, self.classes.Intermediate, \
self.classes.Sub1, self.classes.Target
sess = Session()
q = sess.query(Root).\
options(
joinedload(Root.sub1).joinedload(Sub1.target),
joinedload(Root.intermediate).joinedload(Intermediate.sub1).\
joinedload(Sub1.target),
)
self.assert_compile(q,
"SELECT root.id AS root_id, root.sub1_id AS root_sub1_id, "
"target_1.id AS target_1_id, target_1.sub1_id AS target_1_sub1_id, "
"sub1_1.id AS sub1_1_id, parent_1.id AS parent_1_id, "
"intermediate_1.id AS intermediate_1_id, "
"intermediate_1.sub1_id AS intermediate_1_sub1_id, "
"intermediate_1.root_id AS intermediate_1_root_id, "
"target_2.id AS target_2_id, target_2.sub1_id AS target_2_sub1_id, "
"sub1_2.id AS sub1_2_id, parent_2.id AS parent_2_id "
"FROM root "
"LEFT OUTER JOIN intermediate AS intermediate_1 "
"ON root.id = intermediate_1.root_id "
"LEFT OUTER JOIN (parent AS parent_1 JOIN sub1 AS sub1_1 "
"ON parent_1.id = sub1_1.id) ON sub1_1.id = intermediate_1.sub1_id "
"LEFT OUTER JOIN target AS target_1 ON sub1_1.id = target_1.sub1_id "
"LEFT OUTER JOIN (parent AS parent_2 JOIN sub1 AS sub1_2 "
"ON parent_2.id = sub1_2.id) ON sub1_2.id = root.sub1_id "
"LEFT OUTER JOIN target AS target_2 ON sub1_2.id = target_2.sub1_id")
class MultipleAdaptUsesEntityOverTableTest(AssertsCompiledSQL, fixtures.MappedTest):
__dialect__ = 'default'
run_create_tables = None
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table('a', metadata,
Column('id', Integer, primary_key=True),
Column('name', String)
)
Table('b', metadata,
Column('id', Integer, ForeignKey('a.id'), primary_key=True)
)
Table('c', metadata,
Column('id', Integer, ForeignKey('a.id'), primary_key=True),
Column('bid', Integer, ForeignKey('b.id'))
)
Table('d', metadata,
Column('id', Integer, ForeignKey('a.id'), primary_key=True),
Column('cid', Integer, ForeignKey('c.id'))
)
@classmethod
def setup_classes(cls):
class A(cls.Comparable):
pass
class B(A):
pass
class C(A):
pass
class D(A):
pass
@classmethod
def setup_mappers(cls):
A, B, C, D = cls.classes.A, cls.classes.B, cls.classes.C, cls.classes.D
a, b, c, d = cls.tables.a, cls.tables.b, cls.tables.c, cls.tables.d
mapper(A, a)
mapper(B, b, inherits=A)
mapper(C, c, inherits=A)
mapper(D, d, inherits=A)
def _two_join_fixture(self):
A, B, C, D = self.classes.A, self.classes.B, self.classes.C, self.classes.D
s = Session()
return s.query(B.name, C.name, D.name).select_from(B).\
join(C, C.bid == B.id).\
join(D, D.cid == C.id)
def test_two_joins_adaption(self):
a, b, c, d = self.tables.a, self.tables.b, self.tables.c, self.tables.d
q = self._two_join_fixture()
btoc = q._from_obj[0].left
ac_adapted = btoc.right.element.left
c_adapted = btoc.right.element.right
is_(ac_adapted.element, a)
is_(c_adapted.element, c)
ctod = q._from_obj[0].right
ad_adapted = ctod.left
d_adapted = ctod.right
is_(ad_adapted.element, a)
is_(d_adapted.element, d)
bname, cname, dname = q._entities
b_name_adapted = bname._resolve_expr_against_query_aliases(
q, bname.column, None)
c_name_adapted = cname._resolve_expr_against_query_aliases(
q, cname.column, None)
d_name_adapted = dname._resolve_expr_against_query_aliases(
q, dname.column, None)
assert bool(b_name_adapted == a.c.name)
assert bool(c_name_adapted == ac_adapted.c.name)
assert bool(d_name_adapted == ad_adapted.c.name)
def test_two_joins_sql(self):
q = self._two_join_fixture()
self.assert_compile(q,
"SELECT a.name AS a_name, a_1.name AS a_1_name, "
"a_2.name AS a_2_name "
"FROM a JOIN b ON a.id = b.id JOIN "
"(a AS a_1 JOIN c AS c_1 ON a_1.id = c_1.id) ON c_1.bid = b.id "
"JOIN (a AS a_2 JOIN d AS d_1 ON a_2.id = d_1.id) "
"ON d_1.cid = c_1.id"
)
|
michaelBenin/sqlalchemy
|
test/orm/inheritance/test_relationship.py
|
Python
|
mit
| 53,676 | 0.007564 |
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from StringIO import StringIO
from webkitpy.common.checkout.scm import CheckoutNeedsUpdate
from webkitpy.common.net.bugzilla import Attachment
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.layout_package import test_results
from webkitpy.layout_tests.layout_package import test_failures
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.commands.commandtest import CommandsTest
from webkitpy.tool.commands.queues import *
from webkitpy.tool.commands.queuestest import QueuesTest
from webkitpy.tool.commands.stepsequence import StepSequence
from webkitpy.tool.mocktool import MockTool, MockSCM, MockStatusServer
class TestQueue(AbstractPatchQueue):
name = "test-queue"
class TestReviewQueue(AbstractReviewQueue):
name = "test-review-queue"
class TestFeederQueue(FeederQueue):
_sleep_duration = 0
class AbstractQueueTest(CommandsTest):
def test_log_directory(self):
self.assertEquals(TestQueue()._log_directory(), os.path.join("..", "test-queue-logs"))
def _assert_run_webkit_patch(self, run_args, port=None):
queue = TestQueue()
tool = MockTool()
tool.status_server.bot_id = "gort"
tool.executive = Mock()
queue.bind_to_tool(tool)
queue._options = Mock()
queue._options.port = port
queue.run_webkit_patch(run_args)
expected_run_args = ["echo", "--status-host=example.com", "--bot-id=gort"]
if port:
expected_run_args.append("--port=%s" % port)
expected_run_args.extend(run_args)
tool.executive.run_and_throw_if_fail.assert_called_with(expected_run_args)
def test_run_webkit_patch(self):
self._assert_run_webkit_patch([1])
self._assert_run_webkit_patch(["one", 2])
self._assert_run_webkit_patch([1], port="mockport")
def test_iteration_count(self):
queue = TestQueue()
queue._options = Mock()
queue._options.iterations = 3
self.assertTrue(queue.should_continue_work_queue())
self.assertTrue(queue.should_continue_work_queue())
self.assertTrue(queue.should_continue_work_queue())
self.assertFalse(queue.should_continue_work_queue())
def test_no_iteration_count(self):
queue = TestQueue()
queue._options = Mock()
self.assertTrue(queue.should_continue_work_queue())
self.assertTrue(queue.should_continue_work_queue())
self.assertTrue(queue.should_continue_work_queue())
self.assertTrue(queue.should_continue_work_queue())
def _assert_log_message(self, script_error, log_message):
failure_log = AbstractQueue._log_from_script_error_for_upload(script_error, output_limit=10)
self.assertTrue(failure_log.read(), log_message)
def test_log_from_script_error_for_upload(self):
self._assert_log_message(ScriptError("test"), "test")
# In python 2.5 unicode(Exception) is busted. See:
# http://bugs.python.org/issue2517
# With no good workaround, we just ignore these tests.
if not hasattr(Exception, "__unicode__"):
return
unicode_tor = u"WebKit \u2661 Tor Arne Vestb\u00F8!"
utf8_tor = unicode_tor.encode("utf-8")
self._assert_log_message(ScriptError(unicode_tor), utf8_tor)
script_error = ScriptError(unicode_tor, output=unicode_tor)
expected_output = "%s\nLast %s characters of output:\n%s" % (utf8_tor, 10, utf8_tor[-10:])
self._assert_log_message(script_error, expected_output)
class FeederQueueTest(QueuesTest):
def test_feeder_queue(self):
queue = TestFeederQueue()
tool = MockTool(log_executive=True)
expected_stderr = {
"begin_work_queue": self._default_begin_work_queue_stderr("feeder-queue", MockSCM.fake_checkout_root),
"should_proceed_with_work_item": "",
"next_work_item": "",
"process_work_item": """Warning, attachment 128 on bug 42 has invalid committer (non-committer@example.com)
Warning, attachment 128 on bug 42 has invalid committer (non-committer@example.com)
MOCK setting flag 'commit-queue' to '-' on attachment '128' with comment 'Rejecting attachment 128 from commit-queue.' and additional comment 'non-committer@example.com does not have committer permissions according to http://trac.webkit.org/browser/trunk/Tools/Scripts/webkitpy/common/config/committers.py.
- If you do not have committer rights please read http://webkit.org/coding/contributing.html for instructions on how to use bugzilla flags.
- If you have committer rights please correct the error in Tools/Scripts/webkitpy/common/config/committers.py by adding yourself to the file (no review needed). The commit-queue restarts itself every 2 hours. After restart the commit-queue will correctly respect your committer rights.'
MOCK: update_work_items: commit-queue [106, 197]
Feeding commit-queue items [106, 197]
Feeding EWS (1 r? patch, 1 new)
MOCK: submit_to_ews: 103
""",
"handle_unexpected_error": "Mock error message\n",
}
self.assert_queue_outputs(queue, tool=tool, expected_stderr=expected_stderr)
class AbstractPatchQueueTest(CommandsTest):
def test_next_patch(self):
queue = AbstractPatchQueue()
tool = MockTool()
queue.bind_to_tool(tool)
queue._options = Mock()
queue._options.port = None
self.assertEquals(queue._next_patch(), None)
tool.status_server = MockStatusServer(work_items=[2, 197])
expected_stdout = "MOCK: fetch_attachment: 2 is not a known attachment id\n" # A mock-only message to prevent us from making mistakes.
expected_stderr = "MOCK: release_work_item: None 2\n"
patch_id = OutputCapture().assert_outputs(self, queue._next_patch, [], expected_stdout=expected_stdout, expected_stderr=expected_stderr)
self.assertEquals(patch_id, None) # 2 is an invalid patch id
self.assertEquals(queue._next_patch().id(), 197)
class NeedsUpdateSequence(StepSequence):
def _run(self, tool, options, state):
raise CheckoutNeedsUpdate([], 1, "", None)
class AlwaysCommitQueueTool(object):
def __init__(self):
self.status_server = MockStatusServer()
def command_by_name(self, name):
return CommitQueue
class SecondThoughtsCommitQueue(CommitQueue):
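    # Test double (summary of the behavior below): after the first command
    # runs, refetch_patch() starts returning an obsoleted, cq-minus
    # attachment, simulating a patch that gets rejected by hand while the
    # commit-queue is still processing it.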
def __init__(self):
self._reject_patch = False
CommitQueue.__init__(self)
def run_command(self, command):
# We want to reject the patch after the first validation,
# so wait to reject it until after some other command has run.
self._reject_patch = True
return CommitQueue.run_command(self, command)
def refetch_patch(self, patch):
if not self._reject_patch:
return self._tool.bugs.fetch_attachment(patch.id())
attachment_dictionary = {
"id": patch.id(),
"bug_id": patch.bug_id(),
"name": "Rejected",
"is_obsolete": True,
"is_patch": False,
"review": "-",
"reviewer_email": "foo@bar.com",
"commit-queue": "-",
"committer_email": "foo@bar.com",
"attacher_email": "Contributer1",
}
return Attachment(attachment_dictionary, None)
class CommitQueueTest(QueuesTest):
def _mock_test_result(self, testname):
return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])
def test_commit_queue(self):
expected_stderr = {
"begin_work_queue": self._default_begin_work_queue_stderr("commit-queue", MockSCM.fake_checkout_root),
"should_proceed_with_work_item": "MOCK: update_status: commit-queue Processing patch\n",
"next_work_item": "",
"process_work_item": """MOCK: update_status: commit-queue Cleaned working directory
MOCK: update_status: commit-queue Updated working directory
MOCK: update_status: commit-queue Applied patch
MOCK: update_status: commit-queue Built patch
MOCK: update_status: commit-queue Passed tests
MOCK: update_status: commit-queue Landed patch
MOCK: update_status: commit-queue Pass
MOCK: release_work_item: commit-queue 197
""",
"handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '197' with comment 'Rejecting attachment 197 from commit-queue.' and additional comment 'Mock error message'\n",
"handle_script_error": "ScriptError error message\n",
}
self.assert_queue_outputs(CommitQueue(), expected_stderr=expected_stderr)
def test_commit_queue_failure(self):
expected_stderr = {
"begin_work_queue": self._default_begin_work_queue_stderr("commit-queue", MockSCM.fake_checkout_root),
"should_proceed_with_work_item": "MOCK: update_status: commit-queue Processing patch\n",
"next_work_item": "",
"process_work_item": """MOCK: update_status: commit-queue Cleaned working directory
MOCK: update_status: commit-queue Updated working directory
MOCK: update_status: commit-queue Patch does not apply
MOCK setting flag 'commit-queue' to '-' on attachment '197' with comment 'Rejecting attachment 197 from commit-queue.' and additional comment 'MOCK script error'
MOCK: update_status: commit-queue Fail
MOCK: release_work_item: commit-queue 197
""",
"handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '197' with comment 'Rejecting attachment 197 from commit-queue.' and additional comment 'Mock error message'\n",
"handle_script_error": "ScriptError error message\n",
}
queue = CommitQueue()
def mock_run_webkit_patch(command):
if command == ['clean'] or command == ['update']:
# We want cleaning to succeed so we can error out on a step
# that causes the commit-queue to reject the patch.
return
raise ScriptError('MOCK script error')
queue.run_webkit_patch = mock_run_webkit_patch
self.assert_queue_outputs(queue, expected_stderr=expected_stderr)
def test_rollout(self):
tool = MockTool(log_executive=True)
tool.filesystem.write_text_file('/mock/results.html', '') # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
tool.buildbot.light_tree_on_fire()
expected_stderr = {
"begin_work_queue": self._default_begin_work_queue_stderr("commit-queue", MockSCM.fake_checkout_root),
"should_proceed_with_work_item": "MOCK: update_status: commit-queue Processing patch\n",
"next_work_item": "",
"process_work_item": """MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean']
MOCK: update_status: commit-queue Cleaned working directory
MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'update']
MOCK: update_status: commit-queue Updated working directory
MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'apply-attachment', '--no-update', '--non-interactive', 197]
MOCK: update_status: commit-queue Applied patch
MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'build', '--no-clean', '--no-update', '--build-style=both']
MOCK: update_status: commit-queue Built patch
MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
MOCK: update_status: commit-queue Passed tests
MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'land-attachment', '--force-clean', '--ignore-builders', '--non-interactive', '--parent-command=commit-queue', 197]
MOCK: update_status: commit-queue Landed patch
MOCK: update_status: commit-queue Pass
MOCK: release_work_item: commit-queue 197
""",
"handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '197' with comment 'Rejecting attachment 197 from commit-queue.' and additional comment 'Mock error message'\n",
"handle_script_error": "ScriptError error message\n",
}
self.assert_queue_outputs(CommitQueue(), tool=tool, expected_stderr=expected_stderr)
def test_rollout_lands(self):
tool = MockTool(log_executive=True)
tool.buildbot.light_tree_on_fire()
rollout_patch = tool.bugs.fetch_attachment(106) # _patch6, a rollout patch.
assert(rollout_patch.is_rollout())
expected_stderr = {
"begin_work_queue": self._default_begin_work_queue_stderr("commit-queue", MockSCM.fake_checkout_root),
"should_proceed_with_work_item": "MOCK: update_status: commit-queue Processing rollout patch\n",
"next_work_item": "",
"process_work_item": """MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean']
MOCK: update_status: commit-queue Cleaned working directory
MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'update']
MOCK: update_status: commit-queue Updated working directory
MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'apply-attachment', '--no-update', '--non-interactive', 106]
MOCK: update_status: commit-queue Applied patch
MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'land-attachment', '--force-clean', '--ignore-builders', '--non-interactive', '--parent-command=commit-queue', 106]
MOCK: update_status: commit-queue Landed patch
MOCK: update_status: commit-queue Pass
MOCK: release_work_item: commit-queue 106
""",
"handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '106' with comment 'Rejecting attachment 106 from commit-queue.' and additional comment 'Mock error message'\n",
"handle_script_error": "ScriptError error message\n",
}
self.assert_queue_outputs(CommitQueue(), tool=tool, work_item=rollout_patch, expected_stderr=expected_stderr)
def test_auto_retry(self):
queue = CommitQueue()
options = Mock()
options.parent_command = "commit-queue"
tool = AlwaysCommitQueueTool()
sequence = NeedsUpdateSequence(None)
expected_stderr = "Commit failed because the checkout is out of date. Please update and try again.\nMOCK: update_status: commit-queue Tests passed, but commit failed (checkout out of date). Updating, then landing without building or re-running tests.\n"
state = {'patch': None}
OutputCapture().assert_outputs(self, sequence.run_and_handle_errors, [tool, options, state], expected_exception=TryAgain, expected_stderr=expected_stderr)
self.assertEquals(options.update, True)
self.assertEquals(options.build, False)
self.assertEquals(options.test, False)
def test_manual_reject_during_processing(self):
queue = SecondThoughtsCommitQueue()
queue.bind_to_tool(MockTool())
queue._tool.filesystem.write_text_file('/mock/results.html', '') # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
queue._options = Mock()
queue._options.port = None
expected_stderr = """MOCK: update_status: commit-queue Cleaned working directory
MOCK: update_status: commit-queue Updated working directory
MOCK: update_status: commit-queue Applied patch
MOCK: update_status: commit-queue Built patch
MOCK: update_status: commit-queue Passed tests
MOCK: update_status: commit-queue Retry
MOCK: release_work_item: commit-queue 197
"""
OutputCapture().assert_outputs(self, queue.process_work_item, [QueuesTest.mock_work_item], expected_stderr=expected_stderr)
def test_report_flaky_tests(self):
queue = CommitQueue()
queue.bind_to_tool(MockTool())
expected_stderr = """MOCK bug comment: bug_id=76, cc=None
--- Begin comment ---
The commit-queue just saw foo/bar.html flake (Text diff mismatch) while processing attachment 197 on bug 42.
Port: MockPort Platform: MockPlatform 1.0
--- End comment ---
MOCK add_attachment_to_bug: bug_id=76, description=Failure diff from bot filename=failure.diff
MOCK bug comment: bug_id=76, cc=None
--- Begin comment ---
The commit-queue just saw bar/baz.html flake (Text diff mismatch) while processing attachment 197 on bug 42.
Port: MockPort Platform: MockPlatform 1.0
--- End comment ---
MOCK add_attachment_to_bug: bug_id=76, description=Archive of layout-test-results from bot filename=layout-test-results.zip
MOCK bug comment: bug_id=42, cc=None
--- Begin comment ---
The commit-queue encountered the following flaky tests while processing attachment 197:
foo/bar.html bug 76 (author: abarth@webkit.org)
bar/baz.html bug 76 (author: abarth@webkit.org)
The commit-queue is continuing to process your patch.
--- End comment ---
"""
test_names = ["foo/bar.html", "bar/baz.html"]
test_results = [self._mock_test_result(name) for name in test_names]
class MockZipFile(object):
def __init__(self):
self.fp = StringIO()
def read(self, path):
return ""
def namelist(self):
# This is intentionally missing one diffs.txt to exercise the "upload the whole zip" codepath.
return ['foo/bar-diffs.txt']
OutputCapture().assert_outputs(self, queue.report_flaky_tests, [QueuesTest.mock_work_item, test_results, MockZipFile()], expected_stderr=expected_stderr)
def test_missing_layout_test_results(self):
queue = CommitQueue()
tool = MockTool()
results_path = '/mock/results.html'
tool.filesystem = MockFileSystem({results_path: None})
queue.bind_to_tool(tool)
# Make sure that our filesystem mock functions as we expect.
self.assertRaises(IOError, tool.filesystem.read_text_file, results_path)
# layout_test_results shouldn't raise even if the results.html file is missing.
self.assertEquals(queue.layout_test_results(), None)
def test_layout_test_results(self):
queue = CommitQueue()
queue.bind_to_tool(MockTool())
queue._read_file_contents = lambda path: None
self.assertEquals(queue.layout_test_results(), None)
queue._read_file_contents = lambda path: ""
self.assertEquals(queue.layout_test_results(), None)
queue._create_layout_test_results = lambda: LayoutTestResults([])
results = queue.layout_test_results()
self.assertNotEquals(results, None)
self.assertEquals(results.failure_limit_count(), 10) # This value matches RunTests.NON_INTERACTIVE_FAILURE_LIMIT_COUNT
def test_archive_last_layout_test_results(self):
queue = CommitQueue()
queue.bind_to_tool(MockTool())
patch = queue._tool.bugs.fetch_attachment(128)
# This is just to test that the method doesn't raise.
queue.archive_last_layout_test_results(patch)
def test_upload_results_archive_for_patch(self):
queue = CommitQueue()
queue.bind_to_tool(MockTool())
patch = queue._tool.bugs.fetch_attachment(128)
expected_stderr = """MOCK add_attachment_to_bug: bug_id=42, description=Archive of layout-test-results from bot filename=layout-test-results.zip
-- Begin comment --
The attached test failures were seen while running run-webkit-tests on the commit-queue.
Port: MockPort Platform: MockPlatform 1.0
-- End comment --
"""
OutputCapture().assert_outputs(self, queue._upload_results_archive_for_patch, [patch, Mock()], expected_stderr=expected_stderr)
class StyleQueueTest(QueuesTest):
def test_style_queue(self):
expected_stderr = {
"begin_work_queue": self._default_begin_work_queue_stderr("style-queue", MockSCM.fake_checkout_root),
"next_work_item": "",
"should_proceed_with_work_item": "MOCK: update_status: style-queue Checking style\n",
"process_work_item": "MOCK: update_status: style-queue Pass\nMOCK: release_work_item: style-queue 197\n",
"handle_unexpected_error": "Mock error message\n",
"handle_script_error": "MOCK: update_status: style-queue ScriptError error message\nMOCK bug comment: bug_id=42, cc=[]\n--- Begin comment ---\nAttachment 197 did not pass style-queue:\n\nScriptError error message\n\nIf any of these errors are false positives, please file a bug against check-webkit-style.\n--- End comment ---\n\n",
}
expected_exceptions = {
"handle_script_error": SystemExit,
}
self.assert_queue_outputs(StyleQueue(), expected_stderr=expected_stderr, expected_exceptions=expected_exceptions)
|
danialbehzadi/Nokia-RM-1013-2.0.0.11
|
webkit/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py
|
Python
|
gpl-3.0
| 22,259 | 0.002965 |
# -*- coding: utf-8 -*-
from nose.tools import raises
from openfisca_core import periods
from openfisca_core.columns import IntCol
from openfisca_core.formulas import CycleError, SimpleFormulaColumn
from openfisca_core.tests import dummy_country
from openfisca_core.tests.dummy_country import Individus, reference_formula
from openfisca_core.tools import assert_near
# 1 <--> 2 with same period
@reference_formula
class variable1(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
return period, simulation.calculate('variable2', period)
@reference_formula
class variable2(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
return period, simulation.calculate('variable1', period)
# 3 <--> 4 with a period offset, but without explicit cycle allowed
@reference_formula
class variable3(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
return period, simulation.calculate('variable4', period.last_year)
@reference_formula
class variable4(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
return period, simulation.calculate('variable3', period)
# 5 -f-> 6 with a period offset, with cycle flagged but not allowed
# <---
@reference_formula
class variable5(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
variable6 = simulation.calculate('variable6', period.last_year, max_nb_cycles = 0)
return period, 5 + variable6
@reference_formula
class variable6(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
variable5 = simulation.calculate('variable5', period)
return period, 6 + variable5
# december cotisation depending on november value
@reference_formula
class cotisation(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
period = period.this_month
if period.start.month == 12:
return period, 2 * simulation.calculate('cotisation', period.last_month, max_nb_cycles = 1)
else:
return period, self.zeros() + 1
# 7 -f-> 8 with a period offset, with explicit cycle allowed (1 level)
# <---
@reference_formula
class variable7(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
variable8 = simulation.calculate('variable8', period.last_year, max_nb_cycles = 1)
return period, 7 + variable8
@reference_formula
class variable8(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
variable7 = simulation.calculate('variable7', period)
return period, 8 + variable7
# TaxBenefitSystem instance declared after formulas
tax_benefit_system = dummy_country.init_tax_benefit_system()
reference_period = periods.period(u'2013')
@raises(AssertionError)
def test_pure_cycle():
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = reference_period,
parent1 = dict(),
).new_simulation(debug = True)
simulation.calculate('variable1')
@raises(CycleError)
def test_cycle_time_offset():
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = reference_period,
parent1 = dict(),
).new_simulation(debug = True)
simulation.calculate('variable3')
def test_allowed_cycle():
"""
    Calculate variable5 and variable6 in both orders, to verify that whichever variable is
    calculated first has no effect on the result.
"""
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = reference_period,
parent1 = dict(),
).new_simulation(debug = True)
variable6 = simulation.calculate('variable6')
variable5 = simulation.calculate('variable5')
variable6_last_year = simulation.calculate('variable6', reference_period.last_year)
assert_near(variable5, [5])
assert_near(variable6, [11])
assert_near(variable6_last_year, [0])
def test_allowed_cycle_different_order():
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = reference_period,
parent1 = dict(),
).new_simulation(debug = True)
variable5 = simulation.calculate('variable5')
variable6 = simulation.calculate('variable6')
variable6_last_year = simulation.calculate('variable6', reference_period.last_year)
assert_near(variable5, [5])
assert_near(variable6, [11])
assert_near(variable6_last_year, [0])
def test_cotisation_1_level():
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = reference_period.last_month, # December
parent1 = dict(),
).new_simulation(debug = True)
cotisation = simulation.calculate('cotisation')
assert_near(cotisation, [2])
def test_cycle_1_level():
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = reference_period,
parent1 = dict(),
).new_simulation(debug = True)
variable7 = simulation.calculate('variable7')
# variable8 = simulation.calculate('variable8')
assert_near(variable7, [22])
|
adrienpacifico/openfisca-core
|
openfisca_core/tests/test_cycles.py
|
Python
|
agpl-3.0
| 5,495 | 0.008735 |
from autopush.tests import setUp, tearDown
def pytest_configure(config):
"""Called before testing begins"""
setUp()
def pytest_unconfigure(config):
"""Called after all tests run and warnings displayed"""
tearDown()
|
mozilla-services/autopush
|
autopush/tests/conftest.py
|
Python
|
mpl-2.0
| 235 | 0 |
"""
This module converts requested URLs to callback view functions.
RegexURLResolver is the main class here. Its resolve() method takes a URL (as
a string) and returns a ResolverMatch object which provides access to all
attributes of the resolved URL match.
"""
from __future__ import unicode_literals
import functools
import re
import warnings
from importlib import import_module
from threading import local
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.utils import lru_cache, six
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_str, force_text, iri_to_uri
from django.utils.functional import cached_property, lazy
from django.utils.http import RFC3986_SUBDELIMS, urlquote
from django.utils.module_loading import module_has_submodule
from django.utils.regex_helper import normalize
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import get_language, override
# SCRIPT_NAME prefixes for each thread are stored here. If there's no entry for
# the current thread (which is the only one we ever access), it is assumed to
# be empty.
_prefixes = local()
# Overridden URLconfs for each thread are stored here.
_urlconfs = local()
class ResolverMatch(object):
def __init__(self, func, args, kwargs, url_name=None, app_names=None, namespaces=None):
self.func = func
self.args = args
self.kwargs = kwargs
self.url_name = url_name
# If a URLRegexResolver doesn't have a namespace or app_name, it passes
# in an empty value.
self.app_names = [x for x in app_names if x] if app_names else []
self.app_name = ':'.join(self.app_names)
if namespaces:
self.namespaces = [x for x in namespaces if x]
else:
self.namespaces = []
self.namespace = ':'.join(self.namespaces)
if not hasattr(func, '__name__'):
# A class-based view
self._func_path = '.'.join([func.__class__.__module__, func.__class__.__name__])
else:
# A function-based view
self._func_path = '.'.join([func.__module__, func.__name__])
view_path = url_name or self._func_path
self.view_name = ':'.join(self.namespaces + [view_path])
def __getitem__(self, index):
return (self.func, self.args, self.kwargs)[index]
def __repr__(self):
return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name=%s, app_names=%s, namespaces=%s)" % (
self._func_path, self.args, self.kwargs, self.url_name, self.app_names, self.namespaces)
class Resolver404(Http404):
pass
class NoReverseMatch(Exception):
pass
@lru_cache.lru_cache(maxsize=None)
def get_callable(lookup_view, can_fail=False):
"""
Return a callable corresponding to lookup_view. This function is used
by both resolve() and reverse(), so can_fail allows the caller to choose
between returning the input as is and raising an exception when the input
string can't be interpreted as an import path.
If lookup_view is already a callable, return it.
If lookup_view is a string import path that can be resolved to a callable,
import that callable and return it.
If lookup_view is some other kind of string and can_fail is True, the string
is returned as is. If can_fail is False, an exception is raised (either
ImportError or ViewDoesNotExist).
"""
if callable(lookup_view):
return lookup_view
if not isinstance(lookup_view, six.string_types):
raise ViewDoesNotExist(
"'%s' is not a callable or a dot-notation path" % lookup_view
)
mod_name, func_name = get_mod_func(lookup_view)
if not func_name: # No '.' in lookup_view
if can_fail:
return lookup_view
else:
raise ImportError(
"Could not import '%s'. The path must be fully qualified." %
lookup_view)
try:
mod = import_module(mod_name)
except ImportError:
if can_fail:
return lookup_view
else:
parentmod, submod = get_mod_func(mod_name)
if submod and not module_has_submodule(import_module(parentmod), submod):
raise ViewDoesNotExist(
"Could not import '%s'. Parent module %s does not exist." %
(lookup_view, mod_name))
else:
raise
else:
try:
view_func = getattr(mod, func_name)
except AttributeError:
if can_fail:
return lookup_view
else:
raise ViewDoesNotExist(
"Could not import '%s'. View does not exist in module %s." %
(lookup_view, mod_name))
else:
if not callable(view_func):
# For backwards compatibility this is raised regardless of can_fail
raise ViewDoesNotExist(
"Could not import '%s.%s'. View is not callable." %
(mod_name, func_name))
return view_func
@lru_cache.lru_cache(maxsize=None)
def get_resolver(urlconf=None):
if urlconf is None:
from django.conf import settings
urlconf = settings.ROOT_URLCONF
return RegexURLResolver(r'^/', urlconf)
@lru_cache.lru_cache(maxsize=None)
def get_ns_resolver(ns_pattern, resolver):
# Build a namespaced resolver for the given parent urlconf pattern.
# This makes it possible to have captured parameters in the parent
# urlconf pattern.
ns_resolver = RegexURLResolver(ns_pattern, resolver.url_patterns)
return RegexURLResolver(r'^/', [ns_resolver])
def get_mod_func(callback):
# Converts 'django.views.news.stories.story_detail' to
# ['django.views.news.stories', 'story_detail']
try:
dot = callback.rindex('.')
except ValueError:
return callback, ''
return callback[:dot], callback[dot + 1:]
class LocaleRegexProvider(object):
"""
A mixin to provide a default regex property which can vary by active
language.
"""
def __init__(self, regex):
# regex is either a string representing a regular expression, or a
# translatable string (using ugettext_lazy) representing a regular
# expression.
self._regex = regex
self._regex_dict = {}
@property
def regex(self):
"""
Returns a compiled regular expression, depending upon the activated
language-code.
"""
language_code = get_language()
if language_code not in self._regex_dict:
if isinstance(self._regex, six.string_types):
regex = self._regex
else:
regex = force_text(self._regex)
try:
compiled_regex = re.compile(regex, re.UNICODE)
except re.error as e:
raise ImproperlyConfigured(
'"%s" is not a valid regular expression: %s' %
(regex, six.text_type(e)))
self._regex_dict[language_code] = compiled_regex
return self._regex_dict[language_code]
class RegexURLPattern(LocaleRegexProvider):
def __init__(self, regex, callback, default_args=None, name=None):
LocaleRegexProvider.__init__(self, regex)
# callback is either a string like 'foo.views.news.stories.story_detail'
# which represents the path to a module and a view function name, or a
# callable object (view).
if callable(callback):
self._callback = callback
else:
self._callback = None
self._callback_str = callback
self.default_args = default_args or {}
self.name = name
def __repr__(self):
return force_str('<%s %s %s>' % (self.__class__.__name__, self.name, self.regex.pattern))
def add_prefix(self, prefix):
"""
Adds the prefix string to a string-based callback.
"""
if not prefix or not hasattr(self, '_callback_str'):
return
self._callback_str = prefix + '.' + self._callback_str
def resolve(self, path):
match = self.regex.search(path)
if match:
# If there are any named groups, use those as kwargs, ignoring
# non-named groups. Otherwise, pass all non-named arguments as
# positional arguments.
kwargs = match.groupdict()
if kwargs:
args = ()
else:
args = match.groups()
# In both cases, pass any extra_kwargs as **kwargs.
kwargs.update(self.default_args)
return ResolverMatch(self.callback, args, kwargs, self.name)
@property
def callback(self):
if self._callback is not None:
return self._callback
self._callback = get_callable(self._callback_str)
return self._callback
class RegexURLResolver(LocaleRegexProvider):
def __init__(self, regex, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
LocaleRegexProvider.__init__(self, regex)
# urlconf_name is the dotted Python path to the module defining
# urlpatterns. It may also be an object with an urlpatterns attribute
# or urlpatterns itself.
self.urlconf_name = urlconf_name
self.callback = None
self.default_kwargs = default_kwargs or {}
self.namespace = namespace
self.app_name = app_name
self._reverse_dict = {}
self._namespace_dict = {}
self._app_dict = {}
# set of dotted paths to all functions and classes that are used in
# urlpatterns
self._callback_strs = set()
self._populated = False
def __repr__(self):
if isinstance(self.urlconf_name, list) and len(self.urlconf_name):
# Don't bother to output the whole list, it can be huge
urlconf_repr = '<%s list>' % self.urlconf_name[0].__class__.__name__
else:
urlconf_repr = repr(self.urlconf_name)
return str('<%s %s (%s:%s) %s>') % (
self.__class__.__name__, urlconf_repr, self.app_name,
self.namespace, self.regex.pattern)
def _populate(self):
lookups = MultiValueDict()
namespaces = {}
apps = {}
language_code = get_language()
for pattern in reversed(self.url_patterns):
if hasattr(pattern, '_callback_str'):
self._callback_strs.add(pattern._callback_str)
elif hasattr(pattern, '_callback'):
callback = pattern._callback
if isinstance(callback, functools.partial):
callback = callback.func
if not hasattr(callback, '__name__'):
lookup_str = callback.__module__ + "." + callback.__class__.__name__
else:
lookup_str = callback.__module__ + "." + callback.__name__
self._callback_strs.add(lookup_str)
p_pattern = pattern.regex.pattern
if p_pattern.startswith('^'):
p_pattern = p_pattern[1:]
if isinstance(pattern, RegexURLResolver):
if pattern.namespace:
namespaces[pattern.namespace] = (p_pattern, pattern)
if pattern.app_name:
apps.setdefault(pattern.app_name, []).append(pattern.namespace)
else:
parent_pat = pattern.regex.pattern
for name in pattern.reverse_dict:
for matches, pat, defaults in pattern.reverse_dict.getlist(name):
new_matches = normalize(parent_pat + pat)
lookups.appendlist(
name,
(
new_matches,
p_pattern + pat,
dict(defaults, **pattern.default_kwargs),
)
)
for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
namespaces[namespace] = (p_pattern + prefix, sub_pattern)
for app_name, namespace_list in pattern.app_dict.items():
apps.setdefault(app_name, []).extend(namespace_list)
self._callback_strs.update(pattern._callback_strs)
else:
bits = normalize(p_pattern)
lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args))
if pattern.name is not None:
lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args))
self._reverse_dict[language_code] = lookups
self._namespace_dict[language_code] = namespaces
self._app_dict[language_code] = apps
self._populated = True
@property
def reverse_dict(self):
language_code = get_language()
if language_code not in self._reverse_dict:
self._populate()
return self._reverse_dict[language_code]
@property
def namespace_dict(self):
language_code = get_language()
if language_code not in self._namespace_dict:
self._populate()
return self._namespace_dict[language_code]
@property
def app_dict(self):
language_code = get_language()
if language_code not in self._app_dict:
self._populate()
return self._app_dict[language_code]
def _is_callback(self, name):
if not self._populated:
self._populate()
return name in self._callback_strs
def resolve(self, path):
path = force_text(path) # path may be a reverse_lazy object
tried = []
match = self.regex.search(path)
if match:
new_path = path[match.end():]
for pattern in self.url_patterns:
try:
sub_match = pattern.resolve(new_path)
except Resolver404 as e:
sub_tried = e.args[0].get('tried')
if sub_tried is not None:
tried.extend([pattern] + t for t in sub_tried)
else:
tried.append([pattern])
else:
if sub_match:
# Merge captured arguments in match with submatch
sub_match_dict = dict(match.groupdict(), **self.default_kwargs)
sub_match_dict.update(sub_match.kwargs)
# If there are *any* named groups, ignore all non-named groups.
# Otherwise, pass all non-named arguments as positional arguments.
sub_match_args = sub_match.args
if not sub_match_dict:
sub_match_args = match.groups() + sub_match.args
return ResolverMatch(
sub_match.func,
sub_match_args,
sub_match_dict,
sub_match.url_name,
[self.app_name] + sub_match.app_names,
[self.namespace] + sub_match.namespaces
)
tried.append([pattern])
raise Resolver404({'tried': tried, 'path': new_path})
raise Resolver404({'path': path})
@cached_property
def urlconf_module(self):
if isinstance(self.urlconf_name, six.string_types):
return import_module(self.urlconf_name)
else:
return self.urlconf_name
@cached_property
def url_patterns(self):
# urlconf_module might be a valid set of patterns, so we default to it
patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
try:
iter(patterns)
except TypeError:
msg = (
"The included urlconf '{name}' does not appear to have any "
"patterns in it. If you see valid patterns in the file then "
"the issue is probably caused by a circular import."
)
raise ImproperlyConfigured(msg.format(name=self.urlconf_name))
return patterns
def resolve_error_handler(self, view_type):
callback = getattr(self.urlconf_module, 'handler%s' % view_type, None)
if not callback:
# No handler specified in file; use default
# Lazy import, since django.urls imports this file
from django.conf import urls
callback = getattr(urls, 'handler%s' % view_type)
return get_callable(callback), {}
def reverse(self, lookup_view, *args, **kwargs):
return self._reverse_with_prefix(lookup_view, '', *args, **kwargs)
def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs):
if args and kwargs:
raise ValueError("Don't mix *args and **kwargs in call to reverse()!")
text_args = [force_text(v) for v in args]
text_kwargs = {k: force_text(v) for (k, v) in kwargs.items()}
if not self._populated:
self._populate()
original_lookup = lookup_view
try:
if self._is_callback(lookup_view):
lookup_view = get_callable(lookup_view, True)
except (ImportError, AttributeError) as e:
raise NoReverseMatch("Error importing '%s': %s." % (lookup_view, e))
else:
if not callable(original_lookup) and callable(lookup_view):
warnings.warn(
'Reversing by dotted path is deprecated (%s).' % original_lookup,
RemovedInDjango110Warning, stacklevel=3
)
possibilities = self.reverse_dict.getlist(lookup_view)
for possibility, pattern, defaults in possibilities:
for result, params in possibility:
if args:
if len(args) != len(params):
continue
candidate_subs = dict(zip(params, text_args))
else:
if (set(kwargs.keys()) | set(defaults.keys()) != set(params) |
set(defaults.keys())):
continue
matches = True
for k, v in defaults.items():
if kwargs.get(k, v) != v:
matches = False
break
if not matches:
continue
candidate_subs = text_kwargs
# WSGI provides decoded URLs, without %xx escapes, and the URL
# resolver operates on such URLs. First substitute arguments
# without quoting to build a decoded URL and look for a match.
# Then, if we have a match, redo the substitution with quoted
# arguments in order to return a properly encoded URL.
candidate_pat = _prefix.replace('%', '%%') + result
if re.search('^%s%s' % (re.escape(_prefix), pattern), candidate_pat % candidate_subs, re.UNICODE):
# safe characters from `pchar` definition of RFC 3986
url = urlquote(candidate_pat % candidate_subs, safe=RFC3986_SUBDELIMS + str('/~:@'))
# Don't allow construction of scheme relative urls.
if url.startswith('//'):
url = '/%%2F%s' % url[2:]
return url
        # lookup_view can be a URL name, a dotted path, or a callable. Any of
# these can be passed in at the top, but callables are not friendly in
# error messages.
m = getattr(lookup_view, '__module__', None)
n = getattr(lookup_view, '__name__', None)
if m is not None and n is not None:
lookup_view_s = "%s.%s" % (m, n)
else:
lookup_view_s = lookup_view
patterns = [pattern for (possibility, pattern, defaults) in possibilities]
raise NoReverseMatch("Reverse for '%s' with arguments '%s' and keyword "
"arguments '%s' not found. %d pattern(s) tried: %s" %
(lookup_view_s, args, kwargs, len(patterns), patterns))
class LocaleRegexURLResolver(RegexURLResolver):
"""
A URL resolver that always matches the active language code as URL prefix.
Rather than taking a regex argument, we just override the ``regex``
function to always return the active language-code as regex.
"""
def __init__(self, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
super(LocaleRegexURLResolver, self).__init__(
None, urlconf_name, default_kwargs, app_name, namespace)
@property
def regex(self):
language_code = get_language()
if language_code not in self._regex_dict:
regex_compiled = re.compile('^%s/' % language_code, re.UNICODE)
self._regex_dict[language_code] = regex_compiled
return self._regex_dict[language_code]
def resolve(path, urlconf=None):
if urlconf is None:
urlconf = get_urlconf()
return get_resolver(urlconf).resolve(path)
def reverse(viewname, urlconf=None, args=None, kwargs=None, current_app=None):
if urlconf is None:
urlconf = get_urlconf()
resolver = get_resolver(urlconf)
args = args or []
kwargs = kwargs or {}
prefix = get_script_prefix()
if not isinstance(viewname, six.string_types):
view = viewname
else:
parts = viewname.split(':')
parts.reverse()
view = parts[0]
path = parts[1:]
if current_app:
current_path = current_app.split(':')
current_path.reverse()
else:
current_path = None
resolved_path = []
ns_pattern = ''
while path:
ns = path.pop()
current_ns = current_path.pop() if current_path else None
# Lookup the name to see if it could be an app identifier
try:
app_list = resolver.app_dict[ns]
# Yes! Path part matches an app in the current Resolver
if current_ns and current_ns in app_list:
# If we are reversing for a particular app,
# use that namespace
ns = current_ns
elif ns not in app_list:
# The name isn't shared by one of the instances
# (i.e., the default) so just pick the first instance
# as the default.
ns = app_list[0]
except KeyError:
pass
if ns != current_ns:
current_path = None
try:
extra, resolver = resolver.namespace_dict[ns]
resolved_path.append(ns)
ns_pattern = ns_pattern + extra
except KeyError as key:
if resolved_path:
raise NoReverseMatch(
"%s is not a registered namespace inside '%s'" %
(key, ':'.join(resolved_path)))
else:
raise NoReverseMatch("%s is not a registered namespace" %
key)
if ns_pattern:
resolver = get_ns_resolver(ns_pattern, resolver)
return force_text(iri_to_uri(resolver._reverse_with_prefix(view, prefix, *args, **kwargs)))
reverse_lazy = lazy(reverse, six.text_type)
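# Illustrative usage sketch (not part of the original module; the URL name
# 'news:detail' and its arguments are hypothetical):
#   url = reverse('news:detail', kwargs={'pk': 42})
#   lazy_url = reverse_lazy('news:detail', kwargs={'pk': 42})  # resolved on first use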
def clear_url_caches():
get_callable.cache_clear()
get_resolver.cache_clear()
get_ns_resolver.cache_clear()
def set_script_prefix(prefix):
"""
Sets the script prefix for the current thread.
"""
if not prefix.endswith('/'):
prefix += '/'
_prefixes.value = prefix
def get_script_prefix():
"""
Returns the currently active script prefix. Useful for client code that
wishes to construct their own URLs manually (although accessing the request
instance is normally going to be a lot cleaner).
"""
return getattr(_prefixes, "value", '/')
def clear_script_prefix():
"""
Unsets the script prefix for the current thread.
"""
try:
del _prefixes.value
except AttributeError:
pass
def set_urlconf(urlconf_name):
"""
Sets the URLconf for the current thread (overriding the default one in
settings). Set to None to revert back to the default.
"""
if urlconf_name:
_urlconfs.value = urlconf_name
else:
if hasattr(_urlconfs, "value"):
del _urlconfs.value
def get_urlconf(default=None):
"""
Returns the root URLconf to use for the current thread if it has been
changed from the default one.
"""
return getattr(_urlconfs, "value", default)
def is_valid_path(path, urlconf=None):
"""
Returns True if the given path resolves against the default URL resolver,
False otherwise.
This is a convenience method to make working with "is this a match?" cases
easier, avoiding unnecessarily indented try...except blocks.
"""
try:
resolve(path, urlconf)
return True
except Resolver404:
return False
def translate_url(url, lang_code):
"""
Given a URL (absolute or relative), try to get its translated version in
the `lang_code` language (either by i18n_patterns or by translated regex).
Return the original URL if no translated version is found.
"""
parsed = urlsplit(url)
try:
match = resolve(parsed.path)
except Resolver404:
pass
else:
to_be_reversed = "%s:%s" % (match.namespace, match.url_name) if match.namespace else match.url_name
with override(lang_code):
try:
url = reverse(to_be_reversed, args=match.args, kwargs=match.kwargs)
except NoReverseMatch:
pass
else:
url = urlunsplit((parsed.scheme, parsed.netloc, url, parsed.query, parsed.fragment))
return url
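# Illustrative behaviour sketch (not part of the original module; the path and
# language code are hypothetical): translate_url('/en/news/42/', 'fr') returns the
# translated counterpart when the path resolves and a French pattern exists,
# otherwise the original URL unchanged.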
|
bobcyw/django
|
django/core/urlresolvers.py
|
Python
|
bsd-3-clause
| 26,463 | 0.001474 |
#
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
#
"""JavaScript 1.7 keywords"""
keywords = set([
"break",
"case", "catch", "const", "continue",
"debugger", "default", "delete", "do",
"else",
"false", "finally", "for", "function",
"if", "in", "instanceof",
"let",
"new", "null",
"return",
"switch",
"this", "throw", "true", "try", "typeof",
"var", "void",
"yield",
"while", "with"
])
|
zynga/jasy
|
jasy/js/tokenize/Lang.py
|
Python
|
mit
| 458 | 0 |
# -*- coding: utf-8 -*-
import os
from codecs import encode, decode
import re
LINUX_ROOT = u"/"
def trim_quote(text):
if len(text) > 2 and text[0] == '"' and text[-1] == '"':
text = text[1:-1]
return text
def to_unicode(text, encoding="utf8"):
if type(text) == unicode:
return text
elif type(text) == str:
return decode(text, encoding)
else:
return unicode(text)
def remove_bom(input_filename, output_filename):
fp = open(input_filename, "rb")
bom = fp.read(3)
if bom != b'\xef\xbb\xbf':
raise ValueError("File doesn't have UTF-8 BOM")
fo = open(output_filename, "wb")
fo.write(fp.read())
fo.close()
fp.close()
def iconv_file(input_filename, output_filename, encoding, overwrite=False):
fp = open(input_filename, "rb")
ansi_content = fp.read()
fp.close()
if not overwrite:
if os.path.exists(output_filename):
return
with open(output_filename, "w") as fp:
if encoding.lower() in ["utf8", "utf-8", "u8", "utf", "utf_8"]:
fp.write(ansi_content)
else:
fp.write(encode(
decode(ansi_content, encoding),
"utf8"))
def read_file(filename, encoding="utf_8"):
'''
    Load the content of a text file (e.g. a CUE sheet), skipping a UTF-8 BOM if present.
'''
file_content_string = open(filename, "rb").read()
if file_content_string[:3] == b"\xef\xbb\xbf":
file_content_string = file_content_string[3:]
return decode(file_content_string, encoding)
def filename_safe(filename):
parts = filename.split(u"/")
for i in xrange(len(parts)):
for ch in u'''<>'"?*\\/:''':
parts[i] = parts[i].replace(ch, u"_").strip()
return u"/".join(parts).strip()
def path_from_pattern(pattern, d):
all_keys = {}
group_stack = []
buffer = ""
# parse all the keys in the pattern string
iter_formats = re.finditer(u'''%\(([^)]+)\)s''', pattern)
for f in iter_formats:
all_keys[f.span()[0]] = (
f.groups()[0],
f.span()[1] - f.span()[0]
)
# parse the pattern
i = 0 # current position in pattern string
while i < len(pattern):
# Case 1: we meet %(xxx)s
if i in all_keys: # if we find a key %(xxx)s
            if not group_stack: # not inside an optional <...> group
if all_keys[i][0] in d: # the key exists in `d`
buffer += d[all_keys[i][0]]
i += all_keys[i][1]
else:
if all_keys[i][0] in d:
group_stack[-1] += d[all_keys[i][0]]
i += all_keys[i][1]
                else: # key doesn't exist; skip the rest of this optional field
while i < len(pattern):
if pattern[i] == u">": break
i += 1
i += 1
group_stack.pop()
continue
# Case 2: we meet a `<`
if pattern[i] == u"<":
group_stack.append("")
i += 1
continue
# Case 3: we meet a `>`
if pattern[i] == u">":
if not group_stack:
raise ValueError("Invalid pattern! (unmatched `>`)")
opt_str = group_stack.pop()
if not group_stack:
buffer += opt_str
else:
group_stack[-1] += opt_str
i += 1
continue
# Otherwise
if not group_stack:
buffer += pattern[i]
else:
group_stack[-1] += pattern[i]
i += 1
if len(group_stack):
raise ValueError("Invalid pattern! (lack of `>`)")
return buffer
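# Illustrative examples (not part of the original module; the keys are hypothetical):
#   path_from_pattern(u"<%(discnumber)s/>%(title)s", {u"title": u"Intro"})
#     -> u"Intro"      (the optional <...> group is dropped: no "discnumber" key)
#   path_from_pattern(u"<%(discnumber)s/>%(title)s", {u"discnumber": u"1", u"title": u"Intro"})
#     -> u"1/Intro"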
def cli_escape(text):
for ch in u'''`''':
text = text.replace(ch, u'''\\%s''' % ch)
return text
def parent_folder(path):
parts = path.split(u"/")
if path == LINUX_ROOT:
raise ValueError(u"Can't get parent folder from linux dir /")
if parts[-1] == u"":
del parts[-1]
if parts[-1] == u".":
parts[-1] = u".."
elif parts[-1] == u"..":
parts.append(u"..")
elif len(parts) == 1:
return u"./"
else:
del parts[-1]
parts.append(u"")
return u"/".join(parts)
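# Illustrative examples (not part of the original module):
#   parent_folder(u"music/artist/album/") -> u"music/artist/"
#   parent_folder(u"album")                -> u"./"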
def ensure_parent_folder(path):
try:
os.makedirs(parent_folder(path), mode=0755)
except OSError, e:
if e.errno != os.errno.EEXIST:
raise
def command_copy_to(files, folder, base_command=u"cp"):
if len(files) == 0:
return u"echo"
if not folder:
folder = u"."
arguments = [base_command]
arguments.append(u"-n")
for f in files:
arguments.append(u'''"%s"''' % cli_escape(f))
if not folder.endswith(u"/"):
folder += u"/"
arguments.append(u'''"%s"''' % folder)
return u" ".join(arguments)
|
Gateswong/GatesMusicPet
|
music_pet/utils.py
|
Python
|
mit
| 4,849 | 0.000825 |
#!/usr/bin/python
# compute new sensitivity from formulae in manual
import math
print "Tilt Sensitivity Calculator"
print "X1 refers to the tilt measurement, in arc sec"
print "R0/R1 refer to the gravimeter readings, in mGal"
print "Get the current tilt sensitivity from data files or the Setup menu"
oldSens = float(raw_input("Current tilt sensitivity: "))
r0 = float(raw_input("R0 [mGal] : "))
r1 = float(raw_input("R1 [mGal] : "))
x1 = float(raw_input("X1 [arc sec]: "))
K = math.sqrt( 1 + (87000 * (r0-r1)/(x1*x1)) )
newSens = K * oldSens
print "New tilt Sensitivity: %f"%newSens
|
inkenbrandt/Earth_Tides
|
Micrograv/util/tilt_sens.py
|
Python
|
gpl-2.0
| 593 | 0.006745 |
# -*- coding: utf-8 -*-
import logging
import pickle
import re
from cacheops import invalidate_model
from django import http
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import redirect, render
from ..attendance import generate_roster_pdf
from ...forms.admin.blocks import BlockForm, QuickBlockForm
from ...models import EighthBlock, EighthScheduledActivity
from ....auth.decorators import eighth_admin_required
logger = logging.getLogger(__name__)
@eighth_admin_required
def add_block_view(request):
if request.method == "POST" and "custom_block" in request.POST:
form = QuickBlockForm(request.POST)
if form.is_valid():
form.save()
messages.success(request, "Successfully added block.")
return redirect("eighth_admin_dashboard")
else:
messages.error(request, "Error adding block.")
request.session["add_block_form"] = pickle.dumps(form)
date = None
show_letters = None
if "date" in request.GET:
date = request.GET.get("date")
if "date" in request.POST:
date = request.POST.get("date")
title_suffix = ""
if date:
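        # The request supplies the date as MM/DD/YYYY; rewrite it as ISO YYYY-MM-DD
        # for the EighthBlock queries below.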
date_format = re.compile(r'([0-9]{2})\/([0-9]{2})\/([0-9]{4})')
fmtdate = date_format.sub(r'\3-\1-\2', date)
logger.debug(fmtdate)
title_suffix = " - {}".format(fmtdate)
show_letters = True
if "modify_blocks" in request.POST:
letters = request.POST.getlist("blocks")
current_letters = []
blocks_day = EighthBlock.objects.filter(date=fmtdate)
for day in blocks_day:
current_letters.append(day.block_letter)
logger.debug(letters)
logger.debug(current_letters)
for l in letters:
if len(l) == 0:
continue
if l not in current_letters:
EighthBlock.objects.create(date=fmtdate, block_letter=l)
messages.success(request, "Successfully added {} Block on {}".format(l, fmtdate))
for l in current_letters:
if len(l) == 0:
continue
if l not in letters:
EighthBlock.objects.get(date=fmtdate, block_letter=l).delete()
messages.success(request, "Successfully removed {} Block on {}".format(l, fmtdate))
invalidate_model(EighthBlock)
letters = []
visible_blocks = ["A", "B", "C", "D", "E", "F", "G", "H"]
if show_letters:
onday = EighthBlock.objects.filter(date=fmtdate)
for l in visible_blocks:
exists = onday.filter(block_letter=l)
letters.append({"name": l, "exists": exists})
for blk in onday:
if blk.block_letter not in visible_blocks:
visible_blocks.append(blk.block_letter)
letters.append({"name": blk.block_letter, "exists": True})
context = {"admin_page_title": "Add or Remove Blocks{}".format(title_suffix),
"date": date,
"letters": letters,
"show_letters": show_letters,
"add_block_form": QuickBlockForm}
return render(request, "eighth/admin/add_block.html", context)
@eighth_admin_required
def edit_block_view(request, block_id):
try:
block = EighthBlock.objects.get(id=block_id)
except EighthBlock.DoesNotExist:
raise http.Http404
if request.method == "POST":
form = BlockForm(request.POST, instance=block)
if form.is_valid():
form.save()
invalidate_model(EighthBlock)
messages.success(request, "Successfully edited block.")
return redirect("eighth_admin_dashboard")
else:
messages.error(request, "Error adding block.")
else:
form = BlockForm(instance=block)
context = {"form": form, "delete_url": reverse("eighth_admin_delete_block", args=[block_id]), "admin_page_title": "Edit Block"}
return render(request, "eighth/admin/edit_form.html", context)
@eighth_admin_required
def delete_block_view(request, block_id):
try:
block = EighthBlock.objects.get(id=block_id)
except EighthBlock.DoesNotExist:
raise http.Http404
if request.method == "POST":
block.delete()
invalidate_model(EighthBlock)
messages.success(request, "Successfully deleted block.")
return redirect("eighth_admin_dashboard")
else:
context = {"admin_page_title": "Delete Block",
"item_name": str(block),
"help_text": "Deleting this block will remove all records "
"of it related to eighth period."}
return render(request, "eighth/admin/delete_form.html", context)
@eighth_admin_required
def print_block_rosters_view(request, block_id):
if "schact_id" in request.POST:
response = HttpResponse(content_type="application/pdf")
response["Content-Disposition"] = "inline; filename=\"block_{}_rosters.pdf\"".format(block_id)
sched_act_ids = request.POST.getlist("schact_id")
pdf_buffer = generate_roster_pdf(sched_act_ids, True)
response.write(pdf_buffer.getvalue())
pdf_buffer.close()
return response
else:
try:
block = EighthBlock.objects.get(id=block_id)
schacts = EighthScheduledActivity.objects.filter(block=block).order_by("sponsors")
schacts = sorted(schacts, key=lambda x: "{}".format(x.get_true_sponsors()))
except (EighthBlock.DoesNotExist, EighthScheduledActivity.DoesNotExist):
raise http.Http404
context = {"eighthblock": block, "admin_page_title": "Choose activities to print", "schacts": schacts}
return render(request, "eighth/admin/choose_roster_activities.html", context)
|
jacobajit/ion
|
intranet/apps/eighth/views/admin/blocks.py
|
Python
|
gpl-2.0
| 5,960 | 0.002349 |
import os
import numpy as np
import MMTK
class Grid:
"""
Class to read and write alchemical grids.
Data is a dictionary with
spacing - the grid spacing, in Angstroms.
counts - the number of points in each dimension.
vals - the values.
All are numpy arrays.
"""
def __init__(self):
pass
def read(self, FN, multiplier=None):
"""
Reads a grid in dx or netcdf format
The multiplier affects the origin and spacing.
"""
if FN is None:
raise Exception('File is not defined')
elif FN.endswith('.dx') or FN.endswith('.dx.gz'):
data = self._read_dx(FN)
elif FN.endswith('.nc'):
data = self._read_nc(FN)
else:
raise Exception('File type not supported')
if multiplier is not None:
data['origin'] = multiplier*data['origin']
data['spacing'] = multiplier*data['spacing']
return data
def _read_dx(self, FN):
"""
Reads a grid in dx format
"""
if FN.endswith('.dx'):
F = open(FN,'r')
else:
import gzip
F = gzip.open(FN,'r')
# Read the header
line = F.readline()
while line.find('object')==-1:
line = F.readline()
header = {}
header['counts'] = [int(x) for x in line.split(' ')[-3:]]
for name in ['origin','d0','d1','d2']:
header[name] = [float(x) for x in F.readline().split(' ')[-3:]]
F.readline()
header['npts'] = int(F.readline().split(' ')[-3])
# Test to make sure the grid type is okay.
    # These conditions are not absolutely essential,
# but they reduce the number of subtraction operations.
if not (header['d0'][1]==0 and header['d0'][2]==0 and
header['d1'][0]==0 and header['d1'][2]==0 and
header['d2'][0]==0 and header['d2'][1]==0):
raise Exception('Trilinear grid must be in original basis')
if not (header['d0'][0]>0 and header['d1'][1]>0 and header['d2'][2]>0):
raise Exception('Trilinear grid must have positive coordinates')
# Read the data
vals = np.ndarray(shape=header['npts'], dtype=float)
index = 0
while index<header['npts']:
line = F.readline()[:-1]
items = [float(item) for item in line.split()]
vals[index:index+len(items)] = items
index = index + len(items)
F.close()
data = {
'origin':np.array(header['origin']), \
'spacing':np.array([header['d0'][0],header['d1'][1],header['d2'][2]]), \
'counts':np.array(header['counts']), \
'vals':vals}
return data
def _read_nc(self, FN):
"""
Reads a grid in netcdf format
"""
from netCDF4 import Dataset
grid_nc = Dataset(FN,'r')
data = {}
for key in list(grid_nc.variables):
data[key] = np.array(grid_nc.variables[key][:][0][:])
grid_nc.close()
return data
def write(self, FN, data, multiplier=None):
"""
Writes a grid in dx or netcdf format.
The multiplier affects the origin and spacing.
"""
if multiplier is not None:
data_n = {'origin':multiplier*data['origin'],
'counts':data['counts'],
'spacing':multiplier*data['spacing'],
'vals':data['vals']}
else:
data_n = data
if FN.endswith('.nc'):
self._write_nc(FN, data_n)
elif FN.endswith('.dx') or FN.endswith('.dx.gz'):
self._write_dx(FN, data_n)
else:
raise Exception('File type not supported')
def _write_dx(self, FN, data):
"""
Writes a grid in dx format
"""
n_points = data['counts'][0]*data['counts'][1]*data['counts'][2]
if FN.endswith('.dx'):
F = open(FN,'w')
else:
import gzip
F = gzip.open(FN,'w')
F.write("""object 1 class gridpositions counts {0[0]} {0[1]} {0[2]}
origin {1[0]} {1[1]} {1[2]}
delta {2[0]} 0.0 0.0
delta 0.0 {2[1]} 0.0
delta 0.0 0.0 {2[2]}
object 2 class gridconnections counts {0[0]} {0[1]} {0[2]}
object 3 class array type double rank 0 items {3} data follows
""".format(data['counts'],data['origin'],data['spacing'],n_points))
for start_n in range(0,len(data['vals']),3):
F.write(' '.join(['%6e'%c for c in data['vals'][start_n:start_n+3]]) + '\n')
F.write('object 4 class field\n')
F.write('component "positions" value 1\n')
F.write('component "connections" value 2\n')
F.write('component "data" value 3\n')
F.close()
def _write_nc(self, FN, data):
"""
Writes a grid in netcdf format
"""
n_points = data['counts'][0]*data['counts'][1]*data['counts'][2]
from netCDF4 import Dataset
grid_nc = Dataset(FN,'w',format='NETCDF4')
grid_nc.createDimension('one', 1)
grid_nc.createDimension('n_cartesian', 3)
grid_nc.createDimension('n_points', n_points)
grid_nc.createVariable('origin','f8',('one','n_cartesian'))
grid_nc.createVariable('counts','i8',('one','n_cartesian'))
grid_nc.createVariable('spacing','f8',('one','n_cartesian'))
grid_nc.createVariable('vals','f8',('one','n_points'), zlib=True)
for key in data.keys():
grid_nc.variables[key][:] = data[key]
grid_nc.close()
def truncate(self, in_FN, out_FN, counts, multiplier=None):
"""
Truncates the grid at the origin and
with a limited number of counts per dimension
multiplier is for the values, not the grid scaling
"""
data_o = self.read(in_FN)
nyz_o = data_o['counts'][1]*data_o['counts'][2]
nz_o = data_o['counts'][2]
min_i = int(-data_o['origin'][0]/data_o['spacing'][0])
min_j = int(-data_o['origin'][1]/data_o['spacing'][1])
min_k = int(-data_o['origin'][2]/data_o['spacing'][2])
# vals = np.ndarray(shape=tuple(counts), dtype=float)
# for i in range(counts[0]):
# for j in range(counts[1]):
# for k in range(counts[2]):
# vals[i,j,k] = data_o['vals'][(i+min_i)*nyz_o + (j+min_j)*nz_o + (k+min_k)]
vals = np.array(
[[[data_o['vals'][(i+min_i)*nyz_o + (j+min_j)*nz_o + (k+min_k)]
for k in range(counts[2])]
for j in range(counts[1])]
for i in range(counts[0])])
if multiplier is not None:
vals = vals*multiplier
data_n = {'origin':np.array([0., 0., 0.]), \
'counts':counts, 'spacing':data_o['spacing'], 'vals':vals.flatten()}
self.write(out_FN,data_n)
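# Illustrative usage sketch (not part of the original module; file names are hypothetical):
#   grid_io = Grid()
#   data = grid_io.read('LJr.dx', multiplier=0.1)  # scale origin/spacing from Angstroms to nm
#   grid_io.write('LJr.nc', data)                  # rewrite the same grid in netCDF format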
class crd:
"""
Class to read and write AMBER coordinate/restart and trajectory files.
"""
def __init__(self):
pass
def read(self, FN, natoms=None, return_title=False, \
multiplier=None, trajectory=False):
"""
Reads an AMBER coordinate/restart or trajectory file.
If natoms is not none, then the coordinates will be split
into a list of natoms X 3 arrays.
The coordinates will be multiplied by multiplier.
    A multiplier of 0.1 converts Angstroms into nanometers.
"""
if not os.path.isfile(FN):
raise Exception('Coordinate file %s does not exist!'%FN)
if FN.endswith('.gz'):
import gzip
F = gzip.open(FN, 'r')
else:
F = open(FN,'r')
dat = F.read().strip().split('\n')
F.close()
title = dat.pop(0) # Title
if len(dat[0].split())>1:
# VMD format (does not specify number of atoms)
crd = []
for line in dat:
crd = crd + [float(x) for x in line.split()]
crd = np.resize(crd,(len(crd)/3,3))
else:
# AMBER format
file_natoms = int(dat.pop(0)) # Number of atoms
if (natoms is not None) and (file_natoms!=natoms):
print "Incorrect number of atoms in crd file"
return np.array([])
if trajectory:
w = 8 # For mdcrd
else:
w = 12 # For inpcrd
crd = []
for line in dat:
crd = crd + [float(line[x:x+w]) for x in range(0,len(line),w)]
crd = np.resize(crd,(len(crd)/3,3))
if multiplier is not None:
crd = multiplier*crd
if (natoms is not None):
crd = np.vsplit(crd,crd.shape[0]/natoms)
print " read %d configurations from %s"%(len(crd), FN)
if return_title:
return (crd, title)
else:
return crd
def write(self, FN, crd, title='', append=False, \
multiplier=None, trajectory=False):
"""
Writes an AMBER coordinate/restart or trajectory file
"""
if (append and os.path.isfile(FN)):
if FN.endswith('.gz'):
import gzip
F = gzip.open(FN,'a')
else:
F = open(FN,'a')
else:
if os.path.isfile(FN):
os.rename(FN,FN+'.BAK')
if FN.endswith('.gz'):
import gzip
F = gzip.open(FN,'w')
else:
F = open(FN,'w')
# Write the header
F.write(title+'\n') # Title
if not trajectory:
F.write('%d\n'%crd.shape[0])
if not trajectory:
flattened = np.vstack(crd).flatten()
if multiplier is not None:
flattened = multiplier*flattened
for n in range(0,len(flattened),6):
F.write(''.join(['%12.7f'%val for val in flattened[n:n+6]]) + '\n')
else:
for c in crd:
flattened = c.flatten()
if multiplier is not None:
flattened = multiplier*flattened
for n in range(0,len(flattened),10):
F.write(''.join(['%8.3f'%val for val in flattened[n:n+10]]) + '\n')
F.close()
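# Illustrative usage sketch (not part of the original module; the file name and atom
# count are hypothetical):
#   crd_io = crd()
#   confs = crd_io.read('ligand.inpcrd', natoms=22, multiplier=0.1)  # Angstroms -> nm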
class dock6_mol2:
"""
Class to read output from UCSF DOCK 6
"""
def __init__(self):
pass
def read(self, FN, reorder=None):
crds = []
E = {}
if (FN is None) or (not os.path.isfile(FN)):
return (crds,E)
# Specifically to read output from UCSF dock6
if FN.endswith('.mol2'):
mol2F = open(FN,'r')
elif FN.endswith('.mol2.gz'):
import gzip
mol2F = gzip.open(FN,'r')
else:
raise Exception('Unknown file type')
models = mol2F.read().strip().split('########## Name:')
mol2F.close()
models.pop(0)
if len(models)>0:
for line in models[0].split('\n'):
if line.startswith('##########'):
label = line[11:line.find(':')].strip()
E[label] = []
for model in models:
fields = model.split('<TRIPOS>')
crd = np.array([l.split()[2:5] for l in fields[2].split('\n')[1:-1]],
dtype=float)/10.
if reorder is not None:
crd = crd[reorder,:]
for line in fields[0].split('\n'):
if line.startswith('##########'):
label = line[11:line.find(':')].strip()
E[label].append(float(line.split()[-1]))
crds.append(crd)
return (crds,E)
class dcd:
"""
Class to write DCD files
"""
def __init__(self, molecule, ligand_atom_order=None, \
receptorConf=None, ligand_first_atom=0):
self.molecule = molecule
self.receptorConf = receptorConf
self.ligand_first_atom = ligand_first_atom
if ligand_atom_order is None:
self.ligand_atom_order = range(len(self.molecule.atoms))
else:
self.ligand_atom_order = ligand_atom_order
pass
def write(self, FN, confs,
includeLigand=True, includeReceptor=False,
factor=1.0/MMTK.Units.Ang,
delta_t=0.1):
"""
Writes a DCD file for a trajectory.
If includeReceptor==True, the receptor coordinates are included.
"""
import MMTK_DCD # @UnresolvedImport
from Scientific import N
if not isinstance(confs,list):
confs = [confs]
if includeReceptor and (self.receptorConf is None):
raise Exception("Missing receptor configuration")
n_atoms = 0
if includeReceptor:
receptor_x0 = factor*self.receptorConf[:self.ligand_first_atom,0]
receptor_y0 = factor*self.receptorConf[:self.ligand_first_atom,1]
receptor_z0 = factor*self.receptorConf[:self.ligand_first_atom,2]
receptor_x1 = factor*self.receptorConf[self.ligand_first_atom:,0]
receptor_y1 = factor*self.receptorConf[self.ligand_first_atom:,1]
receptor_z1 = factor*self.receptorConf[self.ligand_first_atom:,2]
n_atoms += self.receptorConf.shape[0]
if includeLigand:
n_atoms += len(self.molecule.atoms)
n_snaps = len(confs)
fd = MMTK_DCD.writeOpenDCD(FN, n_atoms, n_snaps, 1, 1, delta_t)
if includeReceptor and includeLigand:
for array in confs:
array = factor*array
x = N.concatenate((receptor_x0,N.take(array[:,0],self.ligand_atom_order),receptor_x1)).astype(N.Float16)
y = N.concatenate((receptor_y0,N.take(array[:,1],self.ligand_atom_order),receptor_y1)).astype(N.Float16)
z = N.concatenate((receptor_z0,N.take(array[:,2],self.ligand_atom_order),receptor_z1)).astype(N.Float16)
MMTK_DCD.writeDCDStep(fd, x, y, z)
MMTK_DCD.writeCloseDCD(fd)
elif includeLigand:
for array in confs:
array = factor*array
x = N.take(array[:,0], self.ligand_atom_order).astype(N.Float16)
y = N.take(array[:,1], self.ligand_atom_order).astype(N.Float16)
z = N.take(array[:,2], self.ligand_atom_order).astype(N.Float16)
MMTK_DCD.writeDCDStep(fd, x, y, z)
MMTK_DCD.writeCloseDCD(fd)
else:
x = N.concatenate((receptor_x0,receptor_x1)).astype(N.Float16)
y = N.concatenate((receptor_y0,receptor_y1)).astype(N.Float16)
z = N.concatenate((receptor_z0,receptor_z1)).astype(N.Float16)
MMTK_DCD.writeDCDStep(fd, x, y, z)
MMTK_DCD.writeCloseDCD(fd)
class prmtop:
"""
Class to read AMBER prmtop files
"""
def __init__(self):
pass
def read(self, FN, varnames=['RESIDUE_LABEL','RESIDUE_POINTER']):
"""
Reads an AMBER prmtop file, returning a dictionary
"""
if not os.path.isfile(FN):
raise Exception('prmtop file %s does not exist!'%FN)
if FN.endswith('.gz'):
import gzip
F = gzip.open(FN, 'r')
else:
F = open(FN,'r')
data = F.read().split('%FLAG ')
F.close()
prmtop = {}
for record in data:
name = record[:record.find('\n')].strip()
if name in varnames:
prmtop[name] = self._load_record(record)
return prmtop
def _load_record(self, record):
items = []
lines = record.split('\n')
lines.pop(0) # Name
FORMAT = lines.pop(0).strip()[8:-1] # Format
if FORMAT.find('a')>-1: # Text
w = int(FORMAT[FORMAT.find('a')+1:])
for line in lines:
items = items + [line[x:x+w] for x in range(0,len(line),w)]
return np.array(items)
elif FORMAT.find('I')>-1: # Integer
w = int(FORMAT[FORMAT.find('I')+1:])
for line in lines:
items = items + [int(line[x:x+w]) for x in range(0,len(line),w)]
return np.array(items, dtype=int)
elif FORMAT.find('E')>-1: # Scientific
w = int(FORMAT[FORMAT.find('E')+1:FORMAT.find('.')])
for line in lines:
items = items + [float(line[x:x+w]) for x in range(0,len(line),w)]
return np.array(items, dtype=float)
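# Illustrative usage sketch (not part of the original module; the file name is hypothetical):
#   top = prmtop().read('ligand.prmtop', varnames=['RESIDUE_LABEL', 'RESIDUE_POINTER'])
#   top['RESIDUE_LABEL']  # numpy array of residue names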
|
luizcieslak/AlGDock
|
AlGDock/IO.py
|
Python
|
mit
| 14,740 | 0.021574 |
import extendedHand
from extendedHand import *
import math
from math import *
'''
Function that calculates the @percent % of @whole
'''
def percentage(whole, percent):
return (whole * percent) / 100.0
'''
Function that detects the run gesture.
@hand: the hand that we're analysing.
@tolerance: the percentage of tolerance in the measurements.
'''
def detectRunGesture(hand, tolerance):
    #We use the index and the middle finger like two legs and we're going to simulate the run action.
#Then we need the position information about these fingers.
index = getFinger(hand, 'index')
middle = getFinger(hand, 'middle')
#We get the tips position of the two fingers.
index_tip_pos = index.bone(3).next_joint
middle_tip_pos = middle.bone(3).next_joint
#We calculate the signed difference between the Y coordenates.
#We use the sign to check that the fingers have been moved.
diffBtwTipsY = index_tip_pos[1] - middle_tip_pos[1]
#We check the palm orientation and we want a minimum distance between the two fingers.
if detectRunGesture.sign*diffBtwTipsY <= (-30 + percentage(30, tolerance)) and palmOrientation(hand) == 'down':
detectRunGesture.sign = copysign(1, diffBtwTipsY)
return True
else:
return False
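# detectRunGesture.sign is a function attribute used as persistent state between calls:
# whenever the gesture fires, the stored sign is updated to the sign of the current
# difference, so the two fingertips must keep alternating above/below each other,
# like legs while running.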
detectRunGesture.sign = -1
'''
Function that detects the OK gesture.
@hand: the hand that we're analysing.
@tolerance: the percentage of tolerance in the measurements.
'''
def detectOKGesture(hand, tolerance):
#We use the index finger and the thumb so we need the position information about these fingers.
thumb = getFinger(hand, 'thumb')
index = getFinger(hand, 'index')
#We get the tips position of the two fingers.
thumb_tip_pos = thumb.bone(3).next_joint
index_tip_pos = index.bone(3).next_joint
#We calculate the distance between the tips.
distanceBtwTips = sqrt(pow(thumb_tip_pos[0]-index_tip_pos[0],2) + pow(thumb_tip_pos[1]-index_tip_pos[1],2) + pow(thumb_tip_pos[2]-index_tip_pos[2],2))
#We check the palm orientation and the distance between tips.
if distanceBtwTips < (30 + percentage(30, tolerance)) and palmOrientation(hand) == 'down':
return True
else:
return False
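# Illustrative usage sketch (not part of the original module; `frame` stands for a
# hypothetical Leap Motion frame obtained elsewhere):
#   hand = frame.hands[0]
#   if detectOKGesture(hand, 10):
#       print 'OK gesture detected'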
|
LuisSuall/Sleight_of_hand
|
Finger_run/utils/gesture.py
|
Python
|
gpl-2.0
| 2,182 | 0.024748 |
# Import Functions
import urllib2
import xml.etree.ElementTree as ET
# Get online XML file
url="https://cghub.ucsc.edu/cghub/metadata/analysisDetail/a8f16339-4802-440c-81b6-d7a6635e604b"
request=urllib2.Request(url, headers={"Accept" : "application/xml"})
u=urllib2.urlopen(request)
tree=ET.parse(u)
root=tree.getroot()
dict={}
for i in root.iter():
if i.text!=None:
dict[i.tag]=i.text.strip()
else:
dict[i.tag]=""
for key in sorted(dict.keys(), key=lambda v: v.upper()):
print key+":"+dict[key]
|
minesh1291/Learning-Python
|
solutions/parse-xml.py
|
Python
|
apache-2.0
| 508 | 0.033465 |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import struct
import subprocess
import sys
import tempfile
def main(args):
executor = MacTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecCopyBundleResource(self, source, dest, convert_to_binary):
"""Copies a resource file to the bundle/Resources directory, performing any
necessary compilation on each resource."""
extension = os.path.splitext(source)[1].lower()
if os.path.isdir(source):
# Copy tree.
# TODO(thakis): This copies file attributes like mtime, while the
# single-file branch below doesn't. This should probably be changed to
# be consistent with the single-file branch.
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
elif extension == '.xib':
return self._CopyXIBFile(source, dest)
elif extension == '.storyboard':
return self._CopyXIBFile(source, dest)
elif extension == '.strings':
self._CopyStringsFile(source, dest)
else:
if os.path.exists(dest):
os.unlink(dest)
shutil.copy(source, dest)
if extension in ('.plist', '.strings') and convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _CopyXIBFile(self, source, dest):
"""Compiles a XIB file with ibtool into a binary plist in the bundle."""
# ibtool sometimes crashes with relative paths. See crbug.com/314728.
base = os.path.dirname(os.path.realpath(__file__))
if os.path.relpath(source):
source = os.path.join(base, source)
if os.path.relpath(dest):
dest = os.path.join(base, dest)
args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices']
if os.environ['XCODE_VERSION_ACTUAL'] > '0700':
args.extend(['--auto-activate-custom-fonts'])
if 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ:
args.extend([
'--target-device', 'iphone', '--target-device', 'ipad',
'--minimum-deployment-target',
os.environ['IPHONEOS_DEPLOYMENT_TARGET'],
])
else:
args.extend([
'--target-device', 'mac',
'--minimum-deployment-target',
os.environ['MACOSX_DEPLOYMENT_TARGET'],
])
args.extend(['--output-format', 'human-readable-text', '--compile', dest,
source])
ibtool_section_re = re.compile(r'/\*.*\*/')
ibtool_re = re.compile(r'.*note:.*is clipping its content')
ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
current_section_header = None
for line in ibtoolout.stdout:
if ibtool_section_re.match(line):
current_section_header = line
elif not ibtool_re.match(line):
if current_section_header:
sys.stdout.write(current_section_header)
current_section_header = None
sys.stdout.write(line)
return ibtoolout.returncode
def _ConvertToBinary(self, dest):
subprocess.check_call([
'xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest])
def _CopyStringsFile(self, source, dest):
"""Copies a .strings file using iconv to reconvert the input into UTF-16."""
input_code = self._DetectInputEncoding(source) or "UTF-8"
# Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
# CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
# CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
# semicolon in dictionary.
# on invalid files. Do the same kind of validation.
import CoreFoundation
s = open(source, 'rb').read()
d = CoreFoundation.CFDataCreate(None, s, len(s))
_, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
if error:
return
fp = open(dest, 'wb')
fp.write(s.decode(input_code).encode('UTF-16'))
fp.close()
def _DetectInputEncoding(self, file_name):
"""Reads the first few bytes from file_name and tries to guess the text
encoding. Returns None as a guess if it can't detect it."""
fp = open(file_name, 'rb')
try:
header = fp.read(3)
    except Exception:
fp.close()
return None
fp.close()
if header.startswith("\xFE\xFF"):
return "UTF-16"
elif header.startswith("\xFF\xFE"):
return "UTF-16"
elif header.startswith("\xEF\xBB\xBF"):
return "UTF-8"
else:
return None
def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys):
"""Copies the |source| Info.plist to the destination directory |dest|."""
# Read the source Info.plist into memory.
fd = open(source, 'r')
lines = fd.read()
fd.close()
# Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
plist = plistlib.readPlistFromString(lines)
if keys:
plist = dict(plist.items() + json.loads(keys[0]).items())
lines = plistlib.writePlistToString(plist)
# Go through all the environment variables and replace them as variables in
# the file.
IDENT_RE = re.compile(r'[/\s]')
for key in os.environ:
if key.startswith('_'):
continue
evar = '${%s}' % key
evalue = os.environ[key]
lines = string.replace(lines, evar, evalue)
# Xcode supports various suffices on environment variables, which are
# all undocumented. :rfc1034identifier is used in the standard project
# template these days, and :identifier was used earlier. They are used to
# convert non-url characters into things that look like valid urls --
# except that the replacement character for :identifier, '_' isn't valid
# in a URL either -- oops, hence :rfc1034identifier was born.
evar = '${%s:identifier}' % key
evalue = IDENT_RE.sub('_', os.environ[key])
lines = string.replace(lines, evar, evalue)
evar = '${%s:rfc1034identifier}' % key
evalue = IDENT_RE.sub('-', os.environ[key])
lines = string.replace(lines, evar, evalue)
# Remove any keys with values that haven't been replaced.
lines = lines.split('\n')
for i in range(len(lines)):
if lines[i].strip().startswith("<string>${"):
lines[i] = None
lines[i - 1] = None
lines = '\n'.join(filter(lambda x: x is not None, lines))
# Write out the file with variables replaced.
fd = open(dest, 'w')
fd.write(lines)
fd.close()
# Now write out PkgInfo file now that the Info.plist file has been
# "compiled".
self._WritePkgInfo(dest)
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _WritePkgInfo(self, info_plist):
"""This writes the PkgInfo file from the data stored in Info.plist."""
plist = plistlib.readPlist(info_plist)
if not plist:
return
# Only create PkgInfo for executable types.
package_type = plist['CFBundlePackageType']
if package_type != 'APPL':
return
# The format of PkgInfo is eight characters, representing the bundle type
# and bundle signature, each four characters. If that is missing, four
# '?' characters are used instead.
signature_code = plist.get('CFBundleSignature', '????')
if len(signature_code) != 4: # Wrong length resets everything, too.
signature_code = '?' * 4
dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
fp = open(dest, 'w')
fp.write('%s%s' % (package_type, signature_code))
fp.close()
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
fcntl.flock(fd, fcntl.LOCK_EX)
return subprocess.call(cmd_list)
def ExecFilterLibtool(self, *cmd_list):
"""Calls libtool and filters out '/path/to/libtool: file: foo.o has no
symbols'."""
libtool_re = re.compile(r'^.*libtool: (?:for architecture: \S* )?'
r'file: .* has no symbols$')
libtool_re5 = re.compile(
r'^.*libtool: warning for library: ' +
r'.* the table of contents is empty ' +
r'\(no object file members in the library define global symbols\)$')
env = os.environ.copy()
# Ref:
# http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c
# The problem with this flag is that it resets the file mtime on the file to
# epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone.
env['ZERO_AR_DATE'] = '1'
libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env)
_, err = libtoolout.communicate()
for line in err.splitlines():
if not libtool_re.match(line) and not libtool_re5.match(line):
print >>sys.stderr, line
# Unconditionally touch the output .a file on the command line if present
# and the command succeeded. A bit hacky.
if not libtoolout.returncode:
for i in range(len(cmd_list) - 1):
if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'):
os.utime(cmd_list[i+1], None)
break
return libtoolout.returncode
def ExecPackageIosFramework(self, framework):
# Find the name of the binary based on the part before the ".framework".
binary = os.path.basename(framework).split('.')[0]
    module_path = os.path.join(framework, 'Modules')
if not os.path.exists(module_path):
os.mkdir(module_path)
module_template = 'framework module %s {\n' \
' umbrella header "%s.h"\n' \
'\n' \
' export *\n' \
' module * { export * }\n' \
'}\n' % (binary, binary)
module_file = open(os.path.join(module_path, 'module.modulemap'), "w")
module_file.write(module_template)
module_file.close()
def ExecPackageFramework(self, framework, version):
"""Takes a path to Something.framework and the Current version of that and
sets up all the symlinks."""
# Find the name of the binary based on the part before the ".framework".
binary = os.path.basename(framework).split('.')[0]
CURRENT = 'Current'
RESOURCES = 'Resources'
VERSIONS = 'Versions'
if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
# Binary-less frameworks don't seem to contain symlinks (see e.g.
# chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
return
# Move into the framework directory to set the symlinks correctly.
pwd = os.getcwd()
os.chdir(framework)
# Set up the Current version.
self._Relink(version, os.path.join(VERSIONS, CURRENT))
# Set up the root symlinks.
self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)
# Back to where we were before!
os.chdir(pwd)
def _Relink(self, dest, link):
"""Creates a symlink to |dest| named |link|. If |link| already exists,
it is overwritten."""
if os.path.lexists(link):
os.remove(link)
os.symlink(dest, link)
def ExecCompileIosFrameworkHeaderMap(self, out, framework, *all_headers):
framework_name = os.path.basename(framework).split('.')[0]
all_headers = map(os.path.abspath, all_headers)
filelist = {}
for header in all_headers:
filename = os.path.basename(header)
filelist[filename] = header
filelist[os.path.join(framework_name, filename)] = header
WriteHmap(out, filelist)
def ExecCopyIosFrameworkHeaders(self, framework, *copy_headers):
    header_path = os.path.join(framework, 'Headers')
if not os.path.exists(header_path):
os.makedirs(header_path)
for header in copy_headers:
shutil.copy(header, os.path.join(header_path, os.path.basename(header)))
def ExecCompileXcassets(self, keys, *inputs):
"""Compiles multiple .xcassets files into a single .car file.
This invokes 'actool' to compile all the inputs .xcassets files. The
|keys| arguments is a json-encoded dictionary of extra arguments to
pass to 'actool' when the asset catalogs contains an application icon
or a launch image.
Note that 'actool' does not create the Assets.car file if the asset
catalogs does not contains imageset.
"""
command_line = [
'xcrun', 'actool', '--output-format', 'human-readable-text',
'--compress-pngs', '--notices', '--warnings', '--errors',
]
is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ
if is_iphone_target:
platform = os.environ['CONFIGURATION'].split('-')[-1]
if platform not in ('iphoneos', 'iphonesimulator'):
platform = 'iphonesimulator'
command_line.extend([
'--platform', platform, '--target-device', 'iphone',
'--target-device', 'ipad', '--minimum-deployment-target',
os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile',
os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']),
])
else:
command_line.extend([
'--platform', 'macosx', '--target-device', 'mac',
'--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'],
'--compile',
os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']),
])
if keys:
keys = json.loads(keys)
for key, value in keys.iteritems():
arg_name = '--' + key
if isinstance(value, bool):
if value:
command_line.append(arg_name)
elif isinstance(value, list):
for v in value:
command_line.append(arg_name)
command_line.append(str(v))
else:
command_line.append(arg_name)
command_line.append(str(value))
# Note: actool crashes if inputs path are relative, so use os.path.abspath
# to get absolute path name for inputs.
command_line.extend(map(os.path.abspath, inputs))
subprocess.check_call(command_line)
def ExecMergeInfoPlist(self, output, *inputs):
"""Merge multiple .plist files into a single .plist file."""
merged_plist = {}
for path in inputs:
plist = self._LoadPlistMaybeBinary(path)
self._MergePlist(merged_plist, plist)
plistlib.writePlist(merged_plist, output)
def ExecCodeSignBundle(self, key, entitlements, provisioning):
"""Code sign a bundle.
This function tries to code sign an iOS bundle, following the same
algorithm as Xcode:
1. pick the provisioning profile that best match the bundle identifier,
and copy it into the bundle as embedded.mobileprovision,
2. copy Entitlements.plist from user or SDK next to the bundle,
3. code sign the bundle.
"""
substitutions, overrides = self._InstallProvisioningProfile(
provisioning, self._GetCFBundleIdentifier())
entitlements_path = self._InstallEntitlements(
entitlements, substitutions, overrides)
subprocess.check_call([
'codesign', '--force', '--sign', key, '--entitlements',
entitlements_path, '--timestamp=none', os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['FULL_PRODUCT_NAME'])])
def _InstallProvisioningProfile(self, profile, bundle_identifier):
"""Installs embedded.mobileprovision into the bundle.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple containing two dictionary: variables substitutions and values
to overrides when generating the entitlements file.
"""
source_path, provisioning_data, team_id = self._FindProvisioningProfile(
profile, bundle_identifier)
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'embedded.mobileprovision')
shutil.copy2(source_path, target_path)
substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
return substitutions, provisioning_data['Entitlements']
def _FindProvisioningProfile(self, profile, bundle_identifier):
"""Finds the .mobileprovision file to use for signing the bundle.
Checks all the installed provisioning profiles (or if the user specified
the PROVISIONING_PROFILE variable, only consult it) and select the most
specific that correspond to the bundle identifier.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple of the path to the selected provisioning profile, the data of
the embedded plist in the provisioning profile and the team identifier
to use for code signing.
Raises:
SystemExit: if no .mobileprovision can be used to sign the bundle.
"""
profiles_dir = os.path.join(
os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
if not os.path.isdir(profiles_dir):
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
provisioning_profiles = None
if profile:
profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
if os.path.exists(profile_path):
provisioning_profiles = [profile_path]
if not provisioning_profiles:
provisioning_profiles = glob.glob(
os.path.join(profiles_dir, '*.mobileprovision'))
valid_provisioning_profiles = {}
for profile_path in provisioning_profiles:
profile_data = self._LoadProvisioningProfile(profile_path)
app_id_pattern = profile_data.get(
'Entitlements', {}).get('application-identifier', '')
for team_identifier in profile_data.get('TeamIdentifier', []):
app_id = '%s.%s' % (team_identifier, bundle_identifier)
if fnmatch.fnmatch(app_id, app_id_pattern):
valid_provisioning_profiles[app_id_pattern] = (
profile_path, profile_data, team_identifier)
if not valid_provisioning_profiles:
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
# If the user has multiple provisioning profiles installed that can be
# used for ${bundle_identifier}, pick the most specific one (ie. the
# provisioning profile whose pattern is the longest).
selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
return valid_provisioning_profiles[selected_key]
def _LoadProvisioningProfile(self, profile_path):
"""Extracts the plist embedded in a provisioning profile.
Args:
profile_path: string, path to the .mobileprovision file
Returns:
Content of the plist embedded in the provisioning profile as a dictionary.
"""
with tempfile.NamedTemporaryFile() as temp:
subprocess.check_call([
'security', 'cms', '-D', '-i', profile_path, '-o', temp.name])
return self._LoadPlistMaybeBinary(temp.name)
def _MergePlist(self, merged_plist, plist):
"""Merge |plist| into |merged_plist|."""
for key, value in plist.iteritems():
if isinstance(value, dict):
merged_value = merged_plist.get(key, {})
if isinstance(merged_value, dict):
self._MergePlist(merged_value, value)
merged_plist[key] = merged_value
else:
merged_plist[key] = value
else:
merged_plist[key] = value
def _LoadPlistMaybeBinary(self, plist_path):
"""Loads into a memory a plist possibly encoded in binary format.
This is a wrapper around plistlib.readPlist that tries to convert the
plist to the XML format if it can't be parsed (assuming that it is in
the binary format).
Args:
plist_path: string, path to a plist file, in XML or binary format
Returns:
Content of the plist as a dictionary.
"""
try:
# First, try to read the file using plistlib that only supports XML,
# and if an exception is raised, convert a temporary copy to XML and
# load that copy.
return plistlib.readPlist(plist_path)
except:
pass
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(plist_path, temp.name)
subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
return plistlib.readPlist(temp.name)
def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
"""Constructs a dictionary of variable substitutions for Entitlements.plist.
Args:
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
app_identifier_prefix: string, value for AppIdentifierPrefix
Returns:
Dictionary of substitutions to apply when generating Entitlements.plist.
"""
return {
'CFBundleIdentifier': bundle_identifier,
'AppIdentifierPrefix': app_identifier_prefix,
}
def _GetCFBundleIdentifier(self):
"""Extracts CFBundleIdentifier value from Info.plist in the bundle.
Returns:
Value of CFBundleIdentifier in the Info.plist located in the bundle.
"""
info_plist_path = os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['INFOPLIST_PATH'])
info_plist_data = self._LoadPlistMaybeBinary(info_plist_path)
return info_plist_data['CFBundleIdentifier']
def _InstallEntitlements(self, entitlements, substitutions, overrides):
"""Generates and install the ${BundleName}.xcent entitlements file.
Expands variables "$(variable)" pattern in the source entitlements file,
add extra entitlements defined in the .mobileprovision file and the copy
the generated plist to "${BundlePath}.xcent".
Args:
entitlements: string, optional, path to the Entitlements.plist template
to use, defaults to "${SDKROOT}/Entitlements.plist"
substitutions: dictionary, variable substitutions
overrides: dictionary, values to add to the entitlements
Returns:
Path to the generated entitlements file.
"""
source_path = entitlements
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['PRODUCT_NAME'] + '.xcent')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'],
'Entitlements.plist')
shutil.copy2(source_path, target_path)
data = self._LoadPlistMaybeBinary(target_path)
data = self._ExpandVariables(data, substitutions)
if overrides:
for key in overrides:
if key not in data:
data[key] = overrides[key]
plistlib.writePlist(data, target_path)
return target_path
def _ExpandVariables(self, data, substitutions):
"""Expands variables "$(variable)" in data.
Args:
data: object, can be either string, list or dictionary
substitutions: dictionary, variable substitutions to perform
Returns:
Copy of data where each references to "$(variable)" has been replaced
by the corresponding value found in substitutions, or left intact if
the key was not found.
"""
if isinstance(data, str):
for key, value in substitutions.iteritems():
data = data.replace('$(%s)' % key, value)
return data
if isinstance(data, list):
return [self._ExpandVariables(v, substitutions) for v in data]
if isinstance(data, dict):
return {k: self._ExpandVariables(data[k], substitutions) for k in data}
return data
def NextGreaterPowerOf2(x):
return 2**(x-1).bit_length()
def WriteHmap(output_name, filelist):
"""Generates a header map based on |filelist|.
Per Mark Mentovai:
A header map is structured essentially as a hash table, keyed by names used
in #includes, and providing pathnames to the actual files.
The implementation below and the comment above comes from inspecting:
http://www.opensource.apple.com/source/distcc/distcc-2503/distcc_dist/include_server/headermap.py?txt
while also looking at the implementation in clang in:
https://llvm.org/svn/llvm-project/cfe/trunk/lib/Lex/HeaderMap.cpp
"""
magic = 1751998832
version = 1
_reserved = 0
count = len(filelist)
capacity = NextGreaterPowerOf2(count)
strings_offset = 24 + (12 * capacity)
max_value_length = len(max(filelist.items(), key=lambda (k,v):len(v))[1])
out = open(output_name, "wb")
out.write(struct.pack('<LHHLLLL', magic, version, _reserved, strings_offset,
count, capacity, max_value_length))
# Create empty hashmap buckets.
buckets = [None] * capacity
for file, path in filelist.items():
key = 0
for c in file:
key += ord(c.lower()) * 13
# Fill next empty bucket.
while buckets[key & capacity - 1] is not None:
key = key + 1
buckets[key & capacity - 1] = (file, path)
next_offset = 1
for bucket in buckets:
if bucket is None:
out.write(struct.pack('<LLL', 0, 0, 0))
else:
(file, path) = bucket
key_offset = next_offset
prefix_offset = key_offset + len(file) + 1
suffix_offset = prefix_offset + len(os.path.dirname(path) + os.sep) + 1
next_offset = suffix_offset + len(os.path.basename(path)) + 1
out.write(struct.pack('<LLL', key_offset, prefix_offset, suffix_offset))
# Pad byte since next offset starts at 1.
out.write(struct.pack('<x'))
for bucket in buckets:
if bucket is not None:
(file, path) = bucket
out.write(struct.pack('<%ds' % len(file), file))
out.write(struct.pack('<s', '\0'))
base = os.path.dirname(path) + os.sep
out.write(struct.pack('<%ds' % len(base), base))
out.write(struct.pack('<s', '\0'))
path = os.path.basename(path)
out.write(struct.pack('<%ds' % len(path), path))
out.write(struct.pack('<s', '\0'))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
tkelman/utf8rewind
|
tools/gyp/pylib/gyp/mac_tool.py
|
Python
|
mit
| 26,881 | 0.007366 |
## Gym day example, adapted from the Burglary example [Figure 14.2]
from probability import BayesNet
T, F = True, False
gym = BayesNet([
#WorkOuts
('LegDay', '', .33),
('ArmsDay', '', 0.33),
('Cardio', '', 0.33),
    ('Tired', 'LegDay ArmsDay Cardio',
{(T, T, T): 0.1,
(T, T, F): 0.1,
(T, F, T): 0.7,
(T, F, F): 0.8,
(F, T, T): 0.7,
(F, T, F): 0.9,
(F, F, T): 0.9,
(F, F, F): 0.5}),
('Quit', 'Tired', {T: 0.70, F: 0.01}),
('Push', 'Tired', {T: 0.90, F: 0.10})
])
gym.label = 'Gym Day'
examples = {
gym: [
        {'variable': 'LegDay',
'evidence': {'Quit': T, 'Push': F}
},
        {'variable': 'LegDay',
'evidence': {'Quit': F, 'Push': F}
},
        {'variable': 'ArmsDay',
'evidence': {'Quit': T, 'Push': T}
},
{'variable': 'Cardio',
'evidence': {'Quit': F, 'Push': T}
}
]
}
#
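# A minimal inference sketch (an assumed usage, not part of the original
# submission): aima-python's probability module also provides
# enumeration_ask(X, e, bn), which could answer the queries listed above, e.g.
#
#   from probability import enumeration_ask
#   print(enumeration_ask('LegDay', {'Quit': T, 'Push': F}, gym).show_approx())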
|
WmHHooper/aima-python
|
submissions/Thompson/myBayes.py
|
Python
|
mit
| 907 | 0.003308 |
'''
Highlights a list of words (optionally annotated with meanings) inside the
XHTML content files of an EPUB and repackages the result as a new
"_highlighted" EPUB.
'''
import os
import shutil
import zipfile
from gi.repository import Gtk
from xml.dom import minidom
from xml.etree import ElementTree as ET
import re
import distutils.archive_util
EPUB_PATH = "/home/amit/git/epub-highlighter/epub/test.epub"
# print(os.path.spli(EPUB_PATH)[0] + "tmp")
# os.mkdir(os.path.spli(EPUB_PATH)[0]+"tmp")
EXTRACT_ROOT = "/home/amit/git/epub-highlighter/epub/tmp/"
MIMETYPE_OPF = 'application/oebps-package+xml'
MEDIA_TYPE = 'application/xhtml+xml'
# XML_PATH = '/home/amit/git/epub-highlighter/epub/tmp/test.epub/index_split_000.xhtml'
LIST_PATH = "/home/amit/git/epub-highlighter/list"
current_progress_in_percent = 0
counter = 0
DELIMITER = ',-,'
def get_content_files(opf_path: str):
opf_xml = minidom.parse(opf_path).documentElement
xhtmls = []
for element in opf_xml.getElementsByTagName('item'):
# print(element.getAttribute("href"))
if element.getAttribute("media-type") == MEDIA_TYPE:
xhtmls.append(element.getAttribute("href"))
return xhtmls
# if element.getAttribute("media-type") is (MEDIA_TYPE):
# print(element.getAttribute("href"))
def read_container(extract_path: str)->str:
container_xml = extract_path + "META-INF/container.xml"
minidom_xml = minidom.parse(container_xml).documentElement
opf_path = None
for element in minidom_xml.getElementsByTagName('rootfile'):
if element.getAttribute('media-type') == MIMETYPE_OPF:
# Only take the first full-path available
opf_path = element.getAttribute('full-path')
break
opf_path = extract_path + opf_path
return opf_path
# i = root.findall('./rootfile')
# print(i[0].tag)
def highlight_content(content, word, meaning=None):
global counter
# insensitive_hippo = re.compile(re.escape('hippo'), re.IGNORECASE)
# insensitive_hippo.sub('giraffe', 'I want a hIPpo for my birthday')
word = str(word).strip()
word = ' ' + word + ' '
if not meaning:
highlighted_word = " <b><i>" + word.upper() + "</i></b> "
else:
highlighted_word = " <b><i>" + \
word.upper() + "</i></b> [" + meaning.strip() + "] "
# print(word, highlighted_word)
# exit()
insensitive_pattern = re.compile(re.escape(word), re.IGNORECASE)
changed_content = insensitive_pattern.sub(highlighted_word, content)
if content != changed_content:
counter = counter + 1
# print(content, changed_content)
# exit()
return changed_content
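# Illustrative behaviour (assumed example input, not from the original module):
#   highlight_content("A test sentence", "test")
# returns "A <b><i> TEST </i></b> sentence", i.e. the word is matched
# case-insensitively together with its surrounding spaces and wrapped in
# bold/italic tags (with the meaning appended in brackets when one is given).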
def read_contents(xml_path) -> str:
return str(open(xml_path, "r").read())
def read_list_of_words(list_path):
return open(list_path).readlines()
def read_list_of_words_with_meanings(list_path):
contents = open(list_path).readlines()
words = []
meanings = []
for content in contents:
# print(content)
split_content = str(content).split(DELIMITER)
words.append(split_content[0])
meanings.append(split_content[1])
return words, meanings
def write_content(xml_path, content):
open(xml_path, mode='w').write(content)
def do_something_with_progress(progress_in_hundred: int):
print("Current Progress: " + str(progress_in_hundred))
def replace_xml_files(xmls_with_path, words, progress_bar=None, status_bar=None, meanings=None):
global current_progress_in_percent
xml_file_count = len(xmls_with_path)
files_processed = 0
for xml in xmls_with_path:
# content = open(xml).read()
# print("Processing: " + xml)
xml_file_contents = read_contents(xml)
# print(xml_file_contents)
for i in range(0, len(words)):
word = words[i]
# print(word)
if meanings:
meaning = meanings[i]
xml_file_contents = highlight_content(
xml_file_contents, word, meaning)
else:
xml_file_contents = highlight_content(
xml_file_contents, word)
# print(xml_file_contents)
write_content(xml, xml_file_contents)
files_processed = files_processed + 1
current_progress_in_percent = (files_processed / xml_file_count)
msg = "processing " + os.path.basename(xml)
if status_bar and progress_bar:
status_bar.push(1, msg)
progress_bar.set_fraction(current_progress_in_percent)
while Gtk.events_pending():
Gtk.main_iteration()
# do_something_with_progress(current_progress_in_percent)
def create_epub(extracted_epub_path, original_epub_path):
original_epub_basename = os.path.split(original_epub_path)[1]
original_epub_dir = os.path.split(original_epub_path)[0]
# print(original_epub_dir)
# print(original_epub_basename)
new_epub_name = os.path.splitext(original_epub_basename)[
0] + "_highlighted.epub"
# print(new_epub_name)
# print(extracted_epub_path)
new_epub_path = original_epub_dir + "/" + new_epub_name
# print(new_epub_path)
zip_path = distutils.archive_util.make_archive(
new_epub_name, format='zip', root_dir=extracted_epub_path)
shutil.move(zip_path, new_epub_path + '.zip')
    os.rename(new_epub_path + '.zip', new_epub_path)
def remove_extracted_directory(extract_root):
import shutil
shutil.rmtree(extract_root)
def extract_epub_to_tmp_directory(epub_path) -> str:
    epub_basename = os.path.basename(epub_path)
    temp_dir = os.path.split(epub_path)[
        0] + "/tmp-" + os.path.splitext(epub_basename)[0]
# os.mkdir(temp_dir)
# words = ["Test"]
epub_file = zipfile.ZipFile(epub_path, mode='r')
# print(epub_basename)
# extract_path: str = EXTRACT_ROOT + epub_basename + "/"
extract_path = temp_dir + "/"
# print(extract_path)
epub_file.extractall(path=extract_path)
return extract_path
def get_full_content_xmls_filepaths(extract_path):
opf_path = read_container(extract_path)
opf_path_base = os.path.split(opf_path)[0]
xmls = get_content_files(opf_path)
xmls_with_path = []
for xml in xmls:
xml_with_path = opf_path_base + '/' + xml
xmls_with_path.append(xml_with_path)
return xmls_with_path
def main(epub_path, list_path, progress_bar=None, status_bar=None, with_meaning: bool = None):
extract_path = extract_epub_to_tmp_directory(epub_path)
xmls_with_path = get_full_content_xmls_filepaths(extract_path)
if not with_meaning:
texts = read_list_of_words(list_path)
replace_xml_files(xmls_with_path, texts, progress_bar, status_bar)
else:
words, meanings = read_list_of_words_with_meanings(list_path)
# print(words, meanings)
replace_xml_files(xmls_with_path, words,
progress_bar, status_bar, meanings)
create_epub(extract_path, epub_path)
remove_extracted_directory(extract_path)
global counter
success_msg = "Complete! Highlighted " + \
str(counter) + " Words in " + str(len(xmls_with_path)) + " files"
if status_bar:
status_bar.push(1, success_msg)
else:
print(success_msg)
if __name__ == '__main__':
main(EPUB_PATH, LIST_PATH, None, None, False)
|
LordAmit/epub-highlighter
|
epub_highlighter.py
|
Python
|
mit
| 7,285 | 0.000686 |
from django.shortcuts import render, get_object_or_404, redirect
from .models import Movie, Trailer
from favorites.models import Favourite
def recent_movies(request):
movie_list = Movie.objects.all().order_by('-id')
# favs = Favourite.objects.all()
# for fav in favs:
# if fav.user == request.user.username:
# print(fav.favourites.all())
context = {
"movies": movie_list,
}
return render(request, "activity.html", context)
def movie_detail(request, id=None):
movie = get_object_or_404(Movie, id=id)
    favs = Favourite.objects.filter(user=request.user.username)
    is_favourited = False
    for fav in favs:
        if movie in fav.favourites.all():
            is_favourited = True
context = {
"movie": movie,
"is_favourited": is_favourited,
}
return render(request, "movie_detail.html", context)
def mark_as_favourite(request, id=None):
if not request.user.is_authenticated():
return redirect("login")
movie = get_object_or_404(Movie, id=id)
fav = Favourite(user=request.user.username)
fav.save()
fav.favourites.add(movie)
return redirect("activity:detail", id=id)
|
outworldrunner/nightbay
|
movies/views.py
|
Python
|
gpl-3.0
| 1,200 | 0.004167 |
from asynctnt_queue import Queue, Tube
from tests import BaseTarantoolTestCase
class QueueTestCase(BaseTarantoolTestCase):
async def test__queue_create(self):
q = Queue(self.conn)
self.assertEqual(q.conn, self.conn, 'conn valid')
def test__queue_get_tube(self):
q = Queue(self.conn)
tube = q.tube('test_tube')
self.assertEqual(tube.name, 'test_tube', 'name valid')
self.assertIsInstance(tube, Tube, 'tube valid type')
self.assertEqual(tube.conn, self.conn, 'conn valid')
def test__queue_get_tube_multiple(self):
q = Queue(self.conn)
tube1 = q.tube('test_tube')
tube2 = q.tube('test_tube')
self.assertIs(tube1, tube2, 'the same object')
async def test__queue_statistics(self):
q = Queue(self.conn)
res = await q.statistics()
self.assertIsNotNone(res)
self.assertIn('test_tube', res)
|
igorcoding/asynctnt-queue
|
tests/test_queue.py
|
Python
|
apache-2.0
| 925 | 0 |
# -*- coding: utf-8 -*-
# $Id: zh_tw.py 7119 2011-09-02 13:00:23Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Traditional Chinese language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
'attention (translation required)': 'attention',
'caution (translation required)': 'caution',
'code (translation required)': 'code',
'danger (translation required)': 'danger',
'error (translation required)': 'error',
'hint (translation required)': 'hint',
'important (translation required)': 'important',
'note (translation required)': 'note',
'tip (translation required)': 'tip',
'warning (translation required)': 'warning',
'admonition (translation required)': 'admonition',
'sidebar (translation required)': 'sidebar',
'topic (translation required)': 'topic',
'line-block (translation required)': 'line-block',
'parsed-literal (translation required)': 'parsed-literal',
'rubric (translation required)': 'rubric',
'epigraph (translation required)': 'epigraph',
'highlights (translation required)': 'highlights',
'pull-quote (translation required)': 'pull-quote',
'compound (translation required)': 'compound',
u'container (translation required)': 'container',
#'questions (translation required)': 'questions',
'table (translation required)': 'table',
'csv-table (translation required)': 'csv-table',
'list-table (translation required)': 'list-table',
#'qa (translation required)': 'questions',
#'faq (translation required)': 'questions',
'meta (translation required)': 'meta',
'math (translation required)': 'math',
#'imagemap (translation required)': 'imagemap',
'image (translation required)': 'image',
'figure (translation required)': 'figure',
'include (translation required)': 'include',
'raw (translation required)': 'raw',
'replace (translation required)': 'replace',
'unicode (translation required)': 'unicode',
u'日期': 'date',
'class (translation required)': 'class',
'role (translation required)': 'role',
u'default-role (translation required)': 'default-role',
u'title (translation required)': 'title',
'contents (translation required)': 'contents',
'sectnum (translation required)': 'sectnum',
'section-numbering (translation required)': 'sectnum',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',
#'footnotes (translation required)': 'footnotes',
#'citations (translation required)': 'citations',
'target-notes (translation required)': 'target-notes',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Traditional Chinese name to registered (in directives/__init__.py)
directive name mapping."""
roles = {
# language-dependent: fixed
'abbreviation (translation required)': 'abbreviation',
'ab (translation required)': 'abbreviation',
'acronym (translation required)': 'acronym',
'ac (translation required)': 'acronym',
u'code (translation required)': 'code',
'index (translation required)': 'index',
'i (translation required)': 'index',
'subscript (translation required)': 'subscript',
'sub (translation required)': 'subscript',
'superscript (translation required)': 'superscript',
'sup (translation required)': 'superscript',
'title-reference (translation required)': 'title-reference',
'title (translation required)': 'title-reference',
't (translation required)': 'title-reference',
'pep-reference (translation required)': 'pep-reference',
'pep (translation required)': 'pep-reference',
'rfc-reference (translation required)': 'rfc-reference',
'rfc (translation required)': 'rfc-reference',
'emphasis (translation required)': 'emphasis',
'strong (translation required)': 'strong',
'literal (translation required)': 'literal',
'math (translation required)': 'math',
'named-reference (translation required)': 'named-reference',
'anonymous-reference (translation required)': 'anonymous-reference',
'footnote-reference (translation required)': 'footnote-reference',
'citation-reference (translation required)': 'citation-reference',
'substitution-reference (translation required)': 'substitution-reference',
'target (translation required)': 'target',
'uri-reference (translation required)': 'uri-reference',
'uri (translation required)': 'uri-reference',
'url (translation required)': 'uri-reference',
'raw (translation required)': 'raw',}
"""Mapping of Traditional Chinese role names to canonical role names for
interpreted text."""
|
JulienMcJay/eclock
|
windows/Python27/Lib/site-packages/docutils/parsers/rst/languages/zh_tw.py
|
Python
|
gpl-2.0
| 5,172 | 0.001354 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_tenant_span_dst_group
short_description: Manage SPAN destination groups (span:DestGrp)
description:
- Manage SPAN destination groups on Cisco ACI fabrics.
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
- More information about the internal APIC class B(span:DestGrp) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Dag Wieers (@dagwieers)
version_added: '2.4'
options:
dst_group:
description:
- The name of the SPAN destination group.
required: yes
aliases: [ name ]
description:
description:
- The description of the SPAN destination group.
aliases: [ descr ]
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ tenant_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
# FIXME: Add more, better examples
EXAMPLES = r'''
- aci_tenant_span_dst_group:
host: apic
username: admin
password: SomeSecretPassword
dst_group: '{{ dst_group }}'
description: '{{ descr }}'
tenant: '{{ tenant }}'
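
# The two tasks below are illustrative additions (the module's example list is
# still marked FIXME); they simply exercise the documented state choices with
# the same assumed connection variables.
- aci_tenant_span_dst_group:
    host: apic
    username: admin
    password: SomeSecretPassword
    dst_group: '{{ dst_group }}'
    tenant: '{{ tenant }}'
    state: query

- aci_tenant_span_dst_group:
    host: apic
    username: admin
    password: SomeSecretPassword
    dst_group: '{{ dst_group }}'
    tenant: '{{ tenant }}'
    state: absent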
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
dst_group=dict(type='str', required=False, aliases=['name']), # Not required for querying all objects
tenant=dict(type='str', required=False, aliases=['tenant_name']), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['dst_group', 'tenant']],
['state', 'present', ['dst_group', 'tenant']],
],
)
dst_group = module.params['dst_group']
description = module.params['description']
state = module.params['state']
tenant = module.params['tenant']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
module_object=tenant,
),
subclass_1=dict(
aci_class='spanDestGrp',
aci_rn='destgrp-{0}'.format(dst_group),
filter_target='eq(spanDestGrp.name, "{0}")'.format(dst_group),
module_object=dst_group,
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='spanDestGrp',
class_config=dict(
name=dst_group,
descr=description,
),
)
aci.get_diff(aci_class='spanDestGrp')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
|
hryamzik/ansible
|
lib/ansible/modules/network/aci/aci_tenant_span_dst_group.py
|
Python
|
gpl-3.0
| 6,414 | 0.001559 |
#!/usr/bin/env python
"""Stubs of client actions.
Client actions shouldn't be used on the server, stubs should be used instead.
This way we prevent effectively loading the whole client code into our
server parts.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from future.utils import with_metaclass
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import registry
from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts
from grr_response_core.lib.rdfvalues import chipsec_types as rdf_chipsec_types
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import client_network as rdf_client_network
from grr_response_core.lib.rdfvalues import client_stats as rdf_client_stats
from grr_response_core.lib.rdfvalues import cloud as rdf_cloud
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.rdfvalues import memory as rdf_memory
from grr_response_core.lib.rdfvalues import osquery as rdf_osquery
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.rdfvalues import plist as rdf_plist
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
class ClientActionStub(with_metaclass(registry.MetaclassRegistry, object)):
"""Stub for a client action. To be used in server code."""
in_rdfvalue = None
out_rdfvalues = [None]
# from artifacts.py
class ArtifactCollector(ClientActionStub):
"""The client side artifact collector implementation."""
in_rdfvalue = rdf_artifacts.ClientArtifactCollectorArgs
out_rdfvalues = [rdf_artifacts.ClientArtifactCollectorResult]
# from windows/windows.py, osx/osx.py and linux/linux.py
class GetInstallDate(ClientActionStub):
"""Estimate the install date of this system."""
# DataBlob is deprecated but might still be sent by old clients.
out_rdfvalues = [rdf_protodict.DataBlob, rdfvalue.RDFDatetime]
class EnumerateInterfaces(ClientActionStub):
"""Enumerate all MAC addresses of all NICs."""
out_rdfvalues = [rdf_client_network.Interface]
class EnumerateFilesystems(ClientActionStub):
"""Enumerate all unique filesystems local to the system."""
out_rdfvalues = [rdf_client_fs.Filesystem]
class Uninstall(ClientActionStub):
"""Remove the service that starts us at startup."""
out_rdfvalues = [rdf_protodict.DataBlob]
class UpdateAgent(ClientActionStub):
"""Updates the GRR agent to a new version."""
in_rdfvalue = rdf_client_action.ExecuteBinaryRequest
out_rdfvalues = [rdf_client_action.ExecuteBinaryResponse]
# Windows-specific
class WmiQuery(ClientActionStub):
"""Runs a WMI query and returns the results to a server callback."""
in_rdfvalue = rdf_client_action.WMIRequest
out_rdfvalues = [rdf_protodict.Dict]
# OS X-specific
class OSXEnumerateRunningServices(ClientActionStub):
"""Enumerate all running launchd jobs."""
in_rdfvalue = None
out_rdfvalues = [rdf_client.OSXServiceInformation]
# Linux-specific
class EnumerateRunningServices(ClientActionStub):
"""List running daemons."""
in_rdfvalue = None
out_rdfvalues = [None]
class EnumerateUsers(ClientActionStub):
"""Enumerates all the users on this system."""
# Client versions 3.0.7.1 and older used to return KnowledgeBaseUser.
# KnowledgeBaseUser was renamed to User.
out_rdfvalues = [rdf_client.User, rdf_client.KnowledgeBaseUser]
# from admin.py
class Echo(ClientActionStub):
"""Returns a message to the server."""
in_rdfvalue = rdf_client_action.EchoRequest
out_rdfvalues = [rdf_client_action.EchoRequest]
class GetHostname(ClientActionStub):
"""Retrieves the host name of the client."""
out_rdfvalues = [rdf_protodict.DataBlob]
class GetPlatformInfo(ClientActionStub):
"""Retrieves platform information."""
out_rdfvalues = [rdf_client.Uname]
class Kill(ClientActionStub):
"""A client action for terminating (ClientActionStub) the client."""
out_rdfvalues = [rdf_flows.GrrMessage]
class Hang(ClientActionStub):
"""A client action for simulating the client becoming unresponsive."""
in_rdfvalue = rdf_protodict.DataBlob
class BusyHang(ClientActionStub):
"""A client action that burns cpu cycles. Used for testing cpu limits."""
in_rdfvalue = rdf_protodict.DataBlob
class Bloat(ClientActionStub):
"""A client action that uses lots of memory for testing."""
in_rdfvalue = rdf_protodict.DataBlob
class GetConfiguration(ClientActionStub):
"""Retrieves the running configuration parameters."""
in_rdfvalue = None
out_rdfvalues = [rdf_protodict.Dict]
class GetLibraryVersions(ClientActionStub):
"""Retrieves version information for installed libraries."""
in_rdfvalue = None
out_rdfvalues = [rdf_protodict.Dict]
class UpdateConfiguration(ClientActionStub):
"""Updates configuration parameters on the client."""
in_rdfvalue = rdf_protodict.Dict
class GetClientInfo(ClientActionStub):
"""Obtains information about the GRR client installed."""
out_rdfvalues = [rdf_client.ClientInformation]
class GetClientStats(ClientActionStub):
"""This retrieves some stats about the GRR process."""
in_rdfvalue = rdf_client_action.GetClientStatsRequest
out_rdfvalues = [rdf_client_stats.ClientStats]
class GetClientStatsAuto(GetClientStats):
"""Action used to send the reply to a well known flow on the server."""
class SendStartupInfo(ClientActionStub):
out_rdfvalues = [rdf_client.StartupInfo]
# from plist.py
class PlistQuery(ClientActionStub):
"""Parses the plist request specified and returns the results."""
in_rdfvalue = rdf_plist.PlistRequest
out_rdfvalues = [rdf_protodict.RDFValueArray]
# from standard.py
class ReadBuffer(ClientActionStub):
"""Reads a buffer from a file and returns it to a server callback."""
in_rdfvalue = rdf_client.BufferReference
out_rdfvalues = [rdf_client.BufferReference]
class TransferBuffer(ClientActionStub):
"""Reads a buffer from a file and returns it to the server efficiently."""
in_rdfvalue = rdf_client.BufferReference
out_rdfvalues = [rdf_client.BufferReference]
class HashBuffer(ClientActionStub):
"""Hash a buffer from a file and returns it to the server efficiently."""
in_rdfvalue = rdf_client.BufferReference
out_rdfvalues = [rdf_client.BufferReference]
class HashFile(ClientActionStub):
"""Hash an entire file using multiple algorithms."""
in_rdfvalue = rdf_client_action.FingerprintRequest
out_rdfvalues = [rdf_client_action.FingerprintResponse]
class ListDirectory(ClientActionStub):
"""Lists all the files in a directory."""
in_rdfvalue = rdf_client_action.ListDirRequest
out_rdfvalues = [rdf_client_fs.StatEntry]
# DEPRECATED.
#
# This action was replaced by newer `GetFileStat` action. This stub is left for
# compatibility with old clients. After the transition period all clients should
# support new action and this class should be removed.
#
# TODO(hanuszczak): Remove this class after 2021-01-01.
class StatFile(ClientActionStub):
"""Sends a StatEntry for a single file."""
in_rdfvalue = rdf_client_action.ListDirRequest
out_rdfvalues = [rdf_client_fs.StatEntry]
class GetFileStat(ClientActionStub):
"""A client action that yields stat of a given file."""
in_rdfvalue = rdf_client_action.GetFileStatRequest
out_rdfvalues = [rdf_client_fs.StatEntry]
class ExecuteCommand(ClientActionStub):
"""Executes one of the predefined commands."""
in_rdfvalue = rdf_client_action.ExecuteRequest
out_rdfvalues = [rdf_client_action.ExecuteResponse]
class ExecuteBinaryCommand(ClientActionStub):
"""Executes a command from a passed in binary."""
in_rdfvalue = rdf_client_action.ExecuteBinaryRequest
out_rdfvalues = [rdf_client_action.ExecuteBinaryResponse]
class ExecutePython(ClientActionStub):
"""Executes python code with exec."""
in_rdfvalue = rdf_client_action.ExecutePythonRequest
out_rdfvalues = [rdf_client_action.ExecutePythonResponse]
class Segfault(ClientActionStub):
"""This action is just for debugging. It induces a segfault."""
in_rdfvalue = None
out_rdfvalues = [None]
class ListProcesses(ClientActionStub):
"""This action lists all the processes running on a machine."""
in_rdfvalue = None
out_rdfvalues = [rdf_client.Process]
class SendFile(ClientActionStub):
"""This action encrypts and sends a file to a remote listener."""
in_rdfvalue = rdf_client_action.SendFileRequest
out_rdfvalues = [rdf_client_fs.StatEntry]
class StatFS(ClientActionStub):
"""Call os.statvfs for a given list of paths. OS X and Linux only."""
in_rdfvalue = rdf_client_action.StatFSRequest
out_rdfvalues = [rdf_client_fs.Volume]
class GetMemorySize(ClientActionStub):
out_rdfvalues = [rdfvalue.ByteSize]
# from tempfiles.py
class DeleteGRRTempFiles(ClientActionStub):
"""Delete all the GRR temp files in a directory."""
in_rdfvalue = rdf_paths.PathSpec
out_rdfvalues = [rdf_client.LogMessage]
class CheckFreeGRRTempSpace(ClientActionStub):
in_rdfvalue = rdf_paths.PathSpec
out_rdfvalues = [rdf_client_fs.DiskUsage]
# from searching.py
class Find(ClientActionStub):
"""Recurses through a directory returning files which match conditions."""
in_rdfvalue = rdf_client_fs.FindSpec
out_rdfvalues = [rdf_client_fs.FindSpec, rdf_client_fs.StatEntry]
class Grep(ClientActionStub):
"""Search a file for a pattern."""
in_rdfvalue = rdf_client_fs.GrepSpec
out_rdfvalues = [rdf_client.BufferReference]
# from network.py
# Deprecated action, kept for outdated clients.
class Netstat(ClientActionStub):
"""Gather open network connection stats."""
in_rdfvalue = None
out_rdfvalues = [rdf_client_network.NetworkConnection]
class ListNetworkConnections(ClientActionStub):
"""Gather open network connection stats."""
in_rdfvalue = rdf_client_action.ListNetworkConnectionsArgs
out_rdfvalues = [rdf_client_network.NetworkConnection]
# from cloud.py
class GetCloudVMMetadata(ClientActionStub):
"""Get metadata for cloud VMs."""
in_rdfvalue = rdf_cloud.CloudMetadataRequests
out_rdfvalues = [rdf_cloud.CloudMetadataResponses]
# from file_finder.py
class FileFinderOS(ClientActionStub):
"""The file finder implementation using the OS file api."""
in_rdfvalue = rdf_file_finder.FileFinderArgs
out_rdfvalues = [rdf_file_finder.FileFinderResult]
# from file_finder.py
class VfsFileFinder(ClientActionStub):
"""The client file finder implementation using the VFS file api."""
in_rdfvalue = rdf_file_finder.FileFinderArgs
out_rdfvalues = [rdf_file_finder.FileFinderResult]
# from file_fingerprint.py
class FingerprintFile(ClientActionStub):
"""Apply a set of fingerprinting methods to a file."""
in_rdfvalue = rdf_client_action.FingerprintRequest
out_rdfvalues = [rdf_client_action.FingerprintResponse]
# from components/chipsec_support
class DumpFlashImage(ClientActionStub):
"""A client action to collect the BIOS via SPI using Chipsec."""
in_rdfvalue = rdf_chipsec_types.DumpFlashImageRequest
out_rdfvalues = [rdf_chipsec_types.DumpFlashImageResponse]
class DumpACPITable(ClientActionStub):
"""A client action to collect the ACPI table(s)."""
in_rdfvalue = rdf_chipsec_types.DumpACPITableRequest
out_rdfvalues = [rdf_chipsec_types.DumpACPITableResponse]
# from memory.py
class YaraProcessScan(ClientActionStub):
"""Scans the memory of a number of processes using Yara."""
in_rdfvalue = rdf_memory.YaraProcessScanRequest
out_rdfvalues = [rdf_memory.YaraProcessScanResponse]
class YaraProcessDump(ClientActionStub):
"""Dumps a process to disk and returns pathspecs for GRR to pick up."""
in_rdfvalue = rdf_memory.YaraProcessDumpArgs
out_rdfvalues = [rdf_memory.YaraProcessDumpResponse]
class Osquery(ClientActionStub):
"""A stub class for the osquery action plugin."""
in_rdfvalue = rdf_osquery.OsqueryArgs
out_rdfvalues = [rdf_osquery.OsqueryResult]
|
demonchild2112/travis-test
|
grr/server/grr_response_server/server_stubs.py
|
Python
|
apache-2.0
| 12,170 | 0.012572 |
# -*- coding: utf-8 -*-
delivery={'weight': '10.0', 'pec_bar': u'9V169001>59647441000000023', 'suivi_bar': u'9V0>50000000024', 'cab_prise_en_charge': u'9V1 69001 964744 1000 000023', 'date': '12/05/2014', 'cab_suivi': u'9V 00000 00002 4', 'ref_client': u'OUT/00007', 'Instructions': ''}
sender={'city': u'city', 'account': u'964744', 'name': u'Your Company', 'zip': u'zip', 'phone': u'599', 'country': u'France', 'support_city': u'Gennevilliers PFC', 'street': u'rue', 'password': u'123456'}
address={'city': u'Lyon', 'name': u'Jim NHASTIC', 'zip': u'69001', 'mobile': '', 'street2': '', 'street3': '', 'countryCode': u'FR', 'phone': '', 'street': u'150 rue Vauban', 'email': ''}
option={'ar': False, 'nm': False, 'ftd': False}
kwargs={'logo': 'EXPERT_F', '_product_code': u'9V'}
content="""/* Utf8 file encoded converted in CP1252 by python */
^XA
^LH30,30 /* initial position*/
^CI27 /* windows CP1252 decoding */
^CF0,22 /*CF:default font|font_type,size*/
/*Fonts : P,Q,R,S,T fonts are the same with Zebra GX420t, only size change font '0' seems to be functionnal for general purpose */
^FWN /*FW:Default orientation*/
^BY3 /*BY:Bar Code Field Default*/
^FO80,01^XGE:EXPERT_F,1,1^FS
^FO0,100^GB770,1,4^FS
^FO10,130^A0,30^FDEXPEDITEUR
^FS
^FO450,130^FDRef Client: OUT/00007^FS
^FO0,160^GB360,160,4^FS /*GB:graphic box|width,height,thickness*/
/*graphic diagonal line:width,height,border_thickness,,orientation(R=right diagonal)*/
^FO0,160^GD350,160,10,,R^FS
^FO0,160^GD350,160,10,,L^FS
^FO410,160^GB360,160,4^FS
/*^A0 /*A:font|font_type,orientation,size*/*/
^FO25,175^A0,30,30^FDYour Company^FS
^FO25,205 /*FO:field origin|x,y*/
^FB400,5,3, /*FB:field block|width text,line number,space beetween lines*/
/* COLISS RULE Teleph expediteur si OM ou I */
/* COLISS RULE Pays expediteur si OM ou I */
^A0,24^FDrue
\&
\&zip city
^FS
^FO420,170 /*FO:field origin|x,y*/
^FB400,6,3,
^FDCOMPTE CLIENT: 964744
\&SITE DE PRISE EN CHARGE:
\&Gennevilliers PFC
\&N° Colis : 9V 00000 00002 4
\&Poids : 10.0 Kg
\&Edité le : 12/05/2014
^FS
/* ||| || |||| */
/* >5 => is subset C invocation code ; >6 => is subset B invocation code */
^FO40,345^PR2,2^BCN,230,Y,N,N^FD9V0>50000000024^FS
^FO40,575^GB402,3,4^FS
^FO0,585^FDN° de colis :^FS
/* /!\ /_\ /!\ /_\ /!\ */
^FO30,630^A0,30^FDDESTINATAIRE^FS
^FO5,660^GB450,200,4^FS
^FO30,675^A0,24,28^FDJim NHASTIC^FS
^FO30,705^FB400,6,2,
^FD150 rue Vauban
\&
\&^FS
^FO30,755
^A0,40
^FD69001 Lyon^FS
/* COLISS RULE Phone+country expediteur si Internationale */
^FO30,800^FDTEL: ^FS
^FO0,950^A0B^FDSPECIFIQUE^FS
/* ||| || |||| */
^FO70,880^BCN,230,Y,N,N^FD9V169001>59647441000000023^FS
^FO100,1120^FDN° PCH:^FS
^FO0,1136^XGE:POSTE,1,1^FS
^FO720,1130^XGE:CAMERA,1,1^FS
^XZ
"""
|
akretion/laposte_api
|
laposte_api/data/colissimo_9V_nhas22.py
|
Python
|
agpl-3.0
| 2,753 | 0.009081 |
"""
Django settings for CELLAR project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'PLEASE FILL YOUR OWN SECRET KEY'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["localhost", "127.0.0.1", ]
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Browser',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'CELLAR.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
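# NOTE: because TEMPLATES is defined, Django 1.8 ignores the legacy
# TEMPLATE_DIRS value above; add os.path.join(BASE_DIR, 'templates') to
# 'DIRS' if project-level templates are meant to be discovered.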
WSGI_APPLICATION = 'CELLAR.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'CELLAR.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
|
SonienTaegi/CELLAR
|
CELLAR/settings.py
|
Python
|
gpl-2.0
| 2,762 | 0.000724 |
#!/usr/bin/python
#
# Copyright (C) 2009 Julian Andres Klode <jak@debian.org>
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
"""Unit tests for verifying the correctness of hashsums in apt_pkg.
Unit tests to verify the correctness of Hashes, HashString and the various
functions like md5sum."""
import unittest
import hashlib
import sys
import warnings
import apt_pkg
import testcommon
class TestHashes(testcommon.TestCase):
"""Test apt_pkg.Hashes() and the various apt_pkg.*sum() functions."""
def setUp(self):
"""Prepare the tests, create reference values..."""
testcommon.TestCase.setUp(self)
self.file = open(apt_pkg.__file__, "rb")
self.value = self.file.read()
self.hashes = apt_pkg.Hashes(self.value)
self.file.seek(0)
self.fhashes = apt_pkg.Hashes(self.file)
# Reference values.
self.md5 = hashlib.md5(self.value).hexdigest()
self.sha1 = hashlib.sha1(self.value).hexdigest()
self.sha256 = hashlib.sha256(self.value).hexdigest()
self.file.seek(0)
def tearDown(self):
"""Cleanup, Close the file object used for the tests."""
self.file.close()
def test_md5sum(self):
"""hashes: Test apt_pkg.md5sum()"""
self.assertEqual(apt_pkg.md5sum(self.value), self.md5)
self.assertEqual(apt_pkg.md5sum(self.file), self.md5)
def test_sha1sum(self):
"""hashes: Test apt_pkg.sha1sum()"""
self.assertEqual(apt_pkg.sha1sum(self.value), self.sha1)
self.assertEqual(apt_pkg.sha1sum(self.file), self.sha1)
def test_sha256sum(self):
"""hashes: Test apt_pkg.sha256sum()"""
self.assertEqual(apt_pkg.sha256sum(self.value), self.sha256)
self.assertEqual(apt_pkg.sha256sum(self.file), self.sha256)
def test_bytes(self):
"""hashes: Test apt_pkg.Hashes(bytes)"""
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
self.assertEqual(self.hashes.md5, self.md5)
self.assertEqual(self.hashes.sha1, self.sha1)
self.assertEqual(self.hashes.sha256, self.sha256)
self.assertEqual(len(caught_warnings), 3)
self.assertTrue(issubclass(caught_warnings[0].category,
DeprecationWarning))
self.assertTrue(issubclass(caught_warnings[1].category,
DeprecationWarning))
self.assertTrue(issubclass(caught_warnings[2].category,
DeprecationWarning))
def test_file(self):
"""hashes: Test apt_pkg.Hashes(file)."""
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
self.assertEqual(self.hashes.md5, self.fhashes.md5)
self.assertEqual(self.hashes.sha1, self.fhashes.sha1)
self.assertEqual(self.hashes.sha256, self.fhashes.sha256)
self.assertEqual(len(caught_warnings), 6)
self.assertTrue(issubclass(caught_warnings[0].category,
DeprecationWarning))
self.assertTrue(issubclass(caught_warnings[1].category,
DeprecationWarning))
self.assertTrue(issubclass(caught_warnings[2].category,
DeprecationWarning))
self.assertTrue(issubclass(caught_warnings[3].category,
DeprecationWarning))
self.assertTrue(issubclass(caught_warnings[4].category,
DeprecationWarning))
self.assertTrue(issubclass(caught_warnings[5].category,
DeprecationWarning))
def test_unicode(self):
"""hashes: Test apt_pkg.Hashes(unicode)."""
if sys.version_info[0] == 3:
self.assertRaises(TypeError, apt_pkg.Hashes, "D")
self.assertRaises(TypeError, apt_pkg.md5sum, "D")
self.assertRaises(TypeError, apt_pkg.sha1sum, "D")
self.assertRaises(TypeError, apt_pkg.sha256sum, "D")
else:
self.assertRaises(TypeError, apt_pkg.Hashes, unicode())
self.assertRaises(TypeError, apt_pkg.md5sum, unicode())
self.assertRaises(TypeError, apt_pkg.sha1sum, unicode())
self.assertRaises(TypeError, apt_pkg.sha256sum, unicode())
class TestHashString(testcommon.TestCase):
"""Test apt_pkg.HashString()."""
def setUp(self):
"""Prepare the test by reading the file."""
testcommon.TestCase.setUp(self)
self.file = open(apt_pkg.__file__)
self.hashes = apt_pkg.Hashes(self.file)
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
self.md5 = apt_pkg.HashString("MD5Sum", self.hashes.md5)
self.sha1 = apt_pkg.HashString("SHA1", self.hashes.sha1)
self.sha256 = apt_pkg.HashString("SHA256", self.hashes.sha256)
self.assertEqual(len(caught_warnings), 3)
self.assertTrue(issubclass(caught_warnings[0].category,
DeprecationWarning))
self.assertTrue(issubclass(caught_warnings[1].category,
DeprecationWarning))
self.assertTrue(issubclass(caught_warnings[2].category,
DeprecationWarning))
def tearDown(self):
"""Cleanup, Close the file object used for the tests."""
self.file.close()
def test_md5(self):
"""hashes: Test apt_pkg.HashString().md5"""
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.assertEqual("MD5Sum:%s" % self.hashes.md5, str(self.md5))
self.assertTrue(self.md5.verify_file(apt_pkg.__file__))
def test_sha1(self):
"""hashes: Test apt_pkg.HashString().sha1"""
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.assertEqual("SHA1:%s" % self.hashes.sha1, str(self.sha1))
self.assertTrue(self.sha1.verify_file(apt_pkg.__file__))
def test_sha256(self):
"""hashes: Test apt_pkg.HashString().sha256"""
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.assertEqual("SHA256:%s" % self.hashes.sha256,
str(self.sha256))
self.assertTrue(self.sha256.verify_file(apt_pkg.__file__))
def test_wrong(self):
"""hashes: Test apt_pkg.HashString(wrong_type)."""
self.assertRaises(TypeError, apt_pkg.HashString, 0)
if sys.version_info[0] == 3:
self.assertRaises(TypeError, apt_pkg.HashString, bytes())
class TestHashStringList(testcommon.TestCase):
"""Test apt_pkg.HashStringList()"""
def test_file_size(self):
hsl = apt_pkg.HashStringList()
self.assertEqual(hsl.file_size, 0)
hsl.file_size = 42
self.assertEqual(hsl.file_size, 42)
self.assertEqual(len(hsl), 1)
# Verify that I can re-assign value (this handles the long case on
# Python 2).
hsl.file_size = hsl.file_size
with self.assertRaises(OverflowError):
hsl.file_size = -1
hsl.file_size = 0
def test_append(self):
"""Testing whether append works correctly."""
hs1 = apt_pkg.HashString("MD5Sum",
"a60599e6200b60050d7a30721e3532ed")
hs2 = apt_pkg.HashString("SHA1",
"ef113338e654b1ada807a939ad47b3a67633391b")
hsl = apt_pkg.HashStringList()
hsl.append(hs1)
hsl.append(hs2)
self.assertEqual(len(hsl), 2)
self.assertEqual(hsl[0].hashtype, "MD5Sum")
self.assertEqual(hsl[1].hashtype, "SHA1")
self.assertEqual(str(hsl[0]), str(hs1))
self.assertEqual(str(hsl[1]), str(hs2))
def test_find(self):
"""Testing whether append works correctly."""
hs1 = apt_pkg.HashString("MD5Sum",
"a60599e6200b60050d7a30721e3532ed")
hs2 = apt_pkg.HashString("SHA1",
"ef113338e654b1ada807a939ad47b3a67633391b")
hsl = apt_pkg.HashStringList()
hsl.append(hs1)
hsl.append(hs2)
self.assertEqual(hsl.find("MD5Sum").hashtype, "MD5Sum")
self.assertEqual(hsl.find("SHA1").hashtype, "SHA1")
self.assertEqual(hsl.find().hashtype, "SHA1")
def test_verify_file(self):
with open(apt_pkg.__file__) as fobj:
hashes = apt_pkg.Hashes(fobj)
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
sha1 = apt_pkg.HashString("SHA1", hashes.sha1)
sha256 = apt_pkg.HashString("SHA256", hashes.sha256)
hsl = apt_pkg.HashStringList()
hsl.append(sha1)
hsl.append(sha256)
self.assertTrue(hsl.verify_file(apt_pkg.__file__))
md5sum = apt_pkg.HashString("MD5Sum",
"a60599e6200b60050d7a30721e3532ed")
hsl.append(md5sum)
self.assertFalse(hsl.verify_file(apt_pkg.__file__))
hsl2 = hashes.hashes
self.assertIsInstance(hsl2, apt_pkg.HashStringList)
self.assertGreater(len(hsl2), 0)
self.assertTrue(hsl2.verify_file(apt_pkg.__file__))
if __name__ == "__main__":
unittest.main()
|
mvo5/python-apt
|
tests/test_hashes.py
|
Python
|
gpl-2.0
| 9,619 | 0 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'APIAuth'
db.create_table(u'pycon_api_apiauth', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('auth_key', self.gf('django.db.models.fields.CharField')(default='3871ec85-2c87-4f82-b92e-d067843fba45', max_length=36)),
('enabled', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal(u'pycon_api', ['APIAuth'])
# Adding model 'ProposalData'
db.create_table(u'pycon_api_proposaldata', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('proposal', self.gf('django.db.models.fields.related.OneToOneField')(related_name='data', unique=True, to=orm['proposals.ProposalBase'])),
('data', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'pycon_api', ['ProposalData'])
# Adding model 'IRCLogLine'
db.create_table(u'pycon_api_irclogline', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('timestamp', self.gf('django.db.models.fields.DateTimeField')()),
('proposal', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['proposals.ProposalBase'])),
('user', self.gf('django.db.models.fields.CharField')(max_length=40)),
('line', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'pycon_api', ['IRCLogLine'])
def backwards(self, orm):
# Deleting model 'APIAuth'
db.delete_table(u'pycon_api_apiauth')
# Deleting model 'ProposalData'
db.delete_table(u'pycon_api_proposaldata')
# Deleting model 'IRCLogLine'
db.delete_table(u'pycon_api_irclogline')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'conference.conference': {
'Meta': {'object_name': 'Conference'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('timezones.fields.TimeZoneField', [], {'default': "'US/Eastern'", 'max_length': '100', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'conference.section': {
'Meta': {'object_name': 'Section'},
'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['conference.Conference']"}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'proposals.additionalspeaker': {
'Meta': {'unique_together': "(('speaker', 'proposalbase'),)", 'object_name': 'AdditionalSpeaker', 'db_table': "'proposals_proposalbase_additional_speakers'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proposalbase': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['proposals.ProposalBase']"}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['speakers.Speaker']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'proposals.proposalbase': {
'Meta': {'object_name': 'ProposalBase'},
'abstract': ('django.db.models.fields.TextField', [], {}),
'additional_notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'additional_speakers': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['speakers.Speaker']", 'symmetrical': 'False', 'through': u"orm['proposals.AdditionalSpeaker']", 'blank': 'True'}),
'cancelled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '400'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['proposals.ProposalKind']"}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'proposals'", 'to': u"orm['speakers.Speaker']"}),
'submitted': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'proposals.proposalkind': {
'Meta': {'object_name': 'ProposalKind'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'proposal_kinds'", 'to': u"orm['conference.Section']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
u'pycon_api.apiauth': {
'Meta': {'object_name': 'APIAuth'},
'auth_key': ('django.db.models.fields.CharField', [], {'default': "'6f51aa9c-b509-4693-9dc6-c19341a4ac8d'", 'max_length': '36'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'pycon_api.irclogline': {
'Meta': {'object_name': 'IRCLogLine'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'proposal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['proposals.ProposalBase']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'pycon_api.proposaldata': {
'Meta': {'object_name': 'ProposalData'},
'data': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proposal': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'data'", 'unique': 'True', 'to': u"orm['proposals.ProposalBase']"})
},
u'speakers.speaker': {
'Meta': {'object_name': 'Speaker'},
'annotation': ('django.db.models.fields.TextField', [], {}),
'biography': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite_email': ('django.db.models.fields.CharField', [], {'max_length': '200', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'invite_token': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'sessions_preference': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'twitter_username': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'speaker_profile'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['pycon_api']
|
pyconjp/pyconjp-website
|
pycon/pycon_api/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 12,615 | 0.007372 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20150914_2147'),
]
operations = [
migrations.AlterField(
model_name='movie',
name='slug',
field=models.SlugField(null=True, editable=False),
),
]
|
rtancman/filmes
|
movies/core/migrations/0003_auto_20150914_2157.py
|
Python
|
mit
| 408 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from .context import tripp
from tripp import logistic_regression
from tripp import munge
from tripp import gradient
from tripp import ml
from tripp import algebra
import random
import logging
logging.basicConfig(level=logging.ERROR, format="%(lineno)d\t%(message)s")
class TestLogisticRegression(unittest.TestCase):
def setUp(self):
tuples = [
(0.7, 48000, 1), (1.9, 48000, 0), (2.5, 60000, 1),
(4.2, 63000, 0), (6, 76000, 0), (6.5, 69000, 0),
(7.5, 76000, 0), (8.1, 88000, 0), (8.7, 83000, 1),
(10, 83000, 1), (0.8, 43000, 0), (1.8, 60000, 0),
(10, 79000, 1), (6.1, 76000, 0), (1.4, 50000, 0),
(9.1, 92000, 0), (5.8, 75000, 0), (5.2, 69000, 0),
(1, 56000, 0), (6, 67000, 0), (4.9, 74000, 0),
(6.4, 63000, 1), (6.2, 82000, 0), (3.3, 58000, 0),
(9.3, 90000, 1), (5.5, 57000, 1), (9.1, 102000, 0),
(2.4, 54000, 0), (8.2, 65000, 1), (5.3, 82000, 0),
(9.8, 107000, 0), (1.8, 64000, 0), (0.6, 46000, 1),
(0.8, 48000, 0), (8.6, 84000, 1), (0.6, 45000, 0),
(0.5, 30000, 1), (7.3, 89000, 0), (2.5, 48000, 1),
(5.6, 76000, 0), (7.4, 77000, 0), (2.7, 56000, 0),
(0.7, 48000, 0), (1.2, 42000, 0), (0.2, 32000, 1),
(4.7, 56000, 1), (2.8, 44000, 1), (7.6, 78000, 0),
(1.1, 63000, 0), (8, 79000, 1), (2.7, 56000, 0),
(6, 52000, 1), (4.6, 56000, 0), (2.5, 51000, 0),
(5.7, 71000, 0), (2.9, 65000, 0), (1.1, 33000, 1),
(3, 62000, 0), (4, 71000, 0), (2.4, 61000, 0),
(7.5, 75000, 0), (9.7, 81000, 1), (3.2, 62000, 0),
(7.9, 88000, 0), (4.7, 44000, 1), (2.5, 55000, 0),
(1.6, 41000, 0), (6.7, 64000, 1), (6.9, 66000, 1),
(7.9, 78000, 1), (8.1, 102000, 0), (5.3, 48000, 1),
(8.5, 66000, 1), (0.2, 56000, 0), (6, 69000, 0),
(7.5, 77000, 0), (8, 86000, 0), (4.4, 68000, 0),
(4.9, 75000, 0), (1.5, 60000, 0), (2.2, 50000, 0),
(3.4, 49000, 1), (4.2, 70000, 0), (7.7, 98000, 0),
(8.2, 85000, 0), (5.4, 88000, 0), (0.1, 46000, 0),
(1.5, 37000, 0), (6.3, 86000, 0), (3.7, 57000, 0),
(8.4, 85000, 0), (2, 42000, 0), (5.8, 69000, 1),
(2.7, 64000, 0), (3.1, 63000, 0), (1.9, 48000, 0),
(10, 72000, 1), (0.2, 45000, 0), (8.6, 95000, 0),
(1.5, 64000, 0), (9.8, 95000, 0), (5.3, 65000, 0),
(7.5, 80000, 0), (9.9, 91000, 0), (9.7, 50000, 1),
(2.8, 68000, 0), (3.6, 58000, 0), (3.9, 74000, 0),
(4.4, 76000, 0), (2.5, 49000, 0), (7.2, 81000, 0),
(5.2, 60000, 1), (2.4, 62000, 0), (8.9, 94000, 0),
(2.4, 63000, 0), (6.8, 69000, 1), (6.5, 77000, 0),
(7, 86000, 0), (9.4, 94000, 0), (7.8, 72000, 1),
(0.2, 53000, 0), (10, 97000, 0), (5.5, 65000, 0),
(7.7, 71000, 1), (8.1, 66000, 1), (9.8, 91000, 0),
(8, 84000, 0), (2.7, 55000, 0), (2.8, 62000, 0),
(9.4, 79000, 0), (2.5, 57000, 0), (7.4, 70000, 1),
(2.1, 47000, 0), (5.3, 62000, 1), (6.3, 79000, 0),
(6.8, 58000, 1), (5.7, 80000, 0), (2.2, 61000, 0),
(4.8, 62000, 0), (3.7, 64000, 0), (4.1, 85000, 0),
(2.3, 51000, 0), (3.5, 58000, 0), (0.9, 43000, 0),
(0.9, 54000, 0), (4.5, 74000, 0), (6.5, 55000, 1),
(4.1, 41000, 1), (7.1, 73000, 0), (1.1, 66000, 0),
(9.1, 81000, 1), (8, 69000, 1), (7.3, 72000, 1),
(3.3, 50000, 0), (3.9, 58000, 0), (2.6, 49000, 0),
(1.6, 78000, 0), (0.7, 56000, 0), (2.1, 36000, 1),
(7.5, 90000, 0), (4.8, 59000, 1), (8.9, 95000, 0),
(6.2, 72000, 0), (6.3, 63000, 0), (9.1, 100000, 0),
(7.3, 61000, 1), (5.6, 74000, 0), (0.5, 66000, 0),
(1.1, 59000, 0), (5.1, 61000, 0), (6.2, 70000, 0),
(6.6, 56000, 1), (6.3, 76000, 0), (6.5, 78000, 0),
(5.1, 59000, 0), (9.5, 74000, 1), (4.5, 64000, 0),
(2, 54000, 0), (1, 52000, 0), (4, 69000, 0),
(6.5, 76000, 0), (3, 60000, 0), (4.5, 63000, 0),
(7.8, 70000, 0), (3.9, 60000, 1), (0.8, 51000, 0),
(4.2, 78000, 0), (1.1, 54000, 0), (6.2, 60000, 0),
(2.9, 59000, 0), (2.1, 52000, 0), (8.2, 87000, 0),
(4.8, 73000, 0), (2.2, 42000, 1), (9.1, 98000, 0),
(6.5, 84000, 0), (6.9, 73000, 0), (5.1, 72000, 0),
(9.1, 69000, 1), (9.8, 79000, 1)
]
        # materialize as a list so the data can be iterated more than once
        # (map() returns a one-shot iterator on Python 3)
        self.data = [list(t) for t in tuples]
def test_gradient(self):
"""logistic_regression -- gradient"""
# x is [1, experience, salary]
x = [[1] + row[:2] for row in self.data]
# y is "paid account"
y = [row[2] for row in self.data]
rescaled = munge.rescale(x)
random.seed(0)
x_train, x_test, y_train, y_test = ml.train_test_split(rescaled,
y,
0.33)
beta_0 = [1, 1, 1]
beta_hat = gradient.\
maximize_stochastic(logistic_regression.logistic_log_likelihood_i,
logistic_regression.logistic_log_gradient_i,
x_train,
y_train,
beta_0)
true_positives = false_positives = true_negatives = false_negatives = 0
for x_i, y_i in zip(x_test, y_test):
predict = logistic_regression.logistic(algebra.dot(beta_hat, x_i))
if y_i == 1 and predict >= 0.5:
true_positives += 1
elif y_i == 1:
false_negatives += 1
elif predict >= 0.5:
false_positives += 1
else:
true_negatives += 1
message = "true_pos={0}; false_neg={1}, false_pos={2}; true_neg={3}"
logging.debug(message.format(true_positives,
false_negatives,
false_positives,
true_negatives))
precision = true_positives / float((true_positives + false_positives))
recall = true_positives / float((true_positives + false_negatives))
self.assertEqual(0.93, round(precision, 2))
self.assertEqual(0.82, round(recall, 2))
|
mjamesruggiero/tripp
|
tests/test_logistic_regression.py
|
Python
|
bsd-3-clause
| 6,511 | 0.000154 |
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# Copyright 2012 Canonical
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
"""Tests for the TextInput Qml component."""
from autopilot.matchers import Eventually
from textwrap import dedent
from testtools.matchers import Is, Not, Equals
from testtools import skip
import os
from tavastia.tests import TavastiaTestCase
class TextFieldTests(TavastiaTestCase):
"""Tests for TextField component."""
test_qml_file = "%s/%s.qml" % (os.path.dirname(os.path.realpath(__file__)),"TextFieldTests")
def test_can_select_textfield(self):
"""Must be able to select the Qml TextField component."""
obj = self.app.select_single('TextField')
self.assertThat(obj, Not(Is(None)))
|
LeoTestard/qt-ubuntu-components
|
tests/autopilot/tavastia/tests/textfield/test_textfield.py
|
Python
|
lgpl-3.0
| 923 | 0.005417 |
"""Support for deCONZ devices."""
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_API_KEY, CONF_HOST, CONF_PORT, EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers import config_validation as cv
# Loading the config flow file will register the flow
from .config_flow import get_master_gateway
from .const import (
CONF_ALLOW_CLIP_SENSOR, CONF_ALLOW_DECONZ_GROUPS, CONF_BRIDGEID,
CONF_MASTER_GATEWAY, DEFAULT_PORT, DOMAIN, _LOGGER)
from .gateway import DeconzGateway
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_API_KEY): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
}, extra=vol.ALLOW_EXTRA)
SERVICE_DECONZ = 'configure'
SERVICE_FIELD = 'field'
SERVICE_ENTITY = 'entity'
SERVICE_DATA = 'data'
SERVICE_SCHEMA = vol.All(vol.Schema({
vol.Optional(SERVICE_ENTITY): cv.entity_id,
vol.Optional(SERVICE_FIELD): cv.matches_regex('/.*'),
vol.Required(SERVICE_DATA): dict,
vol.Optional(CONF_BRIDGEID): str
}), cv.has_at_least_one_key(SERVICE_ENTITY, SERVICE_FIELD))
SERVICE_DEVICE_REFRESH = 'device_refresh'
SERVICE_DEVICE_REFRESH_SCHEMA = vol.All(vol.Schema({
vol.Optional(CONF_BRIDGEID): str
}))
async def async_setup(hass, config):
"""Load configuration for deCONZ component.
Discovery has loaded the component if DOMAIN is not present in config.
"""
if not hass.config_entries.async_entries(DOMAIN) and DOMAIN in config:
deconz_config = config[DOMAIN]
hass.async_create_task(hass.config_entries.flow.async_init(
DOMAIN, context={'source': config_entries.SOURCE_IMPORT},
data=deconz_config
))
return True
async def async_setup_entry(hass, config_entry):
"""Set up a deCONZ bridge for a config entry.
Load config, group, light and sensor data for server information.
Start websocket for push notification of state changes from deCONZ.
"""
if DOMAIN not in hass.data:
hass.data[DOMAIN] = {}
if not config_entry.options:
await async_populate_options(hass, config_entry)
gateway = DeconzGateway(hass, config_entry)
if not await gateway.async_setup():
return False
hass.data[DOMAIN][gateway.bridgeid] = gateway
await gateway.async_update_device_registry()
async def async_configure(call):
"""Set attribute of device in deCONZ.
Entity is used to resolve to a device path (e.g. '/lights/1').
Field is a string representing either a full path
(e.g. '/lights/1/state') when entity is not specified, or a
subpath (e.g. '/state') when used together with entity.
Data is a json object with what data you want to alter
e.g. data={'on': true}.
{
"field": "/lights/1/state",
"data": {"on": true}
}
See Dresden Elektroniks REST API documentation for details:
http://dresden-elektronik.github.io/deconz-rest-doc/rest/
"""
field = call.data.get(SERVICE_FIELD, '')
entity_id = call.data.get(SERVICE_ENTITY)
data = call.data[SERVICE_DATA]
gateway = get_master_gateway(hass)
if CONF_BRIDGEID in call.data:
gateway = hass.data[DOMAIN][call.data[CONF_BRIDGEID]]
if entity_id:
try:
field = gateway.deconz_ids[entity_id] + field
except KeyError:
_LOGGER.error('Could not find the entity %s', entity_id)
return
await gateway.api.async_put_state(field, data)
hass.services.async_register(
DOMAIN, SERVICE_DECONZ, async_configure, schema=SERVICE_SCHEMA)
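    # Illustrative service call (a sketch, not from the original source): the
    # entity-based form resolves the entity to its deCONZ path and appends the
    # sub-path, matching SERVICE_SCHEMA above. The entity id is hypothetical.
    #   service: deconz.configure
    #   data:
    #     entity: light.example_light
    #     field: "/state"
    #     data: {"on": true}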
async def async_refresh_devices(call):
"""Refresh available devices from deCONZ."""
gateway = get_master_gateway(hass)
if CONF_BRIDGEID in call.data:
gateway = hass.data[DOMAIN][call.data[CONF_BRIDGEID]]
groups = set(gateway.api.groups.keys())
lights = set(gateway.api.lights.keys())
scenes = set(gateway.api.scenes.keys())
sensors = set(gateway.api.sensors.keys())
await gateway.api.async_load_parameters()
gateway.async_add_device_callback(
'group', [group
for group_id, group in gateway.api.groups.items()
if group_id not in groups]
)
gateway.async_add_device_callback(
'light', [light
for light_id, light in gateway.api.lights.items()
if light_id not in lights]
)
gateway.async_add_device_callback(
'scene', [scene
for scene_id, scene in gateway.api.scenes.items()
if scene_id not in scenes]
)
gateway.async_add_device_callback(
'sensor', [sensor
for sensor_id, sensor in gateway.api.sensors.items()
if sensor_id not in sensors]
)
hass.services.async_register(
DOMAIN, SERVICE_DEVICE_REFRESH, async_refresh_devices,
        schema=SERVICE_DEVICE_REFRESH_SCHEMA)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, gateway.shutdown)
return True
async def async_unload_entry(hass, config_entry):
"""Unload deCONZ config entry."""
gateway = hass.data[DOMAIN].pop(config_entry.data[CONF_BRIDGEID])
if not hass.data[DOMAIN]:
hass.services.async_remove(DOMAIN, SERVICE_DECONZ)
hass.services.async_remove(DOMAIN, SERVICE_DEVICE_REFRESH)
elif gateway.master:
await async_populate_options(hass, config_entry)
new_master_gateway = next(iter(hass.data[DOMAIN].values()))
await async_populate_options(hass, new_master_gateway.config_entry)
return await gateway.async_reset()
async def async_populate_options(hass, config_entry):
"""Populate default options for gateway.
Called by setup_entry and unload_entry.
Makes sure there is always one master available.
"""
master = not get_master_gateway(hass)
options = {
CONF_MASTER_GATEWAY: master,
CONF_ALLOW_CLIP_SENSOR: config_entry.data.get(
CONF_ALLOW_CLIP_SENSOR, False),
CONF_ALLOW_DECONZ_GROUPS: config_entry.data.get(
CONF_ALLOW_DECONZ_GROUPS, True)
}
hass.config_entries.async_update_entry(config_entry, options=options)
|
aequitas/home-assistant
|
homeassistant/components/deconz/__init__.py
|
Python
|
apache-2.0
| 6,477 | 0 |
"""
Core model handling routines.
"""
from __future__ import print_function
__all__ = [
"list_models", "load_model", "load_model_info",
"build_model", "precompile_dlls", "reparameterize",
]
import os
from os.path import basename, join as joinpath
from glob import glob
import re
import copy
import numpy as np # type: ignore
# NOTE: delay loading of kernelcl, kernelcuda, kerneldll and kernelpy
# cl and cuda in particular take awhile since they try to establish a
# connection with the card to verify that the environment works.
from . import generate
from . import modelinfo
from . import product
from . import mixture
from . import custom
# pylint: disable=unused-import
try:
from typing import List, Union, Optional, Any, Tuple
from .kernel import KernelModel
from .modelinfo import ModelInfo
except ImportError:
pass
# pylint: enable=unused-import
CUSTOM_MODEL_PATH = os.environ.get('SAS_MODELPATH', "")
if CUSTOM_MODEL_PATH == "":
CUSTOM_MODEL_PATH = joinpath(os.path.expanduser("~"), ".sasmodels", "custom_models")
#if not os.path.isdir(CUSTOM_MODEL_PATH):
# os.makedirs(CUSTOM_MODEL_PATH)
# TODO: refactor composite model support
# The current load_model_info/build_model does not reuse existing model
# definitions when loading a composite model, instead reloading and
# rebuilding the kernel for each component model in the expression. This
# is fine in a scripting environment where the model is built when the script
# starts and is thrown away when the script ends, but may not be the best
# solution in a long-lived application. This affects the following functions:
#
# load_model
# load_model_info
# build_model
KINDS = ("all", "py", "c", "double", "single", "opencl", "dll", "1d", "2d",
         "nonmagnetic", "magnetic")
def list_models(kind=None):
# type: (str) -> List[str]
"""
Return the list of available models on the model path.
*kind* can be one of the following:
* all: all models
* py: python models only
* c: c models only
* single: c models which support single precision
* double: c models which require double precision
* opencl: c models which run in opencl
* dll: c models which do not run in opencl
* 1d: models without orientation
* 2d: models with orientation
* magnetic: models supporting magnetic sld
    * nonmagnetic: models without magnetic parameters
For multiple conditions, combine with plus. For example, *c+single+2d*
would return all oriented models implemented in C which can be computed
accurately with single precision arithmetic.
"""
if kind and any(k not in KINDS for k in kind.split('+')):
raise ValueError("kind not in " + ", ".join(KINDS))
files = sorted(glob(joinpath(generate.MODEL_PATH, "[a-zA-Z]*.py")))
available_models = [basename(f)[:-3] for f in files]
if kind and '+' in kind:
all_kinds = kind.split('+')
condition = lambda name: all(_matches(name, k) for k in all_kinds)
else:
condition = lambda name: _matches(name, kind)
selected = [name for name in available_models if condition(name)]
return selected
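# Usage sketch (illustrative): list oriented C models that support single
# precision, combining kinds with '+':
#   from sasmodels.core import list_models
#   print(list_models("c+single+2d"))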
def _matches(name, kind):
if kind is None or kind == "all":
return True
info = load_model_info(name)
pars = info.parameters.kernel_parameters
# TODO: may be adding Fq to the list at some point
is_pure_py = callable(info.Iq)
if kind == "py":
return is_pure_py
elif kind == "c":
return not is_pure_py
elif kind == "double":
return not info.single and not is_pure_py
elif kind == "single":
return info.single and not is_pure_py
elif kind == "opencl":
return info.opencl
elif kind == "dll":
return not info.opencl and not is_pure_py
elif kind == "2d":
return any(p.type == 'orientation' for p in pars)
elif kind == "1d":
return all(p.type != 'orientation' for p in pars)
elif kind == "magnetic":
return any(p.type == 'sld' for p in pars)
elif kind == "nonmagnetic":
return not any(p.type == 'sld' for p in pars)
return False
def load_model(model_name, dtype=None, platform='ocl'):
# type: (str, str, str) -> KernelModel
"""
Load model info and build model.
*model_name* is the name of the model, or perhaps a model expression
such as sphere*hardsphere or sphere+cylinder.
*dtype* and *platform* are given by :func:`build_model`.
"""
return build_model(load_model_info(model_name),
dtype=dtype, platform=platform)
def load_model_info(model_string):
# type: (str) -> modelinfo.ModelInfo
"""
Load a model definition given the model name.
*model_string* is the name of the model, or perhaps a model expression
such as sphere*cylinder or sphere+cylinder. Use '@' for a structure
factor product, e.g. sphere@hardsphere. Custom models can be specified by
prefixing the model name with 'custom.', e.g. 'custom.MyModel+sphere'.
This returns a handle to the module defining the model. This can be
used with functions in generate to build the docs or extract model info.
"""
if "+" in model_string:
parts = [load_model_info(part)
for part in model_string.split("+")]
return mixture.make_mixture_info(parts, operation='+')
elif "*" in model_string:
parts = [load_model_info(part)
for part in model_string.split("*")]
return mixture.make_mixture_info(parts, operation='*')
elif "@" in model_string:
p_info, q_info = [load_model_info(part)
for part in model_string.split("@")]
return product.make_product_info(p_info, q_info)
# We are now dealing with a pure model
elif "custom." in model_string:
pattern = "custom.([A-Za-z0-9_-]+)"
result = re.match(pattern, model_string)
if result is None:
raise ValueError("Model name in invalid format: " + model_string)
model_name = result.group(1)
# Use ModelName to find the path to the custom model file
model_path = joinpath(CUSTOM_MODEL_PATH, model_name + ".py")
if not os.path.isfile(model_path):
raise ValueError("The model file {} doesn't exist".format(model_path))
kernel_module = custom.load_custom_kernel_module(model_path)
return modelinfo.make_model_info(kernel_module)
kernel_module = generate.load_kernel_module(model_string)
return modelinfo.make_model_info(kernel_module)
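# Usage sketch (illustrative) of the model-expression grammar described above:
#   sphere_sq = load_model_info("sphere@hardsphere")   # structure factor product
#   mixture = load_model_info("sphere+cylinder")       # mixture model
#   # Custom models resolve against SAS_MODELPATH (or ~/.sasmodels/custom_models);
#   # the model name below is hypothetical:
#   # mine = load_model_info("custom.MyModel")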
_REPARAMETERIZE_DOCS = """\
Definition
----------
Constrain :ref:`%(base)s` according to the following::
%(translation)s
"""
_LHS_RE = re.compile(r"^ *(?<![.0-9])([A-Za-z_][A-Za-z0-9_]+) *=",
flags=re.MULTILINE)
def reparameterize(
base, parameters, translation, filename=None,
title=None, insert_after=None, docs=None, name=None,
source=None,
):
"""
Reparameterize an existing model.
*base* is the original modelinfo. This cannot be a reparameterized model;
only one level of reparameterization is supported.
*parameters* are the new parameter definitions that will be
included in the model info.
    *translation* is a string, each line containing *var = expr*. The variable
    *var* can be a new intermediate value, or it can be a parameter from
    the base model that will be replaced by the expression. The expression
*expr* can be any C99 expression, including C-style if-expressions
*condition ? value1 : value2*. Expressions can use any new or existing
parameter that is not being replaced including intermediate values that
are previously defined. Parameters can only be assigned once, never
updated. C99 math functions are available, as well as any functions
defined in the base model or included in *source* (see below).
*filename* is the filename for the replacement model. This is usually
*__file__*, giving the path to the model file, but it could also be a
nominal filename for translations defined on-the-fly.
*title* is the model title, which defaults to *base.title* plus
" (reparameterized)".
*insert_after* controls parameter placement. By default, the new
parameters replace the old parameters in their original position.
Instead, you can provide a dictionary *{'par': 'newpar1,newpar2'}*
indicating that new parameters named *newpar1* and *newpar2* should
be included in the table after the existing parameter *par*, or at
the beginning if *par* is the empty string.
    *docs* contains the doc string for the translated model, which by default
references the base model and gives the *translation* text.
*name* is the model name (default = :code:`"constrained_" + base.name`).
    *source* is a list of any additional C source files that should be included
    to define functions and constants used in the translation expressions. These
    will be included after all sources for the base model. Sources will only
be included once, even if they are listed in both places, so feel free to
list all dependencies for the helper function, such as "lib/polevl.c".
"""
if not isinstance(base, modelinfo.ModelInfo):
base = load_model_info(base)
if name is None:
name = filename if filename is not None else "constrained_" + base.name
name = os.path.basename(name).split('.')[0]
if title is None:
title = base.title + " (reparameterized)"
if docs is None:
lines = "\n ".join(s.lstrip() for s in translation.split('\n'))
docs = _REPARAMETERIZE_DOCS%{'base': base.id, 'translation': lines}
#source = merge_deps(base.source, source)
source = (base.source + [f for f in source if f not in base.source]
if source else base.source)
# TODO: don't repeat code from generate._build_translation
base_pars = [par.id for par in base.parameters.kernel_parameters]
old_pars = [match.group(1) for match in _LHS_RE.finditer(translation)
if match.group(1) in base_pars]
new_pars = [modelinfo.parse_parameter(*p) for p in parameters]
table = modelinfo.derive_table(base.parameters, remove=old_pars,
insert=new_pars, insert_after=insert_after)
caller = copy.copy(base)
caller.translation = translation
caller.name = caller.id = name
caller.docs = docs
caller.filename = filename
caller.parameters = table
caller.source = source
return caller
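# Usage sketch for reparameterize (illustrative; the parameter-tuple layout
# ["name", "units", default, [lo, hi], "type", "description"] is the usual
# sasmodels model-file format and is assumed here): re-express the sphere
# radius through a volume parameter with a C99 translation expression.
#   volume_sphere = reparameterize(
#       "sphere",
#       parameters=[["volume", "Ang^3", 4.19e3, [0, inf], "volume",
#                    "particle volume"]],
#       translation="radius = cbrt(3.0*volume/(4.0*M_PI))",
#       filename=__file__,
#   )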
# Note: not used at the moment.
def merge_deps(old, new):
"""
Merge two dependency lists. The lists are partially ordered, with
all dependents coming after the items they depend on, but otherwise
order doesn't matter. The merged list preserves the partial ordering.
So if old and new both include the item "c", then all items that come
before "c" in old and new will come before "c" in the result, and all
items that come after "c" in old and new will come after "c" in the
result.
"""
if new is None:
return old
result = []
for item in new:
try:
index = old.index(item)
#print(item,"found in",old,"at",index,"giving",old[:index])
result.extend(old[:index])
old = old[index+1:]
except ValueError:
#print(item, "not found in", old)
pass
result.append(item)
#print("after", item, "old", old, "result", result)
result.extend(old)
return result
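# Worked example of the partial-order merge above:
#   merge_deps(["a", "c", "d"], ["b", "c", "e"]) == ["b", "a", "c", "d", "e"]
# "a" and "b" both precede "c", and "d" and "e" both follow it, so the
# ordering constraints of each input list are preserved.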
def build_model(model_info, dtype=None, platform="ocl"):
# type: (ModelInfo, str, str) -> KernelModel
"""
Prepare the model for the default execution platform.
This will return an OpenCL model, a DLL model or a python model depending
on the model and the computing platform.
*model_info* is the model definition structure returned from
:func:`load_model_info`.
*dtype* indicates whether the model should use single or double precision
for the calculation. Choices are 'single', 'double', 'quad', 'half',
or 'fast'. If *dtype* ends with '!', then force the use of the DLL rather
than OpenCL for the calculation.
*platform* should be "dll" to force the dll to be used for C models,
otherwise it uses the default "ocl".
"""
composition = model_info.composition
if composition is not None:
composition_type, parts = composition
models = [build_model(p, dtype=dtype, platform=platform) for p in parts]
if composition_type == 'mixture':
return mixture.MixtureModel(model_info, models)
elif composition_type == 'product':
P, S = models
return product.ProductModel(model_info, P, S)
else:
raise ValueError('unknown mixture type %s'%composition_type)
# If it is a python model, return it immediately
if callable(model_info.Iq):
from . import kernelpy
return kernelpy.PyModel(model_info)
numpy_dtype, fast, platform = parse_dtype(model_info, dtype, platform)
source = generate.make_source(model_info)
if platform == "dll":
from . import kerneldll
#print("building dll", numpy_dtype)
return kerneldll.load_dll(source['dll'], model_info, numpy_dtype)
elif platform == "cuda":
from . import kernelcuda
return kernelcuda.GpuModel(source, model_info, numpy_dtype, fast=fast)
else:
from . import kernelcl
#print("building ocl", numpy_dtype)
return kernelcl.GpuModel(source, model_info, numpy_dtype, fast=fast)
def precompile_dlls(path, dtype="double"):
# type: (str, str) -> List[str]
"""
Precompile the dlls for all builtin models, returning a list of dll paths.
*path* is the directory in which to save the dlls. It will be created if
it does not already exist.
    This can be used when building the Windows distribution of sasmodels,
which may be missing the OpenCL driver and the dll compiler.
"""
from . import kerneldll
numpy_dtype = np.dtype(dtype)
if not os.path.exists(path):
os.makedirs(path)
compiled_dlls = []
for model_name in list_models():
model_info = load_model_info(model_name)
if not callable(model_info.Iq):
source = generate.make_source(model_info)['dll']
old_path = kerneldll.SAS_DLL_PATH
try:
kerneldll.SAS_DLL_PATH = path
dll = kerneldll.make_dll(source, model_info, dtype=numpy_dtype)
finally:
kerneldll.SAS_DLL_PATH = old_path
compiled_dlls.append(dll)
return compiled_dlls
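# Usage sketch (illustrative): build double-precision DLLs for every builtin
# model into a local cache directory, e.g. when preparing a distribution.
#   paths = precompile_dlls("./dll-cache", dtype="double")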
def parse_dtype(model_info, dtype=None, platform=None):
# type: (ModelInfo, str, str) -> Tuple[np.dtype, bool, str]
"""
Interpret dtype string, returning np.dtype, fast flag and platform.
Possible types include 'half', 'single', 'double' and 'quad'. If the
type is 'fast', then this is equivalent to dtype 'single' but using
fast native functions rather than those with the precision level
guaranteed by the OpenCL standard. 'default' will choose the appropriate
default for the model and platform.
    Platform preference can be specified ("ocl", "cuda", "dll"), with the
default being OpenCL or CUDA if available, otherwise DLL. If the dtype
name ends with '!' then platform is forced to be DLL rather than GPU.
The default platform is set by the environment variable SAS_OPENCL,
SAS_OPENCL=driver:device for OpenCL, SAS_OPENCL=cuda:device for CUDA
or SAS_OPENCL=none for DLL.
This routine ignores the preferences within the model definition. This
is by design. It allows us to test models in single precision even when
we have flagged them as requiring double precision so we can easily check
the performance on different platforms without having to change the model
definition.
"""
# Assign default platform, overriding ocl with dll if OpenCL is unavailable
# If opencl=False OpenCL is switched off
if platform is None:
platform = "ocl"
# Check if type indicates dll regardless of which platform is given
if dtype is not None and dtype.endswith('!'):
platform = "dll"
dtype = dtype[:-1]
# Make sure model allows opencl/gpu
if not model_info.opencl:
platform = "dll"
# Make sure opencl is available, or fallback to cuda then to dll
if platform == "ocl":
from . import kernelcl
if not kernelcl.use_opencl():
from . import kernelcuda
platform = "cuda" if kernelcuda.use_cuda() else "dll"
# Convert special type names "half", "fast", and "quad"
fast = (dtype == "fast")
if fast:
dtype = "single"
elif dtype == "quad":
dtype = "longdouble"
elif dtype == "half":
dtype = "float16"
# Convert dtype string to numpy dtype. Use single precision for GPU
# if model allows it, otherwise use double precision.
if dtype is None or dtype == "default":
numpy_dtype = (generate.F32 if model_info.single and platform in ("ocl", "cuda")
else generate.F64)
else:
numpy_dtype = np.dtype(dtype)
# Make sure that the type is supported by GPU, otherwise use dll
if platform == "ocl":
from . import kernelcl
env = kernelcl.environment()
elif platform == "cuda":
from . import kernelcuda
env = kernelcuda.environment()
else:
env = None
if env is not None and not env.has_type(numpy_dtype):
platform = "dll"
if dtype is None:
numpy_dtype = generate.F64
return numpy_dtype, fast, platform
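# Illustrative dtype strings and their effect (per the docstring above):
#   "single"  -> np.float32, on a GPU platform when one is available
#   "double!" -> np.float64, with the trailing '!' forcing the DLL platform
#   "fast"    -> single precision plus fast native math on OpenCL/CUDA
#   "quad"    -> np.longdouble, which typically falls back to the DLL platform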
def test_composite_order():
"""
    Check that mixture models produce the same result independent of order.
"""
def test_models(fst, snd):
"""Confirm that two models produce the same parameters"""
fst = load_model(fst)
snd = load_model(snd)
# Un-disambiguate parameter names so that we can check if the same
# parameters are in a pair of composite models. Since each parameter in
# the mixture model is tagged as e.g., A_sld, we ought to use a
        # regex substitution s/^[A-Z]+_/_/, but removing all uppercase letters
# is good enough.
# TODO: check that the models produce the same results
# Note that compare.py will give a misleading answer. For
# "cylinder+sphere" the A_radius parameter will use the default
# cylinder radius, but for "sphere+cylinder" it will use the default
# sphere radius so a simple comparison of the two will appear to be
# different unless you explicitly set radius, solvent, and solvent_sld
# for the A and B models.
fst = [[x for x in p.name if x == x.lower()]
for p in fst.info.parameters.kernel_parameters]
snd = [[x for x in p.name if x == x.lower()]
for p in snd.info.parameters.kernel_parameters]
assert sorted(fst) == sorted(snd), "{} != {}".format(fst, snd)
test_models(
"cylinder+sphere",
"sphere+cylinder")
test_models(
"cylinder*sphere",
"sphere*cylinder")
test_models(
"cylinder@hardsphere*sphere",
"sphere*cylinder@hardsphere")
test_models(
"barbell+sphere*cylinder@hardsphere",
"sphere*cylinder@hardsphere+barbell")
test_models(
"barbell+cylinder@hardsphere*sphere",
"cylinder@hardsphere*sphere+barbell")
test_models(
"barbell+sphere*cylinder@hardsphere",
"barbell+cylinder@hardsphere*sphere")
test_models(
"sphere*cylinder@hardsphere+barbell",
"cylinder@hardsphere*sphere+barbell")
test_models(
"barbell+sphere*cylinder@hardsphere",
"cylinder@hardsphere*sphere+barbell")
test_models(
"barbell+cylinder@hardsphere*sphere",
"sphere*cylinder@hardsphere+barbell")
def test_composite():
# type: () -> None
"""Check that model load works"""
from .product import RADIUS_ID, VOLFRAC_ID, STRUCTURE_MODE_ID, RADIUS_MODE_ID
    # Test that the model produces the parameters that we would expect
model = load_model("cylinder@hardsphere*sphere")
actual = [p.name for p in model.info.parameters.kernel_parameters]
a_parts = ("sld", "sld_solvent", "radius", "length", "theta", "phi",
RADIUS_ID, VOLFRAC_ID, STRUCTURE_MODE_ID, RADIUS_MODE_ID)
b_parts = ("sld", "sld_solvent", "radius")
target = [*(f"A_{p}" for p in a_parts), *(f"B_{p}" for p in b_parts)]
assert target == actual, "%s != %s"%(target, actual)
def list_models_main():
# type: () -> int
"""
Run list_models as a main program. See :func:`list_models` for the
kinds of models that can be requested on the command line.
"""
import sys
kind = sys.argv[1] if len(sys.argv) > 1 else "all"
try:
models = list_models(kind)
print("\n".join(models))
except Exception:
print(list_models.__doc__)
return 1
return 0
if __name__ == "__main__":
list_models_main()
|
SasView/sasmodels
|
sasmodels/core.py
|
Python
|
bsd-3-clause
| 21,256 | 0.001553 |
from __future__ import unicode_literals
from django.utils.encoding import force_text
from django.db.models import Q
from . import find_modelname
from .models import LogEntry
LATEST_N_AUDITLOG_ENTRIES = 15
def get_auditlog_entries(iterable, limit=LATEST_N_AUDITLOG_ENTRIES):
modelname = find_modelname(list(iterable)[0])
pks = [force_text(i.pk) for i in iterable]
object_query = Q(object_pk__in=pks, object_model=modelname)
target_query = Q(target_pk__in=pks, object_model=modelname)
actor_query = Q(actor_pk__in=pks, object_model=modelname)
filter_query = object_query | target_query | actor_query
entries = (LogEntry.objects
.filter(filter_query)
.distinct()
.order_by('-timestamp')[:limit]
)
return entries
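# Usage sketch (illustrative): pass any iterable of model instances; entries
# where those objects were the actor, object, or target are returned, newest
# first. SomeModel is a stand-in for any audited model.
#   entries = get_auditlog_entries(SomeModel.objects.all()[:10], limit=5)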
|
sigmunau/nav
|
python/nav/auditlog/utils.py
|
Python
|
gpl-2.0
| 794 | 0.001259 |
from collections.abc import Iterable
from django import template
from django.db.models import Model
register = template.Library()
@register.filter
def get_type(value):
# inspired by: https://stackoverflow.com/a/12028864
return type(value)
@register.filter
def is_model(value):
return isinstance(value, Model)
@register.filter
def is_iterable(value):
return isinstance(value, Iterable)
@register.filter
def is_str(value):
return isinstance(value, str)
@register.filter
def is_bool(value):
return isinstance(value, bool)
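# Template usage sketch (illustrative), assuming the app providing this tag
# library is in INSTALLED_APPS:
#   {% load type_extras %}
#   {% if value|is_model %} ... {% elif value|is_iterable %} ... {% endif %}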
|
pbanaszkiewicz/amy
|
amy/autoemails/templatetags/type_extras.py
|
Python
|
mit
| 555 | 0 |
#!/usr/bin/env python
###########################################################################
## ##
## Language Technologies Institute ##
## Carnegie Mellon University ##
## Copyright (c) 2012 ##
## All Rights Reserved. ##
## ##
## Permission is hereby granted, free of charge, to use and distribute ##
## this software and its documentation without restriction, including ##
## without limitation the rights to use, copy, modify, merge, publish, ##
## distribute, sublicense, and/or sell copies of this work, and to ##
## permit persons to whom this work is furnished to do so, subject to ##
## the following conditions: ##
## 1. The code must retain the above copyright notice, this list of ##
## conditions and the following disclaimer. ##
## 2. Any modifications must be clearly marked as such. ##
## 3. Original authors' names are not deleted. ##
## 4. The authors' names are not used to endorse or promote products ##
## derived from this software without specific prior written ##
## permission. ##
## ##
## CARNEGIE MELLON UNIVERSITY AND THE CONTRIBUTORS TO THIS WORK ##
## DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ##
## ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT ##
## SHALL CARNEGIE MELLON UNIVERSITY NOR THE CONTRIBUTORS BE LIABLE ##
## FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ##
## WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN ##
## AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ##
## ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF ##
## THIS SOFTWARE. ##
## ##
###########################################################################
## Author: Aasish Pappu (aasish@cs.cmu.edu) ##
## Date : November 2012 ##
###########################################################################
## Description: Example python backend module for olympus applications ##
###########################################################################
###########################################################################
## RedditResponder Modifications ##
## Author: Leah Nicolich-Henkin (leah.nh@cs.cmu.edu) ##
## Date : January 2016 ##
## ##
## Working off TickTock 'galbackend' version from January 2015 ##
## with notable additions by @yipeiw ##
## Deleted nearly the entirety of the code, retaining structure of ##
## methods previously used for debugging ##
## and resource structure/initialization ##
## ##
###########################################################################
# LNH: uses the Loader to create idf_dict, which is used for comparing candidates
import Loader
import RedditQuery
# @yipeiw
resource = {}
# listfile = 'reddit_corpus.list' # file listing all corpus files to be used as a database
idf_file = 'idf_dict.csv' # file listing words and idf values
def init_resource():
global resource
resource = Loader.load_language_resource(idf_file)
# @yipeiw
# LNH: instead of using Control/Understand/Retrieval to find a response from the database,
# call RedditQuery, which queries Reddit directly
def get_response(user_input):
    global resource
relevance, answer = RedditQuery.find_candidate(user_input, resource)
# print("answer is: " + str(answer))
output = " ".join(answer)
return output
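# Usage sketch (illustrative): load the idf dictionary once, then ask for a
# response to a single user turn.
#   init_resource()
#   print(get_response("tell me something interesting"))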
|
leahrnh/ticktock_text_api
|
Backend.py
|
Python
|
gpl-2.0
| 4,516 | 0.010407 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Hetzner Cloud GmbH <info@hetzner-cloud.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: hcloud_server
short_description: Create and manage cloud servers on the Hetzner Cloud.
version_added: "2.8"
description:
- Create, update and manage cloud servers on the Hetzner Cloud.
author:
- Lukas Kaemmerling (@LKaemmerling)
options:
id:
description:
- The ID of the Hetzner Cloud server to manage.
- Only required if no server I(name) is given
type: int
name:
description:
- The Name of the Hetzner Cloud server to manage.
- Only required if no server I(id) is given or a server does not exists.
type: str
server_type:
description:
- The Server Type of the Hetzner Cloud server to manage.
            - Required if the server does not exist.
type: str
ssh_keys:
description:
- List of SSH key names
- The key names correspond to the SSH keys configured for your
Hetzner Cloud account access.
type: list
volumes:
description:
- List of Volumes IDs that should be attached to the server on server creation.
type: list
image:
description:
- Image the server should be created from.
            - Required if the server does not exist.
type: str
location:
description:
- Location of Server.
            - Required if no I(datacenter) is given and the server does not exist.
type: str
datacenter:
description:
- Datacenter of Server.
            - Required if no I(location) is given and the server does not exist.
type: str
backups:
description:
- Enable or disable Backups for the given Server.
type: bool
default: no
upgrade_disk:
description:
- Resize the disk size, when resizing a server.
- If you want to downgrade the server later, this value should be False.
type: bool
default: no
force_upgrade:
description:
- Force the upgrade of the server.
            - Power off the server if it is running during the upgrade.
type: bool
default: no
user_data:
description:
- User Data to be passed to the server on creation.
            - Only used if the server does not exist.
type: str
labels:
description:
- User-defined labels (key-value pairs).
type: dict
state:
description:
- State of the server.
default: present
choices: [ absent, present, restarted, started, stopped, rebuild ]
type: str
extends_documentation_fragment: hcloud
"""
EXAMPLES = """
- name: Create a basic server
hcloud_server:
name: my-server
server_type: cx11
image: ubuntu-18.04
state: present
- name: Create a basic server with ssh key
hcloud_server:
name: my-server
server_type: cx11
image: ubuntu-18.04
location: fsn1
ssh_keys:
- me@myorganisation
state: present
- name: Resize an existing server
hcloud_server:
name: my-server
server_type: cx21
upgrade_disk: yes
state: present
- name: Ensure the server is absent (remove if needed)
hcloud_server:
name: my-server
state: absent
- name: Ensure the server is started
hcloud_server:
name: my-server
state: started
- name: Ensure the server is stopped
hcloud_server:
name: my-server
state: stopped
- name: Ensure the server is restarted
hcloud_server:
name: my-server
state: restarted
- name: Ensure the server is rebuilt
hcloud_server:
name: my-server
image: ubuntu-18.04
state: rebuild
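# Illustrative extra example (not part of the original module documentation); the
# volume ID, label values and cloud-init payload below are assumptions.
- name: Create a server with volumes, labels, user data and backups enabled
  hcloud_server:
      name: my-server
      server_type: cx11
      image: ubuntu-18.04
      ssh_keys:
        - me@myorganisation
      volumes:
        - 123456
      labels:
        env: dev
      user_data: |
        #cloud-config
        package_update: true
      backups: yes
      state: present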
"""
RETURN = """
hcloud_server:
description: The server instance
returned: Always
type: dict
sample: {
"backup_window": null,
"datacenter": "nbg1-dc3",
"id": 1937415,
"image": "ubuntu-18.04",
"ipv4_address": "116.203.104.109",
"ipv6": "2a01:4f8:1c1c:c140::/64",
"labels": {},
"location": "nbg1",
"name": "mein-server-2",
"rescue_enabled": false,
"server_type": "cx11",
"status": "running"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.hcloud import Hcloud
try:
from hcloud.volumes.domain import Volume
from hcloud.ssh_keys.domain import SSHKey
from hcloud.servers.domain import Server
from hcloud import APIException
except ImportError:
pass
class AnsibleHcloudServer(Hcloud):
def __init__(self, module):
Hcloud.__init__(self, module, "hcloud_server")
self.hcloud_server = None
def _prepare_result(self):
return {
"id": to_native(self.hcloud_server.id),
"name": to_native(self.hcloud_server.name),
"ipv4_address": to_native(self.hcloud_server.public_net.ipv4.ip),
"ipv6": to_native(self.hcloud_server.public_net.ipv6.ip),
"image": to_native(self.hcloud_server.image.name),
"server_type": to_native(self.hcloud_server.server_type.name),
"datacenter": to_native(self.hcloud_server.datacenter.name),
"location": to_native(self.hcloud_server.datacenter.location.name),
"rescue_enabled": self.hcloud_server.rescue_enabled,
"backup_window": to_native(self.hcloud_server.backup_window),
"labels": self.hcloud_server.labels,
"status": to_native(self.hcloud_server.status),
}
def _get_server(self):
try:
if self.module.params.get("id") is not None:
self.hcloud_server = self.client.servers.get_by_id(
self.module.params.get("id")
)
else:
self.hcloud_server = self.client.servers.get_by_name(
self.module.params.get("name")
)
except APIException as e:
self.module.fail_json(msg=e.message)
def _create_server(self):
self.module.fail_on_missing_params(
required_params=["name", "server_type", "image"]
)
params = {
"name": self.module.params.get("name"),
"server_type": self.client.server_types.get_by_name(
self.module.params.get("server_type")
),
"user_data": self.module.params.get("user_data"),
"labels": self.module.params.get("labels"),
}
if self.client.images.get_by_name(self.module.params.get("image")) is not None:
            # Prefer looking the image up by name; fall back to lookup by ID when the name is not found
params["image"] = self.client.images.get_by_name(self.module.params.get("image"))
else:
params["image"] = self.client.images.get_by_id(self.module.params.get("image"))
if self.module.params.get("ssh_keys") is not None:
params["ssh_keys"] = [
SSHKey(name=ssh_key_name)
for ssh_key_name in self.module.params.get("ssh_keys")
]
if self.module.params.get("volumes") is not None:
params["volumes"] = [
Volume(id=volume_id) for volume_id in self.module.params.get("volumes")
]
if self.module.params.get("location") is None and self.module.params.get("datacenter") is None:
# When not given, the API will choose the location.
params["location"] = None
params["datacenter"] = None
elif self.module.params.get("location") is not None and self.module.params.get("datacenter") is None:
params["location"] = self.client.locations.get_by_name(
self.module.params.get("location")
)
elif self.module.params.get("location") is None and self.module.params.get("datacenter") is not None:
params["datacenter"] = self.client.datacenters.get_by_name(
self.module.params.get("datacenter")
)
if not self.module.check_mode:
resp = self.client.servers.create(**params)
self.result["root_password"] = resp.root_password
resp.action.wait_until_finished(max_retries=1000)
[action.wait_until_finished() for action in resp.next_actions]
self._mark_as_changed()
self._get_server()
def _update_server(self):
if self.module.params.get("backups") and self.hcloud_server.backup_window is None:
if not self.module.check_mode:
self.hcloud_server.enable_backup().wait_until_finished()
self._mark_as_changed()
elif not self.module.params.get("backups") and self.hcloud_server.backup_window is not None:
if not self.module.check_mode:
self.hcloud_server.disable_backup().wait_until_finished()
self._mark_as_changed()
labels = self.module.params.get("labels")
if labels is not None and labels != self.hcloud_server.labels:
if not self.module.check_mode:
self.hcloud_server.update(labels=labels)
self._mark_as_changed()
server_type = self.module.params.get("server_type")
if server_type is not None and self.hcloud_server.server_type.name != server_type:
previous_server_status = self.hcloud_server.status
state = self.module.params.get("state")
if previous_server_status == Server.STATUS_RUNNING:
if not self.module.check_mode:
if self.module.params.get("force_upgrade") or state == "stopped":
self.stop_server() # Only stopped server can be upgraded
else:
self.module.warn(
"You can not upgrade a running instance %s. You need to stop the instance or use force_upgrade=yes."
% self.hcloud_server.name
)
timeout = 100
if self.module.params.get("upgrade_disk"):
                # Upgrading the disk as well makes the resize take considerably longer.
                timeout = 1000
if not self.module.check_mode:
self.hcloud_server.change_type(
server_type=self.client.server_types.get_by_name(server_type),
upgrade_disk=self.module.params.get("upgrade_disk"),
).wait_until_finished(timeout)
if state == "present" and previous_server_status == Server.STATUS_RUNNING or state == "started":
self.start_server()
self._mark_as_changed()
self._get_server()
def start_server(self):
if self.hcloud_server.status != Server.STATUS_RUNNING:
if not self.module.check_mode:
self.client.servers.power_on(self.hcloud_server).wait_until_finished()
self._mark_as_changed()
self._get_server()
def stop_server(self):
if self.hcloud_server.status != Server.STATUS_OFF:
if not self.module.check_mode:
self.client.servers.power_off(self.hcloud_server).wait_until_finished()
self._mark_as_changed()
self._get_server()
def rebuild_server(self):
self.module.fail_on_missing_params(
required_params=["image"]
)
if not self.module.check_mode:
self.client.servers.rebuild(self.hcloud_server, self.client.images.get_by_name(self.module.params.get("image"))).wait_until_finished()
self._mark_as_changed()
self._get_server()
def present_server(self):
self._get_server()
if self.hcloud_server is None:
self._create_server()
else:
self._update_server()
def delete_server(self):
self._get_server()
if self.hcloud_server is not None:
if not self.module.check_mode:
self.client.servers.delete(self.hcloud_server).wait_until_finished()
self._mark_as_changed()
self.hcloud_server = None
@staticmethod
def define_module():
return AnsibleModule(
argument_spec=dict(
id={"type": "int"},
name={"type": "str"},
image={"type": "str"},
server_type={"type": "str"},
location={"type": "str"},
datacenter={"type": "str"},
user_data={"type": "str"},
ssh_keys={"type": "list"},
volumes={"type": "list"},
labels={"type": "dict"},
backups={"type": "bool", "default": False},
upgrade_disk={"type": "bool", "default": False},
force_upgrade={"type": "bool", "default": False},
state={
"choices": ["absent", "present", "restarted", "started", "stopped", "rebuild"],
"default": "present",
},
**Hcloud.base_module_arguments()
),
required_one_of=[['id', 'name']],
mutually_exclusive=[["location", "datacenter"]],
supports_check_mode=True,
)
def main():
module = AnsibleHcloudServer.define_module()
hcloud = AnsibleHcloudServer(module)
state = module.params.get("state")
if state == "absent":
hcloud.delete_server()
elif state == "present":
hcloud.present_server()
elif state == "started":
hcloud.present_server()
hcloud.start_server()
elif state == "stopped":
hcloud.present_server()
hcloud.stop_server()
elif state == "restarted":
hcloud.present_server()
hcloud.stop_server()
hcloud.start_server()
elif state == "rebuild":
hcloud.present_server()
hcloud.rebuild_server()
module.exit_json(**hcloud.get_result())
if __name__ == "__main__":
main()
|
aperigault/ansible
|
lib/ansible/modules/cloud/hcloud/hcloud_server.py
|
Python
|
gpl-3.0
| 14,437 | 0.001939 |
from rasa.engine.caching import TrainingCache
from rasa.engine.graph import ExecutionContext, GraphNode, GraphSchema, SchemaNode
from rasa.engine.storage.storage import ModelStorage
from rasa.engine.training import fingerprinting
from rasa.engine.training.components import PrecomputedValueProvider
from rasa.engine.training.hooks import TrainingHook
from tests.engine.graph_components_test_classes import (
CacheableComponent,
CacheableText,
)
def test_training_hook_saves_to_cache(
default_model_storage: ModelStorage, temp_cache: TrainingCache,
):
# We need an execution context so the hook can determine the class of the graph
# component
execution_context = ExecutionContext(
GraphSchema(
{
"hello": SchemaNode(
needs={},
constructor_name="create",
fn="run",
config={},
uses=CacheableComponent,
)
}
),
"1",
)
node = GraphNode(
node_name="hello",
component_class=CacheableComponent,
constructor_name="create",
component_config={},
fn_name="run",
inputs={"suffix": "input_node"},
eager=False,
model_storage=default_model_storage,
resource=None,
execution_context=execution_context,
hooks=[
TrainingHook(
cache=temp_cache,
model_storage=default_model_storage,
pruned_schema=execution_context.graph_schema,
)
],
)
node(("input_node", "Joe"))
# This is the same key that the hook will generate
fingerprint_key = fingerprinting.calculate_fingerprint_key(
graph_component_class=CacheableComponent,
config={"prefix": "Hello "},
inputs={"suffix": "Joe"},
)
output_fingerprint_key = temp_cache.get_cached_output_fingerprint(fingerprint_key)
assert output_fingerprint_key
cached_result = temp_cache.get_cached_result(
output_fingerprint_key=output_fingerprint_key,
model_storage=default_model_storage,
node_name="hello",
)
assert isinstance(cached_result, CacheableText)
assert cached_result.text == "Hello Joe"
def test_training_hook_does_not_cache_cached_component(
default_model_storage: ModelStorage, temp_cache: TrainingCache,
):
# We need an execution context so the hook can determine the class of the graph
# component
execution_context = ExecutionContext(
GraphSchema(
{
"hello": SchemaNode(
needs={},
constructor_name="create",
fn="run",
config={},
uses=PrecomputedValueProvider,
)
}
),
"1",
)
node = GraphNode(
node_name="hello",
component_class=PrecomputedValueProvider,
constructor_name="create",
component_config={"output": CacheableText("hi")},
fn_name="get_value",
inputs={},
eager=False,
model_storage=default_model_storage,
resource=None,
execution_context=execution_context,
hooks=[
TrainingHook(
cache=temp_cache,
model_storage=default_model_storage,
pruned_schema=execution_context.graph_schema,
)
],
)
node(("input_node", "Joe"))
# This is the same key that the hook will generate
fingerprint_key = fingerprinting.calculate_fingerprint_key(
graph_component_class=PrecomputedValueProvider,
config={"output": CacheableText("hi")},
inputs={},
)
# The hook should not cache the output of a PrecomputedValueProvider
assert not temp_cache.get_cached_output_fingerprint(fingerprint_key)
|
RasaHQ/rasa_nlu
|
tests/engine/training/test_hooks.py
|
Python
|
apache-2.0
| 3,903 | 0.001025 |
# module General Ui
# file ui_maya_dock.py
# Main Dock Window interface
from thlib.side.Qt import QtWidgets as QtGui
#from thlib.side.Qt import QtCore
from thlib.environment import env_inst, env_mode, env_read_config, env_write_config
import thlib.maya_functions as mf
import thlib.tactic_classes as tc
import thlib.global_functions as gf
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
import maya.cmds as cmds
import ui_main_classes
# reload(ui_main_classes)
class Ui_DockMain(MayaQWidgetDockableMixin, QtGui.QMainWindow):
def __init__(self, parent=None):
super(self.__class__, self).__init__(parent=parent)
env_inst.ui_maya_dock = self
self.setObjectName('TacticHandlerDock')
self.docked = None
self.dock_pos = None
self.dock_area = None
self.dock_size = None
self.dock_is_floating = None
self.readSettings()
self.toggle_dock = None
self.maya_dock = None
self.status_bar = None
self.create_ui_main()
self.create_ui()
self.catch_maya_closing()
def create_ui(self):
if self.docked:
self.set_docked()
else:
self.set_undocked()
def toggle_docking(self):
if self.toggle_dock:
self.set_undocked()
else:
self.set_docked()
def create_ui_main(self):
env_inst.ui_main = ui_main_classes.Ui_Main()
self.setCentralWidget(env_inst.ui_main)
self.setWindowTitle(env_inst.ui_main.windowTitle())
self.move(self.dock_pos)
def set_docked(self):
# status_bar = env_inst.ui_main.statusBar()
# if status_bar:
# status_bar.show()
self.toggle_dock = True
self.setDockableParameters(
dockable=True,
floating=self.dock_is_floating,
area=self.dock_area,
width=self.dock_size.width(),
height=self.dock_size.height()
)
self.show()
self.raise_()
self.docked = True
def set_undocked(self):
self.toggle_dock = False
self.setDockableParameters(
dockable=False,
floating=self.dock_is_floating,
area=self.dock_area,
width=self.dock_size.width(),
height=self.dock_size.height()
)
if self.maya_dock:
print self.maya_dock
self.removeDockWidget(self.maya_dock)
self.maya_dock.close()
self.maya_dock.deleteLater()
self.docked = False
# status_bar = env_inst.ui_main.statusBar()
# status_bar.show()
def set_settings_from_dict(self, settings_dict=None):
ref_settings_dict = {
'docked': 0,
'dock_pos': (200, 200),
'dock_size': (427, 690),
'dock_isFloating': 0,
'dock_tabArea': 1,
}
settings = gf.check_config(ref_settings_dict, settings_dict)
self.docked = bool(int(settings['docked']))
self.dock_pos = gf.tuple_to_qsize(settings['dock_pos'], 'pos')
self.dock_size = gf.tuple_to_qsize(settings['dock_size'], 'size')
self.dock_is_floating = bool(int(settings['dock_isFloating']))
if int(settings['dock_tabArea']) == 2:
self.dock_area = 'right'
else:
self.dock_area = 'left'
def get_settings_dict(self):
settings_dict = {
'docked': int(self.docked),
}
if self.docked:
maya_dock = self.parent()
settings_dict['dock_pos'] = gf.qsize_to_tuple(maya_dock.pos())
settings_dict['dock_size'] = gf.qsize_to_tuple(maya_dock.size())
settings_dict['dock_isFloating'] = int(bool(self.isFloating()))
settings_dict['dock_tabArea'] = int(env_inst.ui_super.dockWidgetArea(self.maya_dock))
else:
settings_dict['dock_pos'] = gf.qsize_to_tuple(self.pos())
settings_dict['dock_size'] = gf.qsize_to_tuple(self.size())
settings_dict['dock_isFloating'] = 0
settings_dict['dock_tabArea'] = 1
return settings_dict
def readSettings(self):
self.set_settings_from_dict(
env_read_config(filename='ui_maya_settings', unique_id='ui_main', long_abs_path=True)
)
def writeSettings(self):
env_write_config(self.get_settings_dict(), filename='ui_maya_settings', unique_id='ui_main', long_abs_path=True)
def raise_window(self):
if self.isMaximized():
self.showMaximized()
else:
self.showNormal()
QtGui.QDialog.activateWindow(self)
def catch_maya_closing(self):
QtGui.QApplication.instance().aboutToQuit.connect(env_inst.ui_main.close)
QtGui.QApplication.instance().aboutToQuit.connect(self.close)
def closeEvent(self, event):
if self.docked:
self.removeDockWidget(self.maya_dock)
self.maya_dock.close()
self.maya_dock.deleteLater()
self.writeSettings()
event.accept()
def init_env(current_path):
env_mode.set_current_path(current_path)
env_mode.set_mode('maya')
def close_all_instances():
try:
main_docks = mf.get_maya_dock_window()
for dock in main_docks:
dock.writeSettings()
dock.close()
dock.deleteLater()
if env_inst.ui_main:
env_inst.ui_main.close()
if cmds.workspaceControl('TacticHandlerDockWorkspaceControl', e=True, exists=True):
cmds.deleteUI('TacticHandlerDockWorkspaceControl', control=True)
except:
raise
@gf.catch_error
def create_ui(error_tuple=None):
if error_tuple:
env_mode.set_offline()
main_tab = Ui_DockMain()
gf.error_handle(error_tuple)
else:
env_mode.set_online()
main_tab = Ui_DockMain()
main_tab.show()
main_tab.raise_()
@gf.catch_error
def startup(restart=False, *args, **kwargs):
if restart:
close_all_instances()
env_inst.ui_super = mf.get_maya_window()
try:
main_tab = mf.get_maya_dock_window()[0]
main_tab.show()
main_tab.raise_()
main_tab.raise_window()
except:
# def server_ping_agent():
# return tc.server_ping()
#
# ping_worker, thread_pool = gf.get_thread_worker(
# server_ping_agent,
# finished_func=lambda: create_ui(None),
# error_func=create_ui
# )
#
# thread_pool.start(ping_worker)
env_inst.start_pools()
worker = env_inst.server_pool.add_task(tc.server_ping)
worker.finished.connect(create_ui)
worker.error.connect(create_ui)
worker.start()
|
listyque/TACTIC-Handler
|
thlib/ui_classes/ui_maya_dock.py
|
Python
|
epl-1.0
| 6,812 | 0.001468 |
import json
from PIL import Image, ImageFont, ImageDraw
with open('./config/nodes.json') as data_file:
data = json.load(data_file)
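# The nodes.json file is assumed to map node ids to pixel positions, roughly:
#   {"1": {"x": 200, "y": 200}, "2": {"x": 210, "y": 195}, ...}
# (only the "x"/"y" keys are read below; the ids become the text labels)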
gray = (200, 200, 200)
black = (0,0,0)
lightblue = (225, 255, 255)
darkblue = (185, 255, 255)
img = Image.new("RGBA", (400,400), 128)
usr_font = ImageFont.truetype("./fonts/Arial.ttf", 10)
draw = ImageDraw.Draw(img)
# grid
for i in range(10, 400, 10):
if (i / 50.0) % 1 == 0:
fillcolor = darkblue
else:
fillcolor = lightblue
draw.line([(i, 1), (i, 399)], fill=fillcolor, width=1)
draw.line([(1, i), (399, i)], fill=fillcolor, width=1)
# square border
draw.line([
(1, 1), (1, img.size[1]-1),
(1, img.size[1]-1), (img.size[0]-1, img.size[1]-1),
(img.size[0]-1, img.size[1]-1), (img.size[0]-1, 1),
(img.size[0]-1, 1), (1, 1)
], fill=gray, width=1)
# crosshair
draw.line([(img.size[0]/2, 1), (img.size[0]/2, img.size[1]-1)], fill=gray, width=1)
draw.line([(1, img.size[1]/2), (img.size[0]-1, img.size[1]/2)], fill=gray, width=1)
# top tier circle approximation
draw.ellipse([
(130, 130),
(270, 270)
], fill=None, outline=black)
# roof tier approximation
draw.ellipse([
(60, 60),
(340, 340)
], fill=None, outline=black)
# bottom tier (walls) approximation
draw.ellipse([
(25, 25),
(375, 375)
], fill=None, outline=black)
# label the global position numbers
for i, pos in data.items():
# huh? y axis 0 is top of the image...
draw.point((pos['x'], -pos['y'] + 400), (0, 0, 0))
draw.text((pos['x'], -pos['y'] + 400), i, (0,0,0), font=usr_font)
img.save("./web/positions.png", "PNG")
|
Ibuprofen/gizehmoviepy
|
node_positions_image.py
|
Python
|
mit
| 1,603 | 0.008734 |
#-*- coding: utf-8 -*-
"""
@author: Rinze de Laat
Copyright © 2013 Rinze de Laat, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the terms
of the GNU General Public License version 2 as published by the Free Software
Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
"""
from __future__ import division
from functools import partial
import gc
import logging
from odemis import util
from odemis.util import limit_invocation, TimeoutError
from odemis.util import timeout
import time
import unittest
import weakref
logging.getLogger().setLevel(logging.DEBUG)
class TestLimitInvocation(unittest.TestCase):
def test_not_too_often(self):
self.count = 0
now = time.time()
end = now + 1.1 # a bit more than 1 s
while time.time() < end:
self.count_max_1s()
time.sleep(0.01)
self.assertLessEqual(self.count, 2, "method was called more than twice in 1 second: %d" % self.count)
time.sleep(2) # wait for the last potential calls to happen
self.assertLessEqual(self.count, 3, "method was called more than three times in 2 seconds: %d" % self.count)
@limit_invocation(1)
def count_max_1s(self):
# never called more than once per second
self.count += 1
time.sleep(0.2)
def test_gc(self):
u = Useless()
wku = weakref.ref(u)
now = time.time()
end = now + 1.1 # a bit more than 1 s
while time.time() < end:
u.doit(time.time(), b=3)
time.sleep(0.01)
# Check the object u has nothing preventing it from being dereferenced
del u
time.sleep(1) # wait for the last potential calls to happen
self.assertIsNone(wku())
class Useless(object):
"""
Independent class for testing limit_invocation decorator
"""
def __del__(self):
print "Useless %r is gone" % self
@limit_invocation(0.1)
def doit(self, a, b=None):
print "doing it %s, %s" % (a, b)
class TestTimeout(unittest.TestCase):
@timeout(1.2)
def test_notimeout(self):
time.sleep(1)
def test_timeout(self):
self.assertRaises(TimeoutError, self.toolong)
@timeout(0.5)
def toolong(self):
# will always timeout
time.sleep(1)
class SortedAccordingTestCase(unittest.TestCase):
def test_simple(self):
in_exp = ((([1, 2, 3], [3, 2, 1]), [3, 2, 1]),
(([1, 2, 3], [4, 2]), [2, 1, 3]),
(([], [4, 2]), []),
((["b", "a"], []), ["b", "a"]),
)
for i, eo in in_exp:
o = util.sorted_according_to(*i)
self.assertEqual(o, eo, "Failed to get correct output for %s" % (i,))
class AlmostEqualTestCase(unittest.TestCase):
def test_simple(self):
in_exp = {(0., 0): True,
(-5, -5.): True,
(1., 1. - 1e-9): True,
(1., 1. - 1e-3): False,
(1., 1. + 1e-3): False,
(-5e-8, -5e-8 + 1e-19): True,
(5e18, 5e18 + 1): True,
}
for i, eo in in_exp.items():
o = util.almost_equal(*i)
self.assertEqual(o, eo, "Failed to get correct output for %s" % (i,))
# Bounding box clipping test data generation
def tp(trans, ps):
""" Translate points ps using trans """
r = []
i = 0
for p in ps:
r.append(p + trans[i])
i = (i + 1) % len(trans)
return tuple(r)
# First we define a bounding boxes, at different locations
bounding_boxes = [(-2, -2, 0, 0),
(-1, -1, 1, 1),
(0, 0, 2, 2),
(2, 2, 4, 4)]
# From this, we generate boxes that are situated all around these
# bounding boxes, but that do not touch or overlap them.
def relative_boxes(bb):
t_left = [(-3, i) for i in range(-3, 4)]
to_the_left = [tp(t, bb) for t in t_left]
t_top = [(i, -3) for i in range(-3, 4)]
to_the_top = [tp(t, bb) for t in t_top]
t_right = [(3, i) for i in range(-3, 4)]
to_the_right = [tp(t, bb) for t in t_right]
t_bottom = [(i, 3) for i in range(-3, 4)]
to_the_bottom = [tp(t, bb) for t in t_bottom]
outside_boxes = to_the_left + to_the_top + to_the_right + to_the_bottom
# Selection boxes that touch the outside of the bounding box
touch_left = [tp((1, 0), b) for b in to_the_left[1:-1]]
touch_top = [tp((0, 1), b) for b in to_the_top[1:-1]]
touch_right = [tp((-1, 0), b) for b in to_the_right[1:-1]]
touch_bottom = [tp((0, -1), b) for b in to_the_bottom[1:-1]]
touching_boxes = touch_left + touch_top + touch_right + touch_bottom
# Partial overlapping boxes
overlap_left = [tp((1, 0), b) for b in touch_left[1:-1]]
overlap_top = [tp((0, 1), b) for b in touch_top[1:-1]]
overlap_right = [tp((-1, 0), b) for b in touch_right[1:-1]]
overlap_bottom = [tp((0, -1), b) for b in touch_bottom[1:-1]]
overlap_boxes = overlap_left + overlap_top + overlap_right + overlap_bottom
return outside_boxes, touching_boxes, overlap_boxes
class CanvasTestCase(unittest.TestCase):
def test_clipping(self):
tmp = "{}: {} - {} -> {}"
for bb in bounding_boxes:
outside, touching, overlap = relative_boxes(bb)
for b in outside:
r = util.rect_intersect(b, bb)
msg = tmp.format("outside", b, bb, r)
self.assertIsNone(r, msg)
for b in touching:
r = util.rect_intersect(b, bb)
msg = tmp.format("touching", b, bb, r)
self.assertIsNone(r, msg)
for b in overlap:
r = util.rect_intersect(b, bb)
msg = tmp.format("overlap", b, bb, r)
self.assertIsNotNone(r, msg)
# 'Manual' checks
if bb == (-1, -1, 1, 1):
if b[:2] == (-2, -2):
self.assertEqual(r, (-1, -1, 0, 0), msg)
elif b[:2] == (0, -1):
self.assertEqual(r, (0, -1, 1, 1), msg)
elif b[:2] == (0, 0):
self.assertEqual(r, (0, 0, 1, 1), msg)
# full and exact overlap
b = bb
r = util.rect_intersect(b, bb)
self.assertEqual(r, bb)
# inner overlap
b = (bb[0] + 1, bb[1] + 1, bb[2], bb[3])
r = util.rect_intersect(b, bb)
self.assertEqual(r, b)
# overflowing overlap
b = (bb[0] - 1, bb[1] - 1, bb[2] + 1, bb[2] + 1)
r = util.rect_intersect(b, bb)
self.assertEqual(r, bb)
def test_line_clipping(self):
bounding_box = (0, 4, 4, 0)
clip = partial(util.clip_line, *bounding_box)
# Test lines within bounding box, i.e. no clipping should occur
internal = [
(0, 0, 0, 0),
(2, 2, 2, 2),
(0, 0, 4, 4),
(4, 4, 0, 0),
(0, 2, 2, 0),
(2, 0, 0, 2),
]
for line in internal:
self.assertEqual(line, clip(*line))
# Test clipping for lines originating in the center of the bounding box and ending outside
# of it.
inner_to_outer = [
((2, 2, 2, 6), (2, 2, 2, 4)),
((2, 2, 6, 2), (2, 2, 4, 2)),
((2, 2, 2, -2), (2, 2, 2, 0)),
((2, 2, -2, 2), (2, 2, 0, 2)),
((2, 2, 6, -2), (2, 2, 4, 0)),
((2, 2, -2, -2), (2, 2, 0, 0)),
((2, 2, -2, -2), (2, 2, 0, 0)),
]
for orig, clipped in inner_to_outer:
self.assertEqual(clipped, clip(*orig))
outer_to_inner = [
((2, 6, 2, 2), (2, 4, 2, 2)),
((6, 2, 2, 2), (4, 2, 2, 2)),
((2, -2, 2, 2), (2, 0, 2, 2)),
((-2, 2, 2, 2), (0, 2, 2, 2)),
((6, -2, 2, 2), (4, 0, 2, 2)),
((-2, -2, 2, 2), (0, 0, 2, 2)),
((-2, -2, 2, 2), (0, 0, 2, 2)),
]
for orig, clipped in outer_to_inner:
self.assertEqual(clipped, clip(*orig))
if __name__ == "__main__":
unittest.main()
|
gstiebler/odemis
|
src/odemis/util/test/util_test.py
|
Python
|
gpl-2.0
| 8,569 | 0.001984 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from abc import ABCMeta, abstractmethod
from superdesk.etree import etree as sd_etree
from superdesk.errors import SkipValue
from flask import current_app as app
from superdesk.metadata.item import Priority
from collections import OrderedDict
from lxml import etree
import superdesk
import logging
logger = logging.getLogger(__name__)
class FeedParser(metaclass=ABCMeta):
"""
Base class for a Feed Parser.
A Feed Parser class must have the following attributes:
1. `NAME` - unique name under which to register the class.
"""
@abstractmethod
def can_parse(self, article):
"""Sub-classes must override this method and tell whether it can parse the given article.
:param article: article
:return: True if the feed parser can parse, False otherwise.
:rtype: bool
"""
raise NotImplementedError()
@abstractmethod
def parse(self, article, provider=None):
"""Parse the given article and extracts the relevant elements/attributes values from the given article.
:param article: XML String to parse
:type article: str
:param provider: Ingest Provider Details, defaults to None
:type provider: dict having properties defined in
:py:class: `superdesk.io.ingest_provider_model.IngestProviderResource`
:return: parsed data as dict.
:rtype: dict having properties defined in :py:mod: `superdesk.metadata.item`
"""
raise NotImplementedError()
def set_dateline(self, item, city=None, text=None):
"""Sets the 'dateline' to the article identified by item.
If city is passed then the system checks if city is available in Cities collection.
If city is not found in Cities collection then dateline's located is set with default values.
:param item: article.
:type item: dict
:param city: Name of the city, if passed the system will search in Cities collection.
:type city: str
:param text: dateline in full. For example, "STOCKHOLM, Aug 29, 2014"
:type text: str
"""
item.setdefault('dateline', {})
if city:
cities = app.locators.find_cities()
located = [c for c in cities if c['city'] == city]
item['dateline']['located'] = located[0] if len(located) > 0 else {'city_code': city, 'city': city,
'tz': 'UTC', 'dateline': 'city'}
if text:
item['dateline']['text'] = text
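    # Illustrative result (values are assumptions): after
    #   set_dateline(item, city='Sydney', text='SYDNEY, Aug 29, 2014')
    # item['dateline'] is expected to look like
    #   {'located': {'city': 'Sydney', ...}, 'text': 'SYDNEY, Aug 29, 2014'}
    # with 'located' falling back to a UTC stub when the city is not in the Cities collection.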
def map_priority(self, source_priority):
"""
Maps the source priority to superdesk priority
:param source_priority:
:type source_priority: str
:return: priority of the item
        :rtype: int
"""
if source_priority and source_priority.isdigit():
if int(source_priority) in Priority.values():
return int(source_priority)
return Priority.Ordinary.value
class XMLFeedParser(FeedParser, metaclass=ABCMeta):
"""
Base class for Feed Parsers which can parse the XML Content.
"""
def __init__(self):
self.root = None
self.metadata_mapping = None
def _parse_mapping(self, value):
if isinstance(value, dict):
if 'default_attr' in value:
if 'default' in value:
logger.error("default and default_attr can't be used at the same time,"
"only default will be used ({})".format(self.__class__))
if 'xpath':
if '/' not in 'xpath':
logger.error("default_attr can be used for simple child element ({})".format(self.__class__))
else:
logger.error("xpath is needed when default_attr is used ({})".format(self.__class__))
if 'callback' in value and 'list' in value:
del value['list']
logger.error("list can't ve used with callback ({})".format(self.__class__))
return value
elif isinstance(value, str):
if not value:
return {}
return {'xpath': value}
elif callable(value):
return {'callback': value}
else:
logger.warn("Can't parse mapping value {}, ignoring it".format(value))
return {}
def _generate_mapping(self, setting_param_name):
"""Generate self.metadata_mapping according to available mappings.
The following mappings are used in this order (last is more important):
- self.default_mapping
- self.MAPPING, intended for subclasses
- [setting_param_name] dictionary which can be put in settings
        If a value is a non-empty string, it is an xpath; @attribute can be used as the last path component.
        If the value is an empty string/dict, the key will be ignored.
        If the value is a callable, it will be executed with the nitf Element as argument; its return value will be used.
        If a dictionary is used as value, the following keys can be used:
            xpath: path to the element
            callback: callback executed with the nitf Element as argument; its return value will be used
            default: value to use if the element/attribute doesn't exist (default: don't set the key)
            list: a bool which indicates whether a list is expected
                if False (default), only the first value is used
            filter: callable to be used with the found element/value
                the value returned by the callable will be used
                if None is returned, the value will be ignored
                In case of multiple values (i.e. if "list" is set), filter is called on each item
            default_attr: value to use if the element exists but the attribute is missing
                this actually works for all values: if the value is not found, the parent element is checked
                and default_attr is used only if the parent element exists
            key_hook: a callable which stores the resulting value in the item itself,
                useful for specific behaviours when several values go to the same key
                the callable will get the item and the value as arguments.
            update: a bool which indicates that the default mapping must be updated instead of overwritten
        Note the difference between using a callable directly and "filter" in a dict:
            the former gets the root element and can be skipped with SkipValue, while the
            latter gets an element/value found with xpath.
        An illustrative mapping sketch is given in the comment block after this method.
"""
try:
class_mapping = self.MAPPING
except AttributeError:
class_mapping = {}
if setting_param_name is not None:
settings_mapping = getattr(superdesk.config, setting_param_name)
if settings_mapping is None:
logging.info("No mapping found in settings for NITF parser, using default one")
settings_mapping = {}
else:
settings_mapping = {}
mapping = self.metadata_mapping = OrderedDict()
for source_mapping in (self.default_mapping, class_mapping, settings_mapping):
for key, value in source_mapping.items():
key_mapping = self._parse_mapping(value)
if key_mapping.get('update', False) and key in mapping:
mapping[key].update(key_mapping)
else:
mapping[key] = key_mapping
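    # Illustrative mapping sketch referenced in the _generate_mapping docstring above
    # (class name, xpaths, filter and defaults are assumptions, not part of this module):
    #
    #   class MyNITFLikeParser(XMLFeedParser):
    #       default_mapping = {}
    #       MAPPING = {
    #           'headline': 'body.head/hedline/hl1',
    #           'priority': {'xpath': 'head/meta[@name="priority"]/@content',
    #                        'default': 5,
    #                        'filter': int},
    #           'keywords': {'xpath': 'head/docdata/key-list/keyword/@key',
    #                        'list': True},
    #       }
    #
    # A concrete subclass (with can_parse()/parse() implemented) could then call
    # self.do_mapping(item, xml_root) to fill `item` from the parsed XML.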
def do_mapping(self, item, item_xml, setting_param_name=None, namespaces=None):
"""Apply mapping to item's XML content to get article metadata
mapping is generated by self._generate_mapping
:param item: dictionary to fill with item metadata
:type item: dict
:param item_xml: XML element to parse
:type item_xml: lxml.etree.Element
:param setting_param_name: name of the settings attribute containing the mapping
:type setting_param_name: str
:type setting_param_name: NoneType
:param namespaces: namespaces map to use with lxml methods
:type namespaces: dict
:type namespaces: NoneType
"""
if self.metadata_mapping is None:
self._generate_mapping(setting_param_name)
for key, mapping in self.metadata_mapping.items():
if not mapping:
# key is ignored
continue
try:
xpath = mapping['xpath']
except KeyError:
# no xpath, we must have a callable
try:
values = [mapping['callback'](item_xml)]
except KeyError:
logging.warn("invalid mapping for key {}, ignoring it".format(key))
continue
except SkipValue:
continue
list_ = False
else:
values = item_xml.xpath(xpath, namespaces=namespaces)
list_ = mapping.get('list', False)
if not list_:
if isinstance(values, list):
values = values[:1]
else:
# result was not a list, can happen if a function
# has been used
values = [values]
if not values:
# nothing found, we check default
try:
values = [mapping['default']]
except KeyError:
if 'default_attr' in mapping:
parent = item_xml.xpath(xpath[:xpath.rfind('/')], namespaces=namespaces)
if parent:
# default_attr is only used when there is a parent element
values = [mapping['default_attr']]
else:
continue
else:
# if there is not default value we skip the key
continue
else:
for idx, current_value in enumerate(values):
if isinstance(current_value, etree._Element):
# do we want a filter or the content?
try:
# filter
filter_cb = mapping['filter']
except KeyError:
# content
values[idx] = ''.join(current_value.itertext())
else:
values[idx] = filter_cb(current_value)
else:
if 'filter' in mapping:
values[idx] = mapping['filter'](current_value)
if None in values:
# filter can return None to skip a value
values = [v for v in values if v is not None]
if not values and not list_:
continue
value = values if list_ else values[0]
if 'key_hook' in mapping:
mapping['key_hook'](item, value)
else:
item[key] = value
return item
def qname(self, tag, ns=None):
"""
Return the Qualified Name of given XML tag.
:param tag: XML Tag
:type tag: str
:param ns: Namespace to be used for generating Qualified Name, defaults to None.
:type ns: str
:return: Qualified Name of tag
:rtype: str
"""
if ns is None:
ns = self.root.tag.rsplit('}')[0].lstrip('{')
elif ns is not None and ns == 'xml':
ns = 'http://www.w3.org/XML/1998/namespace'
return str(sd_etree.QName(ns, tag))
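    # For example (the NITF namespace URI below is only an assumption),
    #   qname('hl1', 'http://iptc.org/std/NITF/2006-10-18/')
    # returns '{http://iptc.org/std/NITF/2006-10-18/}hl1', i.e. lxml's Clark notation.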
class FileFeedParser(FeedParser, metaclass=ABCMeta):
"""
Base class for Feed Parsers which can parse the content in a file.
"""
pass
class EmailFeedParser(FeedParser, metaclass=ABCMeta):
"""
Base class for Feed Parsers which can parse email message.
"""
pass
# must be imported for registration
from superdesk.io.feed_parsers.anpa import ANPAFeedParser # NOQA
from superdesk.io.feed_parsers.iptc7901 import IPTC7901FeedParser # NOQA
from superdesk.io.feed_parsers.newsml_1_2 import NewsMLOneFeedParser # NOQA
from superdesk.io.feed_parsers.newsml_2_0 import NewsMLTwoFeedParser # NOQA
from superdesk.io.feed_parsers.nitf import NITFFeedParser # NOQA
from superdesk.io.feed_parsers.rfc822 import EMailRFC822FeedParser # NOQA
from superdesk.io.feed_parsers.wenn_parser import WENNFeedParser # NOQA
from superdesk.io.feed_parsers.dpa_iptc7901 import DPAIPTC7901FeedParser # NOQA
from superdesk.io.feed_parsers.afp_newsml_1_2 import AFPNewsMLOneFeedParser # NOQA
from superdesk.io.feed_parsers.scoop_newsml_2_0 import ScoopNewsMLTwoFeedParser # NOQA
from superdesk.io.feed_parsers.ap_anpa import AP_ANPAFeedParser # NOQA
from superdesk.io.feed_parsers.pa_nitf import PAFeedParser # NOQA
from superdesk.io.feed_parsers.efe_nitf import EFEFeedParser # NOQA
from superdesk.io.feed_parsers.wordpress_wxr import WPWXRFeedParser # NOQA
from superdesk.io.feed_parsers.ninjs import NINJSFeedParser # NOQA
from superdesk.io.feed_parsers.stt_newsml import STTNewsMLFeedParser # NOQA
from superdesk.io.feed_parsers.ritzau import RitzauFeedParser # NOQA
from superdesk.io.feed_parsers.image_iptc import ImageIPTCFeedParser # NOQA
from superdesk.io.feed_parsers.ana_mpe_newsml import ANANewsMLOneFeedParser # NOQA
from superdesk.io.feed_parsers.bbc_ninjs import BBCNINJSFeedParser # NOQA
|
mugurrus/superdesk-core
|
superdesk/io/feed_parsers/__init__.py
|
Python
|
agpl-3.0
| 14,119 | 0.002408 |
__version__ = '0.2.0'
class Preferences(object):
"""
Placeholder class to which preferences properties are added
dynamically through a signal.
See behaviours.Preferences and behaviours.preferences_class_prepared
"""
pass
preferences = Preferences()
|
DjenieLabs/django-multisites-utils
|
multisitesutils/__init__.py
|
Python
|
bsd-3-clause
| 276 | 0.003623 |
from __future__ import absolute_import
import logging
import os
import sys
import datetime
import psutil
from six import StringIO
from twisted.web import http, resource
from Tribler.Core.Utilities.instrumentation import WatchDog
import Tribler.Core.Utilities.json_util as json
HAS_MELIAE = True
try:
from meliae import scanner
except ImportError:
HAS_MELIAE = False
class MemoryDumpBuffer(StringIO):
"""
Meliae expects its file handle to support write(), flush() and __call__().
The StringIO class does not support __call__(), therefore we provide this subclass.
"""
def __call__(self, s):
StringIO.write(self, s)
class DebugEndpoint(resource.Resource):
"""
This endpoint is responsible for handing requests regarding debug information in Tribler.
"""
def __init__(self, session):
resource.Resource.__init__(self)
child_handler_dict = {"circuits": DebugCircuitsEndpoint, "open_files": DebugOpenFilesEndpoint,
"open_sockets": DebugOpenSocketsEndpoint, "threads": DebugThreadsEndpoint,
"cpu": DebugCPUEndpoint, "memory": DebugMemoryEndpoint,
"log": DebugLogEndpoint, "profiler": DebugProfilerEndpoint}
for path, child_cls in child_handler_dict.iteritems():
self.putChild(path, child_cls(session))
class DebugCircuitsEndpoint(resource.Resource):
"""
This class handles requests regarding the tunnel community debug information.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
self.putChild("slots", DebugCircuitSlotsEndpoint(session))
def render_GET(self, request):
"""
.. http:get:: /debug/circuits
A GET request to this endpoint returns information about the built circuits in the tunnel community.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/circuits
**Example response**:
.. sourcecode:: javascript
{
"circuits": [{
"id": 1234,
"state": "EXTENDING",
"goal_hops": 4,
"bytes_up": 45,
"bytes_down": 49,
"created": 1468176257,
"hops": [{
"host": "unknown"
}, {
"host": "39.95.147.20:8965"
}],
...
}, ...]
}
"""
tunnel_community = self.session.lm.tunnel_community
if not tunnel_community:
request.setResponseCode(http.NOT_FOUND)
return json.dumps({"error": "tunnel community not found"})
circuits_json = []
for circuit_id, circuit in tunnel_community.circuits.iteritems():
item = {'id': circuit_id, 'state': str(circuit.state), 'goal_hops': circuit.goal_hops,
'bytes_up': circuit.bytes_up, 'bytes_down': circuit.bytes_down, 'created': circuit.creation_time}
hops_array = []
for hop in circuit.hops:
hops_array.append({'host': 'unknown' if 'UNKNOWN HOST' in hop.host else '%s:%s' % (hop.host, hop.port)})
item['hops'] = hops_array
circuits_json.append(item)
return json.dumps({'circuits': circuits_json})
class DebugCircuitSlotsEndpoint(resource.Resource):
"""
This class handles requests for information about slots in the tunnel overlay.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/circuits/slots
A GET request to this endpoint returns information about the slots in the tunnel overlay.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/circuits/slots
**Example response**:
.. sourcecode:: javascript
            {
                "slots": {
                    "random": [...],
                    "competing": [...]
                }
            }
"""
return json.dumps({
"slots": {
"random": self.session.lm.tunnel_community.random_slots,
"competing": self.session.lm.tunnel_community.competing_slots
}
})
class DebugOpenFilesEndpoint(resource.Resource):
"""
This class handles request for information about open files.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/open_files
A GET request to this endpoint returns information about files opened by Tribler.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/open_files
**Example response**:
.. sourcecode:: javascript
{
"open_files": [{
"path": "path/to/open/file.txt",
"fd": 33,
}, ...]
}
"""
my_process = psutil.Process()
return json.dumps({
"open_files": [{"path": open_file.path, "fd": open_file.fd} for open_file in my_process.open_files()]})
class DebugOpenSocketsEndpoint(resource.Resource):
"""
This class handles request for information about open sockets.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/open_sockets
A GET request to this endpoint returns information about open sockets.
**Example request**:
.. sourcecode:: none
                curl -X GET http://localhost:8085/debug/open_sockets
**Example response**:
.. sourcecode:: javascript
{
"open_sockets": [{
"family": 2,
"status": "ESTABLISHED",
"laddr": "0.0.0.0:0",
"raddr": "0.0.0.0:0",
"type": 30
}, ...]
}
"""
my_process = psutil.Process()
sockets = []
for open_socket in my_process.connections():
sockets.append({
"family": open_socket.family,
"status": open_socket.status,
"laddr": ("%s:%d" % open_socket.laddr) if open_socket.laddr else "-",
"raddr": ("%s:%d" % open_socket.raddr) if open_socket.raddr else "-",
"type": open_socket.type
})
return json.dumps({"open_sockets": sockets})
class DebugThreadsEndpoint(resource.Resource):
"""
This class handles request for information about threads.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/threads
A GET request to this endpoint returns information about running threads.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/threads
**Example response**:
.. sourcecode:: javascript
{
"threads": [{
"thread_id": 123456,
"thread_name": "my_thread",
"frames": ["my_frame", ...]
}, ...]
}
"""
watchdog = WatchDog()
return json.dumps({"threads": watchdog.get_threads_info()})
class DebugCPUEndpoint(resource.Resource):
"""
This class handles request for information about CPU.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.putChild("history", DebugCPUHistoryEndpoint(session))
class DebugCPUHistoryEndpoint(resource.Resource):
"""
This class handles request for information about CPU usage history.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/cpu/history
A GET request to this endpoint returns information about CPU usage history in the form of a list.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/cpu/history
**Example response**:
.. sourcecode:: javascript
{
"cpu_history": [{
"time": 1504015291214,
"cpu": 3.4,
}, ...]
}
"""
history = self.session.lm.resource_monitor.get_cpu_history_dict() if self.session.lm.resource_monitor else {}
return json.dumps({"cpu_history": history})
class DebugMemoryEndpoint(resource.Resource):
"""
This class handles request for information about memory.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.putChild("history", DebugMemoryHistoryEndpoint(session))
if HAS_MELIAE:
self.putChild("dump", DebugMemoryDumpEndpoint(session))
class DebugMemoryHistoryEndpoint(resource.Resource):
"""
This class handles request for information about memory usage history.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/memory/history
A GET request to this endpoint returns information about memory usage history in the form of a list.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/memory/history
**Example response**:
.. sourcecode:: javascript
{
"memory_history": [{
"time": 1504015291214,
"mem": 324324,
}, ...]
}
"""
history = self.session.lm.resource_monitor.get_memory_history_dict() if self.session.lm.resource_monitor else {}
return json.dumps({"memory_history": history})
class DebugMemoryDumpEndpoint(resource.Resource):
"""
This class handles request for dumping memory contents.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/memory/dump
A GET request to this endpoint returns a Meliae-compatible dump of the memory contents.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/memory/dump
**Example response**:
The content of the memory dump file.
"""
content = ""
if sys.platform == "win32":
# On Windows meliae (especially older versions) segfault on writing to file
dump_buffer = MemoryDumpBuffer()
try:
scanner.dump_all_objects(dump_buffer)
except OverflowError as e:
# https://bugs.launchpad.net/meliae/+bug/569947
logging.error("meliae dump failed (your version may be too old): %s", str(e))
content = dump_buffer.getvalue()
dump_buffer.close()
else:
# On other platforms, simply writing to file is much faster
dump_file_path = os.path.join(self.session.config.get_state_dir(), 'memory_dump.json')
scanner.dump_all_objects(dump_file_path)
with open(dump_file_path, 'r') as dump_file:
content = dump_file.read()
date_str = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
request.setHeader(b'content-type', 'application/json')
request.setHeader(b'Content-Disposition', 'attachment; filename=tribler_memory_dump_%s.json' % date_str)
return content
class DebugLogEndpoint(resource.Resource):
"""
This class handles the request for displaying the logs.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/log?process=<core|gui>&max_lines=<max_lines>
A GET request to this endpoint returns a json with content of core or gui log file & max_lines requested
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/log?process=core&max_lines=5
**Example response**:
A JSON with content of the log file & max_lines requested, for eg.
{
"max_lines" : 5,
"content" :"INFO 1506675301.76 sqlitecachedb:181 Reading database version...
INFO 1506675301.76 sqlitecachedb:185 Current database version is 29
INFO 1506675301.76 sqlitecachedb:203 Beginning the first transaction...
INFO 1506675301.76 upgrade:93 tribler is in the latest version,...
INFO 1506675302.08 LaunchManyCore:254 lmc: Starting Dispersy..."
}
"""
# First, flush all the logs to make sure it is written to file
for handler in logging.getLogger().handlers:
handler.flush()
# Get the location of log file
        param_process = request.args['process'][0] if request.args.get('process') else 'core'
log_file_name = os.path.join(self.session.config.get_log_dir(), 'tribler-%s-info.log' % param_process)
# Default response
response = {'content': '', 'max_lines': 0}
# Check if log file exists and return last requested 'max_lines' of log
if os.path.exists(log_file_name):
try:
max_lines = int(request.args['max_lines'][0])
with open(log_file_name, 'r') as log_file:
response['content'] = self.tail(log_file, max_lines)
response['max_lines'] = max_lines
            except (KeyError, ValueError):  # missing or non-integer max_lines
with open(log_file_name, 'r') as log_file:
response['content'] = self.tail(log_file, 100) # default 100 lines
response['max_lines'] = 0
return json.dumps(response)
def tail(self, file_handler, lines=1):
"""Tail a file and get X lines from the end"""
# place holder for the lines found
lines_found = []
byte_buffer = 1024
# block counter will be multiplied by buffer
# to get the block size from the end
block_counter = -1
# loop until we find X lines
while len(lines_found) < lines:
try:
file_handler.seek(block_counter * byte_buffer, os.SEEK_END)
except IOError: # either file is too small, or too many lines requested
file_handler.seek(0)
lines_found = file_handler.readlines()
break
lines_found = file_handler.readlines()
# we found enough lines, get out
if len(lines_found) > lines:
break
# decrement the block counter to get the
# next X bytes
block_counter -= 1
return ''.join(lines_found[-lines:])
class DebugProfilerEndpoint(resource.Resource):
"""
This class handles requests for the profiler.
"""
def __init__(self, session):
resource.Resource.__init__(self)
self.session = session
def render_GET(self, request):
"""
.. http:get:: /debug/profiler
A GET request to this endpoint returns information about the state of the profiler.
This state is either STARTED or STOPPED.
**Example request**:
.. sourcecode:: none
curl -X GET http://localhost:8085/debug/profiler
**Example response**:
.. sourcecode:: javascript
{
"state": "STARTED"
}
"""
monitor_enabled = self.session.config.get_resource_monitor_enabled()
state = "STARTED" if (monitor_enabled and self.session.lm.resource_monitor.profiler_running) else "STOPPED"
return json.dumps({"state": state})
def render_PUT(self, request):
"""
.. http:put:: /debug/profiler
A PUT request to this endpoint starts the profiler.
**Example request**:
.. sourcecode:: none
curl -X PUT http://localhost:8085/debug/profiler
**Example response**:
.. sourcecode:: javascript
{
"success": "true"
}
"""
self.session.lm.resource_monitor.start_profiler()
return json.dumps({"success": True})
def render_DELETE(self, request):
"""
.. http:delete:: /debug/profiler
            A DELETE request to this endpoint stops the profiler.
**Example request**:
.. sourcecode:: none
curl -X DELETE http://localhost:8085/debug/profiler
**Example response**:
.. sourcecode:: javascript
{
"success": "true"
}
"""
file_path = self.session.lm.resource_monitor.stop_profiler()
return json.dumps({"success": True, "profiler_file": file_path})
|
Captain-Coder/tribler
|
Tribler/Core/Modules/restapi/debug_endpoint.py
|
Python
|
lgpl-3.0
| 18,160 | 0.002203 |
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy.testing import (
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
TestCase, run_module_suite, assert_allclose, assert_equal, assert_,
assert_raises)
from scipy.interpolate import (
KroghInterpolator, krogh_interpolate,
BarycentricInterpolator, barycentric_interpolate,
approximate_taylor_polynomial, pchip, PchipInterpolator,
pchip_interpolate, Akima1DInterpolator, CubicSpline, make_interp_spline)
from scipy._lib.six import xrange
def check_shape(interpolator_cls, x_shape, y_shape, deriv_shape=None, axis=0,
extra_args={}):
np.random.seed(1234)
x = [-1, 0, 1, 2, 3, 4]
s = list(range(1, len(y_shape)+1))
s.insert(axis % (len(y_shape)+1), 0)
y = np.random.rand(*((6,) + y_shape)).transpose(s)
# Cython code chokes on y.shape = (0, 3) etc, skip them
if y.size == 0:
return
xi = np.zeros(x_shape)
yi = interpolator_cls(x, y, axis=axis, **extra_args)(xi)
target_shape = ((deriv_shape or ()) + y.shape[:axis]
+ x_shape + y.shape[axis:][1:])
assert_equal(yi.shape, target_shape)
# check it works also with lists
if x_shape and y.size > 0:
interpolator_cls(list(x), list(y), axis=axis, **extra_args)(list(xi))
# check also values
if xi.size > 0 and deriv_shape is None:
bs_shape = y.shape[:axis] + (1,)*len(x_shape) + y.shape[axis:][1:]
yv = y[((slice(None,),)*(axis % y.ndim)) + (1,)]
yv = yv.reshape(bs_shape)
yi, y = np.broadcast_arrays(yi, yv)
assert_allclose(yi, y)
SHAPES = [(), (0,), (1,), (6, 2, 5)]
def test_shapes():
def spl_interp(x, y, axis):
return make_interp_spline(x, y, axis=axis)
for ip in [KroghInterpolator, BarycentricInterpolator, pchip,
Akima1DInterpolator, CubicSpline, spl_interp]:
for s1 in SHAPES:
for s2 in SHAPES:
for axis in range(-len(s2), len(s2)):
if ip != CubicSpline:
yield check_shape, ip, s1, s2, None, axis
else:
for bc in ['natural', 'clamped']:
extra = {'bc_type': bc}
yield check_shape, ip, s1, s2, None, axis, extra
def test_derivs_shapes():
def krogh_derivs(x, y, axis=0):
return KroghInterpolator(x, y, axis).derivatives
for s1 in SHAPES:
for s2 in SHAPES:
for axis in range(-len(s2), len(s2)):
yield check_shape, krogh_derivs, s1, s2, (6,), axis
def test_deriv_shapes():
def krogh_deriv(x, y, axis=0):
return KroghInterpolator(x, y, axis).derivative
def pchip_deriv(x, y, axis=0):
return pchip(x, y, axis).derivative()
def pchip_deriv2(x, y, axis=0):
return pchip(x, y, axis).derivative(2)
def pchip_antideriv(x, y, axis=0):
        return pchip(x, y, axis).antiderivative()
def pchip_antideriv2(x, y, axis=0):
        return pchip(x, y, axis).antiderivative(2)
def pchip_deriv_inplace(x, y, axis=0):
class P(PchipInterpolator):
def __call__(self, x):
return PchipInterpolator.__call__(self, x, 1)
pass
return P(x, y, axis)
def akima_deriv(x, y, axis=0):
return Akima1DInterpolator(x, y, axis).derivative()
def akima_antideriv(x, y, axis=0):
return Akima1DInterpolator(x, y, axis).antiderivative()
def cspline_deriv(x, y, axis=0):
return CubicSpline(x, y, axis).derivative()
def cspline_antideriv(x, y, axis=0):
return CubicSpline(x, y, axis).antiderivative()
def bspl_deriv(x, y, axis=0):
return make_interp_spline(x, y, axis=axis).derivative()
def bspl_antideriv(x, y, axis=0):
return make_interp_spline(x, y, axis=axis).antiderivative()
for ip in [krogh_deriv, pchip_deriv, pchip_deriv2, pchip_deriv_inplace,
pchip_antideriv, pchip_antideriv2, akima_deriv, akima_antideriv,
cspline_deriv, cspline_antideriv, bspl_deriv, bspl_antideriv]:
for s1 in SHAPES:
for s2 in SHAPES:
for axis in range(-len(s2), len(s2)):
yield check_shape, ip, s1, s2, (), axis
def _check_complex(ip):
x = [1, 2, 3, 4]
y = [1, 2, 1j, 3]
p = ip(x, y)
assert_allclose(y, p(x))
def test_complex():
for ip in [KroghInterpolator, BarycentricInterpolator, pchip, CubicSpline]:
yield _check_complex, ip
class CheckKrogh(TestCase):
def setUp(self):
self.true_poly = np.poly1d([-2,3,1,5,-4])
self.test_xs = np.linspace(-1,1,100)
self.xs = np.linspace(-1,1,5)
self.ys = self.true_poly(self.xs)
def test_lagrange(self):
P = KroghInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_scalar(self):
P = KroghInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(7),P(7))
assert_almost_equal(self.true_poly(np.array(7)), P(np.array(7)))
def test_derivatives(self):
P = KroghInterpolator(self.xs,self.ys)
D = P.derivatives(self.test_xs)
for i in xrange(D.shape[0]):
assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
D[i])
def test_low_derivatives(self):
P = KroghInterpolator(self.xs,self.ys)
D = P.derivatives(self.test_xs,len(self.xs)+2)
for i in xrange(D.shape[0]):
assert_almost_equal(self.true_poly.deriv(i)(self.test_xs),
D[i])
def test_derivative(self):
P = KroghInterpolator(self.xs,self.ys)
m = 10
r = P.derivatives(self.test_xs,m)
for i in xrange(m):
assert_almost_equal(P.derivative(self.test_xs,i),r[i])
def test_high_derivative(self):
P = KroghInterpolator(self.xs,self.ys)
for i in xrange(len(self.xs),2*len(self.xs)):
assert_almost_equal(P.derivative(self.test_xs,i),
np.zeros(len(self.test_xs)))
def test_hermite(self):
xs = [0,0,0,1,1,1,2]
ys = [self.true_poly(0),
self.true_poly.deriv(1)(0),
self.true_poly.deriv(2)(0),
self.true_poly(1),
self.true_poly.deriv(1)(1),
self.true_poly.deriv(2)(1),
self.true_poly(2)]
        P = KroghInterpolator(xs,ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_vector(self):
xs = [0, 1, 2]
ys = np.array([[0,1],[1,0],[2,1]])
P = KroghInterpolator(xs,ys)
Pi = [KroghInterpolator(xs,ys[:,i]) for i in xrange(ys.shape[1])]
test_xs = np.linspace(-1,3,100)
assert_almost_equal(P(test_xs),
np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1))
assert_almost_equal(P.derivatives(test_xs),
np.transpose(np.asarray([p.derivatives(test_xs) for p in Pi]),
(1,2,0)))
def test_empty(self):
P = KroghInterpolator(self.xs,self.ys)
assert_array_equal(P([]), [])
def test_shapes_scalarvalue(self):
P = KroghInterpolator(self.xs,self.ys)
assert_array_equal(np.shape(P(0)), ())
assert_array_equal(np.shape(P(np.array(0))), ())
assert_array_equal(np.shape(P([0])), (1,))
assert_array_equal(np.shape(P([0,1])), (2,))
def test_shapes_scalarvalue_derivative(self):
P = KroghInterpolator(self.xs,self.ys)
n = P.n
assert_array_equal(np.shape(P.derivatives(0)), (n,))
assert_array_equal(np.shape(P.derivatives(np.array(0))), (n,))
assert_array_equal(np.shape(P.derivatives([0])), (n,1))
assert_array_equal(np.shape(P.derivatives([0,1])), (n,2))
def test_shapes_vectorvalue(self):
P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
assert_array_equal(np.shape(P(0)), (3,))
assert_array_equal(np.shape(P([0])), (1,3))
assert_array_equal(np.shape(P([0,1])), (2,3))
def test_shapes_1d_vectorvalue(self):
P = KroghInterpolator(self.xs,np.outer(self.ys,[1]))
assert_array_equal(np.shape(P(0)), (1,))
assert_array_equal(np.shape(P([0])), (1,1))
assert_array_equal(np.shape(P([0,1])), (2,1))
def test_shapes_vectorvalue_derivative(self):
P = KroghInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
n = P.n
assert_array_equal(np.shape(P.derivatives(0)), (n,3))
assert_array_equal(np.shape(P.derivatives([0])), (n,1,3))
assert_array_equal(np.shape(P.derivatives([0,1])), (n,2,3))
def test_wrapper(self):
P = KroghInterpolator(self.xs, self.ys)
ki = krogh_interpolate
assert_almost_equal(P(self.test_xs), ki(self.xs, self.ys, self.test_xs))
assert_almost_equal(P.derivative(self.test_xs, 2),
ki(self.xs, self.ys, self.test_xs, der=2))
assert_almost_equal(P.derivatives(self.test_xs, 2),
ki(self.xs, self.ys, self.test_xs, der=[0, 1]))
def test_int_inputs(self):
# Check input args are cast correctly to floats, gh-3669
x = [0, 234, 468, 702, 936, 1170, 1404, 2340, 3744, 6084, 8424,
13104, 60000]
offset_cdf = np.array([-0.95, -0.86114777, -0.8147762, -0.64072425,
-0.48002351, -0.34925329, -0.26503107,
-0.13148093, -0.12988833, -0.12979296,
-0.12973574, -0.08582937, 0.05])
f = KroghInterpolator(x, offset_cdf)
assert_allclose(abs((f(x) - offset_cdf) / f.derivative(x, 1)),
0, atol=1e-10)
def test_derivatives_complex(self):
# regression test for gh-7381: krogh.derivatives(0) fails complex y
x, y = np.array([-1, -1, 0, 1, 1]), np.array([1, 1.0j, 0, -1, 1.0j])
func = KroghInterpolator(x, y)
cmplx = func.derivatives(0)
cmplx2 = (KroghInterpolator(x, y.real).derivatives(0) +
1j*KroghInterpolator(x, y.imag).derivatives(0))
assert_allclose(cmplx, cmplx2, atol=1e-15)
class CheckTaylor(TestCase):
def test_exponential(self):
degree = 5
p = approximate_taylor_polynomial(np.exp, 0, degree, 1, 15)
for i in xrange(degree+1):
assert_almost_equal(p(0),1)
p = p.deriv()
assert_almost_equal(p(0),0)
class CheckBarycentric(TestCase):
def setUp(self):
self.true_poly = np.poly1d([-2,3,1,5,-4])
self.test_xs = np.linspace(-1,1,100)
self.xs = np.linspace(-1,1,5)
self.ys = self.true_poly(self.xs)
def test_lagrange(self):
P = BarycentricInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_scalar(self):
P = BarycentricInterpolator(self.xs,self.ys)
assert_almost_equal(self.true_poly(7),P(7))
assert_almost_equal(self.true_poly(np.array(7)),P(np.array(7)))
def test_delayed(self):
P = BarycentricInterpolator(self.xs)
P.set_yi(self.ys)
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_append(self):
P = BarycentricInterpolator(self.xs[:3],self.ys[:3])
P.add_xi(self.xs[3:],self.ys[3:])
assert_almost_equal(self.true_poly(self.test_xs),P(self.test_xs))
def test_vector(self):
xs = [0, 1, 2]
ys = np.array([[0,1],[1,0],[2,1]])
P = BarycentricInterpolator(xs,ys)
Pi = [BarycentricInterpolator(xs,ys[:,i]) for i in xrange(ys.shape[1])]
test_xs = np.linspace(-1,3,100)
assert_almost_equal(P(test_xs),
np.rollaxis(np.asarray([p(test_xs) for p in Pi]),-1))
def test_shapes_scalarvalue(self):
P = BarycentricInterpolator(self.xs,self.ys)
assert_array_equal(np.shape(P(0)), ())
assert_array_equal(np.shape(P(np.array(0))), ())
assert_array_equal(np.shape(P([0])), (1,))
assert_array_equal(np.shape(P([0,1])), (2,))
def test_shapes_vectorvalue(self):
P = BarycentricInterpolator(self.xs,np.outer(self.ys,np.arange(3)))
assert_array_equal(np.shape(P(0)), (3,))
assert_array_equal(np.shape(P([0])), (1,3))
assert_array_equal(np.shape(P([0,1])), (2,3))
def test_shapes_1d_vectorvalue(self):
P = BarycentricInterpolator(self.xs,np.outer(self.ys,[1]))
assert_array_equal(np.shape(P(0)), (1,))
assert_array_equal(np.shape(P([0])), (1,1))
assert_array_equal(np.shape(P([0,1])), (2,1))
def test_wrapper(self):
P = BarycentricInterpolator(self.xs,self.ys)
assert_almost_equal(P(self.test_xs),barycentric_interpolate(self.xs,self.ys,self.test_xs))
class TestPCHIP(TestCase):
def _make_random(self, npts=20):
np.random.seed(1234)
xi = np.sort(np.random.random(npts))
yi = np.random.random(npts)
return pchip(xi, yi), xi, yi
def test_overshoot(self):
# PCHIP should not overshoot
p, xi, yi = self._make_random()
for i in range(len(xi)-1):
x1, x2 = xi[i], xi[i+1]
y1, y2 = yi[i], yi[i+1]
if y1 > y2:
y1, y2 = y2, y1
xp = np.linspace(x1, x2, 10)
yp = p(xp)
assert_(((y1 <= yp) & (yp <= y2)).all())
def test_monotone(self):
        # PCHIP should preserve monotonicity
p, xi, yi = self._make_random()
for i in range(len(xi)-1):
x1, x2 = xi[i], xi[i+1]
y1, y2 = yi[i], yi[i+1]
xp = np.linspace(x1, x2, 10)
yp = p(xp)
assert_(((y2-y1) * (yp[1:] - yp[:1]) > 0).all())
def test_cast(self):
# regression test for integer input data, see gh-3453
data = np.array([[0, 4, 12, 27, 47, 60, 79, 87, 99, 100],
[-33, -33, -19, -2, 12, 26, 38, 45, 53, 55]])
xx = np.arange(100)
curve = pchip(data[0], data[1])(xx)
data1 = data * 1.0
curve1 = pchip(data1[0], data1[1])(xx)
assert_allclose(curve, curve1, atol=1e-14, rtol=1e-14)
def test_nag(self):
# Example from NAG C implementation,
# http://nag.com/numeric/cl/nagdoc_cl25/html/e01/e01bec.html
# suggested in gh-5326 as a smoke test for the way the derivatives
# are computed (see also gh-3453)
from scipy._lib.six import StringIO
dataStr = '''
7.99 0.00000E+0
8.09 0.27643E-4
8.19 0.43750E-1
8.70 0.16918E+0
9.20 0.46943E+0
10.00 0.94374E+0
12.00 0.99864E+0
15.00 0.99992E+0
20.00 0.99999E+0
'''
data = np.loadtxt(StringIO(dataStr))
pch = pchip(data[:,0], data[:,1])
resultStr = '''
7.9900 0.0000
9.1910 0.4640
10.3920 0.9645
11.5930 0.9965
12.7940 0.9992
13.9950 0.9998
15.1960 0.9999
16.3970 1.0000
17.5980 1.0000
18.7990 1.0000
20.0000 1.0000
'''
result = np.loadtxt(StringIO(resultStr))
assert_allclose(result[:,1], pch(result[:,0]), rtol=0., atol=5e-5)
def test_endslopes(self):
# this is a smoke test for gh-3453: PCHIP interpolator should not
# set edge slopes to zero if the data do not suggest zero edge derivatives
x = np.array([0.0, 0.1, 0.25, 0.35])
y1 = np.array([279.35, 0.5e3, 1.0e3, 2.5e3])
y2 = np.array([279.35, 2.5e3, 1.50e3, 1.0e3])
for pp in (pchip(x, y1), pchip(x, y2)):
for t in (x[0], x[-1]):
assert_(pp(t, 1) != 0)
def test_all_zeros(self):
x = np.arange(10)
y = np.zeros_like(x)
# this should work and not generate any warnings
with warnings.catch_warnings():
warnings.filterwarnings('error')
pch = pchip(x, y)
xx = np.linspace(0, 9, 101)
assert_equal(pch(xx), 0.)
def test_two_points(self):
# regression test for gh-6222: pchip([0, 1], [0, 1]) fails because
# it tries to use a three-point scheme to estimate edge derivatives,
# while there are only two points available.
# Instead, it should construct a linear interpolator.
x = np.linspace(0, 1, 11)
p = pchip([0, 1], [0, 2])
assert_allclose(p(x), 2*x, atol=1e-15)
def test_pchip_interpolate(self):
assert_array_almost_equal(
pchip_interpolate([1,2,3], [4,5,6], [0.5], der=1),
[1.])
assert_array_almost_equal(
pchip_interpolate([1,2,3], [4,5,6], [0.5], der=0),
[3.5])
assert_array_almost_equal(
pchip_interpolate([1,2,3], [4,5,6], [0.5], der=[0, 1]),
[[3.5], [1]])
def test_roots(self):
# regression test for gh-6357: .roots method should work
p = pchip([0, 1], [-1, 1])
r = p.roots()
assert_allclose(r, 0.5)
class TestCubicSpline(object):
@staticmethod
def check_correctness(S, bc_start='not-a-knot', bc_end='not-a-knot',
tol=1e-14):
"""Check that spline coefficients satisfy the continuity and boundary
conditions."""
x = S.x
c = S.c
dx = np.diff(x)
dx = dx.reshape([dx.shape[0]] + [1] * (c.ndim - 2))
dxi = dx[:-1]
# Check C2 continuity.
assert_allclose(c[3, 1:], c[0, :-1] * dxi**3 + c[1, :-1] * dxi**2 +
c[2, :-1] * dxi + c[3, :-1], rtol=tol, atol=tol)
assert_allclose(c[2, 1:], 3 * c[0, :-1] * dxi**2 +
2 * c[1, :-1] * dxi + c[2, :-1], rtol=tol, atol=tol)
assert_allclose(c[1, 1:], 3 * c[0, :-1] * dxi + c[1, :-1],
rtol=tol, atol=tol)
# Check that we found a parabola, the third derivative is 0.
if x.size == 3 and bc_start == 'not-a-knot' and bc_end == 'not-a-knot':
assert_allclose(c[0], 0, rtol=tol, atol=tol)
return
# Check periodic boundary conditions.
if bc_start == 'periodic':
assert_allclose(S(x[0], 0), S(x[-1], 0), rtol=tol, atol=tol)
assert_allclose(S(x[0], 1), S(x[-1], 1), rtol=tol, atol=tol)
assert_allclose(S(x[0], 2), S(x[-1], 2), rtol=tol, atol=tol)
return
# Check other boundary conditions.
if bc_start == 'not-a-knot':
if x.size == 2:
slope = (S(x[1]) - S(x[0])) / dx[0]
assert_allclose(S(x[0], 1), slope, rtol=tol, atol=tol)
else:
assert_allclose(c[0, 0], c[0, 1], rtol=tol, atol=tol)
elif bc_start == 'clamped':
assert_allclose(S(x[0], 1), 0, rtol=tol, atol=tol)
elif bc_start == 'natural':
assert_allclose(S(x[0], 2), 0, rtol=tol, atol=tol)
else:
order, value = bc_start
assert_allclose(S(x[0], order), value, rtol=tol, atol=tol)
if bc_end == 'not-a-knot':
if x.size == 2:
slope = (S(x[1]) - S(x[0])) / dx[0]
assert_allclose(S(x[1], 1), slope, rtol=tol, atol=tol)
else:
assert_allclose(c[0, -1], c[0, -2], rtol=tol, atol=tol)
elif bc_end == 'clamped':
assert_allclose(S(x[-1], 1), 0, rtol=tol, atol=tol)
elif bc_end == 'natural':
assert_allclose(S(x[-1], 2), 0, rtol=tol, atol=tol)
else:
order, value = bc_end
assert_allclose(S(x[-1], order), value, rtol=tol, atol=tol)
def check_all_bc(self, x, y, axis):
deriv_shape = list(y.shape)
del deriv_shape[axis]
first_deriv = np.empty(deriv_shape)
first_deriv.fill(2)
second_deriv = np.empty(deriv_shape)
second_deriv.fill(-1)
bc_all = [
'not-a-knot',
'natural',
'clamped',
(1, first_deriv),
(2, second_deriv)
]
for bc in bc_all[:3]:
S = CubicSpline(x, y, axis=axis, bc_type=bc)
self.check_correctness(S, bc, bc)
for bc_start in bc_all:
for bc_end in bc_all:
S = CubicSpline(x, y, axis=axis, bc_type=(bc_start, bc_end))
self.check_correctness(S, bc_start, bc_end, tol=2e-14)
def test_general(self):
x = np.array([-1, 0, 0.5, 2, 4, 4.5, 5.5, 9])
y = np.array([0, -0.5, 2, 3, 2.5, 1, 1, 0.5])
for n in [2, 3, x.size]:
self.check_all_bc(x[:n], y[:n], 0)
Y = np.empty((2, n, 2))
Y[0, :, 0] = y[:n]
Y[0, :, 1] = y[:n] - 1
Y[1, :, 0] = y[:n] + 2
Y[1, :, 1] = y[:n] + 3
self.check_all_bc(x[:n], Y, 1)
def test_periodic(self):
for n in [2, 3, 5]:
x = np.linspace(0, 2 * np.pi, n)
y = np.cos(x)
S = CubicSpline(x, y, bc_type='periodic')
self.check_correctness(S, 'periodic', 'periodic')
Y = np.empty((2, n, 2))
Y[0, :, 0] = y
Y[0, :, 1] = y + 2
Y[1, :, 0] = y - 1
Y[1, :, 1] = y + 5
S = CubicSpline(x, Y, axis=1, bc_type='periodic')
self.check_correctness(S, 'periodic', 'periodic')
def test_periodic_eval(self):
x = np.linspace(0, 2 * np.pi, 10)
y = np.cos(x)
S = CubicSpline(x, y, bc_type='periodic')
assert_almost_equal(S(1), S(1 + 2 * np.pi), decimal=15)
def test_dtypes(self):
x = np.array([0, 1, 2, 3], dtype=int)
y = np.array([-5, 2, 3, 1], dtype=int)
S = CubicSpline(x, y)
self.check_correctness(S)
y = np.array([-1+1j, 0.0, 1-1j, 0.5-1.5j])
S = CubicSpline(x, y)
self.check_correctness(S)
S = CubicSpline(x, x ** 3, bc_type=("natural", (1, 2j)))
self.check_correctness(S, "natural", (1, 2j))
y = np.array([-5, 2, 3, 1])
S = CubicSpline(x, y, bc_type=[(1, 2 + 0.5j), (2, 0.5 - 1j)])
self.check_correctness(S, (1, 2 + 0.5j), (2, 0.5 - 1j))
def test_small_dx(self):
rng = np.random.RandomState(0)
x = np.sort(rng.uniform(size=100))
y = 1e4 + rng.uniform(size=100)
S = CubicSpline(x, y)
self.check_correctness(S, tol=1e-13)
def test_incorrect_inputs(self):
x = np.array([1, 2, 3, 4])
y = np.array([1, 2, 3, 4])
xc = np.array([1 + 1j, 2, 3, 4])
xn = np.array([np.nan, 2, 3, 4])
xo = np.array([2, 1, 3, 4])
yn = np.array([np.nan, 2, 3, 4])
y3 = [1, 2, 3]
x1 = [1]
y1 = [1]
assert_raises(ValueError, CubicSpline, xc, y)
assert_raises(ValueError, CubicSpline, xn, y)
assert_raises(ValueError, CubicSpline, x, yn)
assert_raises(ValueError, CubicSpline, xo, y)
assert_raises(ValueError, CubicSpline, x, y3)
assert_raises(ValueError, CubicSpline, x[:, np.newaxis], y)
assert_raises(ValueError, CubicSpline, x1, y1)
wrong_bc = [('periodic', 'clamped'),
((2, 0), (3, 10)),
((1, 0), ),
(0., 0.),
'not-a-typo']
for bc_type in wrong_bc:
assert_raises(ValueError, CubicSpline, x, y, 0, bc_type, True)
# Shapes mismatch when giving arbitrary derivative values:
Y = np.c_[y, y]
bc1 = ('clamped', (1, 0))
bc2 = ('clamped', (1, [0, 0, 0]))
bc3 = ('clamped', (1, [[0, 0]]))
assert_raises(ValueError, CubicSpline, x, Y, 0, bc1, True)
assert_raises(ValueError, CubicSpline, x, Y, 0, bc2, True)
assert_raises(ValueError, CubicSpline, x, Y, 0, bc3, True)
# periodic condition, y[-1] must be equal to y[0]:
assert_raises(ValueError, CubicSpline, x, y, 0, 'periodic', True)
if __name__ == '__main__':
run_module_suite()
|
pbrod/scipy
|
scipy/interpolate/tests/test_polyint.py
|
Python
|
bsd-3-clause
| 24,329 | 0.005754 |
"""
Test Setup Module -- THIS SHOULD BE REPURPOSED, TRIGGERED BY make-test.sh script
"""
import os
import sys
import time
import json
import inspect
from configparser import ConfigParser
import logging
# aws imports
import boto3
import moto
import pytest
from botocore.exceptions import ClientError, ProfileNotFound
# test imports
sys.path.insert(0, os.path.abspath('../'))
from tests import environment
from keyup.statics import PACKAGE
# global objects
config = ConfigParser()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# set global Autotag-specific vars
account_number = '123456789012'
TestUsers = ('developer1', 'developer2', 'developer3')
setup_profilename = 'default'
# test module globals
base_path = '/tmp/autotag-tests-%s' % time.time()
version = 'testing-' + base_path
test_assets = 'tests/assets'
# set region default
if os.getenv('AWS_DEFAULT_REGION') is None:
default_region = 'us-east-2'
os.environ['AWS_DEFAULT_REGION'] = default_region
else:
default_region = os.getenv('AWS_DEFAULT_REGION')
ami_id = 'ami-redhat7'
min_count = 1
max_count = 2
ec2_size = 't2.micro'
@moto.mock_ec2
def get_regions():
ec2 = boto3.client('ec2')
return [x['RegionName'] for x in ec2.describe_regions()['Regions'] if 'cn' not in x['RegionName']]
@pytest.fixture()
def regionize():
os.environ['AWS_REGION'] = default_region
yield
if default_region is not None:
os.environ['AWS_REGION'] = default_region
else:
del os.environ['AWS_REGION']
@pytest.fixture()
def sts_resource_objects(region=default_region):
session = boto3.Session(profile_name=setup_profilename)
client = session.client('sts', region_name=region)
yield client
moto.mock_sts().stop()
@pytest.fixture()
def iam_resource_objects(region=default_region):
session = boto3.Session(profile_name=setup_profilename)
client = session.client('iam')
yield client
@pytest.fixture()
def return_reference(filename):
with open(test_assets + '/' + filename, 'r') as f1:
f2 = f1.read()
content = json.loads(f2)
yield content
@pytest.fixture()
def import_file_object(filepath):
handle = open(filepath, 'r')
file_obj = handle.read()
return file_obj
def tear_down():
""" Tears down structures setup expressly for testing """
HOME = os.environ['HOME']
awscli = HOME + '/.aws/credentials'
if os.path.isfile(awscli):
config.read(awscli)
for profile in config.sections():
if 'ec2cli-dev1' in profile:
config.pop(profile)
with open(awscli, 'w') as f1:
config.write(f1)
return True
return False
class PreTestSetup():
"""
Sets up default AWS Account with all structures to run
keyup automated testing
"""
def __init__(self, user_list):
self.test_users = user_list
self.policy_arns = []
        # __init__ must not return a value; store the outcome on the instance
        if self.setup_complete(user_list[0]) is False:
            complete1 = self.create_users(user_list)
            complete2 = self.create_policies(user_list)
            complete3 = self.assign_policies(user_list)
            complete4 = self.create_keys(user_list)
            self.result = self.assess_setup(complete1, complete2, complete3, complete4)
        else:
            self.result = True
def setup_complete(self, canary):
""" Determines if setup has occurred """
iam_client = next(iam_resource_objects())
        users = [u['UserName'] for u in iam_client.list_users()['Users']]
        if canary in users:
logger.info('PreTest Setup already completed. Exit setup')
return True
else:
return False
    def create_users(self, users, profile=setup_profilename):
        """
        Setup for successive tests in this module
        """
        try:
            iam_client = next(iam_resource_objects())
            # create one IAM account per test user
            for user in users:
                r = iam_client.create_user(Path='/', UserName=user)
except ClientError as e:
logger.exception(
"%s: Error while creating test user IAM accounts (Code: %s Message: %s)" %
(inspect.stack()[0][3], e.response['Error']['Code'],
e.response['Error']['Message']))
return True
def create_policies(self, users):
""" Create IAM policies for new test users """
iam_client = next(iam_resource_objects())
policy = next(return_reference('iampolicy-AccessKeySelfService.json'))
try:
r = iam_client.create_policy(
PolicyName='iampolicy-AccessKeySelfService',
Path='/',
PolicyDocument=str(policy),
Description='self manage iam access keys'
)
self.policy_arns.append(r['Policy']['Arn'])
except Exception as e:
logger.exception('Error while creating IAM policy')
return False
return True
def assign_policies(self, users):
""" Assign IAM policies to new test users """
iam_client = next(iam_resource_objects())
try:
for user in users:
for arn in self.policy_arns:
r = iam_client.attach_user_policy(UserName=user, PolicyArn=arn)
except ClientError as e:
logger.exception('Error while attaching IAM policy')
return False
return True
def create_keys(self, users):
""" Create initial set of access keys for each test user """
iam_client = next(iam_resource_objects())
HOME = os.environ['HOME']
config.read(HOME + '/.aws/credentials')
# create keys for each user
for user in users:
keys = iam_client.create_access_key(UserName=user)
access_key = keys['AccessKey']['AccessKeyId']
secret_key = keys['AccessKey']['SecretAccessKey']
            # write the new keys to a credentials profile named after the user
            # (the profile naming is an assumption; the original left this step unfinished)
            config[user] = {'aws_access_key_id': access_key,
                            'aws_secret_access_key': secret_key}
        with open(HOME + '/.aws/credentials', 'w') as f1:
            config.write(f1)
        return True
def assess_setup(self, *args):
for arg in args:
if arg is False:
return False
return True
if __name__ == '__main__':
# run setup
    response = PreTestSetup(TestUsers)
    logger.info('End result from PreTestSetup run: %s' % str(response.result))
|
t-stark/ec2cli
|
scripts/pretest_setup.py
|
Python
|
gpl-2.0
| 6,293 | 0.001112 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import redis
import json
class Redis(object):
def __init__(self, host="localhost", port=6379):
self._host = host
self._port = port
self._redis_cursor = None
def conn(self):
if self._redis_cursor is None:
pool = redis.ConnectionPool(host=self._host, port=self._port, db=0)
self._redis_cursor = redis.Redis(connection_pool=pool)
def enqueue(self, qname, data):
self.conn()
self._redis_cursor.rpush(qname, json.dumps(data))
def dequeue(self, qname):
self.conn()
r = self._redis_cursor.blpop(qname)
return json.loads(r[1])
if __name__ == "__main__":
pass
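    # Example usage (sketch -- host, port and queue name are placeholders;
    # a running Redis server is required):
    #   q = Redis(host="localhost", port=6379)
    #   q.enqueue("jobs", {"task": "resize", "id": 42})
    #   print(q.dequeue("jobs"))  # blocks until an item is available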
|
huhuchen/asyncqueue
|
asyncqueue/_redis.py
|
Python
|
mit
| 720 | 0.002778 |
# coding=utf8
import sublime
from .Base import Base
from ...utils import Debug
from ...utils.uiutils import get_prefix
class Outline(Base):
regions = {}
ts_view = None
def __init__(self, t3sviews):
super(Outline, self).__init__('Typescript : Outline View', t3sviews)
# SET TEXT
def set_text(self, edit_token, members, ts_view):
"""
This function takes the tss.js members structure instead of a string.
"""
# this will process the outline, even if the view is closed
self.ts_view = ts_view
if type(members) == list:
self._tssjs_2_outline_format(members)
elif type(members) == str:
self.text = members
super(Outline, self).set_text(edit_token, self.text)
def is_current_ts(self, ts_view):
if ts_view is None or self.ts_view is None:
            return False
return ts_view.id() == self.ts_view.id()
def _tssjs_2_outline_format(self, members):
text = []
line = 0
self.regions = {}
for member in members:
start_line = member['min']['line']
end_line = member['lim']['line']
left = member['min']['character']
right = member['lim']['character']
a = self.ts_view.text_point(start_line-1, left-1)
b = self.ts_view.text_point(end_line-1, right-1)
region = sublime.Region(a, b)
kind = get_prefix(member['loc']['kind'])
container_kind = get_prefix(member['loc']['containerKind'])
if member['loc']['kindModifiers'] != "":
member['loc']['kindModifiers'] = " " + member['loc']['kindModifiers']
if member['loc']['kind'] != 'class' and member['loc']['kind'] != 'interface':
t = "%s %s %s %s" % (kind, member['loc']['kindModifiers'], member['loc']['kind'], member['loc']['name'])
text.append('\n\t')
text.append(t.strip())
line += 1
self.regions[line] = region
else:
t = "%s %s %s %s {" % (container_kind, member['loc']['kindModifiers'], member['loc']['kind'], member['loc']['name'])
if len(text) == 0:
text.append('\n%s\n' % t.strip())
line += 2
self.regions[line - 1] = region
else:
text.append('\n\n}\n\n%s\n' % t.strip())
line += 5
self.regions[line - 1] = region
if len(members) == 0:
text.append("\n\nno members found\n")
self.text = ''.join(text)
is_focusing_ts_view = False
def on_click(self,line):
if self.is_focusing_ts_view:
Debug('focus', 'Outline.on_click: is just focusing other view > ignore')
return
if line in self.regions:
draw = sublime.DRAW_NO_FILL
self.ts_view.add_regions('typescript-definition', [self.regions[line]], 'comment', 'dot', draw)
self._focus_member_in_view(self.regions[line])
def _focus_member_in_view(self, region):
if self.ts_view.is_loading():
return
else:
Debug('focus', "_focus_member_in_view, Region @pos %i" % (region.begin()))
self.is_focusing_ts_view = True
self.ts_view.show(region)
self.ts_view.window().focus_view(self.ts_view)
self.is_focusing_ts_view = False
|
Phaiax/ArcticTypescript
|
lib/display/views/Outline.py
|
Python
|
mit
| 3,505 | 0.003994 |
#!python
import math
import fractions
import pygame
import argparse
import os.path
import sys
import subprocess
import time
from itertools import combinations,islice
from ntracer import NTracer,Material,ImageFormat,Channel,BlockingRenderer,CUBE
from ntracer.pygame_render import PygameRenderer
ROT_SENSITIVITY = 0.005
WHEEL_INCREMENT = 8
def excepthook(type,value,traceback):
if isinstance(value,Exception):
print('error: '+str(value),file=sys.stderr)
else:
sys.__excepthook__(type,value,traceback)
sys.excepthook = excepthook
def schlafli_component(x):
x = x.partition('/')
p = int(x[0],10)
if p < 3: raise argparse.ArgumentTypeError('a component cannot be less than 3')
if not x[2]: return fractions.Fraction(p)
s = int(x[2],10)
if s < 1: raise argparse.ArgumentTypeError('for component p/q: q cannot be less than 1')
if s >= p: raise argparse.ArgumentTypeError('for component p/q: q must be less than p')
if fractions.gcd(s,p) != 1: raise argparse.ArgumentTypeError('for component p/q: p and q must be co-prime')
return fractions.Fraction(p,s)
def positive_int(x):
x = int(x,10)
if x < 1: raise argparse.ArgumentTypeError('a positive number is required')
return x
def screen_size(x):
w,_,h = x.partition('x')
w = int(w,10)
h = int(h,10)
if w < 1 or h < 1: raise argparse.ArgumentTypeError('invalid screen size')
return w,h
def fov_type(x):
x = float(x)
if x <= 0 or x >= 180: raise argparse.ArgumentTypeError('fov must be between 0 and 180 degrees')
return x/180*math.pi
parser = argparse.ArgumentParser(
description='Display a regular polytope given its Schl\u00e4fli symbol.')
parser.add_argument('schlafli',metavar='N',type=schlafli_component,nargs='+',help='the Schl\u00e4fli symbol components')
parser.add_argument('-o','--output',metavar='PATH',help='save an animation to PATH instead of displaying the polytope')
parser.add_argument('-t','--type',metavar='TYPE',default='h264',
help='Specifies output type when --output is used. If TYPE is "png", the '+
'output is a series of PNG images. For any other value, it is used '+
'as the video codec for ffmpeg.')
parser.add_argument('-f','--frames',metavar='F',type=positive_int,default=160,help='when creating an animation or benchmarking, the number of frames to render')
parser.add_argument('-s','--screen',metavar='WIDTHxHEIGHT',type=screen_size,default=(800,600),help='screen size')
parser.add_argument('-a','--fov',metavar='FOV',type=fov_type,default=0.8,help='field of vision in degrees')
parser.add_argument('-d','--cam-dist',metavar='DIST',type=float,default=4,
help='How far the view-port is from the center of the polytope. The '+
'value is a multiple of the outer raidius of the polytope.')
parser.add_argument('--benchmark',action='store_true',help='measure the speed of rendering the scene')
parser.add_argument('--no-special',action='store_true',help='use the slower generic version of library even if a specialized version exists')
args = parser.parse_args()
material = Material((1,0.5,0.5))
nt = NTracer(max(len(args.schlafli)+1,3),force_generic=args.no_special)
def higher_dihedral_supplement(schlafli,ds):
a = math.pi*schlafli.denominator/schlafli.numerator
return 2*math.asin(math.sin(math.acos(1/(math.tan(ds/2)*math.tan(a))))*math.sin(a))
def almost_equal(a,b,threshold=0.001):
return (a-b).absolute() < threshold
def radial_vector(angle):
return nt.Vector.axis(0,math.sin(angle)) + nt.Vector.axis(1,math.cos(angle))
class Instance:
def __init__(self,shape,position,orientation=nt.Matrix.identity()):
self.shape = shape
self.position = position
self.orientation = orientation
self.inv_orientation = orientation.inverse()
def translated(self,position=nt.Vector(),orientation=nt.Matrix.identity()):
return (
position + (orientation * self.position),
orientation * self.orientation)
def tesselate(self,*args):
return self.shape.tesselate(*self.translated(*args))
def tesselate_inner(self,*args):
return self.shape.tesselate_inner(*self.translated(*args))
def any_point(self,*args):
return self.shape.any_point(*self.translated(*args))
def contains(self,p):
return self.shape.contains(self.inv_orientation * (p - self.position))
def star_component(x):
return (x.numerator - 1) > x.denominator > 1
class LineSegment:
star = False
def __init__(self,index,convex_ds,polygon):
self.index = index
self.p = polygon
self.position = radial_vector(index*convex_ds)
def tesselate(self,position,orientation):
return [
orientation*self.p.base_points[self.index-1]+position,
orientation*self.p.base_points[self.index]+position]
class Polygon:
apothem = 1
def __init__(self,schlafli):
self.star = star_component(schlafli)
convex_ds = 2 * math.pi / schlafli.numerator
self.dihedral_s = convex_ds * schlafli.denominator
self.parts = [LineSegment(i,convex_ds,self) for i in range(schlafli.numerator)]
self._circumradius = 1/math.cos(convex_ds/2)
self.base_points = [self._circumradius * radial_vector((i+0.5) * convex_ds) for i in range(schlafli.numerator)]
if self.star:
self._circumradius = math.tan(convex_ds)*math.tan(convex_ds/2) + 1
self.outer_points = [self._circumradius * radial_vector(i * convex_ds) for i in range(schlafli.numerator)]
def points(self,position,orientation,pset=None):
if pset is None: pset = self.base_points
return (orientation * bp + position for bp in pset)
def tesselate_inner(self,position,orientation):
points = list(self.points(position,orientation))
r = [points[0:3]]
for i in range(len(points)-3):
r.append([points[0],points[i+2],points[i+3]])
return r
def tesselate(self,position,orientation):
if not self.star:
return self.tesselate_inner(position,orientation)
points = list(self.points(position,orientation))
opoints = list(self.points(position,orientation,self.outer_points))
return [[opoints[i],points[i-1],points[i]] for i in range(len(points))]
def any_point(self,position,orientation):
return next(self.points(position,orientation))
def contains(self,p):
return any(almost_equal(p,test_p) for test_p in self.base_points)
def hull(self,position=nt.Vector(),orientation=nt.Matrix.identity()):
tris = [nt.TrianglePrototype(tri,material) for tri in self.tesselate_inner(position,orientation)]
if self.star: tris.extend(nt.TrianglePrototype(tri,material) for tri in
self.tesselate(position,orientation))
return tris
def circumradius(self):
return self._circumradius
def circumradius_square(self):
return self._circumradius*self._circumradius
def line_apothem_square(self):
return 1
class Plane:
def __init__(self,nt,position):
self.normal = position.unit()
self.d = -position.absolute()
self._dot = nt.dot
def distance(self,point):
return self._dot(point,self.normal) + self.d
class Line:
def __init__(self,nt,p0,v,planes,outer=False):
self.p0 = p0
self.v = v
self.planes = set(planes)
self.outer = outer
self._dot = nt.dot
def point_at(self,t):
return self.p0 + self.v*t
def dist_square(self,point):
a = point - self.p0
b = self._dot(a,self.v)
return a.square() - b*b/self.v.square()
def __repr__(self):
return 'Line({0!r},{1!r})'.format(self.p0,self.v)
def plane_point_intersection(nt,planes):
assert nt.dimension == len(planes)
try:
return nt.Matrix(p.normal for p in planes).inverse()*nt.Vector(-p.d for p in planes)
except ValueError:
return None
def plane_line_intersection(nt,planes):
assert nt.dimension - 1 == len(planes)
v = nt.cross(p.normal for p in planes).unit()
return Line(
nt,
nt.Matrix([p.normal for p in planes] + [v]).inverse() * nt.Vector([-p.d for p in planes] + [0]),
v,
planes)
def line_intersection(nt,l1,l2):
d = nt.dot(l1.v,l2.v)
denom = 1 - d*d
if not denom: return None
id = 1/denom
a = nt.dot(l2.p0 - l1.p0,l1.v)
b = nt.dot(l1.p0 - l2.p0,l2.v)
t1 = id*(a + d*b)
t2 = id*(d*a + b)
p1 = l1.point_at(t1)
p2 = l2.point_at(t2)
if abs(p1-p2) > 0.01: return None
return (p1 + p2) * 0.5, t1, t2
class Node:
def __init__(self,pos,planes,outer,alive=True):
self.pos = pos
self.planes = planes
self.outer = outer
self.neighbors = set() if alive else None
def detach(self):
for n in self.neighbors:
n.neighbors.remove(self)
self.neighbors = None
@property
def dead(self):
return self.neighbors is None
def find_cycles(self,length,sequence=None,exclude=None):
if sequence is None: sequence = [self]
if len(sequence) < length:
exclude = exclude.copy() if exclude is not None else set([self])
for n in self.neighbors:
if n not in exclude:
exclude.add(n)
for r in n.find_cycles(length,sequence + [n],exclude):
yield r
else:
for n in self.neighbors:
if n is sequence[0] and n.planes.intersection(*(sequence[i].planes for i in range(1,len(sequence)))):
yield sequence
def join(a,b):
if not (a.dead or b.dead):
a.neighbors.add(b)
b.neighbors.add(a)
class FuzzyGraph:
def __init__(self):
self.nodes = []
def add(self,pos,planes,outer):
for n in self.nodes:
if almost_equal(n.pos,pos):
n.planes |= planes
return n
n = Node(pos,planes,outer)
self.nodes.append(n)
return n
def remove_at(self,i):
self.nodes[i].detach()
if i+1 != len(self.nodes):
self.nodes[i] = self.nodes[-1]
del self.nodes[-1]
def remove(self,pos):
if isinstance(pos,Node):
if not pos.dead:
self.remove_at(self.nodes.index(pos))
else:
for i,n in enumerate(self.nodes):
if almost_equal(n.pos,pos):
self.remove_at(i)
break
# Cells are enlarged ever so slightly to prevent the view frustum from being
# wedged exactly between two adjacent primitives, which, do to limited
# precision, can cause that volume to appear to vanish.
fuzz_scale = nt.Matrix.scale(1.00001)
class PolyTope:
def __init__(self,dimension,schlafli,dihedral_s,face_apothem):
self.dimension = dimension
self.schlafli = schlafli
self.dihedral_s = dihedral_s
self.apothem = math.tan((math.pi - dihedral_s)/2) * face_apothem
self.star = star_component(schlafli)
self.parts = []
@property
def facet(self):
return self.parts[0].shape
def propogate_faces(self,potentials):
new_p = []
for instance,p in potentials:
dir = (instance.orientation * p.position).unit()
reflect = nt.Matrix.reflection(dir)
turn = nt.Matrix.rotation(
instance.position.unit(),
dir,
self.dihedral_s)
new_p += self.add_face(Instance(
instance.shape,
turn * instance.position,
fuzz_scale * turn * reflect * instance.orientation))
return new_p
def add_face(self,instance):
for p in self.parts:
if almost_equal(instance.position,p.position): return []
self.parts.append(instance)
return [(instance,p) for p in instance.shape.parts]
def star_tesselation(self):
t = getattr(self,'_star_tesselation',None)
if t is None:
co_nt = NTracer(self.dimension)
lines = []
planes = [Plane(co_nt,co_nt.Vector(islice(part.position,co_nt.dimension))) for part in self.parts]
las = self.line_apothem_square()
for pgroup in combinations(planes,co_nt.dimension-1):
try:
line = plane_line_intersection(co_nt,pgroup)
except ValueError:
pass
else:
if line:
for lineb in lines:
if almost_equal(line.p0,lineb.p0) and almost_equal(line.v,lineb.v):
lineb.planes |= line.planes
break
else:
outer_dist = line.dist_square(co_nt.Vector()) - las
if outer_dist < 0.1:
line.outer = outer_dist > -0.1
lines.append(line)
pmap = {}
for line in lines:
pmap[line] = {}
graph = FuzzyGraph()
maxr = self.circumradius_square() + 0.1
for l1,l2 in combinations(lines,2):
inter = line_intersection(co_nt,l1,l2)
if inter and inter[0].square() < maxr:
n = graph.add(inter[0],l1.planes | l2.planes,l1.outer or l2.outer)
pmap[l1][n] = inter[1]
pmap[l2][n] = inter[2]
for line,poss in pmap.items():
if len(poss) == 0: continue
if len(poss) == 1:
graph.remove(poss[0])
continue
poss = sorted(poss.items(),key=(lambda x: x[1]))
if line.outer:
for i in range(len(poss)-1):
join(poss[i][0],poss[i+1][0])
elif len(poss) == 2:
join(poss[0][0],poss[1][0])
elif len(poss) > 3:
for i in range(2,len(poss)-2):
graph.remove(poss[i][0])
join(poss[0][0],poss[1][0])
join(poss[-1][0],poss[-2][0])
t = []
self._star_tesselation = t
for n in islice(graph.nodes,0,len(graph.nodes)-co_nt.dimension):
for cycle in n.find_cycles(co_nt.dimension):
t.append([nt.Vector(tuple(x.pos) + (0,) * (nt.dimension-co_nt.dimension)) for x in cycle] + [nt.Vector()])
n.detach()
return t
def tesselate(self,position,orientation):
if self.star or self.facet.star:
return [[orientation * p + position for p in tri] for tri in self.star_tesselation()]
return self.tesselate_inner(position,orientation)
def tesselate_inner(self,position,orientation):
tris = []
point1 = self.parts[0].any_point(position,orientation)
inv_orientation = orientation.inverse()
for part in self.parts[1:]:
if not part.contains(inv_orientation * (point1 - position)):
new_t = part.tesselate(position,orientation)
for t in new_t: t.append(point1)
tris += new_t
return tris
def hull(self,position=nt.Vector(),orientation=nt.Matrix.identity()):
tris = []
for p in self.parts:
tris += p.tesselate(position,orientation)
return [nt.TrianglePrototype(tri,material) for tri in tris]
def any_point(self,position,orientation):
return self.parts[0].any_point(position,orientation)
def contains(self,p):
return any(part.contains(p) for part in self.parts)
def circumradius_square(self):
return self.apothem*self.apothem + self.facet.circumradius_square()
def circumradius(self):
return math.sqrt(self.circumradius_square())
def line_apothem_square(self):
return self.apothem*self.apothem + self.facet.line_apothem_square()
def compose(part,order,schlafli):
if schlafli.numerator * (math.pi - part.dihedral_s) >= math.pi * 2 * schlafli.denominator:
exit("Component #{0} ({1}) is invalid because the angles of the parts add up to 360\u00b0 or\nmore and thus can't be folded inward".format(order,schlafli))
higher = PolyTope(
order+1,
schlafli,
higher_dihedral_supplement(schlafli,part.dihedral_s),
part.apothem)
potentials = higher.add_face(Instance(part,nt.Vector.axis(order,higher.apothem)))
while potentials:
potentials = higher.propogate_faces(potentials)
return higher
jitter = nt.Vector((0,0,0) + (0.0001,) * (nt.dimension-3))
def process_movement():
global x_move, y_move, w_move
if x_move or y_move or w_move:
h = math.sqrt(x_move*x_move + y_move*y_move + w_move*w_move)
a2 = camera.axes[0]*(x_move/h) + camera.axes[1]*(-y_move/h)
if w_move: a2 += camera.axes[3] * (w_move / h)
camera.transform(nt.Matrix.rotation(
camera.axes[2],
a2,
h * ROT_SENSITIVITY))
camera.normalize()
camera.origin = camera.axes[2] * cam_distance + jitter
scene.set_camera(camera)
x_move = 0
y_move = 0
w_move = 0
run()
def run():
global running
running = True
render.begin_render(screen,scene)
try:
timer = time.perf_counter
except AttributeError:
timer = time.clock
if args.benchmark and not sys.platform.startswith('win'):
print('''warning: on multi-core systems, Python\'s high-resolution timer may combine
time spent on all cores, making the reported time spent rendering, much higher
than the actual time''',file=sys.stderr)
class RotatingCamera(object):
incr = 2 * math.pi / args.frames
h = 1/math.sqrt(nt.dimension-1)
_timer = staticmethod(timer if args.benchmark else (lambda: 0))
def __enter__(self):
self.frame = 0
self.total_time = 0
return self
def __exit__(self,type,value,tb):
if type is None and self.total_time:
print('''rendered {0} frame(s) in {1} seconds
time per frame: {2} seconds
frames per second: {3}'''.format(self.frame,self.total_time,self.total_time/self.frame,self.frame/self.total_time))
def start_timer(self):
self.t = self._timer()
def end_timer(self):
self.total_time += self._timer() - self.t
def advance_camera(self):
self.frame += 1
if self.frame >= args.frames: return False
a2 = camera.axes[0]*self.h + camera.axes[1]*self.h
for i in range(nt.dimension-3): a2 += camera.axes[i+3]*self.h
camera.transform(nt.Matrix.rotation(camera.axes[2],a2,self.incr))
camera.normalize()
camera.origin = camera.axes[2] * cam_distance
scene.set_camera(camera)
return True
if nt.dimension >= 3 and args.schlafli[0] == 4 and all(c == 3 for c in args.schlafli[1:]):
cam_distance = -math.sqrt(nt.dimension) * args.cam_dist
scene = nt.BoxScene()
else:
print('building geometry...')
timing = timer()
p = Polygon(args.schlafli[0])
for i,s in enumerate(args.schlafli[1:]):
p = compose(p,i+2,s)
hull = p.hull()
timing = timer() - timing
print('done in {0} seconds'.format(timing))
cam_distance = -math.sqrt(p.circumradius_square()) * args.cam_dist
print('partitioning scene...')
timing = timer()
scene = nt.build_composite_scene(hull)
timing = timer() - timing
print('done in {0} seconds'.format(timing))
del p
del hull
camera = nt.Camera()
camera.translate(nt.Vector.axis(2,cam_distance) + jitter)
scene.set_camera(camera)
scene.set_fov(args.fov)
if args.output is not None:
if args.type != 'png':
render = BlockingRenderer()
format = ImageFormat(
args.screen[0],
args.screen[1],
[Channel(16,1,0,0),
Channel(16,0,1,0),
Channel(16,0,0,1)])
surf = bytearray(args.screen[0]*args.screen[1]*format.bytes_per_pixel)
pipe = subprocess.Popen(['ffmpeg',
'-y',
'-f','rawvideo',
'-vcodec','rawvideo',
'-s','{0}x{1}'.format(*args.screen),
'-pix_fmt','rgb48be',
'-r','60',
'-i','-',
'-an',
'-vcodec',args.type,
'-crf','10',
args.output],
stdin=subprocess.PIPE)
try:
with RotatingCamera() as rc:
while True:
rc.start_timer()
render.render(surf,format,scene)
rc.end_timer()
                # write the raw frame bytes to ffmpeg's stdin (print() would
                # emit the textual repr of the bytearray, not the pixel data)
                pipe.stdin.write(surf)
if not rc.advance_camera(): break
finally:
pipe.stdin.close()
r = pipe.wait()
sys.exit(r)
pygame.display.init()
render = PygameRenderer()
surf = pygame.Surface(args.screen,depth=24)
def announce_frame(frame):
print('drawing frame {0}/{1}'.format(frame+1,args.frames))
with RotatingCamera() as rc:
announce_frame(0)
rc.start_timer()
render.begin_render(surf,scene)
while True:
e = pygame.event.wait()
if e.type == pygame.USEREVENT:
rc.end_timer()
pygame.image.save(
surf,
os.path.join(args.output,'frame{0:04}.png'.format(rc.frame)))
if not rc.advance_camera(): break
announce_frame(rc.frame)
rc.start_timer()
render.begin_render(surf,scene)
elif e.type == pygame.QUIT:
render.abort_render()
break
else:
pygame.display.init()
render = PygameRenderer()
screen = pygame.display.set_mode(args.screen)
if args.benchmark:
with RotatingCamera() as rc:
rc.start_timer()
render.begin_render(screen,scene)
while True:
e = pygame.event.wait()
if e.type == pygame.USEREVENT:
rc.end_timer()
pygame.display.flip()
if not rc.advance_camera(): break
rc.start_timer()
render.begin_render(screen,scene)
elif e.type == pygame.QUIT:
render.abort_render()
break
else:
running = False
run()
x_move = 0
y_move = 0
w_move = 0
while True:
e = pygame.event.wait()
if e.type == pygame.MOUSEMOTION:
if e.buttons[0]:
x_move += e.rel[0]
y_move += e.rel[1]
if not running:
process_movement()
elif e.type == pygame.MOUSEBUTTONDOWN:
if nt.dimension > 3:
if e.button == 4 or e.button == 5:
if e.button == 4:
w_move += WHEEL_INCREMENT
else:
w_move -= WHEEL_INCREMENT
if not running:
process_movement()
elif e.type == pygame.USEREVENT:
running = False
pygame.display.flip()
process_movement()
elif e.type == pygame.KEYDOWN:
if e.key == pygame.K_c:
x,y = pygame.mouse.get_pos()
fovI = (2 * math.tan(scene.fov/2)) / screen.get_width()
print(camera.origin)
print((camera.axes[2] + camera.axes[0] * (fovI * (x - screen.get_width()/2)) - camera.axes[1] * (fovI * (y - screen.get_height()/2))).unit())
elif e.type == pygame.QUIT:
render.abort_render()
break
|
Rouslan/NTracer
|
scripts/polytope.py
|
Python
|
mit
| 24,145 | 0.014206 |
from __future__ import absolute_import, unicode_literals
from .views import ContactCRUDL
urlpatterns = ContactCRUDL().as_urlpatterns()
|
xkmato/tracpro
|
tracpro/contacts/urls.py
|
Python
|
bsd-3-clause
| 137 | 0 |
#!/usr/bin/env python
"""
a class to access the REST API of the website www.factuursturen.nl
"""
import collections
import ConfigParser
from datetime import datetime, date
import re
import requests
from os.path import expanduser
import copy
import urllib
__author__ = 'Reinoud van Leeuwen'
__copyright__ = "Copyright 2013, Reinoud van Leeuwen"
__license__ = "BSD"
__maintainer__ = "Reinoud van Leeuwen"
__email__ = "reinoud.v@n.leeuwen.net"
CONVERTABLEFIELDS = {
'clients' : {'clientnr': 'int',
'showcontact': 'bool',
'tax_shifted': 'bool',
'lastinvoice': 'date',
'top': 'int',
'stddiscount': 'float',
'notes_on_invoice': 'bool',
'active': 'bool',
'default_email': 'int',
'timestamp': 'date'},
'products': {'id': 'int',
'price': 'float',
'taxes': 'int'},
'invoices': {'profile': 'int',
'discount': 'float',
'paymentperiod': 'int',
'collection': 'bool',
'tax': 'float',
'totalintax': 'float',
'sent': 'date',
'uncollectible': 'date',
'lastreminder': 'date',
'open': 'float',
'paiddate': 'date',
'duedate': 'date',
'overwrite_if_exist': 'bool',
'initialdate': 'date',
'finalsenddate': 'date'},
'invoices_payment': {'date': 'date'},
'invoices_saved': {'id': 'int',
'profile': 'int',
'discount': 'float',
'paymentperiod': 'int',
'totaldiscount': 'float',
'totalintax': 'float',
'clientnr': 'int'},
'invoices_repeated': {'id': 'int',
'profile': 'int',
'discount': 'float',
'paymentperiod': 'int',
'datesaved': 'date',
'totalintax': 'float',
'initialdate': 'date',
'nextsenddate': 'date',
'finalsenddate': 'date',
'clientnr': 'int'},
'profiles': {'id': 'int'},
'countrylist' : {'id': 'int'},
'taxes': {'percentage': 'int',
'default': 'bool'}
}
API = {'getters' : ['clients',
'products',
'invoices',
'invoices_saved',
'invoices_repeated',
'profiles',
'balance',
'countrylist',
'taxes'],
'single_getters' : ['invoices_pdf'],
'posters' : ['clients',
'products',
'invoices'],
'putters' : ['clients',
'products',
'invoices_payment'],
'deleters' : ['clients',
'products',
'invoices',
'invoices_saved',
'invoices_repeated']}
class FactuursturenError(Exception):
"""Base class for exceptions in this module."""
def __init__(self, value = ''):
self.value = value
def __str__(self):
return repr(self.value)
class FactuursturenGetError(FactuursturenError):
pass
class FactuursturenPostError(FactuursturenError):
pass
class FactuursturenWrongPostvalue(FactuursturenError):
pass
class FactuursturenWrongPutvalue(FactuursturenError):
pass
class FactuursturenEmptyResult(FactuursturenError):
pass
class FactuursturenNoAuth(FactuursturenError):
pass
class FactuursturenConversionError(FactuursturenError):
pass
class FactuursturenWrongCall(FactuursturenError):
pass
class FactuursturenNotFound(FactuursturenError):
pass
class FactuursturenNoMoreApiCalls(FactuursturenError):
pass
class Client:
"""
    client class to access www.factuursturen.nl through the REST API
"""
def __init__(self,
apikey='',
username='',
configsection='default',
host='www.factuursturen.nl',
protocol='https',
apipath='/api',
version='v0'):
"""
initialize object
When apikey and username are not present, look for INI-style file .factuursturen_rc
in current directory and homedirectory to find those values there.
when only username is present, try to find apikey in configfilesection where it is defined
:param apikey: APIkey (string) as generated online on the website http://www.factuursturen.nl
:param username: accountname for the website
:param configsection: section in file ~/.factuursturen_rc where apikey and username should be present
"""
self._url = protocol + '://' + host + apipath + '/' + version + '/'
# try to read auth details from file when not passed
config = ConfigParser.RawConfigParser()
config.read(['.factuursturen_rc', expanduser('~/.factuursturen_rc')])
if (not apikey) and (not username):
try:
self._apikey = config.get(configsection, 'apikey')
self._username = config.get(configsection, 'username')
except ConfigParser.NoSectionError:
raise FactuursturenNoAuth ('key and username not given, nor found in .factuursturen_rc or ~/.factuursturen_rc')
except ConfigParser.NoOptionError:
raise FactuursturenNoAuth ('no complete auth found')
elif username and (not apikey):
self._username = username
for section in config.sections():
if config.get(section, 'username') == username:
self._apikey = config.get(section, 'apikey')
if not self._apikey:
raise FactuursturenNoAuth ('no apikey found for username {}'.format(username))
else:
if not (apikey and username):
raise FactuursturenNoAuth ('no complete auth passed to factuursturen.Client')
self._apikey = apikey
self._username = username
# remaining allowed calls to API
self._remaining = None
self._lastresponse = None
self._headers = {'content-type': 'application/json',
'accept': 'application/json'}
# keep a list of which functions can be used to convert the fields
# from and to a string
self._convertfunctions = {'fromstring': {'int': self._string2int,
'bool': self._string2bool,
'float': self._string2float,
'date': self._string2date},
'tostring': {'int': self._int2string,
'bool': self._bool2string,
'float': self._float2string,
'date': self._date2string}}
# single value conversionfunctions
def _string2int(self, string):
try:
return int(string)
except ValueError:
raise FactuursturenConversionError('cannot convert {} to int'.format(string))
def _string2bool(self, string):
return string.lower() in ("yes", "true", "t", "1")
def _string2float(self, string):
try:
return float(string)
except ValueError:
raise FactuursturenConversionError('cannot convert {} to float'.format(string))
def _string2date(self, string):
if string == '':
return None
try:
return datetime.strptime(string, '%Y-%m-%d')
except ValueError:
raise FactuursturenConversionError('cannot convert {} to date'.format(string))
def _int2string(self, number):
if not isinstance(number, int):
raise FactuursturenConversionError('number {} should be of type int'.format(number))
return str(number)
def _bool2string(self, booleanvalue):
        if not isinstance(booleanvalue, bool):
            raise FactuursturenConversionError('booleanvalue should be of type bool')
return str(booleanvalue).lower()
def _float2string(self, number):
if not (isinstance(number, float) or (isinstance(number, int))):
raise FactuursturenConversionError('number {} should be of type float'.format(number))
return str(number)
def _date2string(self, date):
if not isinstance(date, datetime):
raise FactuursturenConversionError('date should be of type datetime')
return date.strftime("%Y-%m-%d")
def _convertstringfields_in_dict(self, adict, function, direction):
"""convert fields of a single dict either from or to strings
        fieldnames to convert are read from the CONVERTABLEFIELDS dict, which
is in essence a datadictionary for this API
:param adict: dictionary to convert
:param function: callable function in the API ('clients', 'products' etc)
:param direction: either 'tostring' or 'fromstring'
"""
if direction not in self._convertfunctions:
raise FactuursturenWrongCall ('_convertstringfields_in_dict called with {}'.format(direction))
if function in CONVERTABLEFIELDS:
for key, value in adict.iteritems():
if key in CONVERTABLEFIELDS[function]:
# note: target is something like 'int'. Depending
# on conversion direction, this is the source or the target
target = CONVERTABLEFIELDS[function][key]
conversion_function = self._convertfunctions[direction][target]
try:
adict[key] = conversion_function(value)
except FactuursturenConversionError:
print "key = {}, value = {}, direction = {}, target = {}".format(key, value, direction, target)
raise BaseException
return adict
def _convertstringfields_in_list_of_dicts(self, alist, function, direction):
"""convert each dict in the list
Basically, a loop over the function _convertstringfields_in_dict
:param alist: a list of dicts
:param function: callable function in the API ('clients', 'products' etc)
:param direction: either 'tostring' or 'fromstring'
"""
if direction not in self._convertfunctions:
raise FactuursturenWrongCall ('_convertstringfields_in_list_of_dicts called with {}'.format(direction))
for index, entry in enumerate(alist):
alist[index] = self._convertstringfields_in_dict(alist[index], function, direction)
return alist
def _flatten(self, adict, parent_key=''):
"""flatten a nested dict
The API expects nested dicts to be flattened when posting
{'lines': {'line1': {'amount': 1,
'tax': 21},
'line2': {'amount': 2,
'tax': 21}
}
}
to
{'lines[line1][amount]': 1,
'lines[line1][tax]': 21,
'lines[line2][amount]': 2,
'lines[line2][tax]': 21
}
:param adict: a nested dict
:param parent_key: should be empty, used for recursion
"""
items = []
for k, v in adict.items():
new_key = parent_key + '[' + k + ']' if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(self._flatten(v, new_key).items())
else:
items.append((new_key, v))
return dict(items)
def _fixkeynames(self, adict):
"""replace keynames in dict
replace keys like 'lines[line0][amount_desc]'
with 'lines[0][amount_desc]'
(keeping the same value)
:param adict: dictionary to be changed
"""
for key, val in adict.items():
fields = re.split('\]\[', key)
if len(fields) > 1:
leftfields = re.split('\[', fields[0])
middlefield = re.sub("[^0-9]", "", leftfields[1])
newfield = leftfields[0] + '[' + middlefield + '][' + fields[1]
adict[newfield] = val
del adict[key]
return adict
def _prepare_for_send(self, adict, function):
"""fix dict so it can be posted
:param adict: dictionary to be posted
:param function: callable function from the API ('clients', 'products', etc)
"""
adict = self._convertstringfields_in_dict(adict, function, 'tostring')
adict = self._flatten(adict)
adict = self._fixkeynames(adict)
return adict
def _escape_characters(self, string):
"""escape unsafe webcharacters to use in API call
by default urllib considers '/' as safe, override the default for the second argument by
considering nothing safe
"""
return urllib.quote(str(string), safe='')
@property
def remaining(self):
"""return remaining allowed API calls (for this hour)"""
return self._remaining
@property
def ok(self):
"""return status of last call"""
return self._lastresponse
def post(self, function, objData):
"""Generic wrapper for all POSTable functions
errors from server during post (like wrong values) are propagated to the exceptionclass
        :param function: callable function from the API ('clients', 'products', etc)
:param objData: data to be posted
"""
fullUrl = self._url + function
objData_local = copy.deepcopy(objData)
if function not in API['posters']:
raise FactuursturenPostError("{function} not in available POSTable functions".format(function=function))
if isinstance(objData_local, dict):
objData_local = self._prepare_for_send(objData_local, function)
response = requests.post(fullUrl,
data=objData_local,
auth=(self._username, self._apikey))
self._lastresponse = response.ok
if response.ok:
self._remaining = int(response.headers['x-ratelimit-remaining'])
return response.content
else:
raise FactuursturenWrongPostvalue(response.content)
def put(self, function, objId, objData):
"""Generic wrapper for all PUTable functions
errors from server during post (like wrong values) are propagated to the exceptionclass
        :param function: callable function from the API ('clients', 'products', etc)
:param objId: id of object to be put (usually retrieved from the API)
:param objData: data to be posted. All required fields should be present, or the API will not accept the changes
"""
fullUrl = self._url + function + '/{objId}'.format(objId=self._escape_characters(objId))
if function not in API['putters']:
raise FactuursturenPostError("{function} not in available PUTable functions".format(function=function))
if isinstance(objData, dict):
objData = self._prepare_for_send(objData, function)
response = requests.put(fullUrl,
data=objData,
auth=(self._username, self._apikey))
self._lastresponse = response.ok
if response.ok:
self._remaining = int(response.headers['x-ratelimit-remaining'])
return
else:
raise FactuursturenWrongPutvalue(response.content)
def delete(self, function, objId):
"""Generic wrapper for all DELETEable functions
errors from server during post (like wrong values) are propagated to the exceptionclass
        :param function: callable function from the API ('clients', 'products', etc)
:param objId: id of object to be put (usually retrieved from the API)
"""
fullUrl = self._url + function + '/{objId}'.format(objId=self._escape_characters(objId))
if function not in API['deleters']:
raise FactuursturenPostError("{function} not in available DELETEable functions".format(function=function))
response = requests.delete(fullUrl,
auth=(self._username, self._apikey))
self._lastresponse = response.ok
if response.ok:
self._remaining = int(response.headers['x-ratelimit-remaining'])
else:
raise FactuursturenError(response.content)
def get(self, function, objId=None):
"""Generic wrapper for all GETtable functions
when no objId is passed, retrieve all objects (in a list of dicts)
when objId is passed, only retrieve a single object (in a single dict)
        :param function: callable function from the API ('clients', 'products', etc)
:param objId: id of object to be put (usually retrieved from the API)
"""
# TODO: some errorchecking:
# - on function
# - on return
# - on network error
# - on password
# - on remaining allowed requests
fullUrl = self._url + function
# check function against self.getters and self.singleGetters
if function not in API['getters'] + API['single_getters']:
raise FactuursturenGetError("{function} not in available GETtable functions".format(function=function))
if objId:
fullUrl += '/{objId}'.format(objId=self._escape_characters(objId))
response = requests.get(fullUrl,
auth=(self._username, self._apikey),
headers=self._headers)
self._lastresponse = response.ok
# when one record is returned, acces it normally so
# return the single element of the dict that is called 'client'
# when the functioncall was 'clients/<id>
singlefunction = function[:-1]
self._remaining = int(response.headers['x-ratelimit-remaining'])
if response.ok:
if function == 'invoices_pdf':
return response.content
try:
raw_structure = response.json()
if objId is None:
retval = self._convertstringfields_in_list_of_dicts(raw_structure, function, 'fromstring')
else:
retval = self._convertstringfields_in_dict(raw_structure[singlefunction], function, 'fromstring')
except FactuursturenError as error:
print error
retval = response.content
return retval
else:
# TODO: more checking
if response.status_code == 404:
raise FactuursturenNotFound (response.content)
elif self._remaining == 0:
raise FactuursturenNoMoreApiCalls ('limit of API calls reached.')
else:
raise FactuursturenEmptyResult (response.content)
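# A minimal usage sketch of the wrappers above. Hedged: the enclosing client
# class and its constructor are not shown in this fragment, so the
# instantiation below is illustrative only and the field names are placeholders.
#
#     api = Client('myusername', 'myapikey')            # hypothetical constructor
#     all_clients = api.get('clients')                  # list of dicts
#     one_client  = api.get('clients', 12345)           # single dict
#     api.post('clients', {'contact': 'J. Doe'})        # create (fields are placeholders)
#     api.put('clients', 12345, {'contact': 'J. Doe'})  # update, all required fields present
#     api.delete('clients', 12345)                      # remove
#     pdf_bytes = api.get('invoices_pdf', 678)          # raw PDF content, not JSON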
|
reinoud/factuursturen
|
factuursturen/__init__.py
|
Python
|
bsd-2-clause
| 19,420 | 0.003862 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Car',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('city', models.CharField(max_length=50)),
('brand', models.CharField(max_length=50)),
('types', models.CharField(max_length=50)),
('car_time', models.CharField(max_length=30)),
('mileage', models.CharField(max_length=30)),
('car_price', models.CharField(max_length=30)),
('image_url', models.CharField(max_length=200)),
('car_url', models.CharField(max_length=200)),
('model', models.CharField(max_length=300)),
('transmission_mode', models.CharField(max_length=50)),
('have_accident', models.CharField(max_length=10)),
],
),
]
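# For reference, a sketch of the Car model this initial migration corresponds to
# (assumed -- the app's models.py is not part of this file; field names are taken
# from the CreateModel operation above). Apply it with `python manage.py migrate car`.
#
#     class Car(models.Model):
#         city = models.CharField(max_length=50)
#         brand = models.CharField(max_length=50)
#         types = models.CharField(max_length=50)
#         car_time = models.CharField(max_length=30)
#         mileage = models.CharField(max_length=30)
#         car_price = models.CharField(max_length=30)
#         image_url = models.CharField(max_length=200)
#         car_url = models.CharField(max_length=200)
#         model = models.CharField(max_length=300)
#         transmission_mode = models.CharField(max_length=50)
#         have_accident = models.CharField(max_length=10)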
|
TheDavidGithub/mysite
|
car/migrations/0001_initial.py
|
Python
|
gpl-2.0
| 1,116 | 0.000896 |
#{{{ valid_key
valid_key = """-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAyGE8JpvCpp/0aLm6s0C56V5JtVr/fWra/pdFviA8YSabUlur
kcVPwoZLPbYmZtqmjfLSp5k1aCQbSqQDuB3nduFvWS6TzB8ACRDfw4KHE2D76pNE
lPbPejzIF8AyNKLrqi/Tba17cmqw1FFICg3B5ftu7mBzhJCPS/mt1i89CuoKVWRo
VB1CCKqJ0XIUO5/GC8nH2TwAzhnQpCx5B0bUJZkPxB5qbjXPpewY0IpSMNLrAGBJ
AMshLs04rTDSQpWuWHgEcN8QGOedDlYkaVIFgrLG1OZjTzr63cIqWhbRO4b/iC3u
vgca2WvkALNvhYz42Ckment1by6BlGpku/EKZQIDAQABAoIBAQCXsGQkpg04riYL
kW4wfHNAoswRqg13GAkoacO2JhkPQnaaZiL/euriS19yHA3+Fs+27XdiwKyZZZvz
aS4Xlyu95vKwBH6GK4GPW2LJUmqG1Qhfj5R/YEe4NYMT178p8ZuNfh8EDCxDdVjf
oZhzY+7A9xIHCqToWbbgwOZEGgaP1z19ByNME0sL1V1bqQRgDK0cdjschM2l27SM
n5gwv412Gj+zvyo6WrXVq1RxRW+Lz8xVFKfL3ojv/4mC9TCmXj5XCLYkvtzz5Fm3
T3/QNOBw+/Ki6bTPd7AlkmI0/hQzAWoJmaka32195o+0YfKF0EWqOrjPqmUKHg+4
NT/qAoWBAoGBAOZd6MH/iFclB3yGRDOgCjFuOQ8BKxFxu6djW+QnRlIcSlmC/PoO
dYF1Ao811+QyIGIeikpcXkKPjsT2t/FpEidzKJAdFlfB/R83gR6VY2av666Dd46+
c5oEvZwJ4F2B53dcfE4uc3eDBSIvDkDXg8GgoKUspC5oAMc0SApJtTTtAoGBAN6t
IkawG4RACnP5JJbQSzqJMmYQEDjYwlYKaBOAK8driRf5riH+1zxV2iZSbRMRi5YB
OXnMqVyeAMrCmDuWC/m13Q9+F1mwrGCC4d2PdxG3siBk76Wk9lt5i0E5u+0Fkhb8
LocL/i5CdnS7vMzjUIJv+2nynZBcI2C5O5h5R7RZAoGBAKse0wlYccSaAwejDQJ0
PV4ejE/zBDT/EO/0RIwkCH5x+WCohpaRKwQxEOekjWw7OAbqvLskBfjXfqwcwOKY
tB2N03HJyZECzq3joww+2AmtYBrQGX11R5X/M4G3bGzJVFE76DKDGs6KCsQrxCCy
dnLciLhpgsPeCcXvthu+SNa5AoGAU339VR3hI2RTxwpKwtWU73zTzdsGChqUO2ip
f25We+6BN+EhfpOsJm5WXnoXdcflW7hX0aiAdD62M1BlfioTkWy0V4PCw5lv+Lv6
bnXFgnorKuRNywEgckoPgcRhB01Iap/h3jcL9lJrUfe2oeg1nsLYNPCITcGObkzl
4JTQ4vkCgYEAmdxFKmwWLzsG0Q01YHp6W6Mc+YzIPAVYGc3+sJ7tSpk09XFXRCMw
Ypq+K8dAZ0dkyFqfPjeY/loTQUAkat/Ay6YT+Ytnflg8laZXjS8c5OZPD7dj425C
oDTkAxaQEVeJ33n37f9coMnLv7bySNZQzeD9ai5ysddFMQ6+RYEeaRE=
-----END RSA PRIVATE KEY-----"""
valid_pubkey = """ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDIYTwmm8Kmn/RoubqzQLnpXkm1Wv99atr+l0W+IDxhJptSW6uRxU/Chks9tiZm2qaN8tKnmTVoJBtKpAO4Hed24W9ZLpPMHwAJEN/DgocTYPvqk0SU9s96PMgXwDI0ouuqL9NtrXtyarDUUUgKDcHl+27uYHOEkI9L+a3WLz0K6gpVZGhUHUIIqonRchQ7n8YLycfZPADOGdCkLHkHRtQlmQ/EHmpuNc+l7BjQilIw0usAYEkAyyEuzTitMNJCla5YeARw3xAY550OViRpUgWCssbU5mNPOvrdwipaFtE7hv+ILe6+BxrZa+QAs2+FjPjYKSZ6e3VvLoGUamS78Qpl valid_key"""
#}}}
#{{{ bad_key
bad_key = """-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEA8kRD19JjebNWHmGPnTcToYhFR2PE8XjiPJJ4qdd72AjHwGcJ
UZVGAuiFrtEX1QiG619ccUnm/wGPhTS19o4vImBLCz07soOb9qfkkl5b0tjYB9oq
oBbCs1sgPSnO1Ju05/FuVANDQH53jRpHi9t7Jta8A0fZ3t2j1nITwj/aJL4zC3lI
VIQXcR8DteKoY656eavLevKDXNueOpdJIa5kVR3cSLlJzNQGY1AJi4CXpr/+2Krh
mXr+SdHPDtgL0DpJsXVkDYkRkOxGJ49XlRq/SGH+mUhEVM6yXkU53NYsyl64z92u
6Uw0793R2wUIlBo2waeng7BqSijo6lRRoO1p7wIDAQABAoIBAFVu21nVeHznUBxA
nUt8q7CQBJZLSZ052tYvdNu4AJVLa12fODsL3/eQlevzEmtuqV2BcHEG9a3BmCIK
V4zN0TNXL7+T5WRrYumVhVZUbh8plu0V82gm/pwPK4xGHQj+q8oLarO3vtSUhIY+
2TIpwQEOQpkJknw0Pt2VtwAOUlgYBuz9joirz8qgU63lRjrt1dok/tXUulaIXwIq
u7UNR+KltpM+OG8Dkw3dRGi0vd+0pE/udN08mIdbnpF0WxoRYDax5CPKTNVZnNA4
PyjPriXLQNbguRITZhOjdNhEbpFjYnh6SDoWjXILEu5eMMqRn/AkOXgIaV5uMhjK
WBbvcykCgYEA/c1pCWtMICcWQDLLIc+gyBOOM225xTEc/Uwd/3BKY0d5WCZwME5C
KWfWP5FHIjcbkqSFjxoF8iqcjJDDK3mvdhw4FfYky0p4MBVbgcHmWlA43aMISUg6
yOkSVlgJGcBE7qjaity8zq9EplXJ0jeacnVg3PFwcyGyET8fwqRjhtsCgYEA9F1I
u4ZDn/nCESuYan72U64NGb4otmzY5bECsCVxuqvFW18DBOZT5dmhy8WvRzIjHvUk
e5BKBBHH9sqoU62L8TDhgxCkFQI5tW/6wFtxMkagIf2ZS34LjXw6DNmadFSdt895
QgTuOTfDo7CNUCmKVvWvnfYh3WflVFcdduFAA30CgYBDAjOkN/7+UjOOuZpmD+J8
UKVvsJ2mkYP84FbfNS9+rMU939ApANB5DCBF4j5sqy4NdoCIuq8tM9DnT+UnmlYY
EfVA3x7xyXSYBVuGk8IUwC9zXPL4HijfGKItx8qHUPsiI9SnawYyb5XpcKb7aJDP
fWwYiWpOC2xvH0JsamiYlwKBgFclXWbi/wLyQy90aKqyFI7LJ4jitAo6zy867UZ8
VA7Ka1umiqEYsxrpfjX5jgTJ8JR9wEEAuCSzYqi0pUsSPTIKyKHBMYL8hDAce0RW
WAwh+8GjeyPdE6mlf95NTDPoaUbACvJsZ7ioFnEt6XeJGPjiLXsb5//ZqYNmfe+D
bRzRAoGAcii2U2bkXl0v3gTsFgfFYgWVofnUnGersnPzwsL0/5l/O36McPZxdAf5
OVBHW0TIsQ22eyNcOwQiqxlWPOdimCuMi3FdVfXiYjO1FT1Fg/rPBUbfTO9JDJVJ
LE7PfYWR0rWVnASlEYFGnlTNDHRXTfrsP9A6hqllSjvCH/BRgLs=
-----END RSA PRIVATE KEY-----"""
bad_pubkey = """ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDyREPX0mN5s1YeYY+dNxOhiEVHY8TxeOI8knip13vYCMfAZwlRlUYC6IWu0RfVCIbrX1xxSeb/AY+FNLX2ji8iYEsLPTuyg5v2p+SSXlvS2NgH2iqgFsKzWyA9Kc7Um7Tn8W5UA0NAfneNGkeL23sm1rwDR9ne3aPWchPCP9okvjMLeUhUhBdxHwO14qhjrnp5q8t68oNc2546l0khrmRVHdxIuUnM1AZjUAmLgJemv/7YquGZev5J0c8O2AvQOkmxdWQNiRGQ7EYnj1eVGr9IYf6ZSERUzrJeRTnc1izKXrjP3a7pTDTv3dHbBQiUGjbBp6eDsGpKKOjqVFGg7Wnv invalid_pubkey"""
#}}}
#{{{ passphrase_key
passphrase_key = """-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: AES-128-CBC,203FFB8DE1E202DD1F430A5E1B6C0210
YIncd0kBPCZCsW7W7fXBrMAD7/lsLj8k+ZuFJNZFF/fK+361Lteoy8O/wJn5ZHWm
R2r0/JLuXt6diwmpJO2xi2NpT9ktq1D9YEvKq/R1cSWdBXFHxlBf9eZi+B5+z/pF
ObzlkHbeSEKQmOTbTN+jqO3hOsf6y665geqTE4/fcH1Y/gl4y92v+yNJ6p1PVmv6
/4V/8uYVMTkt+xGEYDMDzxnGNfufkvwfB8hYlVB7eJB1IBcRlciee4h9NretEjds
UenXEYMtmRtHNEat+eTiPUJRihoedc36Za64yO/9Qk2A+bxeOe5JIFzaSHFFr6om
stsAnAqW6Vl7xx0p3FTzqhTrVh+DRB4WNYfOPLZz26906OvGfsxcp8PC+NLEXcUm
Bgti016e/UzYWlikCx8iao9LTVx0vTsJVqKX1zEJ/wBkzhkU9GSPP18grCKECjdW
gEwMO830aLLQpSHj1fzoPCabaNRDiF5qwz2ZPMpT5R5PTMKUjwqXU/4K7qzGCf77
8WZtOvthzGLpr7Sk4XC38+fhXEmHiBlnDeaESamw8Sub6cNbnanhuDFSfVl+m6+z
0suhE1wxY6IEPPkfbAhoMTtbxUl+uaxZS1cZBPseTw4Lyo7vut1UQdNvqM9pxirg
Az7Sefv9xB6OTfFKWf0bS+U6ZRipTvGUEaI2rgKUjf1IyhjVHTp4nN2H36x6XxBh
vMS9t+VcVwnpAVeG+owHEMfns0jdGhpborcM/xUdToovUk4wvxx9OjS17FNEDrJC
HvcbHfA3Gmt5QTlhPDrmy+KcUMmbBDqSuuxDezQBVHdiMTK1/3YL0cnEeyzq9/Fv
u8WVonE2oopOGzpW4zIcPnLAyz2Era8JcjW0cvsjTl8nIkxEnY6vXlz4X4L50EW3
2zUAT9iVV49qwDNWOehog7bD/+M8DL1MdMZyhKP6kT9gP6yVzk7pLTs7mDc4aDNp
ZyzDVVf6FB5vET3zIC3bkDywI1vY/5E7u6P2tV4M8SzPDjWb5wf32FlXhwHnlDBK
qDGV0aRK8mwZ74begQN3GsXW4V6iYOVo5ny2n4o5Q/mkVZ7gcvPuvyOBL5xqSNt+
Ux5iiyBk2phFRW5xEomjZkTwgzXgci+JMrwX7w3AZzR+QCQlNXdJ4JvwJ2PDLUKn
Rd3qBFh8VJHPhiIPtp309L3qqvQM9LdmoMP8HPxGVEpuy5qLNqZN10uQT3HM2D4d
RzMwhDRACO59SQN3iEcWsiXhwq3Qtd1YULEfEjhACkDKDSKPsI0SPa4/0Xp53zpw
PspdS29xzaWIcMahC0qI1k00x/HAIX78AS/c3aiujuxdb/6NifDfph5RAjeH0Qc9
EY7yVwffjFVu74a6goc8Ru8yOsdIl4Z49rFRlzsGjeWNV1qu55TZOSudPphYDZF3
m0T09+lJya8Mpd25Fnl8DVKxoop2FOQlRx9yYlMhsCyLcfdETGgr79cqUvNEBOem
wdSaedQYAMbvJdHp+3pgI/FiJa4w+nVaczgFM8rt4O1hdDmPpYXuVAWj5zSvIB4r
Z/+5HMtkbzOYzHwMHTwIvTLsn6AGtLeLLHj4fF4YxOaPZ/fzLQJys+TmLLzcJtua
-----END RSA PRIVATE KEY-----"""
passphrase_pubkey = """ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGmMmPwbfpQJp9FnbNLti8txJ04ByE+cHc68SmjGJiJJEE+f1xnMMt0EeRit1BAJn7I8YLIansmf582KaknQS909OuNfzer8W5fNAOp6T+eHyi4sQ5vtOdeRURudP0q3FjJvn8ZtHu0Fp28HXbRTbvPCOPCJK/ZGK0z3M4JgG2Ir/L0KibvcN8eDbkMXLYmqfD0t95Rm4DFGTbtkWn8DzzPZmkodLAvUfEZE5fKUp+ZR2HO1XEEjPZwdg+bYBqs7hldps2K/TlJAw6fhAzEohiogMKMueFds+ZLuJB3yO1nKgPMXOZwKrxSR+IJ1nAlwHoLoipozefffoxwqtoEzcB passphrase_key"""
#}}}
#{{{ invalid_key
invalid_key = """-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEA8kRD19JjebNWHmGPnTcToYhFR2PE8XjiPJJ4qdd72AjHwGcJ
UZVGAuiFrtEX1QiG619ccUnm/wGPhTS19o4vImBLCz07soOb9qfkkl5b0tjYB9oq
oBbCs1sgPSnO1Ju05/FuVANDQH53jRpHi9t7Jta8A0fZ3t2j1nITwj/aJL4zC3lI
VIQXcR8DteKoY656eavLevKDXNueOpdJIa5kVR3cSLlJzNQGY1AJi4CXpr/+2Krh
mXr+SdHPDtgL0DpJsXVkDYkRkOxGJ49XlRq/SGH+mUhEVM6yXkU53NYsyl64z92u
6Uw0793R2wUIlBo2waeng7BqSijo6lRRoO1p7wIDAQABAoIBAFVu21nVeHznUBxA
V4zN0TNXL7+T5WRrYumVhVZUbh8plu0V82gm/pwPK4xGHQj+q8oLarO3vtSUhIY+
2TIpwQEOQpkJknw0Pt2VtwAOUlgYBuz9joirz8qgU63lRjrt1dok/tXUulaIXwIq
u7UNR+KltpM+OG8Dkw3dRGi0vd+0pE/udN08mIdbnpF0WxoRYDax5CPKTNVZnNA4
PyjPriXLQNbguRITZhOjdNhEbpFjYnh6SDoWjXILEu5eMMqRn/AkOXgIaV5uMhjK
WBbvcykCgYEA/c1pCWtMICcWQDLLIc+gyBOOM225xTEc/Uwd/3BKY0d5WCZwME5C
KWfWP5FHIjcbkqSFjxoF8iqcjJDDK3mvdhw4FfYky0p4MBVbgcHmWlA43aMISUg6
u4ZDn/nCESuYan72U64NGb4otmzY5bECsCVxuqvFW18DBOZT5dmhy8WvRzIjHvUk
e5BKBBHH9sqoU62L8TDhgxCkFQI5tW/6wFtxMkagIf2ZS34LjXw6DNmadFSdt895
QgTuOTfDo7CNUCmKVvWvnfYh3WflVFcdduFAA30CgYBDAjOkN/7+UjOOuZpmD+J8
UKVvsJ2mkYP84FbfNS9+rMU939ApANB5DCBF4j5sqy4NdoCIuq8tM9DnT+UnmlYY
fWwYiWpOC2xvH0JsamiYlwKBgFclXWbi/wLyQy90aKqyFI7LJ4jitAo6zy867UZ8
VA7Ka1umiqEYsxrpfjX5jgTJ8JR9wEEAuCSzYqi0pUsSPTIKyKHBMYL8hDAce0RW
WAwh+8GjeyPdE6mlf95NTDPoaUbACvJsZ7ioFnEt6XeJGPjiLXsb5//ZqYNmfe+D
bRzRAoGAcii2U2bkXl0v3gTsFgfFYgWVofnUnGersnPzwsL0/5l/O36McPZxdAf5
OVBHW0TIsQ22eyNcOwQiqxlWPOdimCuMi3FdVfXiYjO1FT1Fg/rPBUbfTO9JDJVJ
LE7PfYWR0rWVnASlEYFGnlTNDHRXTfrsP9A6hqllSjvCH/BRgLs=
-----END RSA PRIVATE KEY-----"""
invalid_pubkey = """ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDyREPX0mN5s1YeYY+dNxOhiEVHY8TxeOI8knip13vYCMfAZwlRlUYC6IWu0RfVCIbrX1xxSebsLPTuyg5v2p+SSXlvS2NgH2iqgFsKzWyA9Kc7Um7Tn8W5UA0NAfneNGkeL23sm1rwDR9ne3aPWchPCP9okvjMLeUhUhBdxHwO14qhjrnp5q8t68oNc2546l0khrmRVHdxIuUnM1AZjUAmLgJemv/7YquGZev5J0c8O2AvQOkmxdWQNiRGQ7EYnj1eVGr9IYf6ZSERUzrJeRTnc1izKXrjP3a7pTDTv3dHbBQiUGjbBp6eDsGpKKOjqVFGg7Wnv invalid_pubkey"""
#}}}
sample_data = "0a2199401b1c9d8c052d992cbbec4aaacda319e4"
signatures = {
"sample_data": {
"valid_key": (9071810417495709405633356698199582942244105637772743946536854292351977071974807418883520031454840317968965557661397547801304997803374041934253479837144530419751975110111379111342249884434888396619009496881891262035866826559855625459648180428717196695767292389018292110052418709451952308107410043081967035219620309385180324114805791874473882064185217325613637287463710731825028931418132880340514595744434332253330512906424922871454457866191873149750140967858301775438859597631351267967445133328434103829948930004723817580085482121313333486406579183188937167119484540498216289409471730179949994659015230566739036461990L,)
}
}
materialize_in = '\x00\xc8a<&\x9b\xc2\xa6\x9f\xf4h\xb9\xba\xb3@\xb9\xe9^I\xb5Z\xff}j\xda\xfe\x97E\xbe <a&\x9bR[\xab\x91\xc5O\xc2\x86K=\xb6&f\xda\xa6\x8d\xf2\xd2\xa7\x995h$\x1bJ\xa4\x03\xb8\x1d\xe7v\xe1oY.\x93\xcc\x1f\x00\t\x10\xdf\xc3\x82\x87\x13`\xfb\xea\x93D\x94\xf6\xcfz<\xc8\x17\xc024\xa2\xeb\xaa/\xd3m\xad{rj\xb0\xd4QH\n\r\xc1\xe5\xfbn\xee`s\x84\x90\x8fK\xf9\xad\xd6/=\n\xea\nUdhT\x1dB\x08\xaa\x89\xd1r\x14;\x9f\xc6\x0b\xc9\xc7\xd9<\x00\xce\x19\xd0\xa4,y\x07F\xd4%\x99\x0f\xc4\x1ejn5\xcf\xa5\xec\x18\xd0\x8aR0\xd2\xeb\x00`I\x00\xcb!.\xcd8\xad0\xd2B\x95\xaeXx\x04p\xdf\x10\x18\xe7\x9d\x0eV$iR\x05\x82\xb2\xc6\xd4\xe6cO:\xfa\xdd\xc2*Z\x16\xd1;\x86\xff\x88-\xee\xbe\x07\x1a\xd9k\xe4\x00\xb3o\x85\x8c\xf8\xd8)&z{uo.\x81\x94jd\xbb\xf1\ne'
materialize_out = 25295609341077720578149569787215598577366829958423604490571842817851020326910350088890690762153486357542446484023370236715144281344593768828137335494349981384301965464088609112700794946345854075351704174911521521663171660125828501117559294494873683974182590681118700347057284907411580161334838074365259803223067542557791586133265850132752790020520954221796249780990448630994176019548193469319689064714540091813483543342178734915728791490566037686545158659984868538938049987338269190132632631305863491555281131699180304523593897903273198561911749292509922781739515718241720229724468532400205931858198469622593991281253L
|
richo/groundstation
|
test/support/crypto_fixture.py
|
Python
|
mit
| 10,569 | 0.001608 |
from importlib import import_module
from djangoautoconf.auto_conf_urls import enum_app_names
from djangoautoconf.auto_conf_utils import is_at_least_one_sub_filesystem_item_exists, get_module_path
from ufs_tools.short_decorator.ignore_exception import ignore_exc_with_result
def autodiscover():
    """Scan every installed app for a routing.py and append its channel routes
    to the project's root channel routing module (located via the CHANNEL_LAYERS
    default ROUTING setting)."""
from django.conf import settings
routing_holder = settings.CHANNEL_LAYERS["default"]["ROUTING"]
routing_module = ".".join(routing_holder.split(".")[0:-1])
root_url = import_module(routing_module)
root_default_channel_routing = root_url.default_channel_routing
for app in enum_app_names():
if app == "channels":
continue
mod = import_module(app)
if is_at_least_one_sub_filesystem_item_exists(get_module_path(mod), ["routing.py"]):
routing_module_name = "%s.routing" % app
routing_settings = get_routing_settings(routing_module_name)
root_default_channel_routing.extend(routing_settings)
@ignore_exc_with_result(exception_result=[], is_notification_needed=True)
def get_routing_settings(routing_module_name):
    """Import an app's routing module and return its channel_routing list; the
    decorator above converts import/attribute errors into an empty list."""
routing_module = import_module(routing_module_name)
routing_settings = routing_module.channel_routing
return routing_settings
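# Example of a per-app routing.py that autodiscover() would pick up (sketch for
# Channels 1.x, which this settings layout implies; the consumer path below is
# hypothetical -- the only requirement enforced here is a module-level
# `channel_routing` list):
#
#     # myapp/routing.py
#     from channels.routing import route
#
#     channel_routing = [
#         route("websocket.receive", "myapp.consumers.ws_receive"),
#     ]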
|
weijia/djangoautoconf
|
djangoautoconf/auto_detection/routing_auto_detection.py
|
Python
|
bsd-3-clause
| 1,242 | 0.00161 |
###############################################################################
## ##
## This file is part of ModelBlocks. Copyright 2009, ModelBlocks developers. ##
## ##
## ModelBlocks is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## ModelBlocks is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with ModelBlocks. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###############################################################################
import sys, os, collections, sets
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'resource-gcg', 'scripts'))
import discgraph
VERBOSE = False
for a in sys.argv:
if a=='-d':
VERBOSE = True
################################################################################
def complain( s ):
print( '#ERROR: ' + s )
sys.stderr.write( 'ERROR: ' + s + '\n' )
# exit( 1 )
################################################################################
class InducibleDiscGraph( discgraph.DiscGraph ):
def getChainFromSup( D, xLo ):
return [ xLo ] + [ x for l,xHi in D.Inhs.get(xLo,{}).items() if l!='w' and l!='o' for x in D.getChainFromSup(xHi) ]
def getChainFromSub( D, xHi ):
return [ xHi ] + [ x for xLo in D.Subs.get(xHi,[]) for x in D.getChainFromSub(xLo) ]
def __init__( D, line ):
discgraph.DiscGraph.__init__( D, line )
## List of referents that participate in elementary predications (which does not include the eventuality / elementary predication itself)...
D.Participants = sets.Set([ x for pred in D.PredTuples for x in pred[2:] ])
## List of heirs for each inherited referent...
D.Legators = collections.defaultdict( list )
D.Heirs = collections.defaultdict( list )
for xLo in D.Referents:
D.Legators[ xLo ] = D.getLegators( xLo )
for xHi in D.Referents:
D.Heirs[ xHi ] = D.getHeirs( xHi )
if VERBOSE: print( 'Legators = ' + str(D.Legators) )
if VERBOSE: print( 'Heirs = ' + str(D.Heirs) )
def getTopUnaryLegators( xLo ):
L = [ xLeg for l,xHi in D.Inhs.get( xLo, {} ).items() if l!='w' and l!='o' and len( D.Subs.get(xHi,[]) ) < 2 for xLeg in getTopUnaryLegators(xHi) ]
return L if L != [] else [ xLo ]
# if D.Inhs.get( xLo, {} ).items() != [] else [ xLo ]
# UnaryL = [ xLeg for xLeg in D.Legators.get(xLo,[]) if all([ xLo in D.Heirs.get(xHeir,[]) for xHeir in D.Legators.get(xLo,[]) if xHeir in D.Heirs.get(xLeg,[]) ]) ]
# return [ x for x in UnaryL if not any([ x in D.Heirs.get(y,[]) for y in UnaryL if y != x ]) ]
def getTopLegators( xLo ):
L = [ xLeg for l,xHi in D.Inhs.get( xLo, {} ).items() if l!='w' and l!='o' for xLeg in getTopLegators(xHi) ]
return L if L != [] else [ xLo ]
# if D.Inhs.get( xLo, {} ).items() != [] else [ xLo ]
D.TopLegators = { xLo : sets.Set( getTopLegators(xLo) ) for xLo in D.Inhs }
if VERBOSE: print( 'TopLegators = ' + str(D.TopLegators) )
D.TopUnaryLegators = { xLo : sets.Set( getTopUnaryLegators(xLo) ) for xLo in D.Inhs }
if VERBOSE: print( 'TopUnaryLegators = ' + str(D.TopUnaryLegators) )
# D.PredRecency = { }
## List of heirs for each participant...
D.HeirsOfParticipants = [ xLo for xHi in D.Participants for xLo in D.Heirs.get(xHi,[]) ]
if VERBOSE: print( 'HeirsOfParticipants = ' + str(D.HeirsOfParticipants) )
## Obtain inheritance chain for each reft...
D.Chains = { x : sets.Set( D.getChainFromSup(x) + D.getChainFromSub(x) ) for x in D.Referents }
if VERBOSE: print( 'Chains = ' + str(D.Chains) )
# Inheritances = { x : sets.Set( getChainFromSup(x) ) for x in Referents }
## Mapping from referent to elementary predications containing it...
# D.RefToPredTuples = { xOrig : [ (ptup,xInChain) for xInChain in D.Chains[xOrig] for ptup in D.PredTuples if xInChain in ptup[2:] ] for xOrig in D.Referents }
def orderTuplesFromSups( x ):
Out = []
if x in D.Nuscos:
for src in D.Nuscos[x]:
Out += [ (ptup,src) for ptup in D.PredTuples if src in ptup[2:] ]
Out += [ (ptup,x) for ptup in D.PredTuples if x in ptup[2:] ]
for lbl,dst in D.Inhs.get(x,{}).items():
Out += orderTuplesFromSups( dst )
return Out
def orderTuplesFromSubs( x ):
Out = []
Out += [ (ptup,x) for ptup in D.PredTuples if x in ptup[2:] ]
for src in D.Subs.get(x,[]):
Out += orderTuplesFromSubs( src )
# Out += [ (ptup,src) for ptup in D.PredTuples if src in ptup[2:] ]
return Out
D.FullRefToPredTuples = { x : sets.Set( orderTuplesFromSubs(x) + orderTuplesFromSups(x) ) for x in D.Referents }
D.WeakRefToPredTuples = { x : orderTuplesFromSubs( D.Inhs.get(x,{}).get('r',x) ) for x in D.Referents }
D.BareRefToPredTuples = { x : [ (ptup,x) for ptup in D.PredTuples if x in ptup[2:] ] for x in D.Referents }
if VERBOSE: print( 'FullRefToPredTuples = ' + str(D.FullRefToPredTuples) )
if VERBOSE: print( 'WeakRefToPredTuples = ' + str(D.WeakRefToPredTuples) )
if VERBOSE: print( 'BareRefToPredTuples = ' + str(D.BareRefToPredTuples) )
def constrainingTuplesFromSups( x ):
return [ ptup for ptup in D.PredTuples if x in ptup[1:] ] + [ ptup for _,xHi in D.Inhs.get(x,{}).items() for ptup in constrainingTuplesFromSups( xHi ) ]
def constrainingTuplesFromSubs( x ):
return [ ptup for ptup in D.PredTuples if x in ptup[1:] ] + [ ptup for xLo in D.Subs.get(x,[]) for ptup in constrainingTuplesFromSubs( xLo ) ]
D.ConstrainingTuples = { x : sets.Set( constrainingTuplesFromSups(x) + constrainingTuplesFromSubs(x) ) for x in D.Referents }
## Calculate ceilings of scoped refts...
# D.AnnotatedCeilings = sets.Set([ y for y in D.Referents for x in D.Scopes.keys() if D.ceiling(x) in D.Chains[y] ]) #D.Chains[D.ceiling(x)] for x in D.Scopes.keys() ])
# if len(D.AnnotatedCeilings) == 0:
# D.AnnotatedCeilings = sets.Set( sorted([ (len(chain),chain) for x,chain in D.Chains.items() if x.startswith('000') ])[-1][1] ) # sets.Set(D.Chains['0001s'])
# print( '#NOTE: Discourse contains no scope annotations -- defining root as longest chain through first sentence: ' + str(sorted(D.AnnotatedCeilings)) )
# sys.stderr.write( 'NOTE: Discourse contains no scope annotations -- defining root as longest chain through first sentence: ' + str(sorted(D.AnnotatedCeilings)) + '\n' )
# DisjointCeilingPairs = [ (x,y) for x in D.AnnotatedCeilings for y in D.AnnotatedCeilings if x<y and not D.reachesInChain( x, y ) ]
# if len(DisjointCeilingPairs) > 0:
# print( '#WARNING: Maxima of scopal annotations are disjoint: ' + str(DisjointCeilingPairs) + ' -- disconnected annotations cannot all be assumed dominant.' )
# sys.stderr.write( 'WARNING: Maxima of scopal annotations are disjoint: ' + str(DisjointCeilingPairs) + ' -- disconnected annotations cannot all be assumed dominant.\n' )
# if VERBOSE: print( 'AnnotatedCeilings = ' + str(D.AnnotatedCeilings) )
# D.NotOutscopable = [ x for x in D.Referents if D.ceiling(x) in D.AnnotatedCeilings ]
# if VERBOSE: print( 'NotOutscopable = ' + str(D.NotOutscopable) )
D.PredToTuple = { xOrig : ptup for ptup in D.PredTuples for xOrig in D.Chains[ ptup[1] ] }
if VERBOSE: print( 'PredToTuple = ' + str(D.PredToTuple) )
def allInherited( src ):
Out = []
for lbl,dst in D.Inhs.get(src,{}).items():
if lbl!='w' and lbl!='o':
Out += [ dst ] + allInherited( dst )
return Out
D.AllInherited = { x : allInherited( x ) for x in D.Referents }
if VERBOSE: print( 'AllInherited = ' + str(D.AllInherited) )
def getCeils( D, xHi ):
# print( 'ceil of ' + xHi )
return D.getCeils( D.Scopes[xHi] ) if xHi in D.Scopes else sets.Set([ y for xLo in D.Subs.get(xHi,[]) for y in D.getCeils(xLo) ]) if len(D.Subs.get(xHi,[]))>0 else [ xHi ]
def ceiling( D, x ):
# X = sorted( D.getBossesInChain(x) )
Y = sorted( D.getCeils(x) ) #D.getBossesInChain(x) )[0]
# print( x + ' for ' + str(X) + ' vs ' + str(Y) )
# if len(Y) == 0: Y = [ x ]
return Y[0] if Y[0] in D.NuscoValues or Y[0] not in D.Nuscos else D.Nuscos[Y[0]][0]
def getHeirs( D, xHi ):
Out = [ xHi ]
for xLo in D.Subs.get(xHi,[]):
Out += D.getHeirs( xLo )
return Out
def getLegators( D, xLo ):
Out = [ xLo ]
for l,xHi in D.Inhs.get(xLo,{}).items():
Out += D.getLegators( xHi )
return Out
## Helper function to determine if one ref state outscopes another...
def reachesFromSup( D, xLo, xHi ):
# print( 'reachesFromSup ' + xLo + ' ' + xHi )
if any([ D.reachesInChain( D.Scopes[xNusco], xHi ) for xNusco in D.Nuscos.get(xLo,[]) if xNusco in D.Scopes ]): return True ## Outscoper of nusco is outscoper of restrictor.
return True if xLo in D.Chains.get(xHi,[]) else D.reachesInChain( D.Scopes[xLo], xHi ) if xLo in D.Scopes else any( [ D.reachesFromSup(xSup,xHi) for l,xSup in D.Inhs.get(xLo,{}).items() if l!='w' and l!='o' ] )
def reachesFromSub( D, xLo, xHi ):
# print( 'reachesFromSub ' + xLo + ' ' + xHi )
return True if xLo in D.Chains.get(xHi,[]) else D.reachesInChain( D.Scopes[xLo], xHi ) if xLo in D.Scopes else any( [ D.reachesFromSub(xSub,xHi) for xSub in D.Subs.get(xLo,[]) ] )
def reachesInChain( D, xLo, xHi ):
# print( 'reachesInChain ' + xLo + ' ' + xHi )
return D.reachesFromSup( xLo, xHi ) or D.reachesFromSub( xLo, xHi )
## Reach by traversing backward to heirs, then forward along scopes...
def reaches( D, xLo, xHi ):
# print( 'reaches ' + xLo + ' ' + xHi )
return True if xLo in D.Chains.get(xHi,[]) else D.reaches( D.Scopes[xLo], xHi ) if xLo in D.Scopes else any( [ D.reaches(xSub,xHi) for xSub in D.Subs.get(xLo,[]) ] )
'''
def reachesInChain( D, xLo, xHi ):
return True if xLo in D.Chains.get(xHi,[]) else any([ D.reachesInChain( D.Scopes[xCoChainer], xHi ) for xCoChainer in D.Chains.get(xLo,[]) if xCoChainer in D.Scopes ])
'''
## Helper function to determine if ref state is already connected to other ref state...
def alreadyConnected( D, xLo, xHi, Connected ):
# print( 'ceiling of ' + x + ' is ' + D.ceiling(x) )
# return ( xGoal == '' and any([ y in Connected for y in D.Chains[x] ]) ) or D.reachesInChain( x, xGoal )
return ( xHi == '' and xLo in Connected ) or any([ D.reaches(xLo,x) for x in D.Heirs.get(xHi,[]) ]) # D.reaches( x, xGoal )
# return ( xGoal == '' and D.ceiling( x ) in D.AnnotatedCeilings ) or D.reachesInChain( x, xGoal )
# return ( xGoal == '' and any([ D.ceiling( x ) in D.AnnotatedCeilings ] + [ D.ceiling( y ) in D.AnnotatedCeilings for l,y in D.Inhs.get(x,{}).items() if l!='w' and l!='o' ]) ) or D.reachesInChain( x, xGoal )
def weaklyConnected( D, x, xGoal, Connected ):
return ( xGoal == '' and any([ y in Connected for y in D.Chains[x] ]) ) or D.reaches( x, xGoal )
## Method to return list of scope (src,dst) pairs to connect target to goal...
def scopesToConnect( D, xTarget, xGoal, step, Connected, xOrigin=None ):
if VERBOSE: print( ' '*step + str(step) + ': trying to satisfy pred ' + xTarget + ' under goal ' + xGoal + '...' )
# print( [ xSub for xSub in D.Subs.get(xTarget,[]) if D.Inhs.get(xSub,{}).get('r','') != xTarget ] )
## If any non-'r' heirs, return results for heirs (elementary predicates are always final heirs)...
# if [] != [ xSub for xSub in D.Subs.get(xTarget,[]) ]:
def notOffOriginChain( xT, xS, xO ):
if xO == None: return True
if xO not in D.Chains.get(xT,[xT]) and D.Inhs.get(xO,{}).get('r','') not in D.Chains.get(xT,[xT]): return True
return xO in D.Chains.get(xS,[xS]) or D.Inhs.get(xO,{}).get('r','') in D.Chains.get(xS,[xS]) or xO in D.Chains.get(D.Inhs.get(xS,{}).get('r',''),[]) or D.Inhs.get(xO,{}).get('r','') in D.Chains.get(D.Inhs.get(xS,{}).get('r',''),[])
def possible( xLo, xMd, xHi ):
'''
return ( ( not D.alreadyConnected( xHi, xMd, Connected ) or D.alreadyConnected( xMd, xHi, Connected ) ) and
( not D.alreadyConnected( xLo, xHi, Connected ) or D.alreadyConnected( xMd, xHi, Connected ) ) )
'''
return ( ( not D.alreadyConnected( xMd, '' , Connected ) or D.alreadyConnected( xMd, xHi, Connected ) and D.alreadyConnected( xHi, '' , Connected ) ) and
( not D.alreadyConnected( xHi, xMd, Connected ) or D.alreadyConnected( xMd, xHi, Connected ) ) and
( not D.alreadyConnected( xLo, xHi, Connected ) or D.alreadyConnected( xLo, xMd, Connected ) and D.alreadyConnected( xMd, xHi, Connected ) ) )
ptup = D.PredToTuple.get( xTarget, () ) #[ xTarget ]
# ## Sanity check...
# if ptup[1] != xTarget:
# complain( 'too weird -- elem pred ' + xTarget + ' not equal to ptup[1]: ' + ptup[1] )
if len(ptup) > 0:
xLowest = ptup[1]
if len(ptup) > 2: xOther1 = ptup[2]
if len(ptup) > 3: xOther2 = ptup[3]
if len(ptup) > 2 and D.Scopes.get(ptup[2],'') == ptup[1]: xLowest,xOther1 = ptup[2],ptup[1]
if len(ptup) > 3 and D.Scopes.get(ptup[2],'') == ptup[1]: xLowest,xOther1,xOther2 = ptup[2],ptup[1],ptup[3]
if len(ptup) > 3 and D.Scopes.get(ptup[3],'') == ptup[1]: xLowest,xOther1,xOther2 = ptup[3],ptup[1],ptup[2]
## Report any cycles from participant to elementary predicate...
for x in ptup[2:]:
if D.reaches( x, xLowest ) and not x.endswith('\''):
complain( 'elementary predication ' + ptup[0] + ' ' + xLowest + ' should not outscope argument ' + x + ' -- unable to build complete expression!' )
return [(None,None)]
# if len(ptup) > 2 and D.alreadyConnected( xLowest, '', Connected ) and D.alreadyConnected( xOther1, '', Connected ):
# complain( 'elementary predication ' + ptup[0] + ' ' + ptup[1] + ' has (top-level) scope annotation but argument ' + xOther1 + ' does not -- unable to build complete expression!' )
# return [(None,None)]
# if len(ptup) > 2 and D.reachesInChain( xLowest, D.ceiling(xOther1) ) and not D.alreadyConnected( xLowest, xOther1, Connected ) and not D.alreadyConnected( xOther1, xLowest, Connected ):
# complain( 'arguments ' + xLowest + ' and ' + xOther1 + ' of elementary predication ' + ptup[0] + ' ' + ptup[1] + ' outscoped in different branches or components -- possibly due to disconnected scope annotations' + ' -- unable to build complete expression!' )
# return [(None,None)]
## If any participant / elem pred reaches all other participants, nothing to do...
if any([ all([ D.reaches(xLo,xHi) for xHi in ptup[1:] if xHi != xLo ]) for xLo in ptup[1:] ]): #all([ D.reachesInChain( xLowest, x ) for x in ptup[2:] ]):
# if VERBOSE: print( ' '*step + str(step) + ': all args of pred ' + ptup[0] + ' ' + ptup[1] + ' are reachable' )
if xGoal == '' or any([ D.reaches( x, xGoal ) for x in ptup[1:] ]): #D.reachesInChain( xLowest, xGoal ):
return []
elif D.alreadyConnected( ptup[1], '', Connected ):
complain( 'elementary predication ' + ptup[0] + ' ' + ptup[1] + ' is already fully bound, cannot become outscoped by goal referent ' + xGoal + ' -- unable to build complete expression!' )
return [(None,None)]
## Try heirs first...
L = [ sco for xSub in D.Subs.get( xTarget, [] ) if notOffOriginChain(xTarget,xSub,xOrigin) for sco in D.scopesToConnect( xSub, xGoal, step+1, Connected, xOrigin ) ]
if L != []: return L
# if [] != [ xSub for xSub in D.Subs.get(xTarget,[]) if D.Inhs.get(xSub,{}).get('r','') != xTarget ]:
# return [ sco for xSub in D.Subs.get( xTarget, [] ) if D.Inhs.get(xSub,{}).get('r','') != xTarget for sco in D.scopesToConnect( xSub, xGoal, step+1 ) ]
## If zero-ary (non-predicate)...
if xTarget not in D.PredToTuple:
if xGoal == '' or D.reaches( xTarget, xGoal ): return []
else:
if D.alreadyConnected( xTarget, '', Connected ):
complain( 'target ' + xTarget + ' and goal ' + xGoal + ' outscoped in different branches or components -- possibly due to disconnected scope annotations -- unable to build complete expression!' )
return [(None,None)]
else:
if VERBOSE: print( ' ' + ' '*step + str(step) + ': case 0 -- non-predicate ' + xTarget + ' under goal ' + xGoal )
return [ (xTarget,xGoal) ]
# else: complain( xTarget + ' is not already connected to goal and is not predicate, so cannot be outscoped by ' + xGoal )
## If unary predicate...
# if len( ptup ) == 3:
for xLowest,xOther1 in [ (xLowest,xOther1) ] if len(ptup)==3 else [ (ptup[1],ptup[3]) ] if len(ptup)==4 and D.Scopes.get(ptup[2],'')==ptup[1] else [ (ptup[1],ptup[2]) ] if len(ptup)==4 and D.Scopes.get(ptup[3],'')==ptup[1] else []:
'''
## Update any existing scopes to ensure scope chain targets are or are inherited by arguments...
if D.reachesInChain( xLowest, xOther1 ):
for xScopeParent in D.Heirs[ xOther1 ]:
for xScopeChild in [ xC for xC,xP in D.Scopes.items() if xP == xScopeParent if xP == xScopeParent and D.reachesInChain( xLowest, xC ) ]: #D.ScopeChildren.get( xScopeParent, {} ):
if xScopeParent != xOther1:
D.Scopes[ xScopeChild ] = xOther1
if VERBOSE: print( ' '*step + str(step) + ': changing heir scope ' + xScopeChild + ' ' + xScopeParent + ' to legator scope ' + xScopeChild + ' ' + xOther1 )
'''
## Flag unscopable configurations...
if ( D.reaches( xOther1, D.ceiling(xGoal) ) or D.reaches( xGoal, D.ceiling(xOther1) ) ) and not D.alreadyConnected( xOther1, xGoal, Connected ) and not D.alreadyConnected( xGoal, xOther1, Connected ):
complain( 'argument ' + xOther1 + ' and goal ' + xGoal + ' of elementary predication ' + ptup[0] + ' ' + ptup[1] + ' outscoped in different branches or components -- possibly due to disconnected scope annotations -- unable to build complete expression!' )
return [(None,None)]
if D.alreadyConnected( xOther1, xLowest, Connected ):
complain( 'elementary predication ' + ptup[0] + ' ' + xLowest + ' should not outscope argument ' + xOther1 + ' -- unable to build complete expression!' )
return [(None,None)]
if xLowest==ptup[1] and D.alreadyConnected( xOther1, D.ceiling(xLowest), Connected ):
complain( 'elementary predication ' + ptup[0] + ' ' + xLowest + ' in separate branch from argument ' + xOther1 + ' -- unable to build complete expression!' )
return [(None,None)]
if xLowest==ptup[1] and D.alreadyConnected( xLowest, '', Connected ) and not D.alreadyConnected( xLowest, xOther1, Connected ):
complain( 'elementary predication ' + ptup[0] + ' ' + xLowest + ' already connected, but excludes argument ' + xOther1 + ' -- unable to build complete expression!' )
return [(None,None)]
## Recommend scopes...
if D.alreadyConnected( xGoal, xOther1, Connected ):
if VERBOSE: print( ' ' + ' '*step + str(step) + ': case 1a -- ' + xLowest + ' under goal ' + xGoal + ' under ' + xOther1 )
return ( [ (xLowest,xGoal) ] if xLowest == ptup[1] else [] )
else:
if VERBOSE: print( ' ' + ' '*step + str(step) + ': case 1b -- ' + xLowest + ' under ' + xOther1 + ' under goal ' + xGoal )
return ( [ (xLowest,xOther1) ] if xLowest == ptup[1] else [] ) + ( D.scopesToConnect( xOther1, xGoal, step+1, Connected, xOrigin ) if xOther1 != ptup[1] else [ (xOther1,xGoal) ] )
## If binary predicate...
if len( ptup ) == 4:
'''
## Update any existing scopes to ensure scope chain targets are or are inherited by arguments...
for xLo,xHi in [ (xLowest,xOther1), (xLowest,xOther2), (xOther1,xLowest), (xOther1,xOther2), (xOther2,xLowest), (xOther2,xOther1) ]:
if D.reachesInChain( xLo, xHi ):
for xScopeParent in D.Heirs[ xHi ]:
for xScopeChild in [ xC for xC,xP in D.Scopes.items() if xP == xScopeParent and D.reachesInChain( xLo, xC ) ]:
if xScopeParent != xHi:
D.Scopes[ xScopeChild ] = xHi
if VERBOSE: print( ' '*step + str(step) + ': changing heir scope ' + xScopeChild + ' ' + xScopeParent + ' to legator scope ' + xScopeChild + ' ' + xHi )
'''
## Flag unscopable configurations...
if ( D.alreadyConnected( xOther1, xGoal, Connected ) and D.alreadyConnected( xOther2, xGoal, Connected ) or D.alreadyConnected( xOther1, '', Connected ) and D.alreadyConnected( xOther2, '', Connected ) ) and not D.alreadyConnected( xOther1, xOther2, Connected ) and not D.alreadyConnected( xOther2, xOther1, Connected ):
complain( 'arguments ' + xOther1 + ' and ' + xOther2 + ' of elementary predication ' + ptup[0] + ' ' + ptup[1] + ' outscoped in different branches or components -- possibly due to disconnected scope annotations' + ' -- unable to build complete expression! (1)' )
return [(None,None)]
# if D.alreadyConnected( xOther1, xOther2, Connected ) and D.alreadyConnected( xGoal, xOther2, Connected ) and not D.alreadyConnected( xOther1, xGoal, Connected ) and not D.alreadyConnected( xGoal, xOther1, Connected ):
if ( D.reaches( xOther1, D.ceiling(xGoal) ) or D.reaches( xGoal, D.ceiling(xOther1) ) ) and not D.alreadyConnected( xOther1, xGoal, Connected ) and not D.alreadyConnected( xGoal, xOther1, Connected ):
complain( 'argument ' + xOther1 + ' and goal ' + xGoal + ' of elementary predication ' + ptup[0] + ' ' + ptup[1] + ' outscoped in different branches or components -- possibly due to disconnected scope annotations' + ' -- unable to build complete expression!' )
return [(None,None)]
# if D.alreadyConnected( xOther2, xOther1, Connected ) and D.alreadyConnected( xGoal, xOther1, Connected ) and not D.alreadyConnected( xOther2, xGoal, Connected ) and not D.alreadyConnected( xGoal, xOther2, Connected ):
if ( D.reaches( xOther2, D.ceiling(xGoal) ) or D.reaches( xGoal, D.ceiling(xOther2) ) ) and not D.alreadyConnected( xOther2, xGoal, Connected ) and not D.alreadyConnected( xGoal, xOther2, Connected ):
complain( 'argument ' + xOther2 + ' and goal ' + xGoal + ' of elementary predication ' + ptup[0] + ' ' + ptup[1] + ' outscoped in different branches or components -- possibly due to disconnected scope annotations' + ' -- unable to build complete expression!' )
return [(None,None)]
      if ( D.reaches( xOther1, D.ceiling(xOther2) ) or D.reaches( xOther2, D.ceiling(xOther1) ) ) and not D.alreadyConnected( xOther2, xOther1, Connected ) and not D.alreadyConnected( xOther1, xOther2, Connected ):
complain( 'arguments ' + xOther1 + ' and ' + xOther2 + ' of elementary predication ' + ptup[0] + ' ' + ptup[1] + ' outscoped in different branches or components -- possibly due to disconnected scope annotations' + ' -- unable to build complete expression! (2)' )
return [(None,None)]
## Recommend scopes...
## Try short-circuit to refine inherited scope...
for xLo,xMd,xHi in [ (xLowest,xOther2,xOther1), (xLowest,xOther1,xOther2) ]: #if xLegMd != xMd
if any([ D.Scopes.get(xLegMd,'') == xLegHi for xLegMd in D.Heirs.get(xMd,[]) for xLegHi in D.Heirs.get(xHi,[]) ]):
for x in D.Nuscos.get(xMd,[]) + ( [ xMd ] if not xMd in D.Nuscos else [] ):
if x in D.Subs: # if not redundant nusco.
if VERBOSE: print( ' ' + ' '*step + str(step) + ': case 2a -- short-circuit to refine scope ' + x + ' to ' + xHi )
D.Scopes[ x ] = xHi #return [ (xMd,xHi) ] # D.Scopes[ xMd ] = xHi
## Try low, goal, mid, hi...
if D.reaches( xGoal, xOther1 ) and D.reaches( xGoal, xOther2 ):
if VERBOSE: print( ' ' + ' '*step + str(step) + ': case 2b -- ' + xLowest + ' under goal ' + xGoal + ' under {' + xOther1 + ',' + xOther2 + '}' )
return ( [ (xLowest,xGoal) ] if xLowest == ptup[1] else [] )
## Try low, mid, goal, hi...
for xLo,xMd,xHi in [ (xLowest,xOther2,xOther1), (xLowest,xOther1,xOther2) ]:
if D.reaches( xGoal, xHi ):
if VERBOSE: print( ' ' + ' '*step + str(step) + ': case 2c -- ' + xLo + ' under ' + xMd + ' under goal ' + xGoal + ' under ' + xHi )
return ( [ (xLo,xMd) ] if xLo == ptup[1] else [] ) + ( [ (xMd,xGoal) ] if xMd == ptup[1] else D.scopesToConnect( xMd, xGoal, step+1, Connected, xOrigin ) )
# if D.reachesInChain( xGoal, xOther1 ):
# if VERBOSE: print( ' ' + ' '*step + str(step) + ': case 1' )
# return ( [ (xLowest,xOther2) ] if xLowest == ptup[1] else [] ) + ( D.scopesToConnect( xOther2, xGoal, step+1, Connected ) if xOther2 != ptup[1] else [ (xOther2,xGoal) ] )
# if D.reachesInChain( xGoal, xOther2 ):
# if VERBOSE: print( ' ' + ' '*step + str(step) + ': case 2' )
# return ( [ (xLowest,xOther1) ] if xLowest == ptup[1] else [] ) + ( D.scopesToConnect( xOther1, xGoal, step+1, Connected ) if xOther1 != ptup[1] else [ (xOther1,xGoal) ] )
# print( 'D.alreadyConnected( ' + xOther1 + ', ' + xGoal + ' ) = ' + str( D.alreadyConnected( xOther1, xGoal, Connected ) ) + 'D.reachesInChain( ' + xOther1 + ', ' + xOther2 + ' ) = ' + str( D.reachesInChain( xOther1, xOther2 ) ) )
# ## Try (strongly connected) low, mid, hi, goal...
# for xLo,xMd,xHi in [ (xLowest,xOther1,xOther2), (xLowest,xOther2,xOther1) ]:
# if D.alreadyConnected( xHi, xGoal, Connected ) and not D.reachesInChain( xHi, xMd ) and possible( xLo, xMd, xHi ):
# if VERBOSE: print( ' ' + ' '*step + str(step) + ': case 2b -- ' + xLo + ' under ' + xMd + ' under ' + xHi + ' under goal ' + xGoal )
# return ( [ (xLowest,xMd) ] if xLo == ptup[1] else [] ) + ( [ (xMd,xHi) ] if xMd == ptup[1] else D.scopesToConnect( xMd, xHi, step+1, Connected, xOrigin ) )
## Try low, mid, hi, goal...
for xLo,xMd,xHi in [ (xLowest,xOther1,xOther2), (xLowest,xOther2,xOther1) ]:
if D.weaklyConnected( xHi, xGoal, Connected ) and not D.reaches( xHi, xMd ) and possible( xLo, xMd, xHi ):
if VERBOSE: print( ' ' + ' '*step + str(step) + ': case 2d -- ' + xLo + ' under ' + xMd + ' under ' + xHi + ' under goal ' + xGoal )
return ( [ (xLowest,xMd) ] if xLo == ptup[1] else [] ) + ( [ (xMd,xHi) ] if xMd == ptup[1] else D.scopesToConnect( xMd, xHi, step+1, Connected, xOrigin ) )
# if D.alreadyConnected( xOther1, xGoal, Connected ) and not D.reachesInChain( xOther1, xOther2 ):
# if VERBOSE: print( ' ' + ' '*step + str(step) + ': case 3' )
# return ( [ (xLowest,xOther2) ] if xLowest == ptup[1] else [] ) + ( D.scopesToConnect( xOther2, xOther1, step+1, Connected ) if xOther2 != ptup[1] else [ (xOther2,xOther1) ] )
# if D.alreadyConnected( xOther2, xGoal, Connected ) and not D.reachesInChain( xOther2, xOther1 ):
# if VERBOSE: print( ' ' + ' '*step + str(step) + ': case 4' )
# return ( [ (xLowest,xOther1) ] if xLowest == ptup[1] else [] ) + ( D.scopesToConnect( xOther1, xOther2, step+1, Connected ) if xOther1 != ptup[1] else [ (xOther1,xOther2) ] )
if xGoal == '' and xOther2 in D.getHeirs( xOther1 ):
if VERBOSE: print( ' ' + ' '*step + str(step) + ': case 2e -- no goal ' + xLowest + ' under ' + xOther2 + ' under ' + xOther1 )
return ( [ (xLowest,xOther2) ] if xLowest == ptup[1] else [] ) + ( [ (xOther2,xOther1) ] if xOther2 == ptup[1] else D.scopesToConnect( xOther2, xOther1, step+1, Connected, xOrigin ) )
if xGoal == '':
for xLo,xMd,xHi in [ (xLowest,xOther1,xOther2), (xLowest,xOther2,xOther1) ]:
if possible( xLo, xMd, xHi ):
if VERBOSE: print( ' ' + ' '*step + str(step) + ': case 2f -- no goal ' + xLo + ' under ' + xMd + ' under ' + xHi )
return ( [ (xLo,xMd) ] if xLo == ptup[1] else [] ) + ( [ (xMd,xHi) ] if xMd == ptup[1] else D.scopesToConnect( xMd, xHi, step+1, Connected, xOrigin ) )
else:
for xLo,xMd,xHi in [ (xLowest,xOther1,xOther2), (xLowest,xOther2,xOther1) ]:
if possible( xLo, xMd, xHi ):
if VERBOSE: print( ' ' + ' '*step + str(step) + ': case 2g -- no goal, no constraints ' + xLo + ' under ' + xMd + ' under ' + xHi )
return ( [ (xLo,xMd) ] if xLo == ptup[1] else [] ) + ( [ (xMd,xHi) ] if xMd == ptup[1] else D.scopesToConnect( xMd, xHi, step+1, Connected, xOrigin ) ) + ( D.scopesToConnect( xHi, xGoal, step+1, Connected, xOrigin ) if xHi != ptup[1] else [ (xHi,xGoal) ] )
#complain( 'predicate ' + xLowest + ' with goal ' + xGoal + ' not sufficiently constrained; danger of garden-pathing' )
complain( 'found no way to order ' + xLowest + ' ' + xOther1 + ' ' + xOther2 + ' -- unable to build complete expression!' )
return [(None,None)]
## If trinary and higher predicates...
else:
complain( 'no support for super-binary predicates: ' + ' '.join(ptup) + ' -- unable to build complete expression!' )
return [(None,None)]
def constrainDeepestReft( D, xTarg, step, Connected, xOrigin=None ):
if VERBOSE: print( ' '*step + str(step) + ': recursing to ' + xTarg + '...' )
## First, recurse down scopes (sorting children from last to first)...
for xLo,xHi in sorted( D.Scopes.items(), reverse=True ):
if xHi in D.Legators.get(xTarg,[]): #xHi == xTarg:
l = D.constrainDeepestReft( xLo, step+1, Connected, xLo )
if l != []: return l
## Second, try all preds...
for ptup,_ in D.BareRefToPredTuples.get( xTarg, [] ): #D.FullRefToPredTuples.get( xTarg, [] ) if isFull else D.WeakRefToPredTuples.get( xTarg, [] ):
if ptup[1] not in Connected:
l = D.scopesToConnect( ptup[1], '', step+1, Connected, xOrigin )
if l != []: return l
## Third, recurse up inheritances...
for lbl,xLeg in D.Inhs.get(xTarg,{}).items():
if lbl != 't':
l = D.constrainDeepestReft( xLeg, step+1, Connected, xOrigin )
if l != []: return l
return []
'''
def constrainDeepestReft( D, xTarg, step, Connected, xOrigin=None ):
if VERBOSE: print( ' '*step + str(step) + ': recursing to ' + xTarg + '...' )
## If any non-'r' heirs, return results for heirs (elementary predicates are always final heirs)...
# if [] != [ xSub for xSub in D.Subs.get( xTarg, [] ) ]:
# return [ sco for xSub in D.Subs.get( xTarg, [] ) for sco in D.constrainDeepestReft( xSub, step+1, Connected, isFull ) ]
for xSub in D.Subs.get( xTarg, [] ):
# if xOrigin == None or xOrigin in D.Chains.get(xSub,[xSub]) or D.Inhs.get(xOrigin,{}).get('r','') in D.Chains.get(xSub,[xSub]):
if xOrigin == None or xOrigin in D.Chains.get(xSub,[xSub]):
l = D.constrainDeepestReft( xSub, step+1, Connected, xOrigin )
if l != []: return l
## First, recurse down scope tree...
# for x in D.Chains.get( D.Inhs.get(xTarg,{}).get('r',xTarg), D.Chains[xTarg] ):
for xLo,xHi in D.Scopes.items():
if xHi == xTarg:
# for xLeg in D.TopLegators.get( xLo, sets.Set([xLo]) ) | D.TopLegators.get( D.Inhs.get(xLo,{}).get('r',''), sets.Set([]) ) if isFull else D.TopUnaryLegators.get( xLo, [xLo] ):
for xLeg in D.TopLegators.get( xLo, sets.Set([xLo]) ):
l = D.constrainDeepestReft( xLeg, step+1, Connected, xLo ) #D.Inhs.get(xLo,{}).get('r',xLo) )
if l != []: return l
## Second, try all preds...
# for x in D.Chains[ xTarg ]:
for ptup,_ in D.BareRefToPredTuples.get( xTarg, [] ): #D.FullRefToPredTuples.get( xTarg, [] ) if isFull else D.WeakRefToPredTuples.get( xTarg, [] ):
if ptup[1] not in Connected:
l = D.scopesToConnect( ptup[1], '', step+1, Connected, xOrigin )
if l != []: return l
return []
'''
## Method to fill in deterministic or truth-functionally indistinguishable scope associations (e.g. for elementary predications) that are not explicitly annotated...
def tryScope( D, xTarget, Connected, step=1 ):
if VERBOSE: print( 'Connected = ' + str(sorted(Connected)) )
active = True
while active:
active = False
if VERBOSE: print( ' '*step + 'GRAPH: ' + D.strGraph() )
## Calculate recommended scopings...
l = []
for xFinHeir in D.Heirs.get( xTarget, [] ):
if xFinHeir not in D.Subs:
l = D.constrainDeepestReft( xFinHeir, step+1, Connected )
if l != []: break
'''
l = D.constrainDeepestReft( xTarget, step+1, Connected )
'''
if VERBOSE: print( ' '*step + str(step) + ' l=' + str(l) )
## Add recommended scopings...
for xLo,xHi in sets.Set(l):
## Bail on fail...
if xLo == None: return False
## Create additional scope in chain to avoid inheriting from wrong heir, unbound variables...
if xLo not in D.Scopes and any([ D.Scopes.get(x,[])==xHi for x in D.Chains.get(xLo,[]) ]):
if VERBOSE: print( ' '*step + str(step) + ' multi-scoping ' + D.ceiling(xLo) + ' to ' + xHi )
D.Scopes[ xLo ] = xHi
if D.alreadyConnected( xLo, xHi, Connected ):
if VERBOSE: print( 'CODE REVIEW: WHY SUGGEST SCOPES ALREADY CONNECTED: ' + xLo + ' ' + xHi )
continue
if any([ D.reaches( xHi, x ) for x in D.getCeils(xLo) ]): #D.reaches( xHi, D.ceiling(xLo) ):
complain( 'combination of scopes involving ' + xLo + ' with ceiling ' + D.ceiling(xLo) + ' to ' + xHi + ' creates cycle -- unable to build complete expression' )
if VERBOSE: print( ' '*step + 'GRAPH: ' + D.strGraph() )
return False
## Report and construct scope association...
if VERBOSE: print( ' '*step + str(step) + ' intended scoping ' + xLo + ' w ceiling ' + D.ceiling(xLo) + ' to ' + xHi )
for x in D.getCeils(xLo):
if VERBOSE: print( ' '*step + str(step) + ' actual scoping ' + x + ' to ' + xHi )
D.Scopes[ x ] = xHi
# D.Scopes[ D.ceiling(xLo) ] = xHi
## Update recently connected...
Connected.extend( D.Chains.get(xLo,sets.Set([])) | D.Chains.get( D.Inhs.get(xLo,{}).get('r',''), sets.Set([]) ) )
if VERBOSE: print( 'Adding to Connected: ' + str(D.Chains.get(xLo,sets.Set([])) | D.Chains.get( D.Inhs.get(xLo,{}).get('r',''), sets.Set([]) ) ) )
active = True
if VERBOSE: D.check()
return True
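# Hedged driver sketch (the real driver lives elsewhere in ModelBlocks; the input
# line format is whatever discgraph.DiscGraph.__init__ expects, and the choice of
# root referent and initial Connected list below is illustrative only):
#
#     for line in sys.stdin:
#         D = InducibleDiscGraph( line )
#         Connected = []
#         if D.tryScope( some_root_referent, Connected ):   # hypothetical root referent
#             print( D.strGraph() )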
|
modelblocks/modelblocks-release
|
resource-gcg/scripts/induciblediscgraph.py
|
Python
|
gpl-3.0
| 35,189 | 0.036204 |
import sys, os
import visit_rcParams as vrc
import visit_colormaps as vc
import visit_time as vt
import visit as v
rcParams = dict( surface_linewidth=3,
snow_linewidth=4
)
class Operator:
def __init__(self, oname, otype, oatts):
self.oname = oname
self.otype = otype
self.oatts = oatts
class Plot:
def __init__(self, pname, ptype, patts, varname=None):
"""Plot class"""
self.pname = pname
self.varname = varname
self.ptype = ptype
self.patts = patts
self.operators = []
self.annot = v.GetAnnotationObject(pname)
self.annot.fontFamily = vrc.getDefaultFont()
self.annot.fontHeight = vrc.rcParams['legend.fontheight']
self.annot.managePosition = 0
self.annot.xScale = vrc.rcParams['legend.scale'][0]
self.annot.yScale = vrc.rcParams['legend.scale'][1]
self.annot.position = vrc.rcParams['legend.position']
self.annot.drawMinMax = vrc.rcParams['legend.minmax']
if varname is not None:
self.annot.drawTitle = 0
self.title = v.CreateAnnotationObject("Text2D")
self.title.text = varname
self.title.fontFamily = vrc.getDefaultFont()
self.title.height = vrc.rcParams['legend.title.fontheight']
self.title.position = vrc.rcParams['legend.title.position']
def getLimits(self):
assert self.ptype == 'Pseudocolor'
min = None
max = None
if self.patts.minFlag:
min = self.patts.min
if self.patts.maxFlag:
            max = self.patts.max
return min,max
class VisItWindow:
"""Class for a window"""
class Slice:
"""Helper class for slicing into 2D"""
def __init__(self, point=None, normal=None):
if point is None:
point = (0,0,0)
if normal is None:
normal = (0,-1,0)
assert type(point) is tuple
assert len(point) == 3
assert type(normal) is tuple
assert len(normal) == 3
self.point = point
self.normal = normal
def toAttributes(self):
s = v.SliceAttributes()
s.originType = s.Point
s.originPoint = self.point
s.normal = self.normal
s.axisType = s.Arbitrary
return s
def __init__(self, index):
self.i = index
self.annot = vrc.getAnnotationAttributes()
self.setDimension(3)
self.plots = []
self.nonplots = [] # other objects like meshes
self._slice = None
self.exaggeration = None
def setDimension(self, dim):
"""Sets the dimension, which is used in controlling the view"""
self.dim = dim
if dim == 2:
self.view = v.GetView2D()
elif dim == 3:
self.view = v.GetView3D()
else:
raise RuntimeError("Invalid dimension %s"%str(dim))
def slice(self, point=None, normal=None):
"""Registers a slice -- this is not immediately added"""
self._slice = self.Slice(point, normal)
self.setDimension(2)
def exaggerateVertical(self, factor):
"""Registers an exxageration -- this is not immediately added"""
self.exaggeration = factor
def _exaggerateVertical(self):
if self.dim == 3:
self.view.axis3DScaleFlag = 1
self.view.axis3DScales = (self.view.axis3DScales[0],
self.view.axis3DScales[1],
self.exaggeration)
else:
for i,plot in enumerate(self.plots):
done = False
for op in plot.operators:
if "exaggerate_vertical" == op.oname:
done = True
if not done:
print "transforming plot %d..."%i
tr = v.TransformAttributes()
tr.doScale = 1
tr.scaleY = self.exaggeration
v.SetActivePlots(i)
v.AddOperator("Transform")
v.SetOperatorOptions(tr)
plot.operators.append(Operator("exaggerate_vertical", "Transform", tr))
def createMesh(self, color='w', opacity=0.15, silo=False):
_colors = dict(w=(255,255,255,255),
k=(0,0,0,255),
                       gray=(175,175,175,255),
)
if silo:
v.AddPlot('Mesh', "mesh")
else:
v.AddPlot('Mesh', "Mesh")
ma = v.MeshAttributes()
ma.legendFlag = 0
ma.meshColor = _colors[color]
ma.meshColorSource = ma.MeshCustom
if (opacity < 1.):
ma.opaqueMode = ma.On
ma.opacity = opacity
v.SetPlotOptions(ma)
pname = v.GetPlotList().GetPlots(v.GetNumPlots()-1).plotName
if silo:
plot = Plot(pname, 'mesh', ma)
else:
plot = Plot(pname, 'Mesh', ma)
self.nonplots.append(plot)
return plot
def createPseudocolor(self, varname, display_name=None, cmap=None,
limits=None, linewidth=None, legend=True, alpha=False):
"""Generic creation of pseudocolor"""
if display_name is None:
display_name = vrc.renameScalar(varname)
if "temperature" in display_name:
display_name = display_name.replace("[K]", "[C]")
print "defining alias: %s = %s"%(display_name, varname)
v.DefineScalarExpression(display_name, "<%s> - 273.15"%varname)
elif display_name != varname:
print "defining alias: %s = %s"%(display_name, varname)
v.DefineScalarExpression(display_name, '<'+varname+'>')
v.AddPlot('Pseudocolor', display_name)
pa = v.PseudocolorAttributes()
# limits
if limits is None:
limits = vrc.getLimits(varname)
if limits is not None:
min = limits[0]
max = limits[1]
if min is not None:
pa.minFlag = 1
pa.min = min
if max is not None:
pa.maxFlag = 1
pa.max = max
# opacity
if alpha:
pa.opacity = 0
pa.opacityType = pa.ColorTable
# colormap
if cmap is not None:
reverse = cmap.endswith("_r")
if reverse:
                cmap = cmap[:-2]  # drop the "_r" suffix; str.strip() removes characters, not a suffix
pa.invertColorTable = 1
pa.colorTableName = cmap
# linewidth for 2D
if linewidth is None:
linewidth = vrc.rcParams['pseudocolor.linewidth']
pa.lineWidth = linewidth
# turn off legend for 2D surf
if not legend:
pa.legendFlag = 0
v.SetActivePlots(len(self.plots)+1)
v.SetPlotOptions(pa)
pname = v.GetPlotList().GetPlots(v.GetNumPlots()-1).plotName
if legend:
plot = Plot(pname, 'Pseudocolor', pa, display_name)
else:
plot = Plot(pname, 'Pseudocolor', pa)
self.plots.append(plot)
return plot
def createContour(self, varname, value, color=None, linewidth=None):
"""Generic creation of a single contour without a legend"""
v.AddPlot('Contour', varname)
ca = v.ContourAttributes()
ca.contourMethod = ca.Value
ca.contourValue = (value,)
ca.colorType = ca.ColorBySingleColor
if color is None:
color = vrc.rcParams['contour.color']
if type(color) is str:
color = vc.common_colors[color]
ca.singleColor = color
if linewidth is None:
linewidth = vrc.rcParams['contour.linewidth']
ca.lineWidth = linewidth
# turn off legend for 2D surf
ca.legendFlag = 0
v.SetPlotOptions(ca)
pname = v.GetPlotList().GetPlots(v.GetNumPlots()-1).plotName
plot = Plot(pname, 'Contour', ca)
self.plots.append(plot)
return plot
def draw(self):
print "drawing window %d of dimension %d"%(self.i,self.dim)
v.SetActiveWindow(self.i)
v.SetAnnotationAttributes(self.annot)
if self.dim == 2:
# add the slice
assert self._slice is not None
for i,plot in enumerate(self.plots):
sliced = False
for op in plot.operators:
if "slice" == op.oname:
sliced = True
if not sliced:
print "slicing plot %d..."%i
v.SetActivePlots(i)
v.AddOperator("Slice")
sa = self._slice.toAttributes()
v.SetOperatorOptions(sa)
plot.operators.append(Operator("slice", "Slice", sa))
if self.exaggeration is not None:
print "exaggerating..."
self._exaggerateVertical()
# set the plot options
for i, plot in enumerate(self.plots):
print "setting plot options for plot %i..."%i
v.SetActivePlots(i)
v.SetPlotOptions(plot.patts)
# set the view
print "setting the view..."
if self.dim == 2:
v.SetView2D(self.view)
else:
v.SetView3D(self.view)
print "drawing..."
v.DrawPlots()
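# Typical use of a VisItWindow, shown as a sketch (in this module windows are
# normally created and managed through the Vis/ATSVis containers below rather
# than instantiated directly; the variable name and limits are illustrative):
#
#     win = VisItWindow(1)
#     win.createPseudocolor("temperature.cell.0", cmap="hot", limits=(263.15, 283.15))
#     win.slice(point=(0, 0, 0), normal=(0, -1, 0))   # cut the 3D mesh down to 2D
#     win.exaggerateVertical(10.0)                    # stretch z for thin domains
#     win.draw()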
class Vis:
"""Container class for windows, also manages sources and correlations"""
def __init__(self, directory, hostname="localhost", n_windows=1):
self.directory = directory
self.hostname = hostname
self.windows = []
self._active_window = 0
for i in range(n_windows):
self.addWindow()
self.setActiveWindow(1)
# if self.hostname != "localhost":
# args = []
# v.OpenMDServer(self.hostname,args)
# v.OpenComputeEngine(self.hostname,args)
def loadSources(self, prefix="visdump_data",
surface_prefix="visdump_surface_data", filetype="xdmf"):
"""Loads source files for subsurface and potentially surface."""
if prefix is None:
self.subsurface_src = None
else:
if filetype == "xdmf":
self.subsurface_src = ":".join((self.hostname, os.path.join(self.directory, "%s.VisIt.xmf"%prefix)))
elif filetype == "silo":
self.subsurface_src = ":".join((self.hostname, os.path.join(self.directory, "%s.silo"%prefix)))
if surface_prefix is None:
self.surface_src = None
else:
if filetype == "xdmf":
self.surface_src = ":".join((self.hostname, os.path.join(self.directory, "%s.VisIt.xmf"%surface_prefix)))
elif filetype == "silo":
self.surface_src = ":".join((self.hostname, os.path.join(self.directory, "%s.silo"%surface_prefix)))
# open the subsurface database
if self.subsurface_src is not None:
v.OpenDatabase(self.subsurface_src)
if surface_prefix is not None:
# open the surface database
v.OpenDatabase(self.surface_src)
if prefix is not None:
# create the database correlation
v.CreateDatabaseCorrelation("my_correlation", (self.subsurface_src, self.surface_src), 0)
v.SetActiveTimeSlider("my_correlation")
# create vector expressions for ponded depth and snow depth
v.DefineVectorExpression("ponded_depth_displace", "{0,0,ponded_depth.cell.0}")
v.DefineVectorExpression("snow_displace", "{0,0,snow_depth.cell.0+ponded_depth.cell.0}")
def loadSourcesList(self, srclist):
"""A generic set of sources."""
self.src = [":".join((self.hostname, os.path.join(self.directory, s))) for s in srclist]
for s in self.src:
v.OpenDatabase(s)
def unloadSources(self):
v.DeleteAllPlots()
if self.subsurface_src is not None:
v.CloseDatabase(self.subsurface_src)
if self.surface_src is not None:
v.CloseDatabase(self.surface_src)
def addWindow(self):
"""Adds a window to VisIt and makes it active"""
if len(self.windows) != 0:
v.AddWindow()
win = VisItWindow(len(self.windows)+1)
self.windows.append(win)
self.setActiveWindow(len(self.windows))
v.ToggleLockTime()
def getActiveWindow(self):
return self.windows[self._active_window-1]
def setActiveWindow(self, i):
assert 0 < i <= len(self.windows)
self._active_window = i
v.SetActiveWindow(i)
def activateSurface(self):
v.ActivateDatabase(self.surface_src)
def activateSubsurface(self):
v.ActivateDatabase(self.subsurface_src)
def activateSource(self, src):
v.ActivateDatabase(src)
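# Sketch of driving these containers from a VisIt CLI session (hedged: the run
# directory is a placeholder and assumes the default visdump_data /
# visdump_surface_data prefixes; ATSVis below adds the ATS-specific plot helpers):
#
#     vis = ATSVis("/path/to/ats_run", n_windows=2)
#     vis.loadSources()
#     vis.plotTemperature(window=1)
#     vis.plotPondedDepth(window=2)
#     for win in vis.windows:
#         win.draw()
#     vis.writeTime()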
class ATSVis(Vis):
def __init__(self, *args, **kwargs):
Vis.__init__(self, *args, **kwargs)
self.time_annot = None
def createSubsurfacePseudocolor(self, varname, limits=None, cmap=None, window=None):
"""Simplified interface to create standard pseudocolors"""
self.activateSubsurface()
if window is not None:
self.setActiveWindow(window)
win = self.getActiveWindow()
return win.createPseudocolor(varname, limits=limits, cmap=cmap,
legend=True)
def createSurfacePseudocolor(self, varname, limits=None, cmap=None, window=None,
displace=True, alpha=False, legend=False):
"""Simplified interface to create standard pseudocolors on the surface."""
self.activateSurface()
if window is not None:
self.setActiveWindow(window)
win = self.getActiveWindow()
pcolor = win.createPseudocolor(varname, limits=limits, cmap=cmap,
legend=legend, alpha=alpha)
if displace:
# deform by surface vector
v.AddOperator("Displace")
da = v.DisplaceAttributes()
da.variable = "ponded_depth_displace"
v.SetOperatorOptions(da)
pcolor.operators.append(Operator("displace", "Displace", da))
return pcolor
def createSnowPseudocolor(self, varname, limits=None, cmap=None, window=None, legend=False):
"""Simplified interface to create standard pseudocolors on the snow surface."""
self.activateSurface()
if window is not None:
self.setActiveWindow(window)
win = self.getActiveWindow()
if cmap is None:
cmap = "hot"
pcolor = win.createPseudocolor(varname, limits=limits, cmap=cmap,
legend=legend)
# deform by surface vector
v.AddOperator("Displace")
da = v.DisplaceAttributes()
da.variable = "snow_displace"
v.SetOperatorOptions(da)
pcolor.operators.append(Operator("displace", "Displace", da))
return pcolor
def createSubsurfaceContour(self, varname, value, window=None, color=None,
linewidth=None):
self.activateSubsurface()
if window is not None:
self.setActiveWindow(window)
win = self.getActiveWindow()
return win.createContour(varname, value, color=color, linewidth=linewidth)
def plotPressure(self, window=None, limits=None):
"""Adds a plot of subsurface pressure"""
return self.createSubsurfacePseudocolor("pressure.cell.0", limits=limits, window=window)
def plotSurfacePressure(self, window=None, limits=None):
"""Adds a plot of surface pressure"""
return self.createSurfacePseudocolor("surface_pressure.cell.0", limits=limits, window=window)
def plotLiquidSaturation(self, window=None, limits=None, cmap="saturation_liquid_r"):
"""Adds a plot of subsurface pressure"""
return self.createSubsurfacePseudocolor("saturation_liquid.cell.0", limits=limits,
cmap=cmap, window=window)
def plotGasSaturation(self, window=None, limits=None):
"""Adds a plot of subsurface pressure"""
return self.createSubsurfacePseudocolor("saturation_gas.cell.0", limits=limits,
window=window)
def plotIceSaturation(self, window=None, limits=None, cmap="saturation_ice_r"):
"""Adds a plot of subsurface pressure"""
return self.createSubsurfacePseudocolor("saturation_ice.cell.0", limits=limits,
cmap=cmap, window=window)
def plotTemperature(self, window=None, limits=None):
"""Adds a plot of subsurface temperature"""
# create the colormap
cmap = None
if limits is None:
limits = vrc.rcParams['var.limits']["temperature"]
if limits is not None:
            if limits[0] is not None and limits[1] is not None:
vc.createTemperaturesColorMap(limits[0], limits[1], 0., "temperature")
cmap = "temperature"
return self.createSubsurfacePseudocolor("temperature.cell.0", limits=limits,
cmap=cmap, window=window)
def plotSurfaceTemperature(self, window=None, limits=None):
"""Adds a plot of surface temperature"""
# create the colormap
cmap = None
if limits is None:
limits = vrc.rcParams['var.limits']["temperature"]
        if limits is not None:
            if limits[0] is not None and limits[1] is not None:
vc.createTemperaturesColorMap(limits[0], limits[1], 0., "surface_temperature")
cmap = "surface_temperature"
return self.createSurfacePseudocolor("surface_temperature.cell.0", limits=limits,
cmap=cmap, window=window)
def plotSnowTemperature(self, window=None, limits=None):
"""Adds a plot of snow temperature"""
# create the colormap
cmap = None
if limits is None:
limits = vrc.rcParams['var.limits']["temperature"]
        if limits is not None:
            if limits[0] is not None and limits[1] is not None:
vc.createTemperaturesColorMap(limits[0], limits[1], 0., "snow_temperature")
cmap = "snow_temperature"
return self.createSnowPseudocolor("snow_temperature.cell.0", limits=limits,
cmap=cmap, window=window)
def plotPondedDepth(self, **kwargs):
"""Adds a plot of surface ponded depth"""
if 'domain_name' in kwargs.keys():
varname = kwargs['domain_name']+"-ponded_depth.cell.0"
kwargs.pop("domain_name")
else:
varname = "surface-ponded_depth.cell.0"
return self.createSurfacePseudocolor(varname, **kwargs)
def plotSnowDepth(self, **kwargs):
"""Adds a plot of snow depth"""
return self.createSnowPseudocolor("snow_depth.cell.0", **kwargs)
def _getIndexByTime(self, time):
pass
def plotALD(self, yr_start, yr_end):
"""Adds a plot of contours of ALD from year start to year end, inclusive"""
# find the time slice that starts the full period
# for yr in range(yr_start, yr_end+1):
# # find the time slice that starts the year
pass
def writeTime(self, round=None):
self.setActiveWindow(vrc.rcParams['time.window'])
if self.time_annot is None:
ta = v.CreateAnnotationObject("Text2D")
ta.position = vrc.rcParams['time.location']
ta.height = vrc.rcParams['time.fontheight']
ta.fontFamily = vrc.getDefaultFont()
self.time_annot = ta
if round is None:
round = vrc.rcParams['time.round']
self.time_annot.text = vt.visitTime(round)
def plotSurfaceMesh(self, color='w', opacity=.15):
"""Simplified interface to create standard pseudocolors on the surface."""
self.activateSurface()
win = self.getActiveWindow()
mesh = win.createMesh(color,opacity)
return mesh
def plotSubsurfaceMesh(self, color='w', opacity=.15):
"""Simplified interface to create standard pseudocolors on the surface."""
self.activateSubsurface()
win = self.getActiveWindow()
mesh = win.createMesh(color,opacity)
return mesh
def draw(self):
"""Draw the plots"""
for win in self.windows:
win.draw()
# leave with 1 as the active window, which seems to be
# required to get saving to work
self.setActiveWindow(1)
def update(self):
"""Any changes not made by SetTimeSliderState()"""
if self.time_annot is not None:
self.writeTime()
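# --- Usage sketch (illustrative; not part of the original module) ---
# A hedged, minimal outline of how the classes above appear intended to be
# used. The exact ATSVis constructor arguments are defined in a part of this
# file not shown here, so treat the constructor call below as an assumption.
#
#   vis = ATSVis(directory=".")                  # hypothetical constructor call
#   vis.loadSourcesList(["visdump_data.visit"])
#   vis.addWindow()
#   vis.plotTemperature()
#   vis.writeTime()
#   vis.draw()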
|
amanzi/ats-dev
|
tools/visit_ats/visit_ats/visit_ats.py
|
Python
|
bsd-3-clause
| 21,342 | 0.005716 |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Drivers for audio functionality provided by the VoiceHat."""
import time
import wave
import aiy._drivers._player
import aiy._drivers._recorder
import aiy._drivers._tts
import aiy.i18n  # needed by say() below for the default language code
AUDIO_SAMPLE_SIZE = 2 # bytes per sample
AUDIO_SAMPLE_RATE_HZ = 16000
# Global variables. They are lazily initialized.
_voicehat_recorder = None
_voicehat_player = None
_status_ui = None
class _WaveDump(object):
"""A processor that saves recorded audio to a wave file."""
def __init__(self, filepath, duration):
self._wave = wave.open(filepath, 'wb')
self._wave.setnchannels(1)
self._wave.setsampwidth(2)
self._wave.setframerate(16000)
self._bytes = 0
self._bytes_limit = int(duration * 16000) * 1 * 2
def add_data(self, data):
max_bytes = self._bytes_limit - self._bytes
data = data[:max_bytes]
self._bytes += len(data)
if data:
self._wave.writeframes(data)
def is_done(self):
return self._bytes >= self._bytes_limit
def __enter__(self):
return self
def __exit__(self, *args):
self._wave.close()
def get_player():
"""Returns a driver to control the VoiceHat speaker.
The aiy modules automatically use this player. So usually you do not need to
use this. Instead, use 'aiy.audio.play_wave' if you would like to play some
audio.
"""
global _voicehat_player
if _voicehat_player is None:
_voicehat_player = aiy._drivers._player.Player()
return _voicehat_player
def get_recorder():
"""Returns a driver to control the VoiceHat microphones.
The aiy modules automatically use this recorder. So usually you do not need to
use this.
"""
global _voicehat_recorder
if _voicehat_recorder is None:
_voicehat_recorder = aiy._drivers._recorder.Recorder()
return _voicehat_recorder
def record_to_wave(filepath, duration):
"""Records an audio for the given duration to a wave file."""
recorder = get_recorder()
dumper = _WaveDump(filepath, duration)
with recorder, dumper:
recorder.add_processor(dumper)
while not dumper.is_done():
time.sleep(0.1)
def play_wave(wave_file):
"""Plays the given wave file.
The wave file has to be mono and small enough to be loaded in memory.
"""
player = get_player()
player.play_wav(wave_file)
def play_audio(audio_data):
"""Plays the given audio data."""
player = get_player()
player.play_bytes(audio_data, sample_width=AUDIO_SAMPLE_SIZE, sample_rate=AUDIO_SAMPLE_RATE_HZ)
def say(words, lang=None):
"""Says the given words in the given language with Google TTS engine.
If lang is specified, e.g. "en-US', it will be used to say the given words.
Otherwise, the language from aiy.i18n will be used.
"""
if not lang:
lang = aiy.i18n.get_language_code()
aiy._drivers._tts.say(aiy.audio.get_player(), words, lang=lang)
def get_status_ui():
"""Returns a driver to access the StatusUI daemon.
    The StatusUI daemon controls the LEDs in the background. It supports a list
    of statuses that it can communicate via the LED on the VoiceHat.
"""
global _status_ui
if _status_ui is None:
_status_ui = aiy._drivers._StatusUi()
return _status_ui
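# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal, hedged example of the helpers defined above. It assumes the code
# runs on a device with the VoiceHat attached and that '/tmp/test.wav' is a
# writable path chosen purely for illustration.
#
#   import aiy.audio
#
#   aiy.audio.record_to_wave('/tmp/test.wav', 3)              # record 3 seconds
#   aiy.audio.play_wave('/tmp/test.wav')                      # play it back
#   aiy.audio.say('Hello from the Voice HAT', lang='en-US')   # speak via TTS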
|
hanmy75/voice-recognizer
|
src/aiy/audio.py
|
Python
|
apache-2.0
| 3,887 | 0.000772 |
'''
Produce minimum, maximum and the difference of number list
Status: Accepted
'''
###############################################################################
def read_line_of_integers():
"""Read one line of numbers or detect EOF"""
try:
text = input()
return [int(i) for i in text.split()][1:]
except EOFError:
pass
return None
###############################################################################
def main():
"""Read input and print output statistics about list of numbers"""
test_case = 0
while True:
numbers = read_line_of_integers()
if numbers:
test_case += 1
mini = min(numbers)
maxi = max(numbers)
print('Case {0}: {1} {2} {3}'.format(test_case, mini, maxi, maxi - mini))
else:
break
###############################################################################
if __name__ == '__main__':
main()
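# --- Example I/O sketch (inferred from the parsing above; not in the original) ---
# Each input line starts with a count followed by the numbers themselves (the
# leading count is dropped by the [1:] slice), e.g.:
#
#   input : 5 1 2 3 4 5
#   output: Case 1: 1 5 4    (minimum, maximum, and their difference)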
|
ivanlyon/exercises
|
kattis/k_statistics.py
|
Python
|
mit
| 970 | 0.004124 |
#-*-coding:UTF-8 -*-
#
# Check whether the input is an integer (int)
input_string = input('Please input n:')
#while input_string.isdigit() == False:
while not input_string.isdigit():
print("Error, %s is not digit!" % input_string)
input_string = input('Please input n:')
print("%s is digit!" % input_string)
|
yuzheng/python-ex
|
checkDigit.py
|
Python
|
mit
| 301 | 0.017668 |
from unittest import TestCase
from io import StringIO
from trtools.util.tempdir import TemporaryDirectory
import trtools.io.filecache as fc
class TestFileCache(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
class MetaKey(object):
def __init__(self, key):
self.key = key
def __hash__(self):
return hash(self.key)
def __filename__(self):
return str(self.key) + '.save'
def __repr__(self):
return repr(self.key)
class Value(object):
def __init__(self, string):
self.string = string
class TestMetaFileCache(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
def test_meta(self):
key1 = MetaKey('dale')
key2 = MetaKey('bob')
with TemporaryDirectory() as td:
mfc = fc.MetaFileCache(td)
mfc[key1] = Value('daledata')
mfc[key2] = Value('bobdata')
mfc[key1] = Value('daledata2')
# grabbing with key still works
assert mfc[key1].string == 'daledata2'
# this one should load keys from index file
mfc2 = fc.MetaFileCache(td)
for a,b in zip( list(mfc2.keys()), list(mfc.keys())):
assert a.key == b.key
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],exit=False)
|
dalejung/trtools
|
trtools/io/tests/test_filecache.py
|
Python
|
mit
| 1,754 | 0.008552 |
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# Methods to create Script object
script_factory_methods = [ 'dml', 'pydml', 'dmlFromResource', 'pydmlFromResource', 'dmlFromFile', 'pydmlFromFile', 'dmlFromUrl', 'pydmlFromUrl' ]
# Utility methods
util_methods = [ '_java2py', 'getHopDAG' ]
__all__ = ['MLResults', 'MLContext', 'Script', 'Matrix' ] + script_factory_methods + util_methods
import os
import numpy as np
import pandas as pd
import threading, time
try:
import py4j.java_gateway
from py4j.java_gateway import JavaObject
from pyspark import SparkContext
from pyspark.conf import SparkConf
import pyspark.mllib.common
from pyspark.sql import SparkSession
except ImportError:
raise ImportError('Unable to import `pyspark`. Hint: Make sure you are running with PySpark.')
from .converters import *
from .classloader import *
def getHopDAG(ml, script, lines=None, conf=None, apply_rewrites=True, with_subgraph=False):
"""
Compile a DML / PyDML script.
Parameters
----------
ml: MLContext instance
MLContext instance.
script: Script instance
Script instance defined with the appropriate input and output variables.
lines: list of integers
Optional: only display the hops that have begin and end line number equals to the given integers.
conf: SparkConf instance
Optional spark configuration
apply_rewrites: boolean
If True, perform static rewrites, perform intra-/inter-procedural analysis to propagate size information into functions and apply dynamic rewrites
with_subgraph: boolean
If False, the dot graph will be created without subgraphs for statement blocks.
Returns
-------
hopDAG: string
hop DAG in dot format
"""
if not isinstance(script, Script):
raise ValueError("Expected script to be an instance of Script")
scriptString = script.scriptString
script_java = script.script_java
lines = [ int(x) for x in lines ] if lines is not None else [int(-1)]
sc = get_spark_context()
if conf is not None:
hopDAG = sc._jvm.org.apache.sysml.api.mlcontext.MLContextUtil.getHopDAG(ml._ml, script_java, lines, conf._jconf, apply_rewrites, with_subgraph)
else:
hopDAG = sc._jvm.org.apache.sysml.api.mlcontext.MLContextUtil.getHopDAG(ml._ml, script_java, lines, apply_rewrites, with_subgraph)
return hopDAG
def dml(scriptString):
"""
Create a dml script object based on a string.
Parameters
----------
scriptString: string
Can be a path to a dml script or a dml script itself.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(scriptString, str):
raise ValueError("scriptString should be a string, got %s" % type(scriptString))
return Script(scriptString, scriptType="dml")
def dmlFromResource(resourcePath):
"""
Create a dml script object based on a resource path.
Parameters
----------
resourcePath: string
Path to a dml script on the classpath.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(resourcePath, str):
raise ValueError("resourcePath should be a string, got %s" % type(resourcePath))
return Script(resourcePath, scriptType="dml", isResource=True)
def pydml(scriptString):
"""
Create a pydml script object based on a string.
Parameters
----------
scriptString: string
Can be a path to a pydml script or a pydml script itself.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(scriptString, str):
raise ValueError("scriptString should be a string, got %s" % type(scriptString))
return Script(scriptString, scriptType="pydml")
def pydmlFromResource(resourcePath):
"""
Create a pydml script object based on a resource path.
Parameters
----------
resourcePath: string
Path to a pydml script on the classpath.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(resourcePath, str):
raise ValueError("resourcePath should be a string, got %s" % type(resourcePath))
return Script(resourcePath, scriptType="pydml", isResource=True)
def dmlFromFile(filePath):
"""
Create a dml script object based on a file path.
Parameters
----------
filePath: string
Path to a dml script.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(filePath, str):
raise ValueError("filePath should be a string, got %s" % type(filePath))
return Script(filePath, scriptType="dml", isResource=False, scriptFormat="file")
def pydmlFromFile(filePath):
"""
Create a pydml script object based on a file path.
Parameters
----------
filePath: string
Path to a pydml script.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(filePath, str):
raise ValueError("filePath should be a string, got %s" % type(filePath))
return Script(filePath, scriptType="pydml", isResource=False, scriptFormat="file")
def dmlFromUrl(url):
"""
Create a dml script object based on a url.
Parameters
----------
url: string
URL to a dml script.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(url, str):
raise ValueError("url should be a string, got %s" % type(url))
return Script(url, scriptType="dml", isResource=False, scriptFormat="url")
def pydmlFromUrl(url):
"""
Create a pydml script object based on a url.
Parameters
----------
url: string
URL to a pydml script.
Returns
-------
script: Script instance
Instance of a script object.
"""
if not isinstance(url, str):
raise ValueError("url should be a string, got %s" % type(url))
return Script(url, scriptType="pydml", isResource=False, scriptFormat="url")
def _java2py(sc, obj):
""" Convert Java object to Python. """
# TODO: Port this private PySpark function.
obj = pyspark.mllib.common._java2py(sc, obj)
if isinstance(obj, JavaObject):
class_name = obj.getClass().getSimpleName()
if class_name == 'Matrix':
obj = Matrix(obj, sc)
return obj
def _py2java(sc, obj):
""" Convert Python object to Java. """
if isinstance(obj, SUPPORTED_TYPES):
obj = convertToMatrixBlock(sc, obj)
else:
if isinstance(obj, Matrix):
obj = obj._java_matrix
# TODO: Port this private PySpark function.
obj = pyspark.mllib.common._py2java(sc, obj)
return obj
class Matrix(object):
"""
Wrapper around a Java Matrix object.
Parameters
----------
javaMatrix: JavaObject
A Java Matrix object as returned by calling `ml.execute().get()`.
sc: SparkContext
SparkContext
"""
def __init__(self, javaMatrix, sc):
self._java_matrix = javaMatrix
self._sc = sc
def __repr__(self):
return "Matrix"
def toDF(self):
"""
Convert the Matrix to a PySpark SQL DataFrame.
Returns
-------
PySpark SQL DataFrame
A PySpark SQL DataFrame representing the matrix, with
one "__INDEX" column containing the row index (since Spark
DataFrames are unordered), followed by columns of doubles
for each column in the matrix.
"""
jdf = self._java_matrix.toDF()
df = _java2py(self._sc, jdf)
return df
def toNumPy(self):
"""
Convert the Matrix to a NumPy Array.
Returns
-------
NumPy Array
A NumPy Array representing the Matrix object.
"""
np_array = convertToNumPyArr(self._sc, self._java_matrix.toMatrixBlock())
return np_array
class MLResults(object):
"""
Wrapper around a Java ML Results object.
Parameters
----------
results: JavaObject
A Java MLResults object as returned by calling `ml.execute()`.
sc: SparkContext
SparkContext
"""
def __init__(self, results, sc):
self._java_results = results
self._sc = sc
def __repr__(self):
return "MLResults"
def get(self, *outputs):
"""
Parameters
----------
outputs: string, list of strings
Output variables as defined inside the DML script.
"""
outs = [_java2py(self._sc, self._java_results.get(out)) for out in outputs]
if len(outs) == 1:
return outs[0]
return outs
class Script(object):
"""
Instance of a DML/PyDML Script.
Parameters
----------
scriptString: string
Can be either a file path to a DML script or a DML script itself.
scriptType: string
Script language, either "dml" for DML (R-like) or "pydml" for PyDML (Python-like).
isResource: boolean
If true, scriptString is a path to a resource on the classpath
scriptFormat: string
Optional script format, either "auto" or "url" or "file" or "resource" or "string"
"""
def __init__(self, scriptString, scriptType="dml", isResource=False, scriptFormat="auto"):
self.sc = get_spark_context()
self.scriptString = scriptString
self.scriptType = scriptType
self.isResource = isResource
if scriptFormat != "auto":
if scriptFormat == "url" and self.scriptType == "dml":
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromUrl(scriptString)
elif scriptFormat == "url" and self.scriptType == "pydml":
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.pydmlFromUrl(scriptString)
elif scriptFormat == "file" and self.scriptType == "dml":
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromFile(scriptString)
elif scriptFormat == "file" and self.scriptType == "pydml":
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.pydmlFromFile(scriptString)
elif isResource and self.scriptType == "dml":
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromResource(scriptString)
elif isResource and self.scriptType == "pydml":
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.pydmlFromResource(scriptString)
elif scriptFormat == "string" and self.scriptType == "dml":
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dml(scriptString)
elif scriptFormat == "string" and self.scriptType == "pydml":
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.pydml(scriptString)
else:
                raise ValueError('Unsupported script format: ' + scriptFormat)
elif self.scriptType == "dml":
if scriptString.endswith(".dml"):
if scriptString.startswith("http"):
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromUrl(scriptString)
elif os.path.exists(scriptString):
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromFile(scriptString)
elif self.isResource == True:
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dmlFromResource(scriptString)
else:
raise ValueError("path: %s does not exist" % scriptString)
else:
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.dml(scriptString)
elif self.scriptType == "pydml":
if scriptString.endswith(".pydml"):
if scriptString.startswith("http"):
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.pydmlFromUrl(scriptString)
elif os.path.exists(scriptString):
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.pydmlFromFile(scriptString)
elif self.isResource == True:
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.pydmlFromResource(scriptString)
else:
raise ValueError("path: %s does not exist" % scriptString)
else:
self.script_java = self.sc._jvm.org.apache.sysml.api.mlcontext.ScriptFactory.pydml(scriptString)
def getScriptString(self):
"""
Obtain the script string (in unicode).
"""
return self.script_java.getScriptString()
def setScriptString(self, scriptString):
"""
Set the script string.
Parameters
----------
scriptString: string
Can be either a file path to a DML script or a DML script itself.
"""
self.scriptString = scriptString
self.script_java.setScriptString(scriptString)
return self
def getInputVariables(self):
"""
Obtain the input variable names.
"""
return self.script_java.getInputVariables()
def getOutputVariables(self):
"""
Obtain the output variable names.
"""
return self.script_java.getOutputVariables()
def clearIOS(self):
"""
Clear the inputs, outputs, and symbol table.
"""
self.script_java.clearIOS()
return self
def clearIO(self):
"""
Clear the inputs and outputs, but not the symbol table.
"""
self.script_java.clearIO()
return self
def clearAll(self):
"""
Clear the script string, inputs, outputs, and symbol table.
"""
self.script_java.clearAll()
return self
def clearInputs(self):
"""
Clear the inputs.
"""
self.script_java.clearInputs()
return self
def clearOutputs(self):
"""
Clear the outputs.
"""
self.script_java.clearOutputs()
return self
def clearSymbolTable(self):
"""
Clear the symbol table.
"""
self.script_java.clearSymbolTable()
return self
def results(self):
"""
Obtain the results of the script execution.
"""
return MLResults(self.script_java.results(), self.sc)
def getResults(self):
"""
Obtain the results of the script execution.
"""
return MLResults(self.script_java.getResults(), self.sc)
def setResults(self, results):
"""
Set the results of the script execution.
"""
self.script_java.setResults(results._java_results)
return self
def isDML(self):
"""
Is the script type DML?
"""
return self.script_java.isDML()
def isPYDML(self):
"""
        Is the script type PyDML?
"""
return self.script_java.isPYDML()
def getScriptExecutionString(self):
"""
Generate the script execution string, which adds read/load/write/save
statements to the beginning and end of the script to execute.
"""
return self.script_java.getScriptExecutionString()
def __repr__(self):
return "Script"
def info(self):
"""
Display information about the script as a String. This consists of the
script type, inputs, outputs, input parameters, input variables, output
variables, the symbol table, the script string, and the script execution string.
"""
return self.script_java.info()
def displayInputs(self):
"""
Display the script inputs.
"""
return self.script_java.displayInputs()
def displayOutputs(self):
"""
Display the script outputs.
"""
return self.script_java.displayOutputs()
def displayInputParameters(self):
"""
Display the script input parameters.
"""
return self.script_java.displayInputParameters()
def displayInputVariables(self):
"""
Display the script input variables.
"""
return self.script_java.displayInputVariables()
def displayOutputVariables(self):
"""
Display the script output variables.
"""
return self.script_java.displayOutputVariables()
def displaySymbolTable(self):
"""
Display the script symbol table.
"""
return self.script_java.displaySymbolTable()
def getName(self):
"""
Obtain the script name.
"""
return self.script_java.getName()
def setName(self, name):
"""
Set the script name.
"""
self.script_java.setName(name)
return self
def getScriptType(self):
"""
Obtain the script type.
"""
return self.scriptType
def input(self, *args, **kwargs):
"""
Parameters
----------
args: name, value tuple
where name is a string, and currently supported value formats
            are double, string, dataframe, rdd, and lists of such objects.
kwargs: dict of name, value pairs
To know what formats are supported for name and value, look above.
"""
if args and len(args) != 2:
raise ValueError("Expected name, value pair.")
elif args:
self._setInput(args[0], args[1])
for name, value in kwargs.items():
self._setInput(name, value)
return self
def _setInput(self, key, val):
# `in` is a reserved word ("keyword") in Python, so `script_java.in(...)` is not
# allowed. Therefore, we use the following code in which we retrieve a function
# representing `script_java.in`, and then call it with the arguments. This is in
# lieu of adding a new `input` method on the JVM side, as that would complicate use
# from Scala/Java.
if isinstance(val, py4j.java_gateway.JavaObject):
py4j.java_gateway.get_method(self.script_java, "in")(key, val)
else:
py4j.java_gateway.get_method(self.script_java, "in")(key, _py2java(self.sc, val))
def output(self, *names):
"""
Parameters
----------
names: string, list of strings
Output variables as defined inside the DML script.
"""
for val in names:
self.script_java.out(val)
return self
class MLContext(object):
"""
Wrapper around the new SystemML MLContext.
Parameters
----------
sc: SparkContext or SparkSession
An instance of pyspark.SparkContext or pyspark.sql.SparkSession.
"""
def __init__(self, sc):
if isinstance(sc, pyspark.sql.session.SparkSession):
sc = sc._sc
elif not isinstance(sc, SparkContext):
raise ValueError("Expected sc to be a SparkContext or SparkSession, got " % str(type(sc)))
self._sc = sc
self._ml = createJavaObject(sc, 'mlcontext')
def __repr__(self):
return "MLContext"
def execute(self, script):
"""
Execute a DML / PyDML script.
Parameters
----------
script: Script instance
Script instance defined with the appropriate input and output variables.
Returns
-------
ml_results: MLResults
MLResults instance.
"""
if not isinstance(script, Script):
raise ValueError("Expected script to be an instance of Script")
scriptString = script.scriptString
script_java = script.script_java
global default_jvm_stdout, default_jvm_stdout_parallel_flush
if default_jvm_stdout:
with jvm_stdout(parallel_flush=default_jvm_stdout_parallel_flush):
return MLResults(self._ml.execute(script_java), self._sc)
else:
return MLResults(self._ml.execute(script_java), self._sc)
def setStatistics(self, statistics):
"""
Whether or not to output statistics (such as execution time, elapsed time)
about script executions.
Parameters
----------
statistics: boolean
"""
self._ml.setStatistics(bool(statistics))
return self
def setGPU(self, enable):
"""
Whether or not to enable GPU.
Parameters
----------
enable: boolean
"""
self._ml.setGPU(bool(enable))
return self
def setForceGPU(self, enable):
"""
Whether or not to force the usage of GPU operators.
Parameters
----------
enable: boolean
"""
self._ml.setForceGPU(bool(enable))
return self
def setStatisticsMaxHeavyHitters(self, maxHeavyHitters):
"""
The maximum number of heavy hitters that are printed as part of the statistics.
Parameters
----------
maxHeavyHitters: int
"""
self._ml.setStatisticsMaxHeavyHitters(maxHeavyHitters)
return self
def setExplain(self, explain):
"""
Explanation about the program. Mainly intended for developers.
Parameters
----------
explain: boolean
"""
self._ml.setExplain(bool(explain))
return self
def setExplainLevel(self, explainLevel):
"""
Set explain level.
Parameters
----------
explainLevel: string
Can be one of "hops", "runtime", "recompile_hops", "recompile_runtime"
or in the above in upper case.
"""
self._ml.setExplainLevel(explainLevel)
return self
def setConfigProperty(self, propertyName, propertyValue):
"""
Set configuration property, such as setConfigProperty("sysml.localtmpdir", "/tmp/systemml").
Parameters
----------
propertyName: String
propertyValue: String
"""
self._ml.setConfigProperty(propertyName, propertyValue)
return self
def setConfig(self, configFilePath):
"""
Set SystemML configuration based on a configuration file.
Parameters
----------
configFilePath: String
"""
self._ml.setConfig(configFilePath)
return self
def resetConfig(self):
"""
Reset configuration settings to default values.
"""
self._ml.resetConfig()
return self
def version(self):
"""Display the project version."""
return self._ml.version()
def buildTime(self):
"""Display the project build time."""
return self._ml.buildTime()
def info(self):
"""Display the project information."""
return self._ml.info().toString()
def isExplain(self):
"""Returns True if program instruction details should be output, False otherwise."""
return self._ml.isExplain()
def isStatistics(self):
"""Returns True if program execution statistics should be output, False otherwise."""
return self._ml.isStatistics()
def isGPU(self):
"""Returns True if GPU mode is enabled, False otherwise."""
return self._ml.isGPU()
def isForceGPU(self):
"""Returns True if "force" GPU mode is enabled, False otherwise."""
return self._ml.isForceGPU()
def close(self):
"""
Closes this MLContext instance to cleanup buffer pool, static/local state and scratch space.
Note the SparkContext is not explicitly closed to allow external reuse.
"""
self._ml.close()
return self
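# --- Usage sketch (illustrative; not part of the original module) ---
# A hedged, minimal example of the API defined above, run from a PySpark
# session. The import path, the variable `sc`, and the tiny DML snippet are
# assumptions made purely for illustration.
#
#   import numpy as np
#   from systemml.mlcontext import MLContext, dml
#
#   ml = MLContext(sc)                          # sc: an existing SparkContext
#   script = dml("s = sum(M)").input(M=np.ones((3, 3))).output("s")
#   s = ml.execute(script).get("s")             # MLResults.get() unwraps the output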
|
nakul02/incubator-systemml
|
src/main/python/systemml/mlcontext.py
|
Python
|
apache-2.0
| 25,332 | 0.004934 |
#!/usr/bin/env python3
# Copyright 2010-2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for issue 46 in or-tools."""
from ortools.constraint_solver import pywrapcp
class AssignToStartMin(pywrapcp.PyDecisionBuilder):
def __init__(self, intervals):
pywrapcp.PyDecisionBuilder.__init__(self)
self.__intervals = intervals
def Next(self, solver):
for interval in self.__intervals:
interval.SetStartMax(interval.StartMin())
return None
def DebugString(self):
return 'CustomDecisionBuilder'
def NoSequence():
print('NoSequence')
solver = pywrapcp.Solver('Ordo')
    tasks = [
        solver.FixedDurationIntervalVar(0, 25, 5, False, 'Tasks%i' % i)
        for i in range(3)
    ]
print(tasks)
disj = solver.DisjunctiveConstraint(tasks, 'Disjunctive')
solver.Add(disj)
collector = solver.AllSolutionCollector()
collector.Add(tasks)
intervalPhase = solver.Phase(tasks, solver.INTERVAL_DEFAULT)
solver.Solve(intervalPhase, [collector])
print(collector.SolutionCount())
for i in range(collector.SolutionCount()):
print("Solution ", i)
print(collector.ObjectiveValue(i))
print([collector.StartValue(i, tasks[j]) for j in range(3)])
print([collector.EndValue(i, tasks[j]) for j in range(3)])
def Sequence():
print('Sequence')
solver = pywrapcp.Solver('Ordo')
    tasks = [
        solver.FixedDurationIntervalVar(0, 25, 5, False, 'Tasks%i' % i)
        for i in range(3)
    ]
print(tasks)
disj = solver.DisjunctiveConstraint(tasks, 'Disjunctive')
solver.Add(disj)
sequence = []
sequence.append(disj.SequenceVar())
sequence[0].RankFirst(0)
collector = solver.AllSolutionCollector()
collector.Add(sequence)
collector.Add(tasks)
sequencePhase = solver.Phase(sequence, solver.SEQUENCE_DEFAULT)
intervalPhase = AssignToStartMin(tasks)
# intervalPhase = solver.Phase(tasks, solver.INTERVAL_DEFAULT)
mainPhase = solver.Compose([sequencePhase, intervalPhase])
solver.Solve(mainPhase, [collector])
print(collector.SolutionCount())
for i in range(collector.SolutionCount()):
print("Solution ", i)
print(collector.ObjectiveValue(i))
print([collector.StartValue(i, tasks[j]) for j in range(3)])
print([collector.EndValue(i, tasks[j]) for j in range(3)])
def main():
NoSequence()
Sequence()
if __name__ == '__main__':
main()
|
google/or-tools
|
examples/tests/issue46.py
|
Python
|
apache-2.0
| 3,057 | 0 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import string
import factory.fuzzy
from attribution.tests.factories.attribution_new import AttributionNewFactory
from base.tests.factories.learning_component_year import LearningComponentYearFactory
class AttributionChargeNewFactory(factory.django.DjangoModelFactory):
class Meta:
model = "attribution.AttributionChargeNew"
external_id = factory.fuzzy.FuzzyText(length=10, chars=string.digits)
attribution = factory.SubFactory(
AttributionNewFactory,
learning_container_year=factory.SelfAttribute(
'..learning_component_year.learning_unit_year.learning_container_year'
)
)
learning_component_year = factory.SubFactory(LearningComponentYearFactory)
allocation_charge = 0
|
uclouvain/osis
|
attribution/tests/factories/attribution_charge_new.py
|
Python
|
agpl-3.0
| 2,021 | 0.001485 |
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "nxtools",
version = "0.8.4",
author = "Martin Wacker",
author_email = "martas@imm.cz",
description = "Set of common utilities and little helpers.",
license = "MIT",
keywords = "utilities log logging ffmpeg watchfolder media mam time",
url = "https://github.com/immstudios/nxtools",
packages=['nxtools', 'tests', 'nxtools.media', 'nxtools.caspar'],
long_description=read('README.rst'),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Topic :: Multimedia :: Video :: Conversion",
"Topic :: Utilities",
],
)
|
martastain/nxtools
|
setup.py
|
Python
|
mit
| 989 | 0.0182 |