| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for exploration domain objects and methods defined on them."""
__author__ = 'Sean Lip'
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import param_domain
from core.tests import test_utils
import feconf
import utils
class ExplorationDomainUnitTests(test_utils.GenericTestBase):
"""Test the exploration domain object."""
def test_validation(self):
"""Test validation of explorations."""
exploration = exp_domain.Exploration.create_default_exploration(
'exp_id', '', '')
exploration.init_state_name = ''
exploration.states = {}
with self.assertRaisesRegexp(
utils.ValidationError, 'between 1 and 50 characters'):
exploration.validate()
exploration.title = 'Hello #'
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid character #'):
exploration.validate()
exploration.title = 'Title'
with self.assertRaisesRegexp(
utils.ValidationError, 'between 1 and 50 characters'):
exploration.validate()
exploration.category = 'Category'
# Note: If '/' ever becomes a valid state name, ensure that the rule
# editor frontend template is fixed -- it currently uses '/' as a
# sentinel for an invalid state name.
bad_state = exp_domain.State.create_default_state('/')
exploration.states = {'/': bad_state}
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid character / in a state name'):
exploration.validate()
new_state = exp_domain.State.create_default_state('ABC')
new_state.update_interaction_id('TextInput')
# The 'states' property must be a non-empty dict of states.
exploration.states = {}
with self.assertRaisesRegexp(
utils.ValidationError, 'exploration has no states'):
exploration.validate()
exploration.states = {'A string #': new_state}
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid character # in a state name'):
exploration.validate()
exploration.states = {'A string _': new_state}
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid character _ in a state name'):
exploration.validate()
exploration.states = {'ABC': new_state}
with self.assertRaisesRegexp(
utils.ValidationError, 'has no initial state name'):
exploration.validate()
exploration.init_state_name = 'initname'
with self.assertRaisesRegexp(
utils.ValidationError,
r'There is no state in \[\'ABC\'\] corresponding to '
'the exploration\'s initial state name initname.'):
exploration.validate()
exploration.states = {exploration.init_state_name: new_state}
with self.assertRaisesRegexp(
utils.ValidationError, 'destination ABC is not a valid'):
exploration.validate()
exploration.states = {
exploration.init_state_name: exp_domain.State.create_default_state(
exploration.init_state_name)
}
exploration.states[exploration.init_state_name].update_interaction_id(
'TextInput')
exploration.validate()
exploration.language_code = 'fake_code'
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid language_code'):
exploration.validate()
exploration.language_code = 'English'
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid language_code'):
exploration.validate()
exploration.language_code = 'en'
exploration.validate()
exploration.param_specs = 'A string'
with self.assertRaisesRegexp(
utils.ValidationError, 'param_specs to be a dict'):
exploration.validate()
exploration.param_specs = {
'@': param_domain.ParamSpec.from_dict({'obj_type': 'Int'})
}
with self.assertRaisesRegexp(
utils.ValidationError, 'Only parameter names with characters'):
exploration.validate()
exploration.param_specs = {
'notAParamSpec': param_domain.ParamSpec.from_dict(
{'obj_type': 'Int'})
}
exploration.validate()
def test_objective_validation(self):
"""Test that objectives are validated only in 'strict' mode."""
self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='Title', category='Category',
objective='')
exploration = exp_services.get_exploration_by_id('exp_id')
exploration.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'objective must be specified'):
exploration.validate(strict=True)
exploration.objective = 'An objective'
# Link the start state to the END state in order to make the
# exploration valid.
exploration.states[exploration.init_state_name].interaction.handlers[
0].rule_specs[0].dest = feconf.END_DEST
exploration.validate(strict=True)
def test_is_demo_property(self):
"""Test the is_demo property."""
demo = exp_domain.Exploration.create_default_exploration(
'0', 'title', 'category')
self.assertEqual(demo.is_demo, True)
notdemo1 = exp_domain.Exploration.create_default_exploration(
'a', 'title', 'category')
self.assertEqual(notdemo1.is_demo, False)
notdemo2 = exp_domain.Exploration.create_default_exploration(
'abcd', 'title', 'category')
self.assertEqual(notdemo2.is_demo, False)
class StateExportUnitTests(test_utils.GenericTestBase):
"""Test export of states."""
def test_export_state_to_dict(self):
"""Test exporting a state to a dict."""
exploration = exp_domain.Exploration.create_default_exploration(
'A different exploration_id', 'A title', 'A category')
exploration.add_states(['New state'])
state_dict = exploration.states['New state'].to_dict()
expected_dict = {
'content': [{
'type': 'text',
'value': u''
}],
'interaction': {
'customization_args': {},
'handlers': [{
'name': u'submit',
'rule_specs': [{
'definition': {
u'rule_type': u'default'
},
'dest': 'New state',
'feedback': [],
'param_changes': [],
}]
}],
'id': None,
},
'param_changes': [],
}
self.assertEqual(expected_dict, state_dict)
class YamlCreationUnitTests(test_utils.GenericTestBase):
"""Test creation of explorations from YAML files."""
SAMPLE_YAML_CONTENT = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 4
skill_tags: []
states:
%s:
content:
- type: text
value: Welcome to the Oppia editor!<br><br>Anything you type here will be shown
to the learner playing your exploration.<br><br>If you need more help getting
started, check out the Help link in the navigation bar.
interaction:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: %s
feedback: []
param_changes: []
id: null
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: New state
feedback: []
param_changes: []
id: null
param_changes: []
""") % (
feconf.DEFAULT_INIT_STATE_NAME, feconf.DEFAULT_INIT_STATE_NAME,
feconf.DEFAULT_INIT_STATE_NAME)
def test_yaml_import_and_export(self):
"""Test the from_yaml() and to_yaml() methods."""
EXP_ID = 'An exploration_id'
exploration = exp_domain.Exploration.create_default_exploration(
EXP_ID, 'A title', 'A category')
exploration.add_states(['New state'])
self.assertEqual(len(exploration.states), 2)
yaml_content = exploration.to_yaml()
self.assertEqual(yaml_content, self.SAMPLE_YAML_CONTENT)
exploration2 = exp_domain.Exploration.from_yaml(
'exp2', 'Title', 'Category', yaml_content)
self.assertEqual(len(exploration2.states), 2)
yaml_content_2 = exploration2.to_yaml()
self.assertEqual(yaml_content_2, yaml_content)
with self.assertRaises(Exception):
exp_domain.Exploration.from_yaml(
'exp3', 'Title', 'Category', 'No_initial_state_name')
with self.assertRaises(Exception):
exp_domain.Exploration.from_yaml(
'exp4', 'Title', 'Category',
'Invalid\ninit_state_name:\nMore stuff')
with self.assertRaises(Exception):
exp_domain.Exploration.from_yaml(
'exp4', 'Title', 'Category', 'State1:\n(\nInvalid yaml')
class SchemaMigrationUnitTests(test_utils.GenericTestBase):
"""Test migration methods for yaml content."""
YAML_CONTENT_V1 = (
"""default_skin: conversation_v1
param_changes: []
param_specs: {}
schema_version: 1
states:
- content:
- type: text
value: ''
name: (untitled state)
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
- content:
- type: text
value: ''
name: New state
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: New state
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V2 = (
"""default_skin: conversation_v1
init_state_name: (untitled state)
param_changes: []
param_specs: {}
schema_version: 2
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: New state
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V3 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 3
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: New state
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V4 = (
"""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 4
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: New state
feedback: []
param_changes: []
id: TextInput
param_changes: []
""")
_LATEST_YAML_CONTENT = YAML_CONTENT_V4
def test_load_from_v1(self):
"""Test direct loading from a v1 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', 'A title', 'A category', self.YAML_CONTENT_V1)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v2(self):
"""Test direct loading from a v2 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', 'A title', 'A category', self.YAML_CONTENT_V2)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v3(self):
"""Test direct loading from a v3 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', 'A title', 'A category', self.YAML_CONTENT_V3)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v4(self):
"""Test direct loading from a v4 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', 'A title', 'A category', self.YAML_CONTENT_V4)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
class ConversionUnitTests(test_utils.GenericTestBase):
"""Test conversion methods."""
def test_convert_exploration_to_player_dict(self):
EXP_TITLE = 'A title'
SECOND_STATE_NAME = 'first state'
exploration = exp_domain.Exploration.create_default_exploration(
'eid', EXP_TITLE, 'A category')
exploration.add_states([SECOND_STATE_NAME])
def _get_default_state_dict(content_str, dest_name):
return {
'content': [{
'type': 'text',
'value': content_str,
}],
'interaction': {
'customization_args': {},
'handlers': [{
'name': 'submit',
'rule_specs': [{
'definition': {
'rule_type': 'default',
},
'description': 'Default',
'dest': dest_name,
'feedback': [],
'param_changes': [],
}],
}],
'id': None,
},
'param_changes': [],
}
self.assertEqual(exploration.to_player_dict(), {
'init_state_name': feconf.DEFAULT_INIT_STATE_NAME,
'title': EXP_TITLE,
'states': {
feconf.DEFAULT_INIT_STATE_NAME: _get_default_state_dict(
feconf.DEFAULT_INIT_STATE_CONTENT_STR,
feconf.DEFAULT_INIT_STATE_NAME),
SECOND_STATE_NAME: _get_default_state_dict(
'', SECOND_STATE_NAME),
},
'param_changes': [],
'param_specs': {},
})
class StateOperationsUnitTests(test_utils.GenericTestBase):
"""Test methods operating on states."""
def test_delete_state(self):
"""Test deletion of states."""
exploration = exp_domain.Exploration.create_default_exploration(
'eid', 'A title', 'A category')
exploration.add_states(['first state'])
with self.assertRaisesRegexp(
ValueError, 'Cannot delete initial state'):
exploration.delete_state(exploration.init_state_name)
exploration.add_states(['second state'])
exploration.delete_state('second state')
with self.assertRaisesRegexp(ValueError, 'fake state does not exist'):
exploration.delete_state('fake state')
def test_state_operations(self):
"""Test adding, updating and checking existence of states."""
exploration = exp_domain.Exploration.create_default_exploration(
'eid', 'A title', 'A category')
with self.assertRaises(KeyError):
exploration.states['invalid_state_name']
self.assertEqual(len(exploration.states), 1)
default_state_name = exploration.init_state_name
exploration.rename_state(default_state_name, 'Renamed state')
self.assertEqual(len(exploration.states), 1)
self.assertEqual(exploration.init_state_name, 'Renamed state')
# Add a new state.
exploration.add_states(['State 2'])
self.assertEqual(len(exploration.states), 2)
# It is OK to rename a state to the same name.
exploration.rename_state('State 2', 'State 2')
# But it is not OK to add or rename a state using a name that already
# exists.
with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
exploration.add_states(['State 2'])
with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
exploration.rename_state('State 2', 'Renamed state')
# And it is not OK to rename a state to the END_DEST.
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid state name'):
exploration.rename_state('State 2', feconf.END_DEST)
# The exploration now has exactly two states.
self.assertNotIn(default_state_name, exploration.states)
self.assertIn('Renamed state', exploration.states)
self.assertIn('State 2', exploration.states)
| Cgruppo/oppia | core/domain/exp_domain_test.py | Python | apache-2.0 | 19,626 | 0.000357 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Copyright 2012 Unknown <diogo@arch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
try:
from process.sequence import Alignment
from base.plotter import bar_plot, multi_bar_plot
from process.error_handling import KillByUser
except ImportError:
from trifusion.process.sequence import Alignment
from trifusion.base.plotter import bar_plot, multi_bar_plot
from trifusion.process.error_handling import KillByUser
from collections import OrderedDict, Counter
import pickle
import os
import sqlite3
from os.path import join
import random
import string
import copy
class Cluster(object):
""" Object for clusters of the OrthoMCL groups file. It is useful to set a
number of attributes that will make subsequent filtration and
processing much easier """
def __init__(self, line_string):
"""
To initialize a Cluster object, only a string compliant with the
format of a cluster in an OrthoMCL groups file has to be provided.
This line should contain the name of the group, a colon, and the
sequences belonging to that group separated by whitespace
:param line_string: String of a cluster
"""
# Initializing attributes for parse_string
self.name = None
self.sequences = None
self.species_frequency = {}
# Initializing attributes for apply filter
# If the value is different than None, this will inform downstream
# objects of whether this cluster is compliant with the specified
# gene_threshold
self.gene_compliant = None
# If the value is different than None, this will inform downstream
# objects of whether this cluster is compliant with the specified
# species_threshold
self.species_compliant = None
self.parse_string(line_string)
def parse_string(self, cluster_string):
"""
Parses the string and sets the group name and sequence list attributes
"""
fields = cluster_string.split(":")
# Setting the name and sequence list of the clusters
self.name = fields[0].strip()
self.sequences = fields[1].strip().split()
# Setting the gene frequency for each species in the cluster
self.species_frequency = Counter([field.split("|")[0] for field in
self.sequences])
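# Illustrative example (a minimal sketch, assuming the usual OrthoMCL
# "group_name: taxonA|seq1 taxonB|seq2" line layout described above):
#
#   cl = Cluster("group_1: taxonA|seq1 taxonA|seq2 taxonB|seq3")
#   cl.name                 # "group_1"
#   cl.sequences            # ["taxonA|seq1", "taxonA|seq2", "taxonB|seq3"]
#   cl.species_frequency    # Counter({"taxonA": 2, "taxonB": 1})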
def remove_taxa(self, taxa_list):
"""
Removes the taxa contained in taxa_list from self.sequences and
self.species_frequency
:param taxa_list: list, each element should be a taxon name
"""
self.sequences = [x for x in self.sequences if x.split("|")[0]
not in taxa_list]
self.species_frequency = dict((taxon, val) for taxon, val in
self.species_frequency.items()
if taxon not in taxa_list)
def apply_filter(self, gene_threshold, species_threshold):
"""
This method will update two Cluster attributes, self.gene_flag and
self.species_flag, which will inform downstream objects if this
cluster respects the gene and species threshold
:param gene_threshold: Integer for the maximum number of gene copies
per species
:param species_threshold: Integer for the minimum number of species
present
"""
# Check whether cluster is compliant with species_threshold
if len(self.species_frequency) >= species_threshold and \
species_threshold:
self.species_compliant = True
else:
self.species_compliant = False
# Check whether cluster is compliant with gene_threshold
if max(self.species_frequency.values()) <= gene_threshold and \
gene_threshold:
self.gene_compliant = True
else:
self.gene_compliant = False
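# Illustrative example (a minimal sketch): for the cluster from the example
# above, allowing at most 2 gene copies per species and requiring at least
# 2 species,
#
#   cl.apply_filter(gene_threshold=2, species_threshold=2)
#   cl.gene_compliant        # True  (max copies per species is 2 <= 2)
#   cl.species_compliant     # True  (2 species present >= 2)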
class OrthoGroupException(Exception):
pass
class GroupLight(object):
"""
Analogous to Group object but with several changes to reduce memory usage
"""
def __init__(self, groups_file, gene_threshold=None,
species_threshold=None, ns=None):
self.gene_threshold = gene_threshold if gene_threshold else None
self.species_threshold = species_threshold if species_threshold \
else None
# Attribute containing the list of included species
self.species_list = []
# Attribute that will contain taxa to be excluded from analyses
self.excluded_taxa = []
self.species_frequency = []
# Attributes that will store the number (int) of cluster after gene and
# species filter
self.all_clusters = 0
self.num_gene_compliant = 0
self.num_species_compliant = 0
self.all_compliant = 0
# Attribute containing the total number of sequences
self.total_seqs = 0
# Attribute containing the maximum number of extra copies found in the
# clusters
self.max_extra_copy = 0
# Attribute with name of the group file, which will be an ID
self.name = os.path.abspath(groups_file)
self.table = groups_file.split(os.sep)[-1].split(".")[0]
# Initialize attribute containing the groups filtered using the gene and
# species threshold. This attribute can be updated at any time using
# the update_filtered_group method
self.filtered_groups = []
self._parse_groups(ns)
if type(self.species_threshold) is float:
self._get_sp_proportion()
def groups(self):
"""
Generator for group file. This replaces the self.groups attribute of
the original Group Object. Instead of loading the whole file into
memory, a generator is created to iterate over its contents. It may
run a bit slower but it's a lot more memory efficient.
:return:
"""
file_handle = open(self.name)
for line in file_handle:
if line.strip() != "":
yield line.strip()
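# Illustrative usage (a minimal sketch, assuming group_light_obj is an
# existing GroupLight instance): the generator is consumed like any other
# iterable, one raw group line at a time, e.g.
#
#   for raw_line in group_light_obj.groups():
#       cluster_name = raw_line.split(":")[0]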
def iter_species_frequency(self):
"""
In order to prevent permanent changes to the species_frequency
attribute due to the filtering of taxa, this iterable should be used
instead of the said variable. This creates a temporary deepcopy of
species_frequency which will be iterated over and eventually modified.
"""
# Since the items of species_frequency are mutable, this ensures
# that even those objects are correctly cloned
sp_freq = copy.deepcopy(self.species_frequency)
for cl in sp_freq:
yield cl
def _remove_tx(self, line):
"""
Given a group line, remove all references to the excluded taxa
:param line: raw group file line
"""
new_line = "{}:".format(line.split(":")[0])
tx_str = "\t".join([x for x in line.split(":")[1].split() if
x.split("|")[0] not in self.excluded_taxa])
return new_line + tx_str
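# Illustrative example (a minimal sketch, assuming "taxonB" is in
# self.excluded_taxa):
#
#   self._remove_tx("group_1: taxonA|seq1 taxonB|seq2")
#   # -> "group_1:taxonA|seq1"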
def _apply_filter(self, cl):
"""
Sets or updates the basic group statistics, such as the number of
orthologs compliant with the gene copy and minimum taxa filters.
:param cl: dictionary. Contains the number of occurrences for each
taxon present in the ortholog cluster
(e.g. {"taxonA": 2, "taxonB": 1}).
"""
# First, remove excluded taxa from the cl object since this will
# impact all other filters
for tx in self.excluded_taxa:
cl.pop(tx, None)
if cl:
self.all_clusters += 1
extra_copies = max(cl.values())
if extra_copies > self.max_extra_copy:
self.max_extra_copy = extra_copies
if extra_copies <= self.gene_threshold and self.gene_threshold and\
len(cl) >= self.species_threshold and \
self.species_threshold:
self.num_gene_compliant += 1
self.num_species_compliant += 1
self.all_compliant += 1
elif (extra_copies <= self.gene_threshold and
self.gene_threshold) or self.gene_threshold == 0:
self.num_gene_compliant += 1
elif len(cl) >= self.species_threshold and \
self.species_threshold:
self.num_species_compliant += 1
def _get_compliance(self, cl):
"""
Determines whether an ortholog cluster is compliant with the specified
ortholog filters.
:param cl: dictionary. Contains the number of occurrences for each
taxon present in the ortholog cluster
(e.g. {"taxonA": 2, "taxonB": 1}).
:return: tuple. The first element refers to the gene copy filter
while the second refers to the minimum taxa filter. Values of 1
indicate that the ortholog cluster is compliant (see the illustrative
example after this method).
"""
for tx in self.excluded_taxa:
cl.pop(tx, None)
if cl:
cp = max(cl.values())
if not self.gene_threshold and not self.species_threshold:
return 1, 1
if cp <= self.gene_threshold and self.gene_threshold and\
len(cl) >= self.species_threshold and \
self.species_threshold:
return 1, 1
elif (cp <= self.gene_threshold and self.gene_threshold) or \
not self.gene_threshold:
return 1, 0
elif (len(cl) >= self.species_threshold and
self.species_threshold) or not self.species_threshold:
return 0, 1
else:
return 0, 0
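# Illustrative example (a minimal sketch): with self.gene_threshold = 2 and
# self.species_threshold = 3,
#
#   self._get_compliance({"taxonA": 2, "taxonB": 1})               # -> (1, 0)
#   self._get_compliance({"taxonA": 1, "taxonB": 1, "taxonC": 4})  # -> (0, 1)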
def _reset_counter(self):
self.all_clusters = 0
self.num_gene_compliant = 0
self.num_species_compliant = 0
self.all_compliant = 0
def _parse_groups(self, ns=None):
for cl in self.groups():
if ns:
if ns.stop:
raise KillByUser("")
# Retrieve the field containing the ortholog sequences
sequence_field = cl.split(":")[1]
# Update species frequency list
sp_freq = Counter((x.split("|")[0] for x in
sequence_field.split()))
self.species_frequency.append(sp_freq)
# Update number of sequences
self.total_seqs += len(sequence_field)
# Update max number of extra copies
extra_copies = max(sp_freq.values())
if extra_copies > self.max_extra_copy:
self.max_extra_copy = max(sp_freq.values())
self.species_list.extend([x for x in sp_freq if x not in
self.species_list])
# Apply filters, if any
# gene filter
if self.species_threshold and self.gene_threshold:
self._apply_filter(sp_freq)
def exclude_taxa(self, taxa_list, update_stats=False):
"""
Updates the excluded_taxa attribute and updates group statistics if
update_stats is True. This does not change the Group object data
permanently, only sets an attribute that will be taken into account
when plotting and exporting data.
:param taxa_list: list. List of taxa that should be excluded from
downstream operations
:param update_stats: boolean. If True, it will update the group
statistics
"""
# IF the taxa_list is the same as the excluded_taxa attribute,
# there is nothing to do
if sorted(taxa_list) == sorted(self.excluded_taxa):
return
self.species_list = [x for x in self.species_list + self.excluded_taxa
if x not in taxa_list]
self.excluded_taxa = taxa_list
if update_stats:
self._reset_counter()
for cl in self.iter_species_frequency():
self._apply_filter(cl)
def basic_group_statistics(self, update_stats=True):
if update_stats:
self._reset_counter()
for cl in self.iter_species_frequency():
self._apply_filter(cl)
return len(self.species_frequency), self.total_seqs, \
self.num_gene_compliant, self.num_species_compliant, \
self.all_compliant
def _get_sp_proportion(self):
"""
When the species filter is a float value between 0 and 1, convert
this proportion into absolute values (rounded up), since filters were
already designed for absolutes.
"""
self.species_threshold = int(self.species_threshold *
len(self.species_list))
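# Illustrative example (a minimal sketch): with 10 taxa in self.species_list,
# a species_threshold of 0.5 becomes int(0.5 * 10) == 5 taxa.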
def update_filters(self, gn_filter, sp_filter, update_stats=False):
"""
Updates the group filter attributes and group summary stats if
update_stats is True. This method does not change the
data of the Group object, only sets attributes that will be taken into
account when plotting or exporting data
:param gn_filter: integer. Maximum number of gene copies allowed in an
ortholog cluster
:param sp_filter: integer/float. Minimum number/proportion of taxa
representation
:param update_stats: boolean. If True it will update the group summary
statistics
"""
# If the provided filters are the same as the current group attributes
# there is nothing to do
if (gn_filter, sp_filter) == (self.gene_threshold,
self.species_threshold):
return
self.gene_threshold = gn_filter
self.species_threshold = sp_filter
if type(self.species_threshold) is float:
self._get_sp_proportion()
if update_stats:
self._reset_counter()
for cl in self.iter_species_frequency():
self._apply_filter(cl)
def retrieve_sequences(self, sqldb, protein_db, dest="./",
shared_namespace=None, outfile=None):
"""
:param sqldb: string. Path to sqlite database file
:param protein_db: string. Path to protein database file
:param dest: string. Directory where sequences will be exported
:param shared_namespace: Namespace object to communicate with
TriFusion's main process
:param outfile: If set, all sequences will instead be saved in a
single output file. This is used for the nucleotide sequence export
:return:
"""
if not os.path.exists(dest) and not outfile:
os.makedirs(dest)
if not os.path.exists(join(dest, "header_correspondance")):
os.makedirs(join(dest, "header_correspondance"))
if shared_namespace:
shared_namespace.act = shared_namespace.msg = "Creating database"
# Stores sequences that could not be retrieved
shared_namespace.missed = shared_namespace.counter = 0
shared_namespace.progress = 0
# Get number of lines of protein database
p = 0
with open(protein_db) as fh:
for p, _ in enumerate(fh):
pass
shared_namespace.max_pb = shared_namespace.total = p + 1
# Connect to database
con = sqlite3.connect(sqldb)
c = con.cursor()
table_name = "".join([x for x in protein_db if x.isalnum()]).encode(
"utf8")
# Create table if it does not exist
if not c.execute("SELECT name FROM sqlite_master WHERE type='table' "
"AND name='{}'".format(table_name)).fetchall():
c.execute("CREATE TABLE [{}] (seq_id text PRIMARY KEY, seq text)".
format(table_name))
# Populate database
with open(protein_db) as ph:
seq = ""
for line in ph:
# Kill switch
if shared_namespace:
if shared_namespace.stop:
con.close()
raise KillByUser("")
shared_namespace.progress += 1
shared_namespace.counter += 1
if line.startswith(">"):
if seq != "":
c.execute("INSERT INTO [{}] VALUES (?, ?)".
format(table_name), (seq_id, seq))
seq_id = line.strip()[1:]
seq = ""
else:
seq += line.strip()
con.commit()
if shared_namespace:
shared_namespace.act = shared_namespace.msg = "Fetching sequences"
shared_namespace.good = shared_namespace.counter = 0
shared_namespace.progress = 0
shared_namespace.max_pb = shared_namespace.total = \
self.all_compliant
# Set single output file, if option is set
if outfile:
output_handle = open(join(dest, outfile), "w")
# Fetching sequences
for line, cl in zip(self.groups(), self.iter_species_frequency()):
# Kill switch
if shared_namespace:
if shared_namespace.stop:
con.close()
raise KillByUser("")
# Filter sequences
if self._get_compliance(cl) == (1, 1):
if shared_namespace:
shared_namespace.good += 1
shared_namespace.progress += 1
shared_namespace.counter += 1
# Retrieve sequences from current cluster
if self.excluded_taxa:
line = self._remove_tx(line)
fields = line.split(":")
# Open file
if not outfile:
cl_name = fields[0]
oname = join(dest, cl_name)
mname = join(dest, "header_correspondance", cl_name)
output_handle = open(oname + ".fas", "w")
map_handle = open(mname + "_headerMap.csv", "w")
seqs = fields[-1].split()
for i in seqs:
# Query database
c.execute("SELECT * FROM [{}] WHERE seq_id = ?".
format(table_name), (i,))
vals = c.fetchone()
# Handles cases where the sequence could not be retrieved
# If outfile is set, output_handle will be a single file
# for all groups. If not, it will represent an individual
# group file
try:
if not outfile:
tx_name = vals[0].split("|")[0]
output_handle.write(">{}\n{}\n".format(tx_name,
vals[1]))
map_handle.write("{}; {}\n".format(vals[0],
tx_name))
else:
output_handle.write(">{}\n{}\n".format(vals[0],
vals[1]))
except TypeError:
pass
if not outfile:
output_handle.close()
if outfile:
output_handle.close()
con.close()
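# Illustrative usage (a minimal sketch; the file and directory names below
# are hypothetical placeholders):
#
#   groups = GroupLight("groups.txt", gene_threshold=1, species_threshold=4)
#   groups.retrieve_sequences("orthologs.sqlite", "proteins.fasta",
#                             dest="ortholog_seqs")
#
# This builds (or reuses) a sqlite table holding the protein database and
# then writes one Fasta file per compliant ortholog cluster into `dest`,
# plus a header correspondence table per cluster.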
def export_filtered_group(self, output_file_name="filtered_groups",
dest="./", shared_namespace=None):
if shared_namespace:
shared_namespace.act = "Exporting filtered orthologs"
shared_namespace.missed = 0
shared_namespace.good = 0
output_handle = open(os.path.join(dest, output_file_name), "w")
for p, (line, cl) in enumerate(zip(self.groups(),
self.iter_species_frequency())):
if shared_namespace:
if shared_namespace.stop:
raise KillByUser("")
if shared_namespace:
shared_namespace.progress = p
if self._get_compliance(cl) == (1, 1):
if shared_namespace:
shared_namespace.good += 1
if self.excluded_taxa:
l = self._remove_tx(line)
else:
l = line
output_handle.write("{}\n".format(l))
output_handle.close()
def bar_species_distribution(self, filt=False):
if filt:
data = Counter((len(cl) for cl in self.iter_species_frequency() if
self._get_compliance(cl) == (1, 1)))
else:
data = Counter((len(cl) for cl in self.species_frequency))
x_labels = [x for x in list(data)]
data = list(data.values())
# When there is no data, return early with a null data entry
if not data:
return {"data": None}
# Sort lists
x_labels = [list(x) for x in zip(*sorted(zip(x_labels, data)))][0]
# Convert label to strings
x_labels = [str(x) for x in x_labels]
title = "Taxa frequency distribution"
ax_names = ["Number of taxa", "Ortholog frequency"]
return {"data": [data],
"title": title,
"ax_names": ax_names,
"labels": x_labels,
"table_header": ["Number of species",
"Ortholog frequency"]}
def bar_genecopy_distribution(self, filt=False):
"""
Creates a bar plot with the distribution of gene copies across
clusters
:param filt: Boolean, whether or not to use the filtered groups.
"""
if filt:
data = Counter((max(cl.values()) for cl in
self.iter_species_frequency() if
self._get_compliance(cl) == (1, 1)))
else:
data = Counter((max(cl.values()) for cl in self.species_frequency
if cl))
x_labels = [x for x in list(data)]
data = list(data.values())
# When there is no data, return early with a null data entry
if not data:
return {"data": None}
x_labels, data = (list(x) for x in zip(*sorted(zip(x_labels, data))))
# Convert label to strings
x_labels = [str(x) for x in x_labels]
title = "Gene copy distribution"
ax_names = ["Number of gene copies", "Ortholog frequency"]
return {"data": [data],
"labels": x_labels,
"title": title,
"ax_names": ax_names,
"table_header": ["Number of gene copies",
"Ortholog frequency"]}
def bar_species_coverage(self, filt=False):
"""
Creates a stacked bar plot with the proportion of available versus
missing data for each species.
:return:
"""
data = Counter(dict((x, 0) for x in self.species_list))
self._reset_counter()
for cl in self.iter_species_frequency():
self._apply_filter(cl)
if filt:
data += Counter(dict((x, 1) for x, y in cl.items() if y > 0 and
self._get_compliance(cl) == (1, 1)))
else:
data += Counter(dict((x, 1) for x, y in cl.items() if y > 0))
data = data.most_common()
# When there is no data, return early with a null data entry
if not data:
return {"data": None}
x_labels = [str(x[0]) for x in data]
data = [[x[1] for x in data], [self.all_clusters - x[1] if not
filt else self.all_compliant - x[1]
for x in data]]
lgd_list = ["Available data", "Missing data"]
ax_names = [None, "Ortholog frequency"]
return {"data": data,
"labels": x_labels,
"lgd_list": lgd_list,
"ax_names": ax_names}
def bar_genecopy_per_species(self, filt=False):
data = Counter(dict((x, 0) for x in self.species_list))
self._reset_counter()
for cl in self.iter_species_frequency():
self._apply_filter(cl)
if filt:
data += Counter(dict((x, y) for x, y in cl.items() if y > 1 and
self._get_compliance(cl) == (1, 1)))
else:
data += Counter(dict((x, y) for x, y in cl.items() if y > 1))
data = data.most_common()
# When there is no data, return early with a null data entry
if not data:
return {"data": None}
x_labels = [str(x[0]) for x in data]
data = [[x[1] for x in data]]
ax_names = [None, "Gene copies"]
return {"data": data,
"labels": x_labels,
"ax_names": ax_names}
class Group(object):
""" This represents the main object of the orthomcl toolbox module. It is
initialized with the file name of an OrthoMCL groups file and provides
several methods that act on that group file. To process multiple Group
objects, see MultiGroups object """
def __init__(self, groups_file, gene_threshold=None,
species_threshold=None, project_prefix="MyGroups"):
# Initializing thresholds. These may be set from the start, or using
# some method that uses them as arguments
self.gene_threshold = gene_threshold
self.species_threshold = species_threshold
# Attribute containing the list of included species
self.species_list = []
# Attribute that will contain taxa to be excluded from analyses
self.excluded_taxa = []
# Attributes that will store the number (int) of cluster after gene and
# species filter
self.all_compliant = 0
self.num_gene_compliant = 0
self.num_species_compliant = 0
# Attribute containing the total number of sequences
self.total_seqs = 0
# Attribute containing the maximum number of extra copies found in the
# clusters
self.max_extra_copy = 0
# Attribute with name of the group file, which will be an ID
self.group_name = groups_file
# Initialize the project prefix for possible output files
self.prefix = project_prefix
# Initialize attribute containing the original groups
self.groups = []
# Initialize attribute containing the groups filtered using the gene and
# species threshold. This attribute can be updated at any time using
# the update_filtered_group method
self.filtered_groups = []
self.name = None
# Parse groups file and populate groups attribute
self.__parse_groups(groups_file)
def __parse_groups(self, groups_file):
"""
Parses the ortholog clusters in the groups file and populates the
self.groups list with Cluster objects for each line in the groups file.
:param groups_file: File name for the orthomcl groups file
:return: populates the groups attribute
"""
self.name = groups_file
self.species_list = []
groups_file_handle = open(groups_file)
for line in groups_file_handle:
cluster_object = Cluster(line)
# Add cluster to general group list
self.groups.append(cluster_object)
# Update total sequence counter
self.total_seqs += len(cluster_object.sequences)
# Update maximum number of extra copies, if needed
if max(cluster_object.species_frequency.values()) > \
self.max_extra_copy:
self.max_extra_copy = \
max(cluster_object.species_frequency.values())
# Update species_list attribute
self.species_list = list(set(self.species_list).union(
set(cluster_object.species_frequency.keys())))
# If thresholds have been specified, update self.filtered_groups
# attribute
if self.species_threshold and self.gene_threshold:
cluster_object.apply_filter(self.gene_threshold,
self.species_threshold)
if cluster_object.species_compliant and \
cluster_object.gene_compliant:
# Add cluster to the filtered group list
self.filtered_groups.append(cluster_object)
self.all_compliant += 1
# Update num_species_compliant attribute
if cluster_object.species_compliant:
self.num_species_compliant += 1
# Update num_gene_compliant attribute
if cluster_object.gene_compliant:
self.num_gene_compliant += 1
def exclude_taxa(self, taxa_list):
"""
Adds a taxon_name to the excluded_taxa list and updates the
filtered_groups list
"""
self.excluded_taxa.extend(taxa_list)
# Storage variable for new filtered groups
filtered_groups = []
# Reset max_extra_copy attribute
self.max_extra_copy = 0
for cl in self.groups:
cl.remove_taxa(taxa_list)
if cl.iter_sequences and cl.species_frequency:
filtered_groups.append(cl)
# Update maximum number of extra copies, if needed
if max(cl.species_frequency.values()) > self.max_extra_copy:
self.max_extra_copy = max(cl.species_frequency.values())
# Update species_list
self.species_list = sorted(list(set(self.species_list) -
set(taxa_list)))
self.filtered_groups = self.groups = filtered_groups
def get_filters(self):
"""
Returns a tuple with the thresholds for max gene copies and min species
"""
return self.gene_threshold, self.species_threshold
def basic_group_statistics(self):
"""
This method creates a basic table in list format containing basic
information of the groups file (total number of clusters, total number
of sequences, number of clusters below the gene threshold, number of
clusters below the species threshold and number of clusters below the
gene AND species threshold)
:return: List containing number of
[total clusters,
total sequences,
clusters above gene threshold,
clusters above species threshold,
clusters above gene and species threshold]
"""
# Total number of clusters
total_cluster_num = len(self.groups)
# Total number of sequences
total_sequence_num = self.total_seqs
# Gene compliant clusters
clusters_gene_threshold = self.num_gene_compliant
# Species compliant clusters
clusters_species_threshold = self.num_species_compliant
clusters_all_threshold = len(self.filtered_groups)
statistics = [total_cluster_num, total_sequence_num,
clusters_gene_threshold, clusters_species_threshold,
clusters_all_threshold]
return statistics
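# Illustrative example (a minimal sketch): for a hypothetical group file with
# 1000 clusters and 8000 sequences, of which 600 pass the gene copy filter,
# 700 pass the species filter and 450 pass both,
#
#   group_obj.basic_group_statistics()   # -> [1000, 8000, 600, 700, 450]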
def paralog_per_species_statistic(self, output_file_name=
"Paralog_per_species.csv", filt=True):
"""
This method creates a CSV table with information on the number of
paralog clusters per species
:param output_file_name: string. Name of the output csv file
:param filt: Boolean. Whether to use the filtered groups (True) or
total groups (False)
"""
# Setting which clusters to use
if filt:
groups = self.filtered_groups
else:
groups = self.groups
paralog_count = dict((species, 0) for species in self.species_list)
for cluster in groups:
for species in paralog_count:
if cluster.species_frequency[species] > 1:
paralog_count[species] += 1
# Writing table
output_handle = open(output_file_name, "w")
output_handle.write("Species; Clusters with paralogs\n")
for species, val in paralog_count.items():
output_handle.write("%s; %s\n" % (species, val))
output_handle.close()
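# Illustrative example (a minimal sketch) of the CSV written above, for two
# hypothetical species:
#
#   Species; Clusters with paralogs
#   taxonA; 12
#   taxonB; 3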
def export_filtered_group(self, output_file_name="filtered_groups",
dest="./", get_stats=False,
shared_namespace=None):
"""
Export the filtered groups into a new file.
:param output_file_name: string, name of the filtered groups file
:param dest: string, path to directory where the filtered groups file
will be created
:param get_stats: Boolean, whether to return the basic count stats or
not
:param shared_namespace: Namespace object, for communicating with
main process.
"""
if self.filtered_groups:
if shared_namespace:
shared_namespace.act = "Exporting filtered orthologs"
output_handle = open(os.path.join(dest, output_file_name), "w")
if get_stats:
all_orthologs = len(self.groups)
sp_compliant = 0
gene_compliant = 0
final_orthologs = 0
for cluster in self.filtered_groups:
if shared_namespace:
shared_namespace.progress = \
self.filtered_groups.index(cluster)
if cluster.species_compliant and cluster.gene_compliant:
output_handle.write("%s: %s\n" % (
cluster.name, " ".join(cluster.iter_sequences)))
if get_stats:
final_orthologs += 1
if get_stats:
if cluster.species_compliant:
sp_compliant += 1
if cluster.gene_compliant:
gene_compliant += 1
output_handle.close()
if get_stats:
return all_orthologs, sp_compliant, gene_compliant,\
final_orthologs
else:
raise OrthoGroupException("The groups object must be filtered "
"before using the export_filtered_group "
"method")
def update_filters(self, gn_filter, sp_filter):
"""
Sets new values for the self.species_threshold and self.gene_threshold
and updates the filtered_group
:param gn_filter: int. Maximum value for gene copies in cluster
:param sp_filter: int. Minimum value for species in cluster
"""
self.species_threshold = int(sp_filter)
self.gene_threshold = int(gn_filter)
self.update_filtered_group()
def update_filtered_group(self):
"""
This method creates a new filtered group variable, like
export_filtered_group, but instead of writing into a new file, it
replaces the self.filtered_groups variable
"""
self.filtered_groups = []
# Reset gene and species compliant counters
self.num_gene_compliant = 0
self.num_species_compliant = 0
for cluster in self.groups:
cluster.apply_filter(self.gene_threshold, self.species_threshold)
if cluster.species_compliant and cluster.gene_compliant:
self.filtered_groups.append(cluster)
# Update num_species_compliant attribute
if cluster.species_compliant:
self.num_species_compliant += 1
# Update num_gene_compliant attribute
if cluster.gene_compliant:
self.num_gene_compliant += 1
def retrieve_sequences(self, database, dest="./", mode="fasta",
filt=True, shared_namespace=None):
"""
When provided with a database in Fasta format, this will use the
Alignment object to retrieve sequences
:param database: String. Fasta file
:param dest: directory where files will be save
:param mode: string, whether to retrieve sequences to a file ('fasta'),
or a dictionary ('dict')
:param filt: Boolean. Whether to use the filtered groups (True) or
total groups (False)
:param shared_namespace: Namespace object. This argument is meant for
when sequences are retrieved in a background process, where there is a need
to update the main process on the progress of this method
:param dest: string. Path to directory where the retrieved sequences
will be created.
"""
if mode == "dict":
seq_storage = {}
if filt:
groups = self.filtered_groups
else:
groups = self.groups
if not os.path.exists("Orthologs"):
os.makedirs("Orthologs")
# Update method progress
if shared_namespace:
shared_namespace.act = "Creating database"
shared_namespace.progress = 0
print("Creating db")
# Check what type of database was provided
#TODO: Add exception handling if file is not parsed with Alignment
if isinstance(database, str):
try:
db_aln = pickle.load(open(database, "rb"))
except (EnvironmentError, pickle.UnpicklingError):
db_aln = Alignment(database)
db_aln = db_aln.alignment
elif isinstance(database, dict):
db_aln = database
else:
raise OrthoGroupException("The input database is neither a string "
"nor a dictionary object")
print("Retrieving seqs")
# Update method progress
if shared_namespace:
shared_namespace.act = "Retrieving sequences"
for cluster in groups:
if shared_namespace:
shared_namespace.progress += 1
if mode == "dict":
seq_storage[cluster.name] = []
output_handle = open(join(dest, cluster.name + ".fas"), "w")
for sequence_id in cluster.iter_sequences:
seq = db_aln[sequence_id]
if mode == "fasta":
output_handle.write(">%s\n%s\n" % (sequence_id, seq))
elif mode == "dict":
seq_storage[cluster.name].append([sequence_id.split("|")[0],
seq])
output_handle.close()
if mode == "dict":
return seq_storage
def bar_species_distribution(self, dest="./", filt=False, ns=None,
output_file_name="Species_distribution"):
"""
Creates a bar plot with the distribution of species numbers across
clusters
:param dest: string, destination directory
:param filt: Boolean, whether or not to use the filtered groups.
:param output_file_name: string, name of the output file
"""
data = []
# Determine which groups to use
if filt:
groups = self.filtered_groups
else:
groups = self.groups
for i in groups:
if ns:
if ns.stop:
raise KillByUser("")
data.append(len([x for x, y in i.species_frequency.items()
if y > 0]))
# Transform data into histogram-like
transform_data = Counter(data)
x_labels = [x for x in list(transform_data)]
y_vals = list(transform_data.values())
# Sort lists
x_labels, y_vals = (list(x) for x in zip(*sorted(zip(x_labels,
y_vals))))
# Convert label to strings
x_labels = [str(x) for x in x_labels]
if ns:
if ns.stop:
raise KillByUser("")
# Create plot
b_plt, lgd, _ = bar_plot([y_vals], x_labels,
title="Taxa frequency distribution",
ax_names=["Number of taxa", "Ortholog frequency"])
b_plt.savefig(os.path.join(dest, output_file_name), bbox_inches="tight",
dpi=400)
# Create table
table_list = [["Number of species", "Ortholog frequency"]]
for x, y in zip(x_labels, y_vals):
table_list.append([x, y])
return b_plt, lgd, table_list
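# Illustrative usage (a minimal sketch, assuming group_obj is an existing
# Group instance and "plots" is an existing directory):
#
#   b_plt, lgd, table = group_obj.bar_species_distribution(
#       dest="plots", filt=True,
#       output_file_name="species_distribution_filtered")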
def bar_genecopy_distribution(self, dest="./", filt=False,
output_file_name="Gene_copy_distribution.png"):
"""
Creates a bar plot with the distribution of gene copies across
clusters
:param dest: string, destination directory
:param filt: Boolean, whether or not to use the filtered groups.
:param output_file_name: string, name of the output file
"""
data = []
# Determine which groups to use
if filt:
groups = self.filtered_groups
else:
groups = self.groups
for cl in groups:
# Get max number of copies
max_copies = max(cl.species_frequency.values())
data.append(max_copies)
# Transform data into histogram-like
transform_data = Counter(data)
x_labels = [x for x in list(transform_data)]
y_vals = list(transform_data.values())
# Sort lists
x_labels, y_vals = (list(x) for x in zip(*sorted(zip(x_labels,
y_vals))))
# Convert label to strings
x_labels = [str(x) for x in x_labels]
# Create plot
b_plt, lgd, _ = bar_plot([y_vals], x_labels,
title="Gene copy distribution",
ax_names=["Number of gene copies", "Ortholog frequency"],
reverse_x=False)
b_plt.savefig(os.path.join(dest, output_file_name), bbox_inches="tight",
figsize=(8 * len(x_labels) / 4, 6), dpi=200)
# Create table
table_list = [["Number of gene copies", "Ortholog frequency"]]
for x, y in zip(x_labels, y_vals):
table_list.append([x, y])
return b_plt, lgd, table_list
def bar_species_coverage(self, dest="./", filt=False, ns=None,
output_file_name="Species_coverage"):
"""
Creates a stacked bar plot with the proportion of available versus
missing data for each species.
:return:
"""
# Determine which groups to use
if filt:
groups = self.filtered_groups
else:
groups = self.groups
data = Counter(dict((x, 0) for x in self.species_list))
for cl in groups:
if ns:
if ns.stop:
raise KillByUser("")
data += Counter(dict((x, 1) for x, y in cl.species_frequency.items()
if y > 0))
xlabels = [str(x) for x in list(data.keys())]
data = [list(data.values()), [len(groups) - x for x in
data.values()]]
lgd_list = ["Available data", "Missing data"]
if ns:
if ns.stop:
raise KillByUser("")
b_plt, lgd, _ = bar_plot(data, xlabels, lgd_list=lgd_list,
ax_names=[None, "Ortholog frequency"])
b_plt.savefig(os.path.join(dest, output_file_name), bbox_inches="tight",
dpi=200)
return b_plt, lgd, ""
class MultiGroups(object):
""" Creates an object composed of multiple Group objects """
def __init__(self, groups_files=None, gene_threshold=None,
species_threshold=None, project_prefix="MyGroups"):
"""
:param groups_files: A list containing the file names of the multiple
group files
:return: Populates the self.multiple_groups attribute
"""
# If a MultiGroups is initialized with duplicate Group objects, these
# will be stored in a list. If all Group objects are unique, the list
# will remain empty
self.duplicate_groups = []
# Initializing thresholds. These may be set from the start, or using
# some method that uses them as arguments
self.gene_threshold = gene_threshold
self.species_threshold = species_threshold
self.prefix = project_prefix
self.multiple_groups = {}
self.filters = {}
if groups_files:
for group_file in groups_files:
# If group_file is already a Group object, just add it
if not isinstance(group_file, Group):
# Check for duplicate group files
group_object = Group(group_file, self.gene_threshold,
self.species_threshold)
else:
group_object = group_file
if group_object.name in self.multiple_groups:
self.duplicate_groups.append(group_object.name)
else:
self.multiple_groups[group_object.name] = group_object
self.filters[group_object.name] = (1,
len(group_object.species_list))
def __iter__(self):
return iter(self.multiple_groups)
def iter_gnames(self):
return (x.name for x in self.multiple_groups)
def get_gnames(self):
return [x.name for x in self.multiple_groups]
def add_group(self, group_obj):
"""
Adds a group object
:param group_obj: Group object
"""
# Check for duplicate groups
if group_obj.name in self.multiple_groups:
self.duplicate_groups.append(group_obj.name)
else:
self.multiple_groups[group_obj.name] = group_obj
def remove_group(self, group_id):
"""
Removes a group object according to its name
:param group_id: string, name matching a Group object name attribute
"""
if group_id in self.multiple_groups:
del self.multiple_groups[group_id]
def get_group(self, group_id):
"""
Returns a group object based on its name. If the name does not match
any group object, returns None
:param group_id: string. Name of group object
"""
try:
return self.multiple_groups[group_id]
except KeyError:
return
def add_multigroups(self, multigroup_obj):
"""
Merges a MultiGroup object
:param multigroup_obj: MultiGroup object
"""
for group_obj in multigroup_obj:
self.add_group(group_obj)
def update_filters(self, gn_filter, sp_filter, group_names=None,
default=False):
"""
This will not change the Group object themselves, only the filter
mapping. The filter is only applied when the Group object is retrieved
to reduce computations
:param gn_filter: int, filter for max gene copies
:param sp_filter: int, filter for min species
:param group_names: list, with names of group objects
"""
if group_names:
for group_name in group_names:
# Get group object
group_obj = self.multiple_groups[group_name]
# Define filters
gn_filter = gn_filter if not default else 1
sp_filter = sp_filter if not default else \
len(group_obj.species_list)
# Update Group object with new filters
group_obj.update_filters(gn_filter, sp_filter)
# Update filter map
self.filters[group_name] = (gn_filter, sp_filter)
for group_name, group_obj in self.multiple_groups.items():
# Define filters
gn_filter = gn_filter if not default else 1
sp_filter = sp_filter if not default else \
len(group_obj.species_list)
# Update Group object with new filters
group_obj.update_filters(gn_filter, sp_filter)
# Update filter map
self.filters[group_name] = (gn_filter, sp_filter)
def basic_multigroup_statistics(self, output_file_name=
"multigroup_base_statistics.csv"):
"""
:param output_file_name:
:return:
"""
# Creates the storage for the statistics of the several files
statistics_storage = OrderedDict()
for group in self.multiple_groups:
group_statistics = group.basic_group_statistics()
statistics_storage[group.name] = group_statistics
output_handle = open(self.prefix + "." + output_file_name, "w")
output_handle.write("Group file; Total clusters; Total sequences; "
"Clusters below gene threshold; Clusters above "
"species threshold; Clusters below gene and above"
" species thresholds\n")
for group, vals in statistics_storage.items():
output_handle.write("%s; %s\n" % (group, ";".join([str(x) for x
in vals])))
output_handle.close()
def bar_orthologs(self, output_file_name="Final_orthologs",
dest="./", stats="total"):
"""
Creates a bar plot with the final ortholog values for each group file
:param output_file_name: string. Name of output file
:param dest: string. output directory
:param stats: string. The statistics that should be used to generate
the bar plot. Options are:
..: "1": Total orthologs
..: "2": Species compliant orthologs
..: "3": Gene compliant orthologs
..: "4": Final orthologs
..: "all": All of the above
Multiple combinations can be provided, for instance: "123" will
display bars for total, species compliant and gene compliant stats
"""
# Stores the x-axis labels
x_labels = []
# Stores final ortholog values for all 4 possible data sets
vals = [[], [], [], []]
lgd = ["Total orthologs", "After species filter", "After gene filter",
"Final orthologs"]
# Get final ortholog values
for g_obj in self.multiple_groups:
x_labels.append(g_obj.name.split(os.sep)[-1])
# Populate total orthologs
if "1" in stats or stats == "all":
vals[0].append(len(g_obj.groups))
# Populate species compliant orthologs
if "2" in stats or stats == "all":
vals[1].append(g_obj.num_species_compliant)
# Populate gene compliant orthologs
if "3" in stats or stats == "all":
vals[2].append(g_obj.num_gene_compliant)
# Populate final orthologs
if "4" in stats or stats == "all":
vals[3].append(len(g_obj.filtered_groups))
# Filter valid data sets
lgd_list = [x for x in lgd if vals[lgd.index(x)]]
vals = [l for l in vals if l]
# Create plot
b_plt, lgd = multi_bar_plot(vals, x_labels, lgd_list=lgd_list)
b_plt.savefig(os.path.join(dest, output_file_name),
bbox_extra_artists=(lgd,), bbox_inches="tight")
# Create table list object
table_list = []
# Create header
table_list.append([""] + x_labels)
# Create content
for i in range(len(vals)):
table_list += [x for x in [[lgd_list[i]] + vals[i]]]
return b_plt, lgd, table_list
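# Illustrative usage (a minimal sketch, assuming multi_groups is an existing
# MultiGroups instance and "plots" is an existing directory): plotting only
# the total and final ortholog counts for each group file.
#
#   b_plt, lgd, table = multi_groups.bar_orthologs(stats="14", dest="plots")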
def group_overlap(self):
"""
This will find the overlap of orthologs between two group files.
THIS METHOD IS TEMPORARY AND EXPERIMENTAL
"""
def parse_groups(group_obj):
"""
Returns a list with the sorted ortholog clusters
"""
storage = []
for cluster in group_obj.groups:
storage.append(set(cluster.iter_sequences))
return storage
if len(self.multiple_groups) != 2:
raise SystemExit("This method can only be used with two group "
"files")
group1 = self.multiple_groups[0]
group2 = self.multiple_groups[1]
group1_list = parse_groups(group1)
group2_list = parse_groups(group2)
counter = 0
for i in group1_list:
if i in group2_list:
counter += 1
class MultiGroupsLight(object):
"""
Creates an object composed of multiple Group objects like MultiGroups.
However, instead of storing the groups in memory, these are shelved on
disk.
"""
# The report calls available
calls = ['bar_genecopy_distribution',
'bar_species_distribution',
'bar_species_coverage',
'bar_genecopy_per_species']
def __init__(self, db_path, groups=None, gene_threshold=None,
species_threshold=None, project_prefix="MyGroups",
ns=None):
"""
:param groups: A list containing the file names of the multiple
group files
:return: Populates the self.multiple_groups attribute
"""
self.db_path = db_path
# If a MultiGroups is initialized with duplicate Group objects, their
# names will be stored in a list. If all Group objects are unique, the
# list will remain empty
self.duplicate_groups = []
self.groups = {}
self.groups_stats = {}
# Attribute that will store the paths of badly formatted group files
self.bad_groups = []
# Initializing thresholds. These may be set from the start, or using
# some method that uses them as arguments
self.gene_threshold = gene_threshold
self.species_threshold = species_threshold
# Initializing mapping of group filters to their names. Should be
# something like {"groupA": (1, 10)}
self.filters = {}
self.taxa_list = {}
self.excluded_taxa = {}
# This attribute will contain a dictionary with the maximum extra copies
# for each group object
self.max_extra_copy = {}
# This attribute will contain a list with the number of species for
# each group object, excluding replicates. If a MultiGroupLight object
# contains Group objects with different taxa numbers, this attribute
# can be used to issue a warning
self.species_number = []
self.prefix = project_prefix
if ns:
ns.files = len(groups)
if groups:
for group_file in groups:
# If group_file is already a Group object, just add it
if not isinstance(group_file, GroupLight):
try:
if ns:
if ns.stop:
raise KillByUser("")
ns.counter += 1
group_object = GroupLight(group_file,
self.gene_threshold,
self.species_threshold,
ns=ns)
except Exception as e:
print(e.message)
self.bad_groups.append(group_file)
continue
else:
group_object = group_file
# Check for duplicate group files
if group_object.name in self.groups:
                    self.duplicate_groups.append(group_object.name)
else:
self.add_group(group_object)
def __iter__(self):
for k, val in self.groups.items():
yield k, pickle.load(open(val, "rb"))
def clear_groups(self):
"""
Clears the current MultiGroupsLight object
"""
for f in self.groups.values():
os.remove(f)
self.duplicate_groups = []
self.groups = {}
self.groups_stats = {}
self.filters = {}
self.max_extra_copy = {}
self.species_number = []
self.gene_threshold = self.species_threshold = 0
def add_group(self, group_obj):
"""
Adds a group object
:param group_obj: Group object
"""
# Check for duplicate groups
if group_obj.name not in self.groups:
gpath = os.path.join(self.db_path,
"".join(random.choice(string.ascii_uppercase) for _ in
range(15)))
pickle.dump(group_obj, open(gpath, "wb"))
self.groups[group_obj.name] = gpath
self.filters[group_obj.name] = (1, len(group_obj.species_list), [])
self.max_extra_copy[group_obj.name] = group_obj.max_extra_copy
if len(group_obj.species_list) not in self.species_number:
self.species_number.append(len(group_obj.species_list))
else:
self.duplicate_groups.append(group_obj.name)
def remove_group(self, group_id):
"""
Removes a group object according to its name
:param group_id: string, name matching a Group object name attribute
"""
if group_id in self.groups:
os.remove(self.groups[group_id])
del self.groups[group_id]
def get_group(self, group_id):
"""
Returns a group object based on its name. If the name does not match
any group object, returns None
:param group_id: string. Name of group object
"""
try:
return pickle.load(open(self.groups[unicode(group_id)], "rb"))
except KeyError:
return
def add_multigroups(self, multigroup_obj):
"""
Merges a MultiGroup object
:param multigroup_obj: MultiGroup object
"""
for _, group_obj in multigroup_obj:
self.add_group(group_obj)
def update_filters(self, gn_filter, sp_filter, excluded_taxa,
group_names=None, default=False):
"""
        This does not change the Group objects themselves, only the filter
        mapping. The filter is only applied when the Group object is retrieved,
        to reduce computations
        :param gn_filter: int, filter for max gene copies
        :param sp_filter: int, filter for min species
        :param excluded_taxa: list, taxa names to exclude from the analysis
        :param group_names: list, with names of group objects
        :param default: bool, if True, gn_filter and sp_filter are reset to
        their defaults (1 gene copy and all species, respectively)
"""
# There are no groups to update
if group_names == []:
return
if group_names:
glist = group_names
else:
glist = self.groups
for group_name in glist:
# Get group object
group_obj = pickle.load(open(self.groups[group_name], "rb"))
# Define excluded taxa
group_obj.exclude_taxa(excluded_taxa, True)
# Define filters
gn_filter = gn_filter if not default else 1
sp_filter = sp_filter if not default else \
len(group_obj.species_list)
# Correct maximum filter values after excluding taxa
gn_filter = gn_filter if gn_filter <= group_obj.max_extra_copy \
else group_obj.max_extra_copy
sp_filter = sp_filter if sp_filter <= len(group_obj.species_list) \
else len(group_obj.species_list)
# Update Group object with new filters
group_obj.update_filters(gn_filter, sp_filter)
# Update group stats
self.get_multigroup_statistics(group_obj)
pickle.dump(group_obj, open(self.groups[group_name], "wb"))
# Update filter map
self.filters[group_name] = (gn_filter, group_obj.species_threshold)
self.taxa_list[group_name] = group_obj.species_list
self.excluded_taxa[group_name] = group_obj.excluded_taxa
def get_multigroup_statistics(self, group_obj):
"""
        Gathers the basic statistics of a Group object and stores them in
        the self.groups_stats attribute, keyed by the group name
"""
stats = group_obj.basic_group_statistics()
self.groups_stats[group_obj.name] = {"stats": stats,
"species": group_obj.species_list,
"max_copies": group_obj.max_extra_copy}
def bar_orthologs(self, group_names=None, output_file_name="Final_orthologs",
dest="./", stats="all"):
"""
Creates a bar plot with the final ortholog values for each group file
:param group_names: list. If None, all groups in self.group_stats will
be used to generate the plot. Else, only the groups with the names in
the list will be plotted.
:param output_file_name: string. Name of output file
:param dest: string. output directory
:param stats: string. The statistics that should be used to generate
the bar plot. Options are:
..: "1": Total orthologs
..: "2": Species compliant orthologs
..: "3": Gene compliant orthologs
..: "4": Final orthologs
..: "all": All of the above
Multiple combinations can be provided, for instance: "123" will
display bars for total, species compliant and gene compliant stats
"""
# Stores the x-axis labels
x_labels = []
# Stores final ortholog values for all 4 possible data sets
vals = [[], [], [], []]
lgd = ["Total orthologs", "After species filter", "After gene filter",
"Final orthologs"]
# Determine which groups will be plotted
if group_names:
groups_lst = group_names
else:
groups_lst = self.groups_stats.keys()
for gname in groups_lst:
gstats = self.groups_stats[gname]
x_labels.append(gname.split(os.sep)[-1])
# Populate total orthologs
if "1" in stats or stats == "all":
vals[0].append(gstats["stats"][0])
# Populate species compliant orthologs
if "2" in stats or stats == "all":
vals[1].append(gstats["stats"][3])
# Populate gene compliant orthologs
if "3" in stats or stats == "all":
vals[2].append(gstats["stats"][2])
# Populate final orthologs
if "4" in stats or stats == "all":
vals[3].append(gstats["stats"][4])
# Filter valid data sets
lgd_list = [x for x in lgd if vals[lgd.index(x)]]
vals = [l for l in vals if l]
# Create plot
b_plt, lgd = multi_bar_plot(vals, x_labels, lgd_list=lgd_list)
b_plt.savefig(os.path.join(dest, output_file_name),
bbox_extra_artists=(lgd,), bbox_inches="tight", dpi=200)
# Create table list object
table_list = []
# Create header
table_list.append([""] + x_labels)
# Create content
for i in range(len(vals)):
            table_list.append([lgd_list[i]] + vals[i])
return b_plt, lgd, table_list
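# Illustrative usage sketch: the database directory, group file names and
# filter values below are placeholders and assume valid OrthoMCL group files
# and an existing directory for the pickled Group objects.
if __name__ == "__main__":
    example_db = "/tmp/trifusion_db"  # placeholder directory, assumed to exist
    example_groups = ["groups_run1.txt", "groups_run2.txt"]  # placeholder files
    mgroups = MultiGroupsLight(example_db, groups=example_groups)
    # Reset every group to the default filters (1 gene copy, all species)
    mgroups.update_filters(1, 1, [], default=True)
    b_plt, lgd, table = mgroups.bar_orthologs(dest="/tmp", stats="all")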
__author__ = "Diogo N. Silva"
| ODiogoSilva/TriFusion | trifusion/ortho/OrthomclToolbox.py | Python | gpl-3.0 | 64,833 | 0.000586 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.reverse_sequence_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class WhereOpTest(test.TestCase):
def _testWhere(self, x, truth, expected_err_re=None):
with self.cached_session(use_gpu=True):
ans = array_ops.where(x)
self.assertEqual([None, x.ndim], ans.get_shape().as_list())
if expected_err_re is None:
tf_ans = self.evaluate(ans)
self.assertAllClose(tf_ans, truth, atol=1e-10)
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate(ans)
def testWrongNumbers(self):
with self.session(use_gpu=True):
with self.assertRaises(ValueError):
array_ops.where([False, True], [1, 2], None)
with self.assertRaises(ValueError):
array_ops.where([False, True], None, [1, 2])
@test_util.run_deprecated_v1
def testBasicVec(self):
x = np.asarray([True, False])
truth = np.asarray([[0]], dtype=np.int64)
self._testWhere(x, truth)
x = np.asarray([False, True, False])
truth = np.asarray([[1]], dtype=np.int64)
self._testWhere(x, truth)
x = np.asarray([False, False, True, False, True])
truth = np.asarray([[2], [4]], dtype=np.int64)
self._testWhere(x, truth)
@test_util.run_deprecated_v1
def testRandomVec(self):
x = np.random.rand(1000000) > 0.5
truth = np.vstack([np.where(x)[0].astype(np.int64)]).T
self._testWhere(x, truth)
@test_util.run_deprecated_v1
def testBasicMat(self):
x = np.asarray([[True, False], [True, False]])
# Ensure RowMajor mode
truth = np.asarray([[0, 0], [1, 0]], dtype=np.int64)
self._testWhere(x, truth)
@test_util.run_deprecated_v1
def testBasic3Tensor(self):
x = np.asarray([[[True, False], [True, False]],
[[False, True], [False, True]],
[[False, False], [False, True]]])
# Ensure RowMajor mode
truth = np.asarray(
[[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]], dtype=np.int64)
self._testWhere(x, truth)
def _testRandom(self, dtype, expected_err_re=None):
shape = [127, 33, 53]
x = np.random.randn(*shape) + 1j * np.random.randn(*shape)
x = (np.random.randn(*shape) > 0).astype(dtype)
truth = np.where(np.abs(x) > 0) # Tuples of indices by axis.
truth = np.vstack(truth).T # Convert to [num_true, indices].
self._testWhere(x, truth, expected_err_re)
@test_util.run_deprecated_v1
def testRandomBool(self):
self._testRandom(np.bool)
@test_util.run_deprecated_v1
def testRandomInt32(self):
self._testRandom(np.int32)
@test_util.run_deprecated_v1
def testRandomInt64(self):
self._testRandom(np.int64)
@test_util.run_deprecated_v1
def testRandomFloat(self):
self._testRandom(np.float32)
@test_util.run_deprecated_v1
def testRandomDouble(self):
self._testRandom(np.float64)
@test_util.run_deprecated_v1
def testRandomComplex64(self):
self._testRandom(np.complex64)
@test_util.run_deprecated_v1
def testRandomComplex128(self):
self._testRandom(np.complex128)
@test_util.run_deprecated_v1
def testRandomUint8(self):
self._testRandom(np.uint8)
@test_util.run_deprecated_v1
def testRandomInt8(self):
self._testRandom(np.int8)
@test_util.run_deprecated_v1
def testRandomInt16(self):
self._testRandom(np.int16)
@test_util.run_deprecated_v1
def testThreeArgument(self):
x = np.array([[-2, 3, -1], [1, -3, -3]])
np_val = np.where(x > 0, x * x, -x)
with self.session(use_gpu=True):
tf_val = array_ops.where(constant_op.constant(x) > 0, x * x, -x).eval()
self.assertAllEqual(tf_val, np_val)
@test_util.run_deprecated_v1
def testBatchSelect(self):
x = np.array([[-2, 3, -1] * 64, [1, -3, -3] * 64] * 8192) # [16384, 192]
c_mat = np.array([[False] * 192, [True] * 192] * 8192) # [16384, 192]
c_vec = np.array([False, True] * 8192) # [16384]
np_val = np.where(c_mat, x * x, -x)
with self.session(use_gpu=True):
tf_val = array_ops.where(c_vec, x * x, -x).eval()
self.assertAllEqual(tf_val, np_val)
class WhereBenchmark(test.Benchmark):
def benchmarkWhere(self):
for (m, n, p, use_gpu) in itertools.product(
[10],
[10, 100, 1000, 10000, 100000, 1000000],
[0.01, 0.5, 0.99],
[False, True]):
name = "m_%d_n_%d_p_%g_use_gpu_%s" % (m, n, p, use_gpu)
device = "/%s:0" % ("gpu" if use_gpu else "cpu")
with ops.Graph().as_default():
with ops.device(device):
x = random_ops.random_uniform((m, n), dtype=dtypes.float32) <= p
v = resource_variable_ops.ResourceVariable(x)
op = array_ops.where(v)
with session.Session(config=benchmark.benchmark_config()) as sess:
v.initializer.run()
r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
gb_processed_input = m * n / 1.0e9
# approximate size of output: m*n*p int64s for each axis.
gb_processed_output = 2 * 8 * m * n * p / 1.0e9
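          # e.g. m=10, n=1000000, p=0.5: ~0.01 GB of boolean input plus
          # 2 * 8 * 10 * 1e6 * 0.5 / 1e9 = 0.08 GB of int64 index output.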
gb_processed = gb_processed_input + gb_processed_output
throughput = gb_processed / r["wall_time"]
print("Benchmark: %s \t wall_time: %0.03g s \t "
"Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
sys.stdout.flush()
def benchmarkBatchSelect(self):
for (m, n, use_gpu) in itertools.product([1000, 10000, 100000],
[10, 100, 1000], [False, True]):
name = "m_%d_n_%d_use_gpu_%s" % (m, n, use_gpu)
device = "/%s:0" % ("gpu" if use_gpu else "cpu")
with ops.Graph().as_default():
with ops.device(device):
x_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
y_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
c_gen = random_ops.random_uniform([m], dtype=dtypes.float32) <= 0.5
x = resource_variable_ops.ResourceVariable(x_gen)
y = resource_variable_ops.ResourceVariable(y_gen)
c = resource_variable_ops.ResourceVariable(c_gen)
op = array_ops.where(c, x, y)
with session.Session(config=benchmark.benchmark_config()) as sess:
x.initializer.run()
y.initializer.run()
c.initializer.run()
r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
# approximate size of output: m*n*2 floats for each axis.
gb_processed = m * n * 8 / 1.0e9
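          # e.g. m=100000, n=1000: 1e8 elements * 8 bytes ~ 0.8 GB per run.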
throughput = gb_processed / r["wall_time"]
print("Benchmark: %s \t wall_time: %0.03g s \t "
"Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
sys.stdout.flush()
if __name__ == "__main__":
test.main()
| jbedorf/tensorflow | tensorflow/python/kernel_tests/where_op_test.py | Python | apache-2.0 | 8,003 | 0.010371 |
"""
Weather information for air and road temperature, provided by Trafikverket.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.trafikverket_weatherstation/
"""
import asyncio
from datetime import timedelta
import logging
import aiohttp
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_API_KEY, CONF_MONITORED_CONDITIONS,
CONF_NAME, DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
REQUIREMENTS = ['pytrafikverket==0.1.5.9']
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Trafikverket"
ATTR_MEASURE_TIME = 'measure_time'
ATTR_ACTIVE = 'active'
CONF_STATION = 'station'
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=10)
SCAN_INTERVAL = timedelta(seconds=300)
SENSOR_TYPES = {
'air_temp': [
'Air temperature', TEMP_CELSIUS,
'air_temp', 'mdi:thermometer', DEVICE_CLASS_TEMPERATURE],
'road_temp': [
'Road temperature', TEMP_CELSIUS,
'road_temp', 'mdi:thermometer', DEVICE_CLASS_TEMPERATURE],
'precipitation': [
'Precipitation type', None,
'precipitationtype', 'mdi:weather-snowy-rainy', None],
'wind_direction': [
'Wind direction', '°',
'winddirection', 'mdi:flag-triangle', None],
'wind_direction_text': [
'Wind direction text', None,
'winddirectiontext', 'mdi:flag-triangle', None],
'wind_speed': [
'Wind speed', 'm/s',
'windforce', 'mdi:weather-windy', None],
'humidity': [
'Humidity', '%',
'humidity', 'mdi:water-percent', DEVICE_CLASS_HUMIDITY],
'precipitation_amount': [
'Precipitation amount', 'mm',
'precipitation_amount', 'mdi:cup-water', None],
'precipitation_amountname': [
'Precipitation name', None,
'precipitation_amountname', 'mdi:weather-pouring', None],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_STATION): cv.string,
vol.Required(CONF_MONITORED_CONDITIONS, default=[]):
[vol.In(SENSOR_TYPES)],
})
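# Illustrative configuration.yaml entry for this platform (values are
# placeholders; a valid Trafikverket API key and station name are required):
#
# sensor:
#   - platform: trafikverket_weatherstation
#     name: Road weather
#     api_key: YOUR_API_KEY
#     station: STATION_NAME
#     monitored_conditions:
#       - air_temp
#       - road_temp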
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the Trafikverket sensor platform."""
from pytrafikverket.trafikverket_weather import TrafikverketWeather
sensor_name = config[CONF_NAME]
sensor_api = config[CONF_API_KEY]
sensor_station = config[CONF_STATION]
web_session = async_get_clientsession(hass)
weather_api = TrafikverketWeather(web_session, sensor_api)
dev = []
for condition in config[CONF_MONITORED_CONDITIONS]:
dev.append(TrafikverketWeatherStation(
weather_api, sensor_name, condition, sensor_station))
if dev:
async_add_entities(dev, True)
class TrafikverketWeatherStation(Entity):
"""Representation of a Trafikverket sensor."""
def __init__(self, weather_api, name, sensor_type, sensor_station):
"""Initialize the sensor."""
self._client = name
self._name = SENSOR_TYPES[sensor_type][0]
self._type = sensor_type
self._state = None
self._unit = SENSOR_TYPES[sensor_type][1]
self._station = sensor_station
self._weather_api = weather_api
self._icon = SENSOR_TYPES[sensor_type][3]
self._device_class = SENSOR_TYPES[sensor_type][4]
self._weather = None
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self._client, self._name)
@property
def icon(self):
"""Icon to use in the frontend."""
return self._icon
@property
def device_state_attributes(self):
"""Return the state attributes of Trafikverket Weatherstation."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_ACTIVE: self._weather.active,
ATTR_MEASURE_TIME: self._weather.measure_time,
}
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._device_class
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
"""Get the latest data from Trafikverket and updates the states."""
try:
self._weather = await self._weather_api.async_get_weather(
self._station)
self._state = getattr(
self._weather,
SENSOR_TYPES[self._type][2])
except (asyncio.TimeoutError,
aiohttp.ClientError, ValueError) as error:
_LOGGER.error("Could not fetch weather data: %s", error)
| jamespcole/home-assistant | homeassistant/components/trafikverket_weatherstation/sensor.py | Python | apache-2.0 | 5,210 | 0 |
import requests
import ujson as json
BASE_URL = 'http://localhost:8088/'  # set the IP address and port correctly
USERNAME = '<Your username here>'
PASSWD = '<Your password here>'
TOKEN = '' # will be filled in by the login request
VERBOSE = 1 # verbose 0 => No output, 1 => minimal output, 2 => full output
def create_url(endpoint):
url = '{}{}'.format(BASE_URL, endpoint)
return url
def pretty_print_dict(data, prefix=''):
result = ''
if data is None:
return '\n'
for k, v in data.items():
result += '{}{}: {}\n'.format(prefix, k, v)
return result
def pretty_print_output(output):
try:
json_dict = json.loads(output)
json_str = json.dumps(json_dict, indent=4)
return json_str
except Exception:
return output
def api_call(endpoint, method='get', headers=None, params=None, body=None, authenticated=True, verbose=None):
if verbose is None:
verbose = VERBOSE
if headers is None:
headers = {}
if TOKEN != '' and authenticated:
headers.update({'Authorization': 'Bearer {}'.format(TOKEN)})
method = method.lower()
url = create_url(endpoint)
if params is None:
params = {}
if verbose == 1:
print('Perform request: {}'.format(url))
elif verbose == 2:
headers_str = pretty_print_dict(headers, prefix=' ')
params_str = pretty_print_dict(params, prefix=' ')
print('Perform request:\n url: {},\n method: {},\n headers:\n{} params:\n{} body: {}\n'
.format(url, method, headers_str, params_str, body))
if method == 'get':
response = requests.get(url=url, headers=headers, params=params)
elif method == 'post':
response = requests.post(url=url, headers=headers, params=params, data=body)
resp_body = '\n'.join([str(x.decode()) for x in response.iter_lines()])
if verbose == 1:
print(' => Response body: {}'.format(resp_body))
print('--------------------------------------------')
elif verbose == 2:
headers = pretty_print_dict(response.headers, prefix=' ')
body = pretty_print_output(resp_body)
body_indent = ''
for line in body.splitlines():
body_indent += ' {}\n'.format(line)
print('Response:\n code: {},\n headers:\n{} body:\n{}'
.format(response.status_code, headers, body_indent))
print('--------------------------------------------')
return response
def login(verbose=None):
global TOKEN
    if verbose is None:
verbose = VERBOSE
params = {'username': USERNAME, 'password': PASSWD}
resp = api_call('login', params=params, authenticated=False, verbose=verbose)
resp_json = resp.json()
if 'token' in resp_json:
token = resp.json()['token']
TOKEN = token
if verbose > 0:
print(' => logged in and received token: {}'.format(token))
print('--------------------------------------------')
else:
raise RuntimeError('Could not log in to the gateway')
return token
def main():
# do requests here
# Example requests to the get_version endpoint
login(verbose=1)
api_call('get_version', authenticated=False)
api_call('get_version', authenticated=True)
api_call('get_version', verbose=2, authenticated=False)
api_call('get_version', verbose=2, authenticated=True)
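    # Further illustrative calls; the endpoint name and payload below are
    # placeholders, not documented gateway endpoints:
    # api_call('some_endpoint', method='post', body=json.dumps({'key': 'value'}))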
if __name__ == '__main__':
main()
| openmotics/gateway | tools/api-tester.py | Python | agpl-3.0 | 3,464 | 0.001443 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/space/weapon/shared_wpn_heavy_disruptor.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | anhstudios/swganh | data/scripts/templates/object/draft_schematic/space/weapon/shared_wpn_heavy_disruptor.py | Python | mit | 461 | 0.047722 |
''' Simple test for apachelogs '''
import unittest
from apachelogs import ApacheLogFile
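# The assertions below imply that test.log holds at least one entry in the
# Apache "combined" log format, i.e. something like:
# 127.0.0.1 - frank [5/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif?foo=bar&baz=zip HTTP/1.0" 200 2326 "http://www.example.com/start.html" "Mozilla/4.08 [en] (Win98; I ;Nav)"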
class apachelogs_test(unittest.TestCase):
def test_foo(self):
log = ApacheLogFile('test.log')
line = iter(log).next()
self.assertEquals(line.ip, '127.0.0.1')
self.assertEquals(line.ident, '-')
self.assertEquals(line.http_user, 'frank')
self.assertEquals(line.time, '5/Oct/2000:13:55:36 -0700')
self.assertEquals(line.request_line, 'GET /apache_pb.gif?foo=bar&baz=zip HTTP/1.0')
self.assertEquals(line.http_response_code, '200')
self.assertEquals(line.http_response_size, '2326')
self.assertEquals(line.referrer, 'http://www.example.com/start.html')
self.assertEquals(line.user_agent, 'Mozilla/4.08 [en] (Win98; I ;Nav)')
log.close()
def setUp(self):
pass
if __name__ == '__main__':
unittest.main()
| bkjones/loghetti | test/apachelogs_test.py | Python | bsd-3-clause | 930 | 0.005376 |
#!/usr/bin/python
#
# Copyright 2014: wycomco GmbH (choules@wycomco.de)
# 2015: modifications by Tim Sutton
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""See docstring for AdobeReaderURLProvider class"""
# Disabling warnings for env members and imports that only affect recipe-
# specific processors.
#pylint: disable=e1101
import urllib2
import plistlib
from autopkglib import Processor, ProcessorError
__all__ = ["AdobeReaderUpdatesURLProvider"]
MAJOR_VERSION_DEFAULT = "11"
CHECK_OS_VERSION_DEFAULT = "10.8"
MAJOR_VERSION_MATCH_STR = "adobe/reader/mac/%s"
AR_UPDATER_DOWNLOAD_URL = (
"http://download.adobe.com/"
"pub/adobe/reader/mac/%s.x/%s/misc/AdbeRdrUpd%s.dmg")
AR_UPDATER_DOWNLOAD_URL2 = "http://ardownload.adobe.com"
AR_UPDATER_BASE_URL = "https://armmf.adobe.com/arm-manifests/mac"
AR_URL_TEMPLATE = "/%s/current_version_url_template.txt"
AR_MANIFEST_TEMPLATE = "/%s/manifest_url_template.txt"
AR_MAJREV_IDENTIFIER = "{MAJREV}"
OSX_MAJREV_IDENTIFIER = "{OS_VER_MAJ}"
OSX_MINREV_IDENTIFIER = "{OS_VER_MIN}"
AR_PROD_IDENTIFIER = '{PROD}'
AR_PROD_ARCH_IDENTIFIER = '{PROD_ARCH}'
AR_PROD = 'com_adobe_Reader'
AR_PROD_ARCH = 'univ'
class AdobeReaderUpdatesURLProvider(Processor):
"""Provides URL to the latest Adobe Reader release."""
description = __doc__
input_variables = {
"major_version": {
"required": False,
"description": ("Major version. Examples: '10', '11'. Defaults to "
"%s" % MAJOR_VERSION_DEFAULT)
},
"os_version": {
"required": False,
"default": CHECK_OS_VERSION_DEFAULT,
"description": ("Version of OS X to check. Default: %s" %
CHECK_OS_VERSION_DEFAULT)
}
}
output_variables = {
"url": {
"description": "URL to the latest Adobe Reader release.",
},
"version": {
"description": "Version for this update.",
},
}
def get_reader_updater_pkg_url(self, major_version):
        '''Returns download URL for the Adobe Reader update package (PKG)'''
request = urllib2.Request(
AR_UPDATER_BASE_URL + AR_MANIFEST_TEMPLATE % major_version)
try:
url_handle = urllib2.urlopen(request)
version_string = url_handle.read()
url_handle.close()
except BaseException as err:
raise ProcessorError("Can't open manifest template: %s" % (err))
os_maj, os_min = self.env["os_version"].split(".")
version_string = version_string.replace(
AR_MAJREV_IDENTIFIER, major_version)
version_string = version_string.replace(OSX_MAJREV_IDENTIFIER, os_maj)
version_string = version_string.replace(OSX_MINREV_IDENTIFIER, os_min)
version_string = version_string.replace(AR_PROD_IDENTIFIER, AR_PROD)
version_string = version_string.replace(AR_PROD_ARCH_IDENTIFIER, AR_PROD_ARCH)
request = urllib2.Request(
AR_UPDATER_BASE_URL + version_string)
try:
url_handle = urllib2.urlopen(request)
plist = plistlib.readPlistFromString(url_handle.read())
url_handle.close()
except BaseException as err:
raise ProcessorError("Can't get or read manifest: %s" % (err))
url = AR_UPDATER_DOWNLOAD_URL2 + plist['PatchURL']
return url
def get_reader_updater_dmg_url(self, major_version):
        '''Returns download URL and version string for the Adobe Reader
        Updater DMG'''
request = urllib2.Request(
AR_UPDATER_BASE_URL + AR_URL_TEMPLATE % major_version)
try:
url_handle = urllib2.urlopen(request)
version_string = url_handle.read()
url_handle.close()
except BaseException as err:
raise ProcessorError("Can't open URL template: %s" % (err))
os_maj, os_min = self.env["os_version"].split(".")
version_string = version_string.replace(
AR_MAJREV_IDENTIFIER, major_version)
version_string = version_string.replace(OSX_MAJREV_IDENTIFIER, os_maj)
version_string = version_string.replace(OSX_MINREV_IDENTIFIER, os_min)
request = urllib2.Request(
AR_UPDATER_BASE_URL + version_string)
try:
url_handle = urllib2.urlopen(request)
version = url_handle.read()
url_handle.close()
except BaseException as err:
raise ProcessorError("Can't get version string: %s" % (err))
versioncode = version.replace('.', '')
url = AR_UPDATER_DOWNLOAD_URL % (major_version, version, versioncode)
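        # e.g. major_version "11" with version "11.0.10" resolves to
        # ".../pub/adobe/reader/mac/11.x/11.0.10/misc/AdbeRdrUpd11010.dmg"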
return (url, version)
def main(self):
major_version = self.env.get("major_version", MAJOR_VERSION_DEFAULT)
(url, version) = self.get_reader_updater_dmg_url(major_version)
# only need the version, getting the URL from the manifest now
url = self.get_reader_updater_pkg_url(major_version)
self.env["url"] = url
self.env["version"] = version
self.output("Found URL %s" % self.env["url"])
if __name__ == "__main__":
PROCESSOR = AdobeReaderUpdatesURLProvider()
PROCESSOR.execute_shell()
| FabianN/autopkg_recipies | AdobeReader/AdobeReaderUpdatesURLProvider.py | Python | mit | 5,706 | 0.000526 |
# -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2020 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
import wx
from math import cos, pi, radians, sin
from Queue import Queue
from sys import maxint
from threading import Thread
from time import clock, sleep
from win32api import EnumDisplayMonitors, GetSystemMetrics, mouse_event as mouse_event2
from win32con import MOUSEEVENTF_ABSOLUTE, MOUSEEVENTF_MOVE
# Local imports
import eg
from eg import HasActiveHandler
from eg.cFunctions import SetMouseCallback
from eg.WinApi.Dynamic import GetCursorPos, mouse_event, POINT, SetCursorPos
from eg.WinApi.Utils import GetMonitorDimensions
ICON = """iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABmJLR0QA/wD/AP+gvaeT
AAAACXBIWXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH1QQIDRgEM71mAAAAADV0RVh0Q29tbWVudAAoYy
kgMjAwNCBKYWt1YiBTdGVpbmVyCgpDcmVhdGVkIHdpdGggVGhlIEdJTVCQ2YtvAAACHElEQVQ4y42Q
zUtUURjGf/fcyatz73Wiklwo2R/QplXQ/AURlLYJcrJNQrvQahYFI0wQ7lu0azNtYlAj2rUJRFciUf
kRgUwOM6Y5jePXfNzznhZ+NOpIvpvD+5zn/M7DY3Fo0ul0JzBQLpdvG2M8wHi++6r7Zs+Tet/Yu9Hr
W5tb/Yqjc2m7vB3zfPd7LBbzPd/tK/5Zu5ZKpZZSb1LZ0bGRG7u+F2E3PG0dfp1MJl+2tvq9xeLaJv
AxkUj01aW7UKtV3xvYam525nq6b92znieHEkqpIWwLpRSV7YBoNEoun2VhIUOTY6ODAAmkqJT68PRZ
orf+w1AoFBq63//A2LZthcNhhoeH0VrjNLVgYTHw8DGlUonC6u/IyEj6DnAAoAAq1ar1c3FxX8zlcl
QqlX97Po/XGrEa9MWREuPxOPl8nmw2Szwe538Tql9WVlZoa2tjcHDwgHZiwGqhwGqhgO/7dHZ0MDM7
e7IEG6V1zp05uy/WghrLv5YPaBul9eMBnufuRLXAwsIYQYsgRhCt0SK0n2/nuBKnxBi00YhotA7Qoh
ERRAsiBiOy559qBJjVWmMrmyAQtNboYBcmgojQdMrZ8083Anyan5/D8zxaWpqxlEKLoPVOfNd1iZyO
MDPzDeBHow7efv3yuc9xnGhX10U8z8MAGMPOYchkFlhaygG8bgSoVavVu5MT448mJ8YvA1cadJUBrg
Jrhy/+AqGrAMOnH86mAAAAAElFTkSuQmCC"""
eg.RegisterPlugin(
name = "Mouse",
author = (
"Bitmonster",
"Sem;colon",
),
version = "1.1.1",
description = (
"Actions to control the mouse cursor and emulation of mouse events."
),
kind = "core",
guid = "{6B1751BF-F94E-4260-AB7E-64C0693FD959}",
icon = ICON,
url = "http://www.eventghost.net/forum/viewtopic.php?f=9&t=5481",
)
class Mouse(eg.PluginBase):
def __init__(self):
self.AddEvents()
self.AddAction(LeftButton)
self.AddAction(LeftDoubleClick)
self.AddAction(ToggleLeftButton)
self.AddAction(MiddleButton)
self.AddAction(MoveAbsolute)
self.AddAction(MoveRelative)
self.AddAction(RightButton)
self.AddAction(RightDoubleClick)
self.AddAction(GoDirection)
self.AddAction(MouseWheel)
@eg.LogIt
def __close__(self):
pass
def __start__(self):
self.thread = MouseThread()
self.leftMouseButtonDown = False
self.lastMouseEvent = None
self.mouseButtonWasBlocked = [False, False, False, False, False]
SetMouseCallback(self.MouseCallBack)
@eg.LogIt
def __stop__(self):
SetMouseCallback(None)
self.thread.receiveQueue.put([-1])
def MouseCallBack(self, buttonName, buttonNum, param):
if param:
if self.lastMouseEvent:
self.lastMouseEvent.SetShouldEnd()
shouldBlock = HasActiveHandler("Mouse." + buttonName)
self.mouseButtonWasBlocked[buttonNum] = shouldBlock
self.lastMouseEvent = self.TriggerEnduringEvent(buttonName)
return shouldBlock
else:
if self.lastMouseEvent:
self.lastMouseEvent.SetShouldEnd()
return self.mouseButtonWasBlocked[buttonNum]
return False
class MouseThread(Thread):
currentAngle = 0
newAngle = 0
acceleration = 0
speed = 0
maxTicks = 5
yRemainder = 0
xRemainder = 0
leftButtonDown = False
lastTime = 0
initSpeed = 0.06
maxSpeed = 7.0
useAlternateMethod = False
def __init__(self):
Thread.__init__(self, name="MouseThread")
self.receiveQueue = Queue(2048)
self.start()
@eg.LogItWithReturn
def run(self):
stop = False
point = POINT()
while True:
self.lastTime = clock()
if not self.receiveQueue.empty():
data = self.receiveQueue.get()
if data[0] == -1:
break
elif data[0] == -2:
stop = True
else:
self.newAngle = radians(data[0])
self.initSpeed = data[1]
self.maxSpeed = data[2]
self.acceleration = data[3]
self.useAlternateMethod = data[4]
if stop:
self.acceleration = 0
self.speed = 0
stop = False
continue
if self.acceleration == 0:
sleep(0.05)
continue
ticks = 10
if self.speed == 0:
self.currentAngle = self.newAngle
self.speed = self.initSpeed
else:
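                # Steer gradually toward the requested direction: wrap the
                # angular difference into [-pi, pi] and close 1/20th of the
                # gap per iteration so direction changes stay smooth.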
diff = self.newAngle - self.currentAngle
if diff > pi:
diff = diff - 2 * pi
elif diff < -1 * pi:
diff = diff + 2 * pi
self.currentAngle = self.currentAngle + (diff / 20)
self.speed = self.speed + (self.speed * self.acceleration * ticks)
if self.speed > self.maxSpeed:
self.speed = self.maxSpeed
elif self.speed <= 0:
self.speed = 0
factor = self.speed * (ticks / 10)
xCurrent = sin(self.currentAngle) * factor + self.xRemainder
yCurrent = -1 * cos(self.currentAngle) * factor + self.yRemainder
x = int(xCurrent)
y = int(yCurrent)
self.xRemainder = xCurrent - x
self.yRemainder = yCurrent - y
try:
if self.useAlternateMethod:
mouse_event2(MOUSEEVENTF_MOVE, x, y)
else:
GetCursorPos(point)
SetCursorPos(point.x + x, point.y + y)
except:
pass
if self.speed == 0:
self.acceleration = 0
waitTicks = 0.01 - (clock() - self.lastTime)
if waitTicks < 0:
waitTicks = 0.0
sleep(waitTicks)
class GoDirection(eg.ActionBase):
name = "Start Movement"
description = "Starts cursor movement in the specified direction."
class text:
label = u"Start cursor movement in direction %.2f\u00B0"
text1 = "Start moving cursor in direction"
text2 = "degrees. (0-360)"
text3 = "Initial mouse speed:"
text4 = "Maximum mouse speed:"
text5 = "Acceleration factor:"
label_AM = "Use alternate method"
def __call__(self, direction=0, initSpeed = 60, maxSpeed = 7000, accelerationFactor = 3, useAlternateMethod=False):
def UpFunc():
self.plugin.thread.receiveQueue.put([-2])
self.plugin.thread.receiveQueue.put([float(direction), float(initSpeed) / 1000, float(maxSpeed) / 1000, float(accelerationFactor) / 1000, useAlternateMethod])
eg.event.AddUpFunc(UpFunc)
def Configure(self, direction=0, initSpeed = 60, maxSpeed = 7000, accelerationFactor = 3, useAlternateMethod=False):
text = self.text
panel = eg.ConfigPanel()
direction = float(direction)
valueCtrl = panel.SpinNumCtrl(float(direction), min=0, max=360)
panel.AddLine(text.text1, valueCtrl, text.text2)
initSpeedLabel = wx.StaticText(panel, -1, text.text3)
initSpeedSpin = eg.SpinIntCtrl(panel, -1, initSpeed, 10, 2000)
maxSpeedLabel = wx.StaticText(panel, -1, text.text4)
maxSpeedSpin = eg.SpinIntCtrl(panel, -1, maxSpeed, 4000, 32000)
accelerationFactorLabel = wx.StaticText(panel, -1, text.text5)
accelerationFactorSpin = eg.SpinIntCtrl(panel, -1, accelerationFactor, 1, 200)
eg.EqualizeWidths((initSpeedLabel, maxSpeedLabel, accelerationFactorLabel))
panel.AddLine(initSpeedLabel, initSpeedSpin)
panel.AddLine(maxSpeedLabel, maxSpeedSpin)
panel.AddLine(accelerationFactorLabel, accelerationFactorSpin)
uAMCB = panel.CheckBox(useAlternateMethod, text.label_AM)
panel.AddLine(uAMCB)
while panel.Affirmed():
panel.SetResult(
valueCtrl.GetValue(),
initSpeedSpin.GetValue(),
maxSpeedSpin.GetValue(),
accelerationFactorSpin.GetValue(),
uAMCB.GetValue(),
)
def GetLabel(self, direction=0, initSpeed = 60, maxSpeed = 7000, accelerationFactor = 3, useAlternateMethod=False):
direction = float(direction)
return self.text.label % direction
class LeftButton(eg.ActionBase):
name = "Left Mouse Click"
description = "Clicks the left mouse button."
def __call__(self):
def UpFunc():
mouse_event(0x0004, 0, 0, 0, 0)
self.plugin.leftMouseButtonDown = False
mouse_event(0x0002, 0, 0, 0, 0)
self.plugin.leftMouseButtonDown = True
eg.event.AddUpFunc(UpFunc)
class LeftDoubleClick(eg.ActionBase):
name = "Left Mouse Double-Click"
description = "Double-clicks the left mouse button."
def __call__(self):
def UpFunc():
mouse_event(0x0004, 0, 0, 0, 0)
self.plugin.leftMouseButtonDown = False
mouse_event(0x0002, 0, 0, 0, 0)
mouse_event(0x0004, 0, 0, 0, 0)
mouse_event(0x0002, 0, 0, 0, 0)
eg.event.AddUpFunc(UpFunc)
class MiddleButton(eg.ActionBase):
name = "Middle Mouse Click"
description = "Clicks the middle mouse button."
def __call__(self):
def UpFunc():
mouse_event(0x0040, 0, 0, 0, 0)
mouse_event(0x0020, 0, 0, 0, 0)
eg.event.AddUpFunc(UpFunc)
class MouseWheel(eg.ActionBase):
name = "Turn Mouse Wheel"
description = "Turns the mouse wheel."
class text:
label = u"Turn mouse wheel %d clicks"
text1 = "Turn mouse wheel by"
text2 = "clicks. (Negative values turn down)"
def __call__(self, direction=0):
mouse_event(0x0800, 0, 0, direction * 120, 0)
def Configure(self, direction=0):
panel = eg.ConfigPanel()
valueCtrl = panel.SpinIntCtrl(direction, min=-100, max=100)
panel.AddLine(self.text.text1, valueCtrl, self.text.text2)
while panel.Affirmed():
panel.SetResult(valueCtrl.GetValue())
def GetLabel(self, direction=0):
return self.text.label % direction
class MoveAbsolute(eg.ActionBase):
name = "Move Absolute"
description = "Moves the cursor to an absolute position."
class text:
display = "Move cursor to"
label_M = "Monitor: %i, "
label_X = "x: %i, "
label_Y = "y: %i"
label_C = "Set position to screen center"
label_AM = "Use alternate method"
center = "center"
text1 = "Set horizontal position X to"
text2 = "pixels"
text3 = "Set vertical position Y to"
note = (
"Note: The coordinates X and Y are related to the monitor "
'(not to the "virtual screen")'
)
def __call__(self, x = None, y = None, displayNumber = None, center = False, useAlternateMethod=False):
point = POINT()
GetCursorPos(point)
X = point.x
Y = point.y
mons = EnumDisplayMonitors(None, None)
mons = [item[2] for item in mons]
for mon in range(len(mons)): # on what monitor (= mon) is the cursor?
m = mons[mon]
if m[0] <= X and X <= m[2] and m[1] <= Y and Y <= m[3]:
break
if displayNumber is None:
displayNumber = mon
monitorDimensions = GetMonitorDimensions()
try:
displayRect = monitorDimensions[displayNumber]
except IndexError:
displayNumber = 0
displayRect = monitorDimensions[displayNumber]
if center:
x = displayRect[2] / 2
y = displayRect[3] / 2
if x is None:
x = X - mons[displayNumber][0]
if y is None:
y = Y - mons[displayNumber][1]
x += displayRect[0]
y += displayRect[1]
if useAlternateMethod:
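            # mouse_event with MOUSEEVENTF_ABSOLUTE expects coordinates
            # normalized to the 0..65535 range of the primary screen.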
x = x * 65535 / GetSystemMetrics(0)
y = y * 65535 / GetSystemMetrics(1)
mouse_event2(MOUSEEVENTF_ABSOLUTE | MOUSEEVENTF_MOVE, x, y)
else:
SetCursorPos(x, y)
def Configure(self, x = None, y = None, displayNumber = None, center = False, useAlternateMethod=False):
panel = eg.ConfigPanel()
text = self.text
uAMCB = panel.CheckBox(useAlternateMethod, text.label_AM)
cCB = panel.CheckBox(center, text.label_C)
xCB = panel.CheckBox(x is not None, text.text1)
yCB = panel.CheckBox(y is not None, text.text3)
displayCB = panel.CheckBox(displayNumber is not None, text.display)
#xCtrl = panel.SpinIntCtrl(x or 0, min = -maxint - 1, max = maxint)
xCtrl = panel.SpinIntCtrl(x or 0, min = 0, max = maxint) # since 1.0.1
xCtrl.Enable(x is not None)
#yCtrl = panel.SpinIntCtrl(y or 0, min = -maxint - 1, max = maxint)
yCtrl = panel.SpinIntCtrl(y or 0, min = 0, max = maxint) # since 1.0.1
yCtrl.Enable(y is not None)
display = -1 if displayNumber is None else displayNumber
displayChoice = eg.DisplayChoice(panel, display)
displayChoice.Enable(displayNumber is not None)
xPixels = wx.StaticText(panel, -1, text.text2)
yPixels = wx.StaticText(panel, -1, text.text2)
monsCtrl = eg.MonitorsCtrl(panel, background = (224, 238, 238))
note = wx.StaticText(panel, -1, text.note)
note.SetForegroundColour(wx.RED)
sizer = wx.GridBagSizer(vgap = 6, hgap = 5)
sizer.Add(cCB, (0, 0), (1, 3), flag = wx.BOTTOM, border = 8)
sizer.Add(xCB, (1, 0), (1, 1))
sizer.Add(xCtrl, (1, 1), (1, 1))
sizer.Add(xPixels, (1, 2), (1, 1))
sizer.Add(yCB, (2, 0), (1, 1))
sizer.Add(yCtrl, (2, 1), (1, 1))
sizer.Add(yPixels, (2, 2), (1, 1))
sizer.Add(note, (3, 0), (1, 3))
sizer.Add(displayCB, (4, 0), (1, 1), flag = wx.TOP, border = 14)
sizer.Add(displayChoice, (4, 1), (1, 2), flag = wx.TOP, border = 13)
sizer.Add(uAMCB, (5, 0), (1, 3))
panel.sizer.Add(sizer, 1, wx.EXPAND)
panel.sizer.Add(monsCtrl, 0, wx.TOP, 8)
def HandleCenterCheckBox(event = None):
val = not cCB.GetValue()
xCB.Enable(val)
xCtrl.Enable(val)
xPixels.Enable(val)
yCB.Enable(val)
yCtrl.Enable(val)
yPixels.Enable(val)
if not val:
xCB.SetValue(False)
yCB.SetValue(False)
xCtrl.SetValue(0)
yCtrl.SetValue(0)
if event:
event.Skip()
cCB.Bind(wx.EVT_CHECKBOX, HandleCenterCheckBox)
HandleCenterCheckBox()
def HandleXCheckBox(event):
xCtrl.Enable(event.IsChecked())
event.Skip()
xCB.Bind(wx.EVT_CHECKBOX, HandleXCheckBox)
def HandleYCheckBox(event):
yCtrl.Enable(event.IsChecked())
event.Skip()
yCB.Bind(wx.EVT_CHECKBOX, HandleYCheckBox)
def HandleDisplayCB(event):
flag = event.IsChecked()
displayChoice.Enable(flag)
if flag:
display = 0 if displayNumber is None else displayNumber
else:
display = -1
displayChoice.SetValue(display)
event.Skip()
displayCB.Bind(wx.EVT_CHECKBOX, HandleDisplayCB)
while panel.Affirmed():
if xCtrl.IsEnabled():
x = xCtrl.GetValue()
else:
x = None
if yCtrl.IsEnabled():
y = yCtrl.GetValue()
else:
y = None
if displayChoice.IsEnabled():
displayNumber = displayChoice.GetValue()
else:
displayNumber = None
panel.SetResult(x, y, displayNumber, cCB.GetValue(), uAMCB.GetValue())
def GetLabel(self, x, y, displayNumber, center, useAlternateMethod=False):
if center:
res = self.text.display + " " + self.text.center
if displayNumber is not None:
res += ": %s" % (self.text.label_M % (displayNumber + 1))
return res
else:
return self.text.display + ": %s%s%s" % (
self.text.label_M % (displayNumber + 1) if displayNumber is not None else "",
self.text.label_X % x if x is not None else "",
self.text.label_Y % y if y is not None else "",
)
class MoveRelative(eg.ActionBase):
name = "Move Relative"
description = "Moves the cursor to a relative position."
class text:
label = "Change cursor position by x:%s, y:%s"
text1 = "Change horizontal position X by"
text2 = "pixels"
text3 = "Change vertical position Y by"
label_AM = "Use alternate method"
def __call__(self, x, y, useAlternateMethod=False):
if x is None:
x = 0
if y is None:
y = 0
if useAlternateMethod:
mouse_event2(MOUSEEVENTF_MOVE, x, y)
else:
point = POINT()
GetCursorPos(point)
SetCursorPos(point.x + x, point.y + y)
def Configure(self, x=0, y=0, useAlternateMethod=False):
panel = eg.ConfigPanel()
text = self.text
uAMCB = panel.CheckBox(useAlternateMethod, text.label_AM)
xCB = panel.CheckBox(x is not None, text.text1)
def HandleXCheckBox(event):
xCtrl.Enable(event.IsChecked())
event.Skip()
xCB.Bind(wx.EVT_CHECKBOX, HandleXCheckBox)
xCtrl = panel.SpinIntCtrl(x or 0, min=-maxint - 1, max=maxint)
xCtrl.Enable(x is not None)
yCB = panel.CheckBox(y is not None, text.text3)
def HandleYCheckBox(event):
yCtrl.Enable(event.IsChecked())
event.Skip()
yCB.Bind(wx.EVT_CHECKBOX, HandleYCheckBox)
yCtrl = panel.SpinIntCtrl(y or 0, min=-maxint - 1, max=maxint)
yCtrl.Enable(y is not None)
panel.AddLine(xCB, xCtrl, text.text2)
panel.AddLine(yCB, yCtrl, text.text2)
panel.AddLine(uAMCB)
while panel.Affirmed():
if xCtrl.IsEnabled():
x = xCtrl.GetValue()
else:
x = None
if yCtrl.IsEnabled():
y = yCtrl.GetValue()
else:
y = None
panel.SetResult(x, y, uAMCB.GetValue())
def GetLabel(self, x, y, useAlternateMethod=False):
return self.text.label % (str(x), str(y))
class RightButton(eg.ActionBase):
name = "Right Mouse Click"
description = "Clicks the right mouse button."
def __call__(self):
def UpFunc():
mouse_event(0x0010, 0, 0, 0, 0)
mouse_event(0x0008, 0, 0, 0, 0)
eg.event.AddUpFunc(UpFunc)
class RightDoubleClick(eg.ActionBase):
name = "Right Mouse Double-Click"
description = "Double-clicks the right mouse button."
def __call__(self):
def UpFunc():
mouse_event(0x0010, 0, 0, 0, 0)
mouse_event(0x0008, 0, 0, 0, 0)
mouse_event(0x0010, 0, 0, 0, 0)
mouse_event(0x0008, 0, 0, 0, 0)
eg.event.AddUpFunc(UpFunc)
class ToggleLeftButton(eg.ActionBase):
class text:
name = "Left Mouse Toggle"
description = "Changes the status of the left mouse button."
radioBoxLabel = "Option"
radioBoxOptions = [
"Toggle left mouse button",
"Set left mouse button \"Up\"",
"Set left mouse button \"Down\""
]
def __call__(self, data=0):
if self.plugin.leftMouseButtonDown and data == 0 or data == 1:
mouse_event(0x0004, 0, 0, 0, 0)
self.plugin.leftMouseButtonDown = False
else:
mouse_event(0x0002, 0, 0, 0, 0)
self.plugin.leftMouseButtonDown = True
def GetLabel(self, data=0):
return self.plugin.label + ': ' + self.text.radioBoxOptions[data]
def Configure(self, data=0):
panel = eg.ConfigPanel()
radioBox = wx.RadioBox(
panel,
label=self.text.radioBoxLabel,
choices=self.text.radioBoxOptions,
style=wx.RA_SPECIFY_ROWS
)
radioBox.SetSelection(data)
panel.sizer.Add(radioBox, 0, wx.EXPAND)
while panel.Affirmed():
panel.SetResult(radioBox.GetSelection())
| tfroehlich82/EventGhost | plugins/Mouse/__init__.py | Python | gpl-2.0 | 21,487 | 0.004328 |
# Copyright (C) 2012 Statoil ASA, Norway.
#
# The file 'ecl_kw.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
from ert.cwrap import BaseCClass, CWrapper
from ert.enkf import AnalysisConfig, EclConfig, EnkfObs, EnKFState, LocalConfig, ModelConfig, EnsembleConfig, PlotConfig, SiteConfig, ENKF_LIB, EnkfSimulationRunner, EnkfFsManager, ErtWorkflowList, PostSimulationHook
from ert.enkf.enums import EnkfInitModeEnum
from ert.util import SubstitutionList, Log
class EnKFMain(BaseCClass):
def __init__(self, model_config, strict=True):
c_ptr = EnKFMain.cNamespace().bootstrap(model_config, strict, False)
super(EnKFMain, self).__init__(c_ptr)
self.__simulation_runner = EnkfSimulationRunner(self)
self.__fs_manager = EnkfFsManager(self)
@classmethod
def createCReference(cls, c_pointer, parent=None):
obj = super(EnKFMain, cls).createCReference(c_pointer, parent)
obj.__simulation_runner = EnkfSimulationRunner(obj)
obj.__fs_manager = EnkfFsManager(obj)
return obj
@staticmethod
def createNewConfig(config_file, storage_path, case_name, dbase_type, num_realizations):
EnKFMain.cNamespace().create_new_config(config_file, storage_path, case_name, dbase_type, num_realizations)
def getRealisation(self , iens):
""" @rtype: EnKFState """
if 0 <= iens < self.getEnsembleSize():
return EnKFMain.cNamespace().iget_state(self, iens).setParent(self)
else:
raise IndexError("iens value:%d invalid Valid range: [0,%d)" % (iens , len(self)))
def set_eclbase(self, eclbase):
EnKFMain.cNamespace().set_eclbase(self, eclbase)
def umount(self):
self.__fs_manager.umount()
def free(self):
self.umount()
EnKFMain.cNamespace().free(self)
def getEnsembleSize(self):
""" @rtype: int """
return EnKFMain.cNamespace().get_ensemble_size(self)
def resizeEnsemble(self, value):
EnKFMain.cNamespace().resize_ensemble(self, value)
def ensembleConfig(self):
""" @rtype: EnsembleConfig """
return EnKFMain.cNamespace().get_ens_config(self).setParent(self)
def analysisConfig(self):
""" @rtype: AnalysisConfig """
return EnKFMain.cNamespace().get_analysis_config(self).setParent(self)
def getModelConfig(self):
""" @rtype: ModelConfig """
return EnKFMain.cNamespace().get_model_config(self).setParent(self)
def logh(self):
""" @rtype: Log """
return EnKFMain.cNamespace().get_logh(self).setParent(self)
def local_config(self):
""" @rtype: LocalConfig """
return EnKFMain.cNamespace().get_local_config(self).setParent(self)
def siteConfig(self):
""" @rtype: SiteConfig """
return EnKFMain.cNamespace().get_site_config(self).setParent(self)
def eclConfig(self):
""" @rtype: EclConfig """
return EnKFMain.cNamespace().get_ecl_config(self).setParent(self)
def plotConfig(self):
""" @rtype: PlotConfig """
return EnKFMain.cNamespace().get_plot_config(self).setParent(self)
def set_datafile(self, datafile):
EnKFMain.cNamespace().set_datafile(self, datafile)
def get_schedule_prediction_file(self):
schedule_prediction_file = EnKFMain.cNamespace().get_schedule_prediction_file(self)
return schedule_prediction_file
def set_schedule_prediction_file(self, file):
EnKFMain.cNamespace().set_schedule_prediction_file(self, file)
def getDataKW(self):
""" @rtype: SubstitutionList """
return EnKFMain.cNamespace().get_data_kw(self)
def clearDataKW(self):
EnKFMain.cNamespace().clear_data_kw(self)
def addDataKW(self, key, value):
EnKFMain.cNamespace().add_data_kw(self, key, value)
def getMountPoint(self):
return EnKFMain.cNamespace().get_mount_point(self)
def del_node(self, key):
EnKFMain.cNamespace().del_node(self, key)
def getObservations(self):
""" @rtype: EnkfObs """
return EnKFMain.cNamespace().get_obs(self).setParent(self)
def load_obs(self, obs_config_file):
EnKFMain.cNamespace().load_obs(self, obs_config_file)
def reload_obs(self):
EnKFMain.cNamespace().reload_obs(self)
def get_pre_clear_runpath(self):
pre_clear = EnKFMain.cNamespace().get_pre_clear_runpath(self)
return pre_clear
def set_pre_clear_runpath(self, value):
EnKFMain.cNamespace().set_pre_clear_runpath(self, value)
def iget_keep_runpath(self, iens):
ikeep = EnKFMain.cNamespace().iget_keep_runpath(self, iens)
return ikeep
def iset_keep_runpath(self, iens, keep_runpath):
EnKFMain.cNamespace().iset_keep_runpath(self, iens, keep_runpath)
def get_templates(self):
return EnKFMain.cNamespace().get_templates(self).setParent(self)
def get_site_config_file(self):
site_conf_file = EnKFMain.cNamespace().get_site_config_file(self)
return site_conf_file
def getUserConfigFile(self):
""" @rtype: str """
config_file = EnKFMain.cNamespace().get_user_config_file(self)
return config_file
def getHistoryLength(self):
return EnKFMain.cNamespace().get_history_length(self)
def getMemberRunningState(self, ensemble_member):
""" @rtype: EnKFState """
return EnKFMain.cNamespace().iget_state(self, ensemble_member).setParent(self)
def get_observations(self, user_key, obs_count, obs_x, obs_y, obs_std):
EnKFMain.cNamespace().get_observations(self, user_key, obs_count, obs_x, obs_y, obs_std)
def get_observation_count(self, user_key):
return EnKFMain.cNamespace().get_observation_count(self, user_key)
def getEnkfSimulationRunner(self):
""" @rtype: EnkfSimulationRunner """
return self.__simulation_runner
def getEnkfFsManager(self):
""" @rtype: EnkfFsManager """
return self.__fs_manager
def getWorkflowList(self):
""" @rtype: ErtWorkflowList """
return EnKFMain.cNamespace().get_workflow_list(self).setParent(self)
def getPostSimulationHook(self):
""" @rtype: PostSimulationHook """
return EnKFMain.cNamespace().get_qc_module(self)
def exportField(self, keyword, path, iactive, file_type, report_step, state, enkfFs):
"""
@type keyword: str
@type path: str
@type iactive: BoolVector
@type file_type: EnkfFieldFileFormatEnum
@type report_step: int
@type state: EnkfStateType
@type enkfFs: EnkfFs
"""
assert isinstance(keyword, str)
return EnKFMain.cNamespace().export_field_with_fs(self, keyword, path, iactive, file_type, report_step, state, enkfFs)
def loadFromForwardModel(self, realization, iteration, fs):
EnKFMain.cNamespace().load_from_forward_model(self, iteration, realization, fs)
def submitSimulation(self , run_arg):
EnKFMain.cNamespace().submit_simulation( self , run_arg)
def getRunContextENSEMPLE_EXPERIMENT(self , fs , iactive , init_mode = EnkfInitModeEnum.INIT_CONDITIONAL , iteration = 0):
return EnKFMain.cNamespace().alloc_run_context_ENSEMBLE_EXPERIMENT( self , fs , iactive , init_mode , iteration )
##################################################################
cwrapper = CWrapper(ENKF_LIB)
cwrapper.registerType("enkf_main", EnKFMain)
cwrapper.registerType("enkf_main_ref", EnKFMain.createCReference)
EnKFMain.cNamespace().bootstrap = cwrapper.prototype("c_void_p enkf_main_bootstrap(char*, bool, bool)")
EnKFMain.cNamespace().free = cwrapper.prototype("void enkf_main_free(enkf_main)")
EnKFMain.cNamespace().get_ensemble_size = cwrapper.prototype("int enkf_main_get_ensemble_size( enkf_main )")
EnKFMain.cNamespace().get_ens_config = cwrapper.prototype("ens_config_ref enkf_main_get_ensemble_config( enkf_main )")
EnKFMain.cNamespace().get_model_config = cwrapper.prototype("model_config_ref enkf_main_get_model_config( enkf_main )")
EnKFMain.cNamespace().get_local_config = cwrapper.prototype("local_config_ref enkf_main_get_local_config( enkf_main )")
EnKFMain.cNamespace().get_analysis_config = cwrapper.prototype("analysis_config_ref enkf_main_get_analysis_config( enkf_main)")
EnKFMain.cNamespace().get_site_config = cwrapper.prototype("site_config_ref enkf_main_get_site_config( enkf_main)")
EnKFMain.cNamespace().get_ecl_config = cwrapper.prototype("ecl_config_ref enkf_main_get_ecl_config( enkf_main)")
EnKFMain.cNamespace().get_plot_config = cwrapper.prototype("plot_config_ref enkf_main_get_plot_config( enkf_main)")
EnKFMain.cNamespace().set_eclbase = cwrapper.prototype("ui_return_obj enkf_main_set_eclbase( enkf_main, char*)")
EnKFMain.cNamespace().set_datafile = cwrapper.prototype("void enkf_main_set_data_file( enkf_main, char*)")
EnKFMain.cNamespace().get_schedule_prediction_file = cwrapper.prototype("char* enkf_main_get_schedule_prediction_file( enkf_main )")
EnKFMain.cNamespace().set_schedule_prediction_file = cwrapper.prototype("void enkf_main_set_schedule_prediction_file( enkf_main , char*)")
EnKFMain.cNamespace().get_data_kw = cwrapper.prototype("subst_list_ref enkf_main_get_data_kw(enkf_main)")
EnKFMain.cNamespace().clear_data_kw = cwrapper.prototype("void enkf_main_clear_data_kw(enkf_main)")
EnKFMain.cNamespace().add_data_kw = cwrapper.prototype("void enkf_main_add_data_kw(enkf_main, char*, char*)")
EnKFMain.cNamespace().resize_ensemble = cwrapper.prototype("void enkf_main_resize_ensemble(enkf_main, int)")
EnKFMain.cNamespace().del_node = cwrapper.prototype("void enkf_main_del_node(enkf_main, char*)")
EnKFMain.cNamespace().get_obs = cwrapper.prototype("enkf_obs_ref enkf_main_get_obs(enkf_main)")
EnKFMain.cNamespace().load_obs = cwrapper.prototype("void enkf_main_load_obs(enkf_main, char*)")
EnKFMain.cNamespace().reload_obs = cwrapper.prototype("void enkf_main_reload_obs(enkf_main)")
EnKFMain.cNamespace().get_pre_clear_runpath = cwrapper.prototype("bool enkf_main_get_pre_clear_runpath(enkf_main)")
EnKFMain.cNamespace().set_pre_clear_runpath = cwrapper.prototype("void enkf_main_set_pre_clear_runpath(enkf_main, bool)")
EnKFMain.cNamespace().iget_keep_runpath = cwrapper.prototype("int enkf_main_iget_keep_runpath(enkf_main, int)")
EnKFMain.cNamespace().iset_keep_runpath = cwrapper.prototype("void enkf_main_iset_keep_runpath(enkf_main, int, int_vector)")
EnKFMain.cNamespace().get_templates = cwrapper.prototype("ert_templates_ref enkf_main_get_templates(enkf_main)")
EnKFMain.cNamespace().get_site_config_file = cwrapper.prototype("char* enkf_main_get_site_config_file(enkf_main)")
EnKFMain.cNamespace().get_history_length = cwrapper.prototype("int enkf_main_get_history_length(enkf_main)")
EnKFMain.cNamespace().get_observations = cwrapper.prototype("void enkf_main_get_observations(enkf_main, char*, int, long*, double*, double*)")
EnKFMain.cNamespace().get_observation_count = cwrapper.prototype("int enkf_main_get_observation_count(enkf_main, char*)")
EnKFMain.cNamespace().iget_state = cwrapper.prototype("enkf_state_ref enkf_main_iget_state(enkf_main, int)")
EnKFMain.cNamespace().get_workflow_list = cwrapper.prototype("ert_workflow_list_ref enkf_main_get_workflow_list(enkf_main)")
EnKFMain.cNamespace().get_qc_module = cwrapper.prototype("qc_module_ref enkf_main_get_qc_module(enkf_main)")
EnKFMain.cNamespace().fprintf_config = cwrapper.prototype("void enkf_main_fprintf_config(enkf_main)")
EnKFMain.cNamespace().create_new_config = cwrapper.prototype("void enkf_main_create_new_config(char* , char*, char* , char* , int)")
EnKFMain.cNamespace().get_user_config_file = cwrapper.prototype("char* enkf_main_get_user_config_file(enkf_main)")
EnKFMain.cNamespace().get_mount_point = cwrapper.prototype("char* enkf_main_get_mount_root( enkf_main )")
EnKFMain.cNamespace().export_field = cwrapper.prototype("bool enkf_main_export_field(enkf_main, char*, char*, bool_vector, enkf_field_file_format_enum, int, enkf_state_type_enum)")
EnKFMain.cNamespace().export_field_with_fs = cwrapper.prototype("bool enkf_main_export_field_with_fs(enkf_main, char*, char*, bool_vector, enkf_field_file_format_enum, int, enkf_state_type_enum, enkf_fs_manager)")
EnKFMain.cNamespace().load_from_forward_model = cwrapper.prototype("void enkf_main_load_from_forward_model_from_gui(enkf_main, int, bool_vector, enkf_fs)")
EnKFMain.cNamespace().submit_simulation = cwrapper.prototype("void enkf_main_isubmit_job(enkf_main , run_arg)")
EnKFMain.cNamespace().alloc_run_context_ENSEMBLE_EXPERIMENT= cwrapper.prototype("ert_run_context_obj enkf_main_alloc_ert_run_context_ENSEMBLE_EXPERIMENT( enkf_main , enkf_fs , bool_vector , enkf_init_mode_enum , int)")
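# --- Hedged illustration (not part of the original module) -------------------
# Once registered, these prototypes are callable through EnKFMain.cNamespace().
# For example, the binding declared above as
#     "int enkf_main_get_history_length(enkf_main)"
# is typically exposed by a thin wrapper method in the EnKFMain class body,
# roughly like:
#
#     def getHistoryLength(self):
#         return EnKFMain.cNamespace().get_history_length(self)
#
# The wrapper name above is illustrative only; the real methods are defined in
# the class itself, not in this prototype-registration block.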
| iLoop2/ResInsight | ThirdParty/Ert/devel/python/python/ert/enkf/enkf_main.py | Python | gpl-3.0 | 13,314 | 0.007736 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Romeo Theriault <romeot () hawaii.edu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# see examples/playbooks/uri.yml
import cgi
import shutil
import tempfile
import base64
import datetime
try:
import json
except ImportError:
import simplejson as json
DOCUMENTATION = '''
---
module: uri
short_description: Interacts with webservices
description:
- Interacts with HTTP and HTTPS web services and supports Digest, Basic and WSSE
HTTP authentication mechanisms.
version_added: "1.1"
options:
url:
description:
- HTTP or HTTPS URL in the form (http|https)://host.domain[:port]/path
required: true
default: null
aliases: []
dest:
description:
- path of where to download the file to (if desired). If I(dest) is a directory, the basename of the file on the remote server will be used.
required: false
default: null
user:
description:
- username for the module to use for Digest, Basic or WSSE authentication.
required: false
default: null
password:
description:
- password for the module to use for Digest, Basic or WSSE authentication.
required: false
default: null
body:
description:
- The body of the http request/response to the web service.
required: false
default: null
body_format:
description:
- The serialization format of the body. When set to json, encodes the body argument and automatically sets the Content-Type header accordingly.
required: false
choices: [ "raw", "json" ]
default: raw
method:
description:
- The HTTP method of the request or response.
required: false
choices: [ "GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH", "TRACE", "CONNECT", "REFRESH" ]
default: "GET"
return_content:
description:
- Whether or not to return the body of the request as a "content" key in the dictionary result. If the reported Content-type is "application/json", then the JSON is additionally loaded into a key called C(json) in the dictionary results.
required: false
choices: [ "yes", "no" ]
default: "no"
force_basic_auth:
description:
      - httplib2, the library used by the uri module, only sends authentication information when a webservice
responds to an initial request with a 401 status. Since some basic auth services do not properly
send a 401, logins will fail. This option forces the sending of the Basic authentication header
upon initial request.
required: false
choices: [ "yes", "no" ]
default: "no"
follow_redirects:
description:
- Whether or not the URI module should follow redirects. C(all) will follow all redirects.
C(safe) will follow only "safe" redirects, where "safe" means that the client is only
doing a GET or HEAD on the URI to which it is being redirected. C(none) will not follow
any redirects. Note that C(yes) and C(no) choices are accepted for backwards compatibility,
where C(yes) is the equivalent of C(all) and C(no) is the equivalent of C(safe). C(yes) and C(no)
are deprecated and will be removed in some future version of Ansible.
required: false
choices: [ "all", "safe", "none" ]
default: "safe"
creates:
description:
      - a filename; if it already exists, this step will not be run.
required: false
removes:
description:
      - a filename; if it does not exist, this step will not be run.
required: false
status_code:
description:
- A valid, numeric, HTTP status code that signifies success of the request. Can also be comma separated list of status codes.
required: false
default: 200
timeout:
description:
- The socket level timeout in seconds
required: false
default: 30
HEADER_:
description:
      - Any parameter starting with "HEADER_" is sent with your request as a header.
For example, HEADER_Content-Type="application/json" would send the header
"Content-Type" along with your request with a value of "application/json".
required: false
default: null
others:
description:
- all arguments accepted by the M(file) module also work here
required: false
validate_certs:
description:
      - If C(no), SSL certificates will not be validated. This should only be
        set to C(no) on personally controlled sites using self-signed
        certificates. Prior to 1.9.2 the code defaulted to C(no).
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: '1.9.2'
# informational: requirements for nodes
requirements: [ urlparse, httplib2 ]
author: "Romeo Theriault (@romeotheriault)"
'''
EXAMPLES = '''
# Check that you can connect (GET) to a page and it returns a status 200
- uri: url=http://www.example.com
# Check that a page returns a status 200 and fail if the word AWESOME is not in the page contents.
- action: uri url=http://www.example.com return_content=yes
register: webpage
- action: fail
when: "'illustrative' not in webpage.content"
# Create a JIRA issue
- uri:
url: https://your.jira.example.com/rest/api/2/issue/
method: POST
user: your_username
password: your_pass
body: "{{ lookup('file','issue.json') }}"
force_basic_auth: yes
status_code: 201
body_format: json
# Login to a form based webpage, then use the returned cookie to
# access the app in later tasks
- uri:
    url: https://your.form.based.auth.example.com/index.php
method: POST
body: "name=your_username&password=your_password&enter=Sign%20in"
status_code: 302
HEADER_Content-Type: "application/x-www-form-urlencoded"
register: login
- uri:
url: https://your.form.based.auth.example.com/dashboard.php
method: GET
return_content: yes
HEADER_Cookie: "{{login.set_cookie}}"
# Queue build of a project in Jenkins:
- uri:
url: "http://{{ jenkins.host }}/job/{{ jenkins.job }}/build?token={{ jenkins.token }}"
method: GET
user: "{{ jenkins.user }}"
password: "{{ jenkins.password }}"
force_basic_auth: yes
status_code: 201
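# Illustrative only: download a file into a directory and make the task
# idempotent via creates (url and paths below are invented; dest and creates
# behave as documented above)
- uri:
    url: http://www.example.com/files/somefile.tar.gz
    dest: /tmp
    creates: /tmp/somefile.tar.gz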
'''
HAS_HTTPLIB2 = True
try:
import httplib2
except ImportError:
HAS_HTTPLIB2 = False
HAS_URLPARSE = True
try:
import urlparse
import socket
except ImportError:
HAS_URLPARSE = False
def write_file(module, url, dest, content):
# create a tempfile with some test content
fd, tmpsrc = tempfile.mkstemp()
f = open(tmpsrc, 'wb')
try:
f.write(content)
except Exception, err:
os.remove(tmpsrc)
module.fail_json(msg="failed to create temporary content file: %s" % str(err))
f.close()
checksum_src = None
checksum_dest = None
# raise an error if there is no tmpsrc file
if not os.path.exists(tmpsrc):
os.remove(tmpsrc)
module.fail_json(msg="Source %s does not exist" % (tmpsrc))
if not os.access(tmpsrc, os.R_OK):
os.remove(tmpsrc)
module.fail_json( msg="Source %s not readable" % (tmpsrc))
checksum_src = module.sha1(tmpsrc)
# check if there is no dest file
if os.path.exists(dest):
# raise an error if copy has no permission on dest
if not os.access(dest, os.W_OK):
os.remove(tmpsrc)
module.fail_json( msg="Destination %s not writable" % (dest))
if not os.access(dest, os.R_OK):
os.remove(tmpsrc)
module.fail_json( msg="Destination %s not readable" % (dest))
checksum_dest = module.sha1(dest)
else:
if not os.access(os.path.dirname(dest), os.W_OK):
os.remove(tmpsrc)
module.fail_json( msg="Destination dir %s not writable" % (os.path.dirname(dest)))
if checksum_src != checksum_dest:
try:
shutil.copyfile(tmpsrc, dest)
except Exception, err:
os.remove(tmpsrc)
module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err)))
os.remove(tmpsrc)
def url_filename(url):
fn = os.path.basename(urlparse.urlsplit(url)[2])
if fn == '':
return 'index.html'
return fn
def uri(module, url, dest, user, password, body, body_format, method, headers, redirects, socket_timeout, validate_certs):
# To debug
#httplib2.debuglevel = 4
# Handle Redirects
if redirects == "all" or redirects == "yes":
follow_redirects = True
follow_all_redirects = True
elif redirects == "none":
follow_redirects = False
follow_all_redirects = False
else:
follow_redirects = True
follow_all_redirects = False
# Create a Http object and set some default options.
disable_validation = not validate_certs
h = httplib2.Http(disable_ssl_certificate_validation=disable_validation, timeout=socket_timeout)
h.follow_all_redirects = follow_all_redirects
h.follow_redirects = follow_redirects
h.forward_authorization_headers = True
# If they have a username or password verify they have both, then add them to the request
if user is not None and password is None:
module.fail_json(msg="Both a username and password need to be set.")
if password is not None and user is None:
module.fail_json(msg="Both a username and password need to be set.")
if user is not None and password is not None:
h.add_credentials(user, password)
    # if dest is set and is a directory, let's check if we get redirected and
# set the filename from that url
redirected = False
resp_redir = {}
r = {}
if dest is not None:
dest = os.path.expanduser(dest)
if os.path.isdir(dest):
# first check if we are redirected to a file download
h.follow_redirects=False
# Try the request
try:
resp_redir, content_redir = h.request(url, method=method, body=body, headers=headers)
# if we are redirected, update the url with the location header,
# and update dest with the new url filename
except:
pass
if 'status' in resp_redir and resp_redir['status'] in ["301", "302", "303", "307"]:
url = resp_redir['location']
redirected = True
dest = os.path.join(dest, url_filename(url))
        # if destination file already exists, only download if the file is newer
if os.path.exists(dest):
t = datetime.datetime.utcfromtimestamp(os.path.getmtime(dest))
tstamp = t.strftime('%a, %d %b %Y %H:%M:%S +0000')
headers['If-Modified-Since'] = tstamp
# do safe redirects now, including 307
h.follow_redirects=follow_redirects
# Make the request, or try to :)
try:
resp, content = h.request(url, method=method, body=body, headers=headers)
r['redirected'] = redirected
r.update(resp_redir)
r.update(resp)
return r, content, dest
except httplib2.RedirectMissingLocation:
module.fail_json(msg="A 3xx redirect response code was provided but no Location: header was provided to point to the new location.")
except httplib2.RedirectLimit:
module.fail_json(msg="The maximum number of redirections was reached without coming to a final URI.")
except httplib2.ServerNotFoundError:
module.fail_json(msg="Unable to resolve the host name given.")
except httplib2.RelativeURIError:
module.fail_json(msg="A relative, as opposed to an absolute URI, was passed in.")
except httplib2.FailedToDecompressContent:
module.fail_json(msg="The headers claimed that the content of the response was compressed but the decompression algorithm applied to the content failed.")
except httplib2.UnimplementedDigestAuthOptionError:
module.fail_json(msg="The server requested a type of Digest authentication that we are unfamiliar with.")
except httplib2.UnimplementedHmacDigestAuthOptionError:
module.fail_json(msg="The server requested a type of HMACDigest authentication that we are unfamiliar with.")
except httplib2.CertificateHostnameMismatch:
module.fail_json(msg="The server's certificate does not match with its hostname.")
except httplib2.SSLHandshakeError:
module.fail_json(msg="Unable to validate server's certificate against available CA certs.")
except socket.error, e:
module.fail_json(msg="Socket error: %s to %s" % (e, url))
def main():
module = AnsibleModule(
argument_spec = dict(
url = dict(required=True),
dest = dict(required=False, default=None),
user = dict(required=False, default=None),
password = dict(required=False, default=None),
body = dict(required=False, default=None),
body_format = dict(required=False, default='raw', choices=['raw', 'json']),
method = dict(required=False, default='GET', choices=['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS', 'PATCH', 'TRACE', 'CONNECT', 'REFRESH']),
return_content = dict(required=False, default='no', type='bool'),
force_basic_auth = dict(required=False, default='no', type='bool'),
follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']),
creates = dict(required=False, default=None),
removes = dict(required=False, default=None),
status_code = dict(required=False, default=[200], type='list'),
timeout = dict(required=False, default=30, type='int'),
validate_certs = dict(required=False, default=True, type='bool'),
),
check_invalid_arguments=False,
add_file_common_args=True
)
if not HAS_HTTPLIB2:
module.fail_json(msg="httplib2 is not installed")
if not HAS_URLPARSE:
module.fail_json(msg="urlparse is not installed")
url = module.params['url']
user = module.params['user']
password = module.params['password']
body = module.params['body']
body_format = module.params['body_format']
method = module.params['method']
dest = module.params['dest']
return_content = module.params['return_content']
force_basic_auth = module.params['force_basic_auth']
redirects = module.params['follow_redirects']
creates = module.params['creates']
removes = module.params['removes']
status_code = [int(x) for x in list(module.params['status_code'])]
socket_timeout = module.params['timeout']
validate_certs = module.params['validate_certs']
dict_headers = {}
    # If body_format is json, encodes the body (which can be a dict or a list) and automatically sets the Content-Type header
if body_format == 'json':
body = json.dumps(body)
dict_headers['Content-Type'] = 'application/json'
# Grab all the http headers. Need this hack since passing multi-values is currently a bit ugly. (e.g. headers='{"Content-Type":"application/json"}')
for key, value in module.params.iteritems():
if key.startswith("HEADER_"):
skey = key.replace("HEADER_", "")
dict_headers[skey] = value
if creates is not None:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of uri executions.
creates = os.path.expanduser(creates)
if os.path.exists(creates):
module.exit_json(stdout="skipped, since %s exists" % creates, changed=False, stderr=False, rc=0)
if removes is not None:
        # do not run the command if the line contains removes=filename
        # and the filename does not exist. This allows idempotence
        # of uri executions.
        removes = os.path.expanduser(removes)
if not os.path.exists(removes):
module.exit_json(stdout="skipped, since %s does not exist" % removes, changed=False, stderr=False, rc=0)
# httplib2 only sends authentication after the server asks for it with a 401.
    # Some 'basic auth' services fail to send a 401 and require the authentication
# up front. This creates the Basic authentication header and sends it immediately.
if force_basic_auth:
dict_headers["Authorization"] = "Basic {0}".format(base64.b64encode("{0}:{1}".format(user, password)))
# Make the request
resp, content, dest = uri(module, url, dest, user, password, body, body_format, method, dict_headers, redirects, socket_timeout, validate_certs)
resp['status'] = int(resp['status'])
# Write the file out if requested
if dest is not None:
if resp['status'] == 304:
changed = False
else:
write_file(module, url, dest, content)
# allow file attribute changes
changed = True
module.params['path'] = dest
file_args = module.load_file_common_arguments(module.params)
file_args['path'] = dest
changed = module.set_fs_attributes_if_different(file_args, changed)
resp['path'] = dest
else:
changed = False
    # Transmogrify the headers, replacing '-' with '_', since variables don't work with dashes.
uresp = {}
for key, value in resp.iteritems():
ukey = key.replace("-", "_")
uresp[ukey] = value
# Default content_encoding to try
content_encoding = 'utf-8'
if 'content_type' in uresp:
content_type, params = cgi.parse_header(uresp['content_type'])
if 'charset' in params:
content_encoding = params['charset']
u_content = unicode(content, content_encoding, errors='xmlcharrefreplace')
if content_type.startswith('application/json') or \
content_type.startswith('text/json'):
try:
js = json.loads(u_content)
uresp['json'] = js
except:
pass
else:
u_content = unicode(content, content_encoding, errors='xmlcharrefreplace')
if resp['status'] not in status_code:
module.fail_json(msg="Status code was not " + str(status_code), content=u_content, **uresp)
elif return_content:
module.exit_json(changed=changed, content=u_content, **uresp)
else:
module.exit_json(changed=changed, **uresp)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| akirakoyasu/ansible-modules-core | network/basics/uri.py | Python | gpl-3.0 | 19,263 | 0.006489 |
import string
from pyparsing import (
Literal, White, Word, alphanums, CharsNotIn, Forward, Group, SkipTo,
Optional, OneOrMore, ZeroOrMore, pythonStyleComment)
class Parser(object):
left_bracket = Literal("{").suppress()
right_bracket = Literal("}").suppress()
semicolon = Literal(";").suppress()
space = White().suppress()
key = Word(alphanums + "_/")
value = CharsNotIn("{};")
value2 = CharsNotIn(";")
location = CharsNotIn("{};," + string.whitespace)
ifword = Literal("if")
setword = Literal("set")
modifier = Literal("=") | Literal("~*") | Literal("~") | Literal("^~")
assignment = (key + Optional(space + value) + semicolon)
setblock = (setword + OneOrMore(space + value2) + semicolon)
block = Forward()
ifblock = Forward()
subblock = Forward()
ifblock << (
ifword
+ SkipTo('{')
+ left_bracket
+ subblock
+ right_bracket)
subblock << ZeroOrMore(
Group(assignment) | block | ifblock | setblock
)
block << Group(
Group(key + Optional(space + modifier) + Optional(space + location))
+ left_bracket
+ Group(subblock)
+ right_bracket
)
script = OneOrMore(Group(assignment) | block).ignore(pythonStyleComment)
def __init__(self, source):
self.source = source
def parse(self):
return self.script.parseString(self.source)
def as_list(self):
return self.parse().asList()
class Dumper(object):
def __init__(self, blocks, indentation=4):
self.blocks = blocks
self.indentation = indentation
def __iter__(self, blocks=None, current_indent=0, spacer=' '):
blocks = blocks or self.blocks
for key, values in blocks:
if current_indent:
yield spacer
indentation = spacer * current_indent
if isinstance(key, list):
yield indentation + spacer.join(key) + ' {'
for parameter in values:
if isinstance(parameter[0], list):
dumped = self.__iter__(
[parameter],
current_indent + self.indentation)
for line in dumped:
yield line
else:
dumped = spacer.join(parameter) + ';'
yield spacer * (
current_indent + self.indentation) + dumped
yield indentation + '}'
else:
yield spacer * current_indent + key + spacer + values + ';'
def as_string(self):
return '\n'.join(self)
def to_file(self, out):
for line in self:
out.write(line+"\n")
out.close()
return out
def loads(source):
return Parser(source).as_list()
def load(_file):
return loads(_file.read())
def dumps(blocks, indentation=4):
return Dumper(blocks, indentation).as_string()
def dump(blocks, _file, indentation=4):
return Dumper(blocks, indentation).to_file(_file)
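# Hedged usage sketch (not part of the original module): round-trip a small
# nginx-style configuration through loads() and dumps(). The sample text and
# the 4-space indentation are chosen purely for illustration.
if __name__ == '__main__':
    SAMPLE = """
    server {
        listen 80;
        location / {
            proxy_pass http://127.0.0.1:8000;
        }
    }
    """
    tree = loads(SAMPLE)               # nested lists: assignments and blocks
    print(dumps(tree, indentation=4))  # re-serialised configuration text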
| Nat-Lab/pac.py | lib/confParser.py | Python | mit | 3,119 | 0 |
from JumpScale import j
class test_complextype_user_osismodelbase(j.code.classGetJSRootModelBase()):
"""
group of users
"""
def __init__(self):
pass
self._P_id=0
self._P_organization=""
self._P_name=""
self._P_emails=list()
self._P_groups=list()
self._P_guid=""
self._P__meta=list()
self._P__meta=["osismodel","test_complextype","user",1] #@todo version not implemented now, just already foreseen
@property
def id(self):
return self._P_id
@id.setter
def id(self, value):
if not isinstance(value, int) and value is not None:
if isinstance(value, basestring) and j.basetype.integer.checkString(value):
value = j.basetype.integer.fromString(value)
else:
msg="property id input error, needs to be int, specfile: /opt/jumpscale/apps/osis/logic/test_complextype/model.spec, name model: user, value was:" + str(value)
raise TypeError(msg)
self._P_id=value
@id.deleter
def id(self):
del self._P_id
@property
def organization(self):
return self._P_organization
@organization.setter
def organization(self, value):
if not isinstance(value, str) and value is not None:
if isinstance(value, basestring) and j.basetype.string.checkString(value):
value = j.basetype.string.fromString(value)
else:
msg="property organization input error, needs to be str, specfile: /opt/jumpscale/apps/osis/logic/test_complextype/model.spec, name model: user, value was:" + str(value)
raise TypeError(msg)
self._P_organization=value
@organization.deleter
def organization(self):
del self._P_organization
@property
def name(self):
return self._P_name
@name.setter
def name(self, value):
if not isinstance(value, str) and value is not None:
if isinstance(value, basestring) and j.basetype.string.checkString(value):
value = j.basetype.string.fromString(value)
else:
msg="property name input error, needs to be str, specfile: /opt/jumpscale/apps/osis/logic/test_complextype/model.spec, name model: user, value was:" + str(value)
raise TypeError(msg)
self._P_name=value
@name.deleter
def name(self):
del self._P_name
@property
def emails(self):
return self._P_emails
@emails.setter
def emails(self, value):
if not isinstance(value, list) and value is not None:
if isinstance(value, basestring) and j.basetype.list.checkString(value):
value = j.basetype.list.fromString(value)
else:
msg="property emails input error, needs to be list, specfile: /opt/jumpscale/apps/osis/logic/test_complextype/model.spec, name model: user, value was:" + str(value)
raise TypeError(msg)
self._P_emails=value
@emails.deleter
def emails(self):
del self._P_emails
@property
def groups(self):
return self._P_groups
@groups.setter
def groups(self, value):
if not isinstance(value, list) and value is not None:
if isinstance(value, basestring) and j.basetype.list.checkString(value):
value = j.basetype.list.fromString(value)
else:
msg="property groups input error, needs to be list, specfile: /opt/jumpscale/apps/osis/logic/test_complextype/model.spec, name model: user, value was:" + str(value)
raise TypeError(msg)
self._P_groups=value
@groups.deleter
def groups(self):
del self._P_groups
@property
def guid(self):
return self._P_guid
@guid.setter
def guid(self, value):
if not isinstance(value, str) and value is not None:
if isinstance(value, basestring) and j.basetype.string.checkString(value):
value = j.basetype.string.fromString(value)
else:
msg="property guid input error, needs to be str, specfile: /opt/jumpscale/apps/osis/logic/test_complextype/model.spec, name model: user, value was:" + str(value)
raise TypeError(msg)
self._P_guid=value
@guid.deleter
def guid(self):
del self._P_guid
@property
def _meta(self):
return self._P__meta
@_meta.setter
def _meta(self, value):
if not isinstance(value, list) and value is not None:
if isinstance(value, basestring) and j.basetype.list.checkString(value):
value = j.basetype.list.fromString(value)
else:
msg="property _meta input error, needs to be list, specfile: /opt/jumpscale/apps/osis/logic/test_complextype/model.spec, name model: user, value was:" + str(value)
raise TypeError(msg)
self._P__meta=value
@_meta.deleter
def _meta(self):
del self._P__meta
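# Hedged usage sketch (not part of the generated model): the generated setters
# coerce string input into the declared type and raise TypeError otherwise.
# The values below are illustrative only.
#
#     user = test_complextype_user_osismodelbase()
#     user.id = "42"              # accepted: coerced to int via j.basetype.integer
#     user.emails = ["a@b.com"]   # accepted: already a list
#     user.name = 123             # rejected: raises TypeError (needs to be str)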
| Jumpscale/jumpscale6_core | apps/osis/logic/test_complextype/user/test_complextype_user_osismodelbase.py | Python | bsd-2-clause | 5,069 | 0.00868 |
"""
This file includes commonly used utilities for this app.
"""
from datetime import datetime
today = datetime.now()
year = today.year
month = today.month
day = today.day
# The following are image upload helper functions. The first two are used for product uploads (front and back).
# The last two (commented out below) are for design product uploads (front and back); a hedged path illustration follows that block.
def front_image(instance, filename):
# file will be uploaded to MEDIA_ROOT/product_imgs/owner_<id>/product_<id>/Y/m/d/front/<filename>
return 'product_imgs/owner_{0}/product_{1}/{2}/{3}/{4}/front/{5}'.format(instance.owner.id, instance.slug, year, month, day, filename)
def back_image(instance, filename):
# file will be uploaded to MEDIA_ROOT/product_imgs/owner_<id>/product_<id>/Y/m/d/back/<filename>
return 'product_imgs/owner_{0}/product_{1}/{2}/{3}/{4}/back/{5}'.format(instance.owner.id, instance.slug, year, month, day, filename)
'''
def front_design_image(instance, filename):
# file will be uploaded to MEDIA_ROOT/product_imgs/designer_<id>/design_product_<id>/Y/m/d/front/<filename>
return 'product_imgs/designer_{0}/design_product_{1}/{2}/{3}/{4}/front/{5}'.format(instance.designer.id, instance.id, year, month, day, filename)
def back_design_image(instance, filename):
# file will be uploaded to MEDIA_ROOT/product_imgs/designer_<id>/design_product_<id>/Y/m/d/back/<filename>
return 'product_imgs/designer_{0}/design_product_{1}/{2}/{3}/{4}/back/{5}'.format(instance.designer.id, instance.id, year, month, day, filename)
'''
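# Hedged illustration (not part of the original module): for a product whose
# owner has id 7, whose slug is "t-shirt", and an uploaded file "front.jpg",
# front_image() would build a path of the form
#     product_imgs/owner_7/product_t-shirt/<year>/<month>/<day>/front/front.jpg
# All concrete values above are invented for illustration.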
def fill_category_tree(model, deep=0, parent_id=0, tree=[]):
    '''
    NAME::
        fill_category_tree
    DESCRIPTION::
        Walks a category model that has a parent field and builds a tree-shaped
        choices structure from it.
    PARAMETERS::
        :param model: the model class to traverse; it must have a parent attribute
        :param deep: indentation depth; dashes (---) are prefixed to the name to
            make the parent/child hierarchy visible
        :param parent_id: the parent category to start from; 0 means start at the top level
        :param tree: the tree-shaped tuple being built, wrapped in a one-element list
    RETURN::
        No return value is strictly needed, but the tree is returned for callers
        that want it.
    USAGE::
        Call it like this:
        choices = [()]
        fill_category_tree(YourCategoryModel, tree=choices)
        A [] wrapper is used instead of () because only a list is passed by
        reference and can carry the result back to the caller.
    '''
    if parent_id == 0:
        ts = model.objects.filter(parent = None)
        # tree[0] += ((None, 'Select a product type'),)
        for t in ts:
            tmp = [()]
            fill_category_tree(model, 4, t.id, tmp)
            tree[0] += ((t.id, '-'*deep + t.name,),)
            for tt in tmp[0]:
                tree[0] += (tt,)
    else:
        ts = model.objects.filter(parent_id = parent_id)
        for t in ts:
            tree[0] += ((t.id, '-'*deep + t.name,),)
            fill_category_tree(model, deep + 4, t.id, tree)
    return tree
| sunlaiqi/fundiy | src/shop/utils.py | Python | mit | 2,973 | 0.006805 |
# -*- coding: utf-8 -*-
#
# test_disconnect_multiple.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import unittest
__author__ = 'naveau'
class TestDisconnect(unittest.TestCase):
def setUp(self):
nest.ResetKernel()
nest.hl_api.set_verbosity('M_ERROR')
self.exclude_synapse_model = [
'stdp_dopamine_synapse',
'stdp_dopamine_synapse_lbl',
'stdp_dopamine_synapse_hpc',
'stdp_dopamine_synapse_hpc_lbl',
'gap_junction',
'gap_junction_lbl',
'diffusion_connection',
'diffusion_connection_lbl',
'rate_connection_instantaneous',
'rate_connection_instantaneous_lbl',
'rate_connection_delayed',
'rate_connection_delayed_lbl',
'clopath_synapse',
'clopath_synapse_lbl'
]
def test_multiple_synapse_deletion_all_to_all(self):
for syn_model in nest.Models('synapses'):
if syn_model not in self.exclude_synapse_model:
nest.ResetKernel()
nest.CopyModel('static_synapse', 'my_static_synapse')
nest.SetDefaults(syn_model, {'delay': 0.5})
syn_dict = {
'model': syn_model,
'pre_synaptic_element': 'SE1',
'post_synaptic_element': 'SE2'
}
nest.SetKernelStatus({
'min_delay': 0.1,
'max_delay': 1.0,
'structural_plasticity_synapses': {'syn1': syn_dict}
})
neurons = nest.Create('iaf_psc_alpha', 10, {
'synaptic_elements': {
'SE1': {'z': 0.0, 'growth_rate': 0.0},
'SE2': {'z': 0.0, 'growth_rate': 0.0}
}
})
nest.Connect(neurons, neurons, "all_to_all", syn_dict)
# Test if the connected synaptic elements before the simulation
# are correct
status = nest.GetStatus(neurons, 'synaptic_elements')
for st_neuron in status:
self.assertEqual(10, st_neuron['SE1']['z_connected'])
self.assertEqual(10, st_neuron['SE2']['z_connected'])
srcId = range(0, 5)
targId = range(5, 10)
conns = nest.GetConnections(srcId, targId, syn_model)
assert conns
conndictionary = {'rule': 'all_to_all'}
syndictionary = {'model': syn_model}
nest.Disconnect(
[neurons[i] for i in srcId],
[neurons[i] for i in targId],
conndictionary,
syndictionary
)
status = nest.GetStatus(neurons, 'synaptic_elements')
for st_neuron in status[0:5]:
self.assertEqual(5, st_neuron['SE1']['z_connected'])
self.assertEqual(10, st_neuron['SE2']['z_connected'])
for st_neuron in status[5:10]:
self.assertEqual(10, st_neuron['SE1']['z_connected'])
self.assertEqual(5, st_neuron['SE2']['z_connected'])
def test_multiple_synapse_deletion_one_to_one(self):
for syn_model in nest.Models('synapses'):
if syn_model not in self.exclude_synapse_model:
nest.ResetKernel()
nest.CopyModel('static_synapse', 'my_static_synapse')
nest.SetDefaults(syn_model, {'delay': 0.5})
syn_dict = {
'model': syn_model,
'pre_synaptic_element': 'SE1',
'post_synaptic_element': 'SE2'
}
nest.SetKernelStatus({
'min_delay': 0.1,
'max_delay': 1.0,
'structural_plasticity_synapses': {'syn1': syn_dict}
})
neurons = nest.Create('iaf_psc_alpha', 10, {
'synaptic_elements': {
'SE1': {'z': 0.0, 'growth_rate': 0.0},
'SE2': {'z': 0.0, 'growth_rate': 0.0}
}
})
nest.Connect(neurons, neurons, "all_to_all", syn_dict)
# Test if the connected synaptic elements before the simulation
# are correct
status = nest.GetStatus(neurons, 'synaptic_elements')
for st_neuron in status:
self.assertEqual(10, st_neuron['SE1']['z_connected'])
self.assertEqual(10, st_neuron['SE2']['z_connected'])
srcId = range(0, 5)
targId = range(5, 10)
conns = nest.GetConnections(srcId, targId, syn_model)
assert conns
conndictionary = {'rule': 'one_to_one'}
syndictionary = {'model': syn_model}
nest.Disconnect(
[neurons[i] for i in srcId],
[neurons[i] for i in targId],
conndictionary,
syndictionary
)
status = nest.GetStatus(neurons, 'synaptic_elements')
for st_neuron in status[0:5]:
self.assertEqual(9, st_neuron['SE1']['z_connected'])
self.assertEqual(10, st_neuron['SE2']['z_connected'])
for st_neuron in status[5:10]:
self.assertEqual(10, st_neuron['SE1']['z_connected'])
self.assertEqual(9, st_neuron['SE2']['z_connected'])
def test_multiple_synapse_deletion_one_to_one_no_sp(self):
for syn_model in nest.Models('synapses'):
if syn_model not in self.exclude_synapse_model:
nest.ResetKernel()
nest.CopyModel('static_synapse', 'my_static_synapse')
neurons = nest.Create('iaf_psc_alpha', 10)
syn_dict = {'model': syn_model}
nest.Connect(neurons, neurons, "all_to_all", syn_dict)
srcId = range(0, 5)
targId = range(5, 10)
conns = nest.GetConnections(srcId, targId, syn_model)
assert len(conns) == 20
conndictionary = {'rule': 'one_to_one'}
syndictionary = {'model': syn_model}
nest.Disconnect(
[neurons[i] for i in srcId],
[neurons[i] for i in targId],
conndictionary,
syndictionary
)
conns = nest.GetConnections(srcId, targId, syn_model)
assert len(conns) == 16
def test_single_synapse_deletion_sp(self):
for syn_model in nest.Models('synapses'):
if syn_model not in self.exclude_synapse_model:
nest.ResetKernel()
nest.CopyModel('static_synapse', 'my_static_synapse')
syn_dict = {
'model': syn_model,
'pre_synaptic_element': 'SE1',
'post_synaptic_element': 'SE2'
}
# nest.SetKernelStatus(
# {'structural_plasticity_synapses': {'syn1': syn_dict}}
# )
neurons = nest.Create('iaf_psc_alpha', 2, {
'synaptic_elements': {
'SE1': {'z': 0.0, 'growth_rate': 0.0},
'SE2': {'z': 0.0, 'growth_rate': 0.0}
}
})
nest.Connect(neurons, neurons, "all_to_all", syn_dict)
nest.Connect(neurons, neurons, "all_to_all",
{'model': 'my_static_synapse'})
# Test if the connected synaptic elements before the simulation
# are correct
status = nest.GetStatus(neurons, 'synaptic_elements')
for st_neuron in status:
self.assertEqual(2, st_neuron['SE1']['z_connected'])
self.assertEqual(2, st_neuron['SE2']['z_connected'])
srcId = 0
targId = 1
conns = nest.GetConnections(
[neurons[srcId]], [neurons[targId]], syn_model)
assert conns
nest.DisconnectOneToOne(
neurons[srcId], neurons[targId], syn_dict)
status = nest.GetStatus(neurons, 'synaptic_elements')
self.assertEqual(1, status[srcId]['SE1']['z_connected'])
self.assertEqual(2, status[srcId]['SE2']['z_connected'])
self.assertEqual(2, status[targId]['SE1']['z_connected'])
self.assertEqual(1, status[targId]['SE2']['z_connected'])
conns = nest.GetConnections(
[neurons[srcId]], [neurons[targId]], syn_model)
assert not conns
def test_disconnect_defaults(self):
nodes = nest.Create('iaf_psc_alpha', 5)
nest.Connect(nodes, nodes)
self.assertEqual(nest.GetKernelStatus('num_connections'), 25)
nest.Disconnect(nodes, nodes)
self.assertEqual(nest.GetKernelStatus('num_connections'), 20)
def test_disconnect_all_to_all(self):
nodes = nest.Create('iaf_psc_alpha', 5)
nest.Connect(nodes, nodes)
self.assertEqual(nest.GetKernelStatus('num_connections'), 25)
nest.Disconnect(nodes, nodes, 'all_to_all')
self.assertEqual(nest.GetKernelStatus('num_connections'), 0)
def test_disconnect_static_synapse(self):
nodes = nest.Create('iaf_psc_alpha', 5)
nest.Connect(nodes, nodes)
self.assertEqual(nest.GetKernelStatus('num_connections'), 25)
nest.Disconnect(nodes, nodes, syn_spec='static_synapse')
self.assertEqual(nest.GetKernelStatus('num_connections'), 20)
def suite():
test_suite = unittest.makeSuite(TestDisconnect, 'test')
return test_suite
if __name__ == '__main__':
unittest.main()
| terhorstd/nest-simulator | pynest/nest/tests/test_sp/test_disconnect_multiple.py | Python | gpl-2.0 | 10,751 | 0 |
"""File holds the three classes Bit, DigitProduct, and PartialProduct."""
class Bit:
"""Class Bit represents a single bit of a digit-product."""
def __init__(self, identifier, absolute, relative):
self.identifier = identifier
self.absolute = absolute
self.relative = relative
def shift(self, x_bits):
"""Shift bit in its absolute position by x_bits."""
self.absolute %= x_bits
def print_info(self):
"""Print class info."""
print("identifier =", self.identifier)
print("absolute =", self.absolute)
print("relative =", self.relative)
class DigitProduct():
"""Class DigitProduct represents a DSP multiplier i.e. digit-product."""
def __init__(self, identifier, lsb, msb):
self.identifier = identifier
self.lsb = lsb
self.msb = msb
def slice_block(self):
"""Slice digit-product in single bits."""
bit_list = []
for i in range(0, self.msb-self.lsb+1):
bit_list.append(Bit(self.identifier, self.lsb+i, i))
return bit_list
def print_info(self):
"""Print class info."""
print("identifier =", self.identifier)
print(self.msb, "downto", self.lsb)
class PartialProduct:
"""Class PartialProduct represents a partial-product that can hold an
undefined amount of class Bit instances."""
def __init__(self, exp_prime):
self.bit_list = []
self.exp_prime = exp_prime
def add_bit(self, new_bit):
"""Add bit to current partial-product."""
for current_bit in self.bit_list:
if current_bit.absolute == new_bit.absolute:
return False
self.bit_list.append(new_bit)
return True
def print_info(self):
"""Print class info of all bits contained in this partial-product."""
for current_bit in self.bit_list:
current_bit.print_info()
def print_line(self, line_number):
"""Print partial-product indicating whether bit positions are taken."""
print("PP%#02d"% line_number, end=" ")
for i in range(0, self.exp_prime):
success = 0
for current_bit in self.bit_list:
if current_bit.absolute == i:
success = 1
if success == 1:
print("o", end="")
else:
print("x", end="")
print("")
| Koppermann/mod-mul-mersenne | mod_mul_mersenne/classes.py | Python | mit | 2,433 | 0.000822 |
from enigma import eEPGCache, getBestPlayableServiceReference, \
eServiceReference, iRecordableService, quitMainloop
from Components.config import config
from Components.UsageConfig import defaultMoviePath
from Components.TimerSanityCheck import TimerSanityCheck
from Screens.MessageBox import MessageBox
import Screens.Standby
from Tools import Directories, Notifications, ASCIItranslit, Trashcan
from Tools.XMLTools import stringToXML
import timer
import xml.etree.cElementTree
import NavigationInstance
from ServiceReference import ServiceReference
from time import localtime, strftime, ctime, time
from bisect import insort
# ok, for descriptions etc we have:
# service reference (to get the service name)
# name (title)
# description (description)
# event data (ONLY for time adjustments etc.)
# parses an event, and gives out a (begin, end, name, duration, eit)-tuple.
# begin and end will be corrected
def parseEvent(ev, description = True):
if description:
name = ev.getEventName()
description = ev.getShortDescription()
if description == "":
description = ev.getExtendedDescription()
else:
name = ""
description = ""
begin = ev.getBeginTime()
end = begin + ev.getDuration()
eit = ev.getEventId()
begin -= config.recording.margin_before.value * 60
end += config.recording.margin_after.value * 60
return (begin, end, name, description, eit)
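# Hedged illustration (not part of the original code): with margin_before set
# to 5 minutes and margin_after set to 10 minutes, an EPG event running from
# 12:00 to 13:00 yields a timer spanning 11:55 to 13:10. The concrete values
# are invented for illustration.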
class AFTEREVENT:
NONE = 0
STANDBY = 1
DEEPSTANDBY = 2
AUTO = 3
# please do not translate log messages
class RecordTimerEntry(timer.TimerEntry, object):
######### the following static methods and members are only in use when the box is in (soft) standby
receiveRecordEvents = False
@staticmethod
def shutdown():
quitMainloop(1)
@staticmethod
def staticGotRecordEvent(recservice, event):
if event == iRecordableService.evEnd:
print "RecordTimer.staticGotRecordEvent(iRecordableService.evEnd)"
recordings = NavigationInstance.instance.getRecordings()
if not recordings: # no more recordings exist
rec_time = NavigationInstance.instance.RecordTimer.getNextRecordingTime()
if rec_time > 0 and (rec_time - time()) < 360:
print "another recording starts in", rec_time - time(), "seconds... do not shutdown yet"
else:
print "no starting records in the next 360 seconds... immediate shutdown"
RecordTimerEntry.shutdown() # immediate shutdown
elif event == iRecordableService.evStart:
print "RecordTimer.staticGotRecordEvent(iRecordableService.evStart)"
@staticmethod
def stopTryQuitMainloop():
print "RecordTimer.stopTryQuitMainloop"
NavigationInstance.instance.record_event.remove(RecordTimerEntry.staticGotRecordEvent)
RecordTimerEntry.receiveRecordEvents = False
@staticmethod
def TryQuitMainloop(default_yes = True):
if not RecordTimerEntry.receiveRecordEvents:
print "RecordTimer.TryQuitMainloop"
NavigationInstance.instance.record_event.append(RecordTimerEntry.staticGotRecordEvent)
RecordTimerEntry.receiveRecordEvents = True
			# send fake event.. to check if other recordings are running or
			# other timers start in a few seconds
RecordTimerEntry.staticGotRecordEvent(None, iRecordableService.evEnd)
			# send normal notification in case the user leaves standby now..
Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 1, onSessionOpenCallback=RecordTimerEntry.stopTryQuitMainloop, default_yes = default_yes)
#################################################################
def __init__(self, serviceref, begin, end, name, description, eit, disabled = False, justplay = False, afterEvent = AFTEREVENT.AUTO, checkOldTimers = False, dirname = None, tags = None):
timer.TimerEntry.__init__(self, int(begin), int(end))
if checkOldTimers == True:
if self.begin < time() - 1209600:
self.begin = int(time())
if self.end < self.begin:
self.end = self.begin
assert isinstance(serviceref, ServiceReference)
if serviceref.isRecordable():
self.service_ref = serviceref
else:
self.service_ref = ServiceReference(None)
self.eit = eit
self.dontSave = False
self.name = name
self.description = description
self.disabled = disabled
self.timer = None
self.__record_service = None
self.start_prepare = 0
self.justplay = justplay
self.afterEvent = afterEvent
self.dirname = dirname
self.dirnameHadToFallback = False
self.autoincrease = False
self.autoincreasetime = 3600 * 24 # 1 day
self.tags = tags or []
self.log_entries = []
self.resetState()
def log(self, code, msg):
self.log_entries.append((int(time()), code, msg))
print "[TIMER]", msg
def calculateFilename(self):
service_name = self.service_ref.getServiceName()
begin_date = strftime("%Y%m%d %H%M", localtime(self.begin))
begin_shortdate = strftime("%Y%m%d", localtime(self.begin))
print "begin_date: ", begin_date
print "service_name: ", service_name
print "name:", self.name
print "description: ", self.description
filename = begin_date + " - " + service_name
if self.name:
if config.usage.setup_level.index >= 2: # expert+
if config.recording.filename_composition.value == "short":
filename = begin_shortdate + " - " + self.name
elif config.recording.filename_composition.value == "long":
filename += " - " + self.name + " - " + self.description
else:
filename += " - " + self.name # standard
else:
filename += " - " + self.name
if config.recording.ascii_filenames.value:
filename = ASCIItranslit.legacyEncode(filename)
if not self.dirname or not Directories.fileExists(self.dirname, 'w'):
if self.dirname:
self.dirnameHadToFallback = True
dirname = defaultMoviePath()
else:
dirname = self.dirname
self.Filename = Directories.getRecordingFilename(filename, dirname)
self.log(0, "Filename calculated as: '%s'" % self.Filename)
#begin_date + " - " + service_name + description)
def tryPrepare(self):
if self.justplay:
return True
else:
self.calculateFilename()
rec_ref = self.service_ref and self.service_ref.ref
if rec_ref and rec_ref.flags & eServiceReference.isGroup:
rec_ref = getBestPlayableServiceReference(rec_ref, eServiceReference())
if not rec_ref:
self.log(1, "'get best playable service for group... record' failed")
return False
self.record_service = rec_ref and NavigationInstance.instance.recordService(rec_ref)
if not self.record_service:
self.log(1, "'record service' failed")
return False
if self.repeated:
epgcache = eEPGCache.getInstance()
queryTime=self.begin+(self.end-self.begin)/2
evt = epgcache.lookupEventTime(rec_ref, queryTime)
if evt:
self.description = evt.getShortDescription()
if self.description == "":
description = evt.getExtendedDescription()
event_id = evt.getEventId()
else:
event_id = -1
else:
event_id = self.eit
if event_id is None:
event_id = -1
prep_res=self.record_service.prepare(self.Filename + ".ts", self.begin, self.end, event_id, self.name.replace("\n", ""), self.description.replace("\n", ""), ' '.join(self.tags))
if prep_res:
if prep_res == -255:
self.log(4, "failed to write meta information")
else:
self.log(2, "'prepare' failed: error %d" % prep_res)
				# we must calculate the new start time before the stopRecordService call because in Screens/Standby.py TryQuitMainloop tries to get
# the next start time in evEnd event handler...
self.do_backoff()
self.start_prepare = time() + self.backoff
NavigationInstance.instance.stopRecordService(self.record_service)
self.record_service = None
return False
return True
def do_backoff(self):
if self.backoff == 0:
self.backoff = 5
else:
self.backoff *= 2
if self.backoff > 100:
self.backoff = 100
self.log(10, "backoff: retry in %d seconds" % self.backoff)
def activate(self):
next_state = self.state + 1
self.log(5, "activating state %d" % next_state)
if next_state == self.StatePrepared:
if self.tryPrepare():
self.log(6, "prepare ok, waiting for begin")
# create file to "reserve" the filename
# because another recording at the same time on another service can try to record the same event
				# i.e. cable / sat.. then the second recording needs its own extension... when we create the file
				# here then calculateFilename is happy
if not self.justplay:
open(self.Filename + ".ts", "w").close()
# Give the Trashcan a chance to clean up
try:
Trashcan.instance.cleanIfIdle()
except Exception, e:
print "[TIMER] Failed to call Trashcan.instance.cleanIfIdle()"
print "[TIMER] Error:", e
# fine. it worked, resources are allocated.
self.next_activation = self.begin
self.backoff = 0
return True
self.log(7, "prepare failed")
if self.first_try_prepare:
self.first_try_prepare = False
cur_ref = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
if cur_ref and not cur_ref.getPath():
if not config.recording.asktozap.value:
self.log(8, "asking user to zap away")
Notifications.AddNotificationWithCallback(self.failureCB, MessageBox, _("A timer failed to record!\nDisable TV and try again?\n"), timeout=20)
else: # zap without asking
self.log(9, "zap without asking")
Notifications.AddNotification(MessageBox, _("In order to record a timer, the TV was switched to the recording service!\n"), type=MessageBox.TYPE_INFO, timeout=20)
self.failureCB(True)
elif cur_ref:
self.log(8, "currently running service is not a live service.. so stop it makes no sense")
else:
self.log(8, "currently no service running... so we dont need to stop it")
return False
elif next_state == self.StateRunning:
# if this timer has been cancelled, just go to "end" state.
if self.cancelled:
return True
if self.justplay:
if Screens.Standby.inStandby:
self.log(11, "wakeup and zap")
#set service to zap after standby
Screens.Standby.inStandby.prev_running_service = self.service_ref.ref
#wakeup standby
Screens.Standby.inStandby.Power()
else:
self.log(11, "zapping")
NavigationInstance.instance.playService(self.service_ref.ref)
return True
else:
self.log(11, "start recording")
record_res = self.record_service.start()
if record_res:
self.log(13, "start record returned %d" % record_res)
self.do_backoff()
# retry
self.begin = time() + self.backoff
return False
return True
elif next_state == self.StateEnded:
old_end = self.end
if self.setAutoincreaseEnd():
self.log(12, "autoincrase recording %d minute(s)" % int((self.end - old_end)/60))
self.state -= 1
return True
self.log(12, "stop recording")
if not self.justplay:
NavigationInstance.instance.stopRecordService(self.record_service)
self.record_service = None
if self.afterEvent == AFTEREVENT.STANDBY:
if not Screens.Standby.inStandby: # not already in standby
Notifications.AddNotificationWithCallback(self.sendStandbyNotification, MessageBox, _("A finished record timer wants to set your\nBox to standby. Do that now?"), timeout = 20)
elif self.afterEvent == AFTEREVENT.DEEPSTANDBY:
if not Screens.Standby.inTryQuitMainloop: # not a shutdown messagebox is open
if Screens.Standby.inStandby: # in standby
RecordTimerEntry.TryQuitMainloop() # start shutdown handling without screen
else:
Notifications.AddNotificationWithCallback(self.sendTryQuitMainloopNotification, MessageBox, _("A finished record timer wants to shut down\nyour Box. Shutdown now?"), timeout = 20)
return True
def setAutoincreaseEnd(self, entry = None):
if not self.autoincrease:
return False
if entry is None:
new_end = int(time()) + self.autoincreasetime
else:
new_end = entry.begin -30
dummyentry = RecordTimerEntry(self.service_ref, self.begin, new_end, self.name, self.description, self.eit, disabled=True, justplay = self.justplay, afterEvent = self.afterEvent, dirname = self.dirname, tags = self.tags)
dummyentry.disabled = self.disabled
timersanitycheck = TimerSanityCheck(NavigationInstance.instance.RecordTimer.timer_list, dummyentry)
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
new_end = simulTimerList[1].begin
del simulTimerList
		new_end -= 30 # leave 30 seconds of prepare time
del dummyentry
if new_end <= time():
return False
self.end = new_end
return True
def sendStandbyNotification(self, answer):
if answer:
Notifications.AddNotification(Screens.Standby.Standby)
def sendTryQuitMainloopNotification(self, answer):
if answer:
Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 1)
def getNextActivation(self):
if self.state == self.StateEnded:
return self.end
next_state = self.state + 1
return {self.StatePrepared: self.start_prepare,
self.StateRunning: self.begin,
self.StateEnded: self.end }[next_state]
def failureCB(self, answer):
if answer == True:
self.log(13, "ok, zapped away")
#NavigationInstance.instance.stopUserServices()
NavigationInstance.instance.playService(self.service_ref.ref)
else:
self.log(14, "user didn't want to zap away, record will probably fail")
def timeChanged(self):
old_prepare = self.start_prepare
self.start_prepare = self.begin - self.prepare_time
self.backoff = 0
if int(old_prepare) != int(self.start_prepare):
self.log(15, "record time changed, start prepare is now: %s" % ctime(self.start_prepare))
def gotRecordEvent(self, record, event):
# TODO: this is not working (never true), please fix. (comparing two swig wrapped ePtrs)
if self.__record_service.__deref__() != record.__deref__():
return
self.log(16, "record event %d" % event)
if event == iRecordableService.evRecordWriteError:
print "WRITE ERROR on recording, disk full?"
# show notification. the 'id' will make sure that it will be
# displayed only once, even if more timers are failing at the
# same time. (which is very likely in case of disk fullness)
Notifications.AddPopup(text = _("Write error while recording. Disk full?\n"), type = MessageBox.TYPE_ERROR, timeout = 0, id = "DiskFullMessage")
# ok, the recording has been stopped. we need to properly note
# that in our state, with also keeping the possibility to re-try.
# TODO: this has to be done.
elif event == iRecordableService.evStart:
text = _("A record has been started:\n%s") % self.name
if self.dirnameHadToFallback:
text = '\n'.join((text, _("Please note that the previously selected media could not be accessed and therefore the default directory is being used instead.")))
if config.usage.show_message_when_recording_starts.value:
Notifications.AddPopup(text = text, type = MessageBox.TYPE_INFO, timeout = 8)
# we have record_service as property to automatically subscribe to record service events
def setRecordService(self, service):
if self.__record_service is not None:
print "[remove callback]"
NavigationInstance.instance.record_event.remove(self.gotRecordEvent)
self.__record_service = service
if self.__record_service is not None:
print "[add callback]"
NavigationInstance.instance.record_event.append(self.gotRecordEvent)
record_service = property(lambda self: self.__record_service, setRecordService)
def createTimer(xml):
begin = int(xml.get("begin"))
end = int(xml.get("end"))
serviceref = ServiceReference(xml.get("serviceref").encode("utf-8"))
description = xml.get("description").encode("utf-8")
repeated = xml.get("repeated").encode("utf-8")
disabled = long(xml.get("disabled") or "0")
justplay = long(xml.get("justplay") or "0")
afterevent = str(xml.get("afterevent") or "nothing")
afterevent = {
"nothing": AFTEREVENT.NONE,
"standby": AFTEREVENT.STANDBY,
"deepstandby": AFTEREVENT.DEEPSTANDBY,
"auto": AFTEREVENT.AUTO
}[afterevent]
eit = xml.get("eit")
if eit and eit != "None":
eit = long(eit);
else:
eit = None
location = xml.get("location")
if location and location != "None":
location = location.encode("utf-8")
else:
location = None
tags = xml.get("tags")
if tags and tags != "None":
tags = tags.encode("utf-8").split(' ')
else:
tags = None
name = xml.get("name").encode("utf-8")
#filename = xml.get("filename").encode("utf-8")
entry = RecordTimerEntry(serviceref, begin, end, name, description, eit, disabled, justplay, afterevent, dirname = location, tags = tags)
entry.repeated = int(repeated)
for l in xml.findall("log"):
time = int(l.get("time"))
code = int(l.get("code"))
msg = l.text.strip().encode("utf-8")
entry.log_entries.append((time, code, msg))
return entry
class RecordTimer(timer.Timer):
def __init__(self):
timer.Timer.__init__(self)
self.Filename = Directories.resolveFilename(Directories.SCOPE_CONFIG, "timers.xml")
try:
self.loadTimer()
except IOError:
print "unable to load timers from file!"
def doActivate(self, w):
# when activating a timer which has already passed,
		# simply abort the timer. don't run through all the stages.
if w.shouldSkip():
w.state = RecordTimerEntry.StateEnded
else:
# when active returns true, this means "accepted".
# otherwise, the current state is kept.
# the timer entry itself will fix up the delay then.
if w.activate():
w.state += 1
self.timer_list.remove(w)
			# did this timer reach the last state?
if w.state < RecordTimerEntry.StateEnded:
# no, sort it into active list
insort(self.timer_list, w)
else:
# yes. Process repeated, and re-add.
if w.repeated:
w.processRepeated()
w.state = RecordTimerEntry.StateWaiting
self.addTimerEntry(w)
else:
# Remove old timers as set in config
self.cleanupDaily(config.recording.keep_timers.value)
insort(self.processed_timers, w)
self.stateChanged(w)
def isRecording(self):
isRunning = False
for timer in self.timer_list:
if timer.isRunning() and not timer.justplay:
isRunning = True
return isRunning
def loadTimer(self):
# TODO: PATH!
if not Directories.fileExists(self.Filename):
return
try:
doc = xml.etree.cElementTree.parse(self.Filename)
except SyntaxError:
from Tools.Notifications import AddPopup
from Screens.MessageBox import MessageBox
AddPopup(_("The timer file (timers.xml) is corrupt and could not be loaded."), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
print "timers.xml failed to load!"
try:
import os
os.rename(self.Filename, self.Filename + "_old")
except (IOError, OSError):
print "renaming broken timer failed"
return
except IOError:
print "timers.xml not found!"
return
root = doc.getroot()
# put out a message when at least one timer overlaps
checkit = True
for timer in root.findall("timer"):
newTimer = createTimer(timer)
if (self.record(newTimer, True, dosave=False) is not None) and (checkit == True):
from Tools.Notifications import AddPopup
from Screens.MessageBox import MessageBox
AddPopup(_("Timer overlap in timers.xml detected!\nPlease recheck it!"), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
				checkit = False # for now it is enough if the message is displayed once
def saveTimer(self):
#root_element = xml.etree.cElementTree.Element('timers')
#root_element.text = "\n"
#for timer in self.timer_list + self.processed_timers:
# some timers (instant records) don't want to be saved.
# skip them
#if timer.dontSave:
#continue
#t = xml.etree.cElementTree.SubElement(root_element, 'timers')
#t.set("begin", str(int(timer.begin)))
#t.set("end", str(int(timer.end)))
#t.set("serviceref", str(timer.service_ref))
#t.set("repeated", str(timer.repeated))
#t.set("name", timer.name)
#t.set("description", timer.description)
#t.set("afterevent", str({
# AFTEREVENT.NONE: "nothing",
# AFTEREVENT.STANDBY: "standby",
# AFTEREVENT.DEEPSTANDBY: "deepstandby",
# AFTEREVENT.AUTO: "auto"}))
#if timer.eit is not None:
# t.set("eit", str(timer.eit))
#if timer.dirname is not None:
# t.set("location", str(timer.dirname))
#t.set("disabled", str(int(timer.disabled)))
#t.set("justplay", str(int(timer.justplay)))
#t.text = "\n"
#t.tail = "\n"
#for time, code, msg in timer.log_entries:
#l = xml.etree.cElementTree.SubElement(t, 'log')
#l.set("time", str(time))
#l.set("code", str(code))
#l.text = str(msg)
#l.tail = "\n"
#doc = xml.etree.cElementTree.ElementTree(root_element)
#doc.write(self.Filename)
list = []
list.append('<?xml version="1.0" ?>\n')
list.append('<timers>\n')
for timer in self.timer_list + self.processed_timers:
if timer.dontSave:
continue
list.append('<timer')
list.append(' begin="' + str(int(timer.begin)) + '"')
list.append(' end="' + str(int(timer.end)) + '"')
list.append(' serviceref="' + stringToXML(str(timer.service_ref)) + '"')
list.append(' repeated="' + str(int(timer.repeated)) + '"')
list.append(' name="' + str(stringToXML(timer.name)) + '"')
list.append(' description="' + str(stringToXML(timer.description)) + '"')
list.append(' afterevent="' + str(stringToXML({
AFTEREVENT.NONE: "nothing",
AFTEREVENT.STANDBY: "standby",
AFTEREVENT.DEEPSTANDBY: "deepstandby",
AFTEREVENT.AUTO: "auto"
}[timer.afterEvent])) + '"')
if timer.eit is not None:
list.append(' eit="' + str(timer.eit) + '"')
if timer.dirname is not None:
list.append(' location="' + str(stringToXML(timer.dirname)) + '"')
if timer.tags is not None:
list.append(' tags="' + str(stringToXML(' '.join(timer.tags))) + '"')
list.append(' disabled="' + str(int(timer.disabled)) + '"')
list.append(' justplay="' + str(int(timer.justplay)) + '"')
list.append('>\n')
if config.recording.debug.value:
for time, code, msg in timer.log_entries:
list.append('<log')
list.append(' code="' + str(code) + '"')
list.append(' time="' + str(time) + '"')
list.append('>')
list.append(str(stringToXML(msg)))
list.append('</log>\n')
list.append('</timer>\n')
list.append('</timers>\n')
file = open(self.Filename, "w")
for x in list:
file.write(x)
file.close()
def getNextZapTime(self):
now = time()
for timer in self.timer_list:
if not timer.justplay or timer.begin < now:
continue
return timer.begin
return -1
def getNextRecordingTime(self):
now = time()
for timer in self.timer_list:
next_act = timer.getNextActivation()
if timer.justplay or next_act < now:
continue
return next_act
return -1
def isNextRecordAfterEventActionAuto(self):
now = time()
t = None
for timer in self.timer_list:
if timer.justplay or timer.begin < now:
continue
if t is None or t.begin == timer.begin:
t = timer
if t.afterEvent == AFTEREVENT.AUTO:
return True
return False
	def record(self, entry, ignoreTSC=False, dosave=True): # called from loadTimer with dosave=False
timersanitycheck = TimerSanityCheck(self.timer_list,entry)
if not timersanitycheck.check():
if ignoreTSC != True:
print "timer conflict detected!"
print timersanitycheck.getSimulTimerList()
return timersanitycheck.getSimulTimerList()
else:
print "ignore timer conflict"
elif timersanitycheck.doubleCheck():
print "ignore double timer"
return None
entry.timeChanged()
print "[Timer] Record " + str(entry)
entry.Timer = self
self.addTimerEntry(entry)
if dosave:
self.saveTimer()
return None
def isInTimer(self, eventid, begin, duration, service):
time_match = 0
chktime = None
chktimecmp = None
chktimecmp_end = None
end = begin + duration
refstr = str(service)
for x in self.timer_list:
check = x.service_ref.ref.toString() == refstr
if not check:
sref = x.service_ref.ref
parent_sid = sref.getUnsignedData(5)
parent_tsid = sref.getUnsignedData(6)
if parent_sid and parent_tsid: # check for subservice
sid = sref.getUnsignedData(1)
tsid = sref.getUnsignedData(2)
sref.setUnsignedData(1, parent_sid)
sref.setUnsignedData(2, parent_tsid)
sref.setUnsignedData(5, 0)
sref.setUnsignedData(6, 0)
check = sref.toCompareString() == refstr
num = 0
if check:
check = False
event = eEPGCache.getInstance().lookupEventId(sref, eventid)
num = event and event.getNumOfLinkageServices() or 0
sref.setUnsignedData(1, sid)
sref.setUnsignedData(2, tsid)
sref.setUnsignedData(5, parent_sid)
sref.setUnsignedData(6, parent_tsid)
for cnt in range(num):
subservice = event.getLinkageService(sref, cnt)
if sref.toCompareString() == subservice.toCompareString():
check = True
break
if check:
if x.repeated != 0:
if chktime is None:
chktime = localtime(begin)
chktimecmp = chktime.tm_wday * 1440 + chktime.tm_hour * 60 + chktime.tm_min
chktimecmp_end = chktimecmp + (duration / 60)
time = localtime(x.begin)
for y in (0, 1, 2, 3, 4, 5, 6):
if x.repeated & (1 << y) and (x.begin <= begin or begin <= x.begin <= end):
timecmp = y * 1440 + time.tm_hour * 60 + time.tm_min
if timecmp <= chktimecmp < (timecmp + ((x.end - x.begin) / 60)):
time_match = ((timecmp + ((x.end - x.begin) / 60)) - chktimecmp) * 60
elif chktimecmp <= timecmp < chktimecmp_end:
time_match = (chktimecmp_end - timecmp) * 60
else: #if x.eit is None:
if begin <= x.begin <= end:
diff = end - x.begin
if time_match < diff:
time_match = diff
elif x.begin <= begin <= x.end:
diff = x.end - begin
if time_match < diff:
time_match = diff
if time_match:
break
return time_match
def removeEntry(self, entry):
print "[Timer] Remove " + str(entry)
# avoid re-enqueuing
entry.repeated = False
# abort timer.
# this sets the end time to current time, so timer will be stopped.
entry.autoincrease = False
entry.abort()
if entry.state != entry.StateEnded:
self.timeChanged(entry)
print "state: ", entry.state
print "in processed: ", entry in self.processed_timers
print "in running: ", entry in self.timer_list
# autoincrease instanttimer if possible
if not entry.dontSave:
for x in self.timer_list:
if x.setAutoincreaseEnd():
self.timeChanged(x)
# now the timer should be in the processed_timers list. remove it from there.
self.processed_timers.remove(entry)
self.saveTimer()
def shutdown(self):
self.saveTimer()
| openpli-arm/enigma2-arm | RecordTimer.py | Python | gpl-2.0 | 26,731 | 0.033781 |
import theano
theano.test()
| 1iyiwei/pyml | code/ch13/theano_test.py | Python | mit | 29 | 0 |
#!/usr/bin/python
# Copyright (C) 2013 Patrick Totzke <patricktotzke@gmail.com>
# This file is released under the GNU GPL, version 3 or a later revision.
import urwid
import os
from example1 import palette # example data
from widgets import TreeBox
from tree import Tree
from decoration import CollapsibleArrowTree
# define selectable urwid.Text widgets to display paths
class FocusableText(urwid.WidgetWrap):
"""Widget to display paths lines"""
def __init__(self, txt):
t = urwid.Text(txt)
w = urwid.AttrMap(t, 'body', 'focus')
urwid.WidgetWrap.__init__(self, w)
def selectable(self):
return True
def keypress(self, size, key):
return key
# define Tree that can walk your filesystem
class DirectoryTree(Tree):
"""
A custom Tree representing our filesystem structure.
This implementation is rather inefficient: basically every position-lookup
    will call `os.listdir`. This makes navigation in the tree quite slow.
    In real life you'd want to do some caching; one possible cached variant
    is sketched just after this class.
As positions we use absolute path strings.
"""
# determine dir separator and form of root node
pathsep = os.path.sep
drive, _ = os.path.splitdrive(pathsep)
    # define root node. This is part of the Tree API!
root = drive + pathsep
def __getitem__(self, pos):
return FocusableText(pos)
# generic helper
def _list_dir(self, path):
"""returns absolute paths for all entries in a directory"""
try:
elements = [os.path.join(
path, x) for x in os.listdir(path) if os.path.isdir(path)]
elements.sort()
except OSError:
elements = None
return elements
def _get_siblings(self, pos):
"""lists the parent directory of pos """
parent = self.parent_position(pos)
siblings = [pos]
if parent is not None:
siblings = self._list_dir(parent)
return siblings
# Tree API
def parent_position(self, pos):
parent = None
if pos != '/':
parent = os.path.split(pos)[0]
return parent
def first_child_position(self, pos):
candidate = None
if os.path.isdir(pos):
children = self._list_dir(pos)
if children:
candidate = children[0]
return candidate
def last_child_position(self, pos):
candidate = None
if os.path.isdir(pos):
children = self._list_dir(pos)
if children:
candidate = children[-1]
return candidate
def next_sibling_position(self, pos):
candidate = None
siblings = self._get_siblings(pos)
myindex = siblings.index(pos)
if myindex + 1 < len(siblings): # pos is not the last entry
candidate = siblings[myindex + 1]
return candidate
def prev_sibling_position(self, pos):
candidate = None
siblings = self._get_siblings(pos)
myindex = siblings.index(pos)
if myindex > 0: # pos is not the first entry
candidate = siblings[myindex - 1]
return candidate
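# One way to address the inefficiency noted in the DirectoryTree docstring is
# to memoize the directory listings. This is an illustrative sketch, not part
# of the original example; it deliberately ignores cache invalidation when
# directories change on disk.
class CachedDirectoryTree(DirectoryTree):
    """DirectoryTree variant that caches _list_dir results per path."""
    _dir_cache = None
    def _list_dir(self, path):
        if self._dir_cache is None:
            self._dir_cache = {}
        if path not in self._dir_cache:
            self._dir_cache[path] = DirectoryTree._list_dir(self, path)
        return self._dir_cache[path]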
def unhandled_input(k):
#exit on q
if k in ['q', 'Q']: raise urwid.ExitMainLoop()
if __name__ == "__main__":
cwd = os.getcwd() # get current working directory
dtree = DirectoryTree() # get a directory walker
# Use CollapsibleArrowTree for decoration.
# define initial collapse:
as_deep_as_cwd = lambda pos: dtree.depth(pos) >= dtree.depth(cwd)
# We hide the usual arrow tip and use a customized collapse-icon.
decorated_tree = CollapsibleArrowTree(dtree,
is_collapsed=as_deep_as_cwd,
arrow_tip_char=None,
icon_frame_left_char=None,
icon_frame_right_char=None,
icon_collapsed_char=u'\u25B6',
icon_expanded_char=u'\u25B7',)
# stick it into a TreeBox and use 'body' color attribute for gaps
tb = TreeBox(decorated_tree, focus=cwd)
root_widget = urwid.AttrMap(tb, 'body')
#add a text footer
footer = urwid.AttrMap(urwid.Text('Q to quit'), 'focus')
#enclose all in a frame
urwid.MainLoop(urwid.Frame(root_widget, footer=footer), palette, unhandled_input = unhandled_input).run() # go
| audebert/alot | alot/foreign/urwidtrees/example4.filesystem.py | Python | gpl-3.0 | 4,462 | 0.002465 |
# -*- coding: utf-8 -*-
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class ExternalSyncTemplate(models.Model):
_description = 'External Sync Template'
_name = 'clv.external_sync.template'
_order = 'name'
name = fields.Char(
string='Name',
required=True,
help='External Sync Template Name'
)
external_host_id = fields.Many2one(
comodel_name='clv.external_sync.host',
string='External Host'
)
external_max_task = fields.Integer(
string='Max Task Registers'
)
external_disable_identification = fields.Boolean(
string='Disable Identification'
)
external_disable_check_missing = fields.Boolean(
string='Disable Check Missing'
)
external_disable_inclusion = fields.Boolean(
string='Disable Inclusion'
)
external_disable_sync = fields.Boolean(
string='Disable Sync'
)
external_last_update_start = fields.Datetime(
string="Last Update (Start)"
)
external_last_update_end = fields.Datetime(
string="Last Update (End)"
)
enable_sequence_code_sync = fields.Boolean(
string='Enable Sequence Code Sync'
)
notes = fields.Text(string='Notes')
date_inclusion = fields.Datetime(
string='Inclusion Date',
default=fields.Datetime.now)
model = fields.Char(
string='Model',
required=True,
help="Model name of the object on which the synchronization method to be called is located, e.g. 'res.partner'"
)
method = fields.Char(
string='Method',
required=True,
help="Name of the method to be called when the synchronization job is processed."
)
sequence_code = fields.Char(
string='Sequence Code',
required=False,
help="Code of the Sequence to be synchronized when the synchronization job is processed."
)
external_model = fields.Char(
string='External Model',
required=True,
help="External model name, e.g. 'res.partner'"
)
external_sequence_code = fields.Char(
string='External Sequence Code',
required=False,
        help="External Sequence Code, e.g. 'clv_address.code'."
)
active = fields.Boolean(string='Active', default=1)
_sql_constraints = [
('name_uniq',
'UNIQUE (name)',
u'Error! The Name must be unique!'),
]
| CLVsol/clvsol_odoo_addons | clv_external_sync/models/external_sync_template.py | Python | agpl-3.0 | 2,540 | 0.001181 |
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import volatility.obj as obj
import volatility.plugins.mac.common as common
import volatility.plugins.mac.lsmod as lsmod
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
class mac_socket_filters(lsmod.mac_lsmod):
""" Reports socket filters """
def calculate(self):
common.set_plugin_members(self)
        # get the kernel symbol addresses needed to check whether the handlers belong to known code or a rootkit
(kernel_symbol_addresses, kmods) = common.get_kernel_addrs(self)
members = ["sf_unregistered", "sf_attach", "sf_detach", "sf_notify", "sf_getpeername", "sf_getsockname"]
members = members + ["sf_data_in", "sf_data_out", "sf_connect_in", "sf_connect_out", "sf_bind", "sf_setoption"]
members = members + ["sf_getoption", "sf_listen", "sf_ioctl"]
sock_filter_head_addr = self.addr_space.profile.get_symbol("_sock_filter_head")
sock_filter_list = obj.Object("socket_filter_list", offset = sock_filter_head_addr, vm = self.addr_space)
cur = sock_filter_list.tqh_first
while cur:
filter = cur.sf_filter
filter_name = self.addr_space.read(filter.sf_name, 256)
            idx = filter_name.find("\x00")  # find() returns -1 when no NUL terminator is present
if idx != -1:
filter_name = filter_name[:idx]
filter_socket = cur.sf_entry_head.sfe_socket.obj_offset
for member in members:
ptr = filter.m(member)
if not ptr:
continue
(good, module) = common.is_known_address_name(ptr.v(), kernel_symbol_addresses, kmods)
yield good, filter, filter_name, filter_socket, member, ptr, module
cur = cur.sf_global_next.tqe_next
def unified_output(self, data):
return TreeGrid([("Offset (V)", Address),
("Filter Name", str),
("Filter Member", str),
("Socket (V)", Address),
("Handler", Address),
("Module", str),
("Status", str),
], self.generator(data))
def generator(self, data):
for (good, filter, filter_name, filter_socket, member, ptr, module) in data:
if good == 0:
status = "UNKNOWN"
else:
status = "OK"
yield(0, [
Address(filter.obj_offset),
str(filter_name),
str(member),
Address(filter_socket),
Address(ptr),
str(module),
str(status),
])
| cyli/volatility | volatility/plugins/mac/socket_filters.py | Python | gpl-2.0 | 3,566 | 0.00673 |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from .base import FunctionalTest
class RecipeEditTest(FunctionalTest):
def test_can_add_a_recipe(self):
# Ben goes to the recipe website homepage
self.browser.get(self.server_url)
# He notices the page title mention cookbook
self.assertIn('cookbook', self.browser.title)
# He is invited to enter his name to create his own cookbook or
# view other user's cookbook's
# Ben wants to create his own right now, so he enters his name
# and then clicks the 'get started button'
# TODO -- duplication here. consider refactoring if there is a third instance
username_input = self.browser.find_element_by_id('id_username')
username_input.send_keys('ben')
username_input.send_keys(Keys.ENTER)
# Ben goes to a unique URL which includes his name
ben_url = self.browser.current_url
self.assertRegex(ben_url, '/users/ben.+')
# He is invited to click on a link to add a new recipe
add_recipe_button = self.browser.find_element_by_id('id_add_recipe_button')
self.assertIn('Add recipe', add_recipe_button.text)
# He clicks on the link and new page appears
add_recipe_button.click()
# When he adds a new recipe, he is taken to a new URL
self.assertRegex(self.browser.current_url, '/users/.*/add_recipe')
# He sees a form with a textbox for name, ingredients, directions and servings
# along with a 'cancel' and 'add' button
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('Add Recipe', header_text)
name_textbox = self.browser.find_element_by_id('id_title')
self.assertEqual(name_textbox.get_attribute('placeholder'),
'Enter the title of the recipe')
ingredients_textbox = self.browser.find_element_by_id('id_ingredients')
directions_textbox = self.browser.find_element_by_id('id_directions')
servings_textbox = self.browser.find_element_by_id('id_servings')
add_button = self.browser.find_element_by_id('id_add_button')
# He types in Grilled Halibut with Mango-Avocado Salsa into the textbox for name
name_textbox.send_keys('Grilled Halibut with Mango-Avocado Salsa')
# He types in ingredients:
ingredients_textbox.send_keys('1 medium ripe avocado, peeled and cut into 1/2" dice')
ingredients_textbox.send_keys(Keys.ENTER)
ingredients_textbox.send_keys('1 medium ripe mango, peeled and cut into 1/2" dice')
ingredients_textbox.send_keys(Keys.ENTER)
ingredients_textbox.send_keys('1 cup cherry tomatoes, quartered')
ingredients_textbox.send_keys(Keys.ENTER)
ingredients_textbox.send_keys('4 large fresh basil leaves, thinly sliced')
ingredients_textbox.send_keys(Keys.ENTER)
ingredients_textbox.send_keys('3 tablespoons extra-virgin olive oil, divided, plus more for brushing')
ingredients_textbox.send_keys(Keys.ENTER)
ingredients_textbox.send_keys('3 tablespoons fresh lime juice, divided')
ingredients_textbox.send_keys(Keys.ENTER)
ingredients_textbox.send_keys('Kosher salt and freshly ground black pepper')
ingredients_textbox.send_keys(Keys.ENTER)
ingredients_textbox.send_keys('4 6-ounce halibut or mahi-mahi fillets')
ingredients_textbox.send_keys(Keys.ENTER)
ingredients_textbox.send_keys('4 lime wedges')
# He then types in the following for directions:
directions_textbox.send_keys('Prepare a grill to medium-high heat. Gently combine the avocado, mango, '
'tomatoes, basil, 1 tablespoon oil, and 1 tablespoon lime juice in a large mixing '
'bowl. Season salsa to taste with salt and pepper and set aside at room '
'temperature, gently tossing occasionally.')
directions_textbox.send_keys(Keys.ENTER)
directions_textbox.send_keys('Place fish fillets in a 13x9x2" glass baking dish. Drizzle remaining 2 '
'tablespoon oil and 2 tablespoon lime juice over. Season fish with salt and '
'pepper. Let marinate at room temperature for 10 minutes, turning fish '
'occasionally.')
directions_textbox.send_keys(Keys.ENTER)
directions_textbox.send_keys('Brush grill rack with oil. Grill fish until just opaque in center, about 5 '
'minutes per side. Transfer to plates. Spoon mango-avocado salsa over fish. '
'Squeeze a lime wedge over each and serve.')
# He then types in the servings
servings_textbox.send_keys('7')
# Finally, he clicks the add button
add_button.click()
# He is returned to the main page
# He sees that the recipe appears in the list of recipes
self.check_for_row_in_list_table('Grilled Halibut with Mango-Avocado Salsa')
# Ben then clicks on a recipe to get the full info
recipe_link = self.browser.find_element_by_link_text('Grilled Halibut with Mango-Avocado Salsa')
recipe_link.click()
# He is taken to a new page which has the title in the url
        self.assertRegex(self.browser.current_url, r'/users/(\S+)/recipe/grilled-halibut-with-mango-avocado-salsa')
# The new page lists all of the ingredients and directions
page_text = self.browser.find_element_by_tag_name('body').text
self.assertIn('1 medium ripe avocado, peeled and cut into 1/2" dice', page_text)
self.assertIn('Prepare a grill to medium-high heat. Gently combine the avocado, mango, ', page_text)
# He then remembers that the servings are for 8 people and a chili pepper is needed. He clicks
# on the edit button to start editing
edit_button = self.browser.find_element_by_id('id_edit_button')
self.assertIn('Edit', edit_button.text)
edit_button.click()
# The edit page shows the same text as before
page_text = self.browser.find_element_by_tag_name('body').text
self.assertIn('1 medium ripe avocado, peeled and cut into 1/2" dice', page_text)
self.assertIn('Prepare a grill to medium-high heat. Gently combine the avocado, mango, ', page_text)
# He changes the number of servings from 7 to 8
servings_textbox = self.browser.find_element_by_id('id_servings')
servings_textbox.send_keys(Keys.BACKSPACE)
servings_textbox.send_keys('8')
# He adds chili pepper to the list of ingredients
ingredients_textbox = self.browser.find_element_by_id('id_ingredients')
ingredients_textbox.send_keys(Keys.ENTER)
ingredients_textbox.send_keys('1 chili pepper')
# He adds a note for next time
notes_textbox = self.browser.find_element_by_id('id_notes')
notes_textbox.send_keys("Wasn't that spicy, added a pepper")
# He then clicks the save button
save_button = self.browser.find_element_by_id('id_save_button')
self.assertIn('Save', save_button.text)
save_button.click()
# He is returned to the recipe page
        self.assertRegex(self.browser.current_url, r'/users/(\S+)/recipe/grilled-halibut-with-mango-avocado-salsa')
# He can see his changes reflected on the page
page_text = self.browser.find_element_by_tag_name('body').text
self.assertIn('8', page_text)
self.assertNotIn('7', page_text)
self.assertIn('1 chili pepper', page_text)
self.assertIn('added a pepper', page_text)
#self.fail('Finish the test')
# He changes his mind and cancels
# cancel_button = self.browser.find_element_by_name('id_cancel_button')
#cancel_button.click()
# He is returned to the main page
# The number of recipes is still 1
# table = self.browser.find_element_by_id('id_recipe_table')
# rows = table.find_element_by_tag_name('tr')
#self.assertEqual(len(rows), 1)
| benosment/recipes | functional_tests/test_edit_recipe.py | Python | mit | 8,273 | 0.00411 |
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.dual import eig
from scipy.special import comb
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'morlet2', 'cwt']
def daub(p):
"""
The coefficients for the FIR low-pass filter producing Daubechies wavelets.
p>=1 gives the order of the zero at f=1/2.
There are 2p filter coefficients.
Parameters
----------
p : int
Order of the zero at f=1/2, can have values from 1 to 34.
Returns
-------
daub : ndarray
        Return the low-pass filter coefficients (length ``2*p``).
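    Examples
    --------
    A minimal, illustrative check of the filter length:
    >>> from scipy.signal import daub
    >>> len(daub(2))
    4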
"""
sqrt = np.sqrt
if p < 1:
raise ValueError("p must be at least 1.")
if p == 1:
c = 1 / sqrt(2)
return np.array([c, c])
elif p == 2:
f = sqrt(2) / 8
c = sqrt(3)
return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
elif p == 3:
tmp = 12 * sqrt(10)
z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
z1c = np.conj(z1)
f = sqrt(2) / 8
d0 = np.real((1 - z1) * (1 - z1c))
a0 = np.real(z1 * z1c)
a1 = 2 * np.real(z1)
return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
a0 - 3 * a1 + 3, 3 - a1, 1])
elif p < 35:
# construct polynomial and factor it
if p < 35:
P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
yj = np.roots(P)
else: # try different polynomial --- needs work
P = [comb(p - 1 + k, k, exact=1) / 4.0**k
for k in range(p)][::-1]
yj = np.roots(P) / 4
# for each root, compute two z roots, select the one with |z|>1
# Build up final polynomial
c = np.poly1d([1, 1])**p
q = np.poly1d([1])
for k in range(p - 1):
yval = yj[k]
part = 2 * sqrt(yval * (yval - 1))
const = 1 - 2 * yval
z1 = const + part
if (abs(z1)) < 1:
z1 = const - part
q = q * [1, -z1]
q = c * np.real(q)
# Normalize result
q = q / np.sum(q) * sqrt(2)
return q.c[::-1]
else:
raise ValueError("Polynomial factorization does not work "
"well for p too large.")
def qmf(hk):
"""
Return high-pass qmf filter from low-pass
Parameters
----------
hk : array_like
        Coefficients of the low-pass filter.
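    Returns
    -------
    array_like
        High-pass filter coefficients.
    Examples
    --------
    A minimal, illustrative example:
    >>> from scipy.signal import qmf
    >>> list(qmf([1., 1.]))
    [1.0, -1.0]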
"""
N = len(hk) - 1
asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
return hk[::-1] * np.array(asgn)
def cascade(hk, J=7):
"""
Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
Parameters
----------
hk : array_like
Coefficients of low-pass filter.
J : int, optional
Values will be computed at grid points ``K/2**J``. Default is 7.
Returns
-------
x : ndarray
The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
``len(hk) = len(gk) = N+1``.
phi : ndarray
The scaling function ``phi(x)`` at `x`:
``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
psi : ndarray, optional
The wavelet function ``psi(x)`` at `x`:
``phi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
`psi` is only returned if `gk` is not None.
Notes
-----
The algorithm uses the vector cascade algorithm described by Strang and
Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
and slices for quick reuse. Then inserts vectors into final vector at the
end.
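    Examples
    --------
    A small, illustrative sanity check of the returned array lengths:
    >>> from scipy.signal import daub, cascade
    >>> x, phi, psi = cascade(daub(2), J=5)
    >>> len(x) == len(phi) == len(psi) == 96
    True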
"""
N = len(hk) - 1
if (J > 30 - np.log2(N + 1)):
raise ValueError("Too many levels.")
if (J < 1):
raise ValueError("Too few levels.")
# construct matrices needed
nn, kk = np.ogrid[:N, :N]
s2 = np.sqrt(2)
# append a zero so that take works
thk = np.r_[hk, 0]
gk = qmf(hk)
tgk = np.r_[gk, 0]
indx1 = np.clip(2 * nn - kk, -1, N + 1)
indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
m = np.zeros((2, 2, N, N), 'd')
m[0, 0] = np.take(thk, indx1, 0)
m[0, 1] = np.take(thk, indx2, 0)
m[1, 0] = np.take(tgk, indx1, 0)
m[1, 1] = np.take(tgk, indx2, 0)
m *= s2
# construct the grid of points
x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
phi = 0 * x
psi = 0 * x
# find phi0, and phi1
lam, v = eig(m[0, 0])
ind = np.argmin(np.absolute(lam - 1))
# a dictionary with a binary representation of the
# evaluation points x < 1 -- i.e. position is 0.xxxx
v = np.real(v[:, ind])
# need scaling function to integrate to 1 so find
# eigenvector normalized to sum(v,axis=0)=1
sm = np.sum(v)
if sm < 0: # need scaling function to integrate to 1
v = -v
sm = -sm
bitdic = {'0': v / sm}
bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
step = 1 << J
phi[::step] = bitdic['0']
phi[(1 << (J - 1))::step] = bitdic['1']
psi[::step] = np.dot(m[1, 0], bitdic['0'])
psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
# descend down the levels inserting more and more values
# into bitdic -- store the values in the correct location once we
# have computed them -- stored in the dictionary
# for quicker use later.
prevkeys = ['1']
for level in range(2, J + 1):
newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
fac = 1 << (J - level)
for key in newkeys:
# convert key to number
num = 0
for pos in range(level):
if key[pos] == '1':
num += (1 << (level - 1 - pos))
pastphi = bitdic[key[1:]]
ii = int(key[0])
temp = np.dot(m[0, ii], pastphi)
bitdic[key] = temp
phi[num * fac::step] = temp
psi[num * fac::step] = np.dot(m[1, ii], pastphi)
prevkeys = newkeys
return x, phi, psi
def morlet(M, w=5.0, s=1.0, complete=True):
"""
Complex Morlet wavelet.
Parameters
----------
M : int
Length of the wavelet.
w : float, optional
Omega0. Default is 5
s : float, optional
Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
complete : bool, optional
Whether to use the complete or the standard version.
Returns
-------
morlet : (M,) ndarray
See Also
--------
morlet2 : Implementation of Morlet wavelet, compatible with `cwt`.
scipy.signal.gausspulse
Notes
-----
The standard version::
pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
This commonly used wavelet is often referred to simply as the
Morlet wavelet. Note that this simplified version can cause
admissibility problems at low values of `w`.
The complete version::
pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
This version has a correction
term to improve admissibility. For `w` greater than 5, the
correction term is negligible.
Note that the energy of the return wavelet is not normalised
according to `s`.
The fundamental frequency of this wavelet in Hz is given
by ``f = 2*s*w*r / M`` where `r` is the sampling rate.
Note: This function was created before `cwt` and is not compatible
with it.
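    Examples
    --------
    A minimal, illustrative call:
    >>> from scipy import signal
    >>> wavelet = signal.morlet(100, w=5.0, s=1.0, complete=True)
    >>> wavelet.shape
    (100,)
    >>> wavelet.dtype
    dtype('complex128')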
"""
x = np.linspace(-s * 2 * np.pi, s * 2 * np.pi, M)
output = np.exp(1j * w * x)
if complete:
output -= np.exp(-0.5 * (w**2))
output *= np.exp(-0.5 * (x**2)) * np.pi**(-0.25)
return output
def ricker(points, a):
"""
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
It models the function:
``A * (1 - (x/a)**2) * exp(-0.5*(x/a)**2)``,
where ``A = 2/(sqrt(3*a)*(pi**0.25))``.
Parameters
----------
points : int
Number of points in `vector`.
Will be centered around 0.
a : scalar
Width parameter of the wavelet.
Returns
-------
vector : (N,) ndarray
Array of length `points` in shape of ricker curve.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> points = 100
>>> a = 4.0
>>> vec2 = signal.ricker(points, a)
>>> print(len(vec2))
100
>>> plt.plot(vec2)
>>> plt.show()
"""
A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
wsq = a**2
vec = np.arange(0, points) - (points - 1.0) / 2
xsq = vec**2
mod = (1 - xsq / wsq)
gauss = np.exp(-xsq / (2 * wsq))
total = A * mod * gauss
return total
def morlet2(M, s, w=5):
"""
Complex Morlet wavelet, designed to work with `cwt`.
Returns the complete version of morlet wavelet, normalised
according to `s`::
exp(1j*w*x/s) * exp(-0.5*(x/s)**2) * pi**(-0.25) * sqrt(1/s)
Parameters
----------
M : int
Length of the wavelet.
s : float
Width parameter of the wavelet.
w : float, optional
Omega0. Default is 5
Returns
-------
morlet : (M,) ndarray
See Also
--------
morlet : Implementation of Morlet wavelet, incompatible with `cwt`
Notes
-----
.. versionadded:: 1.4.0
This function was designed to work with `cwt`. Because `morlet2`
returns an array of complex numbers, the `dtype` argument of `cwt`
should be set to `complex128` for best results.
Note the difference in implementation with `morlet`.
The fundamental frequency of this wavelet in Hz is given by::
f = w*fs / (2*s*np.pi)
where ``fs`` is the sampling rate and `s` is the wavelet width parameter.
Similarly we can get the wavelet width parameter at ``f``::
s = w*fs / (2*f*np.pi)
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> M = 100
>>> s = 4.0
>>> w = 2.0
>>> wavelet = signal.morlet2(M, s, w)
>>> plt.plot(abs(wavelet))
>>> plt.show()
This example shows basic use of `morlet2` with `cwt` in time-frequency
analysis:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t, dt = np.linspace(0, 1, 200, retstep=True)
>>> fs = 1/dt
>>> w = 6.
>>> sig = np.cos(2*np.pi*(50 + 10*t)*t) + np.sin(40*np.pi*t)
>>> freq = np.linspace(1, fs/2, 100)
>>> widths = w*fs / (2*freq*np.pi)
>>> cwtm = signal.cwt(sig, signal.morlet2, widths, w=w)
>>> plt.pcolormesh(t, freq, np.abs(cwtm), cmap='viridis')
>>> plt.show()
"""
x = np.arange(0, M) - (M - 1.0) / 2
x = x / s
wavelet = np.exp(1j * w * x) * np.exp(-0.5 * x**2) * np.pi**(-0.25)
output = np.sqrt(1/s) * wavelet
return output
def cwt(data, wavelet, widths, dtype=None, **kwargs):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter. The `wavelet` function
is allowed to be complex.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(length,width)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
dtype : data-type, optional
The desired data type of output. Defaults to ``float64`` if the
output of `wavelet` is real and ``complex128`` if it is complex.
.. versionadded:: 1.4.0
kwargs
Keyword arguments passed to wavelet function.
.. versionadded:: 1.4.0
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(widths), len(data)).
Notes
-----
.. versionadded:: 1.4.0
For non-symmetric, complex-valued wavelets, the input signal is convolved
with the time-reversed complex-conjugate of the wavelet data [1].
::
length = min(10 * width[ii], len(data))
cwt[ii,:] = signal.convolve(data, np.conj(wavelet(length, width[ii],
**kwargs))[::-1], mode='same')
References
----------
.. [1] S. Mallat, "A Wavelet Tour of Signal Processing (3rd Edition)",
Academic Press, 2009.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 200, endpoint=False)
>>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
>>> widths = np.arange(1, 31)
>>> cwtmatr = signal.cwt(sig, signal.ricker, widths)
>>> plt.imshow(cwtmatr, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto',
... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
>>> plt.show()
"""
# Determine output type
if dtype is None:
if np.asarray(wavelet(1, widths[0], **kwargs)).dtype.char in 'FDG':
dtype = np.complex128
else:
dtype = np.float64
output = np.zeros((len(widths), len(data)), dtype=dtype)
for ind, width in enumerate(widths):
N = np.min([10 * width, len(data)])
wavelet_data = np.conj(wavelet(N, width, **kwargs)[::-1])
output[ind] = convolve(data, wavelet_data, mode='same')
return output
| jamestwebber/scipy | scipy/signal/wavelets.py | Python | bsd-3-clause | 13,701 | 0.000292 |
# Copyright 2017 The Vispek Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==========================================================================
""" Example code about how to run raw_file_io
python3 -m vispek.examples.run_raw_file_io \
--in_path /Users/huaminli/Downloads/data \
--out_path /Users/huaminli/Desktop/vispek/data
"""
import argparse
from vispek.lib.io.raw_file_io import RawFileIO
def run_file_io(args):
my_file_io = RawFileIO(args.in_path, args.out_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='Example code about how to run raw_file_io')
parser.add_argument(
'--in_path', type=str,
        help='absolute path to the directory that contains the raw csv files')
parser.add_argument(
'--out_path', type=str,
        help='absolute path to the directory that contains the ' +
        'preprocessed files')
args = parser.parse_args()
print(args.in_path)
print(args.out_path)
run_file_io(args)
| hl475/vispek | examples/run_raw_file_io.py | Python | apache-2.0 | 1,604 | 0.003117 |
import sys
import platform
import twisted
import scrapy
from scrapy.command import ScrapyCommand
class Command(ScrapyCommand):
def syntax(self):
return "[-v]"
def short_desc(self):
return "Print Scrapy version"
def add_options(self, parser):
ScrapyCommand.add_options(self, parser)
parser.add_option("--verbose", "-v", dest="verbose", action="store_true",
help="also display twisted/python/platform info (useful for bug reports)")
def run(self, args, opts):
if opts.verbose:
print "Scrapy : %s" % scrapy.__version__
print "Twisted : %s" % twisted.version.short()
print "Python : %s" % sys.version.replace("\n", "- ")
print "Platform: %s" % platform.platform()
else:
print "Scrapy %s" % scrapy.__version__
| openhatch/oh-mainline | vendor/packages/scrapy/scrapy/commands/version.py | Python | agpl-3.0 | 850 | 0.004706 |
# This code can be put in any Python module, it does not require IPython
# itself to be running already. It only creates the magics subclass but
# doesn't instantiate it yet.
# from __future__ import print_function
import json
from IPython.core.magic import (Magics, magics_class, line_magic,
cell_magic, line_cell_magic)
from IPython.display import HTML, display
# The class MUST call this class decorator at creation time
@magics_class
class WebpplMagics(Magics):
def __init__(self, **kwargs):
super(WebpplMagics, self).__init__(**kwargs)
@line_magic
def lmagic(self, line):
"my line magic"
print("Full access to the main IPython object:", self.shell)
print("Variables in the user namespace:", list(self.shell.user_ns.keys()))
return line
@cell_magic
def webppl(self, line, cell):
"my cell magic"
code = json.dumps(cell)
store = json.dumps(self.shell.user_ns['store'])
h = """
<script>
requirejs.config({
paths: {
webppl: "//cdn.webppl.org/webppl-v0.9.1"
}
});
require(['webppl'], function(webppl) {
window.webppl = webppl;
});
</script>
<script>
const code = JSON.parse('""" + code + """');
const initialStore = JSON.parse('""" + store + """');
var result;
webppl.run(code, function(s,x) {result = x},
{initialStore: initialStore});
        // "return" is a reserved word in JavaScript, so use a plain variable name.
        let resultJson = JSON.stringify(result);
        IPython.notebook.kernel.execute("result='" + resultJson + "'");
result
</script>
"""
display(HTML(h))
@line_cell_magic
def lcmagic(self, line, cell=None):
"Magic that works both as %lcmagic and as %%lcmagic"
if cell is None:
print("Called as line magic")
return line
else:
print("Called as cell magic")
return line, cell
def load_ipython_extension(ipython):
ip = ipython
# ip = get_ipython()
ip.register_magics(WebpplMagics)
if __name__ == "__main__":
load_ipython_extension(get_ipython())
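# Example notebook usage (a sketch; it assumes the module is importable as
# ``jupyter_webppl`` and that a dict named ``store`` exists in the user
# namespace, since the %%webppl magic serializes it as the initial store):
#   %load_ext jupyter_webppl
#   store = {"coin_weight": 0.5}
#   %%webppl
#   flip(globalStore.coin_weight)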
| tbenst/jupyter_webppl | jupyter_webppl/jupyter_webppl.py | Python | gpl-3.0 | 2,273 | 0.00132 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ImageViewTemplate.ui'
#
# Created: Thu May 1 15:20:40 2014
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtWidgets.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtWidgets.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(726, 588)
self.gridLayout_3 = QtWidgets.QGridLayout(Form)
        self.gridLayout_3.setContentsMargins(0,0,0,0)  # Qt5 equivalent of the Qt4-only setMargin(0)
self.gridLayout_3.setSpacing(0)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.splitter = QtWidgets.QSplitter(Form)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.layoutWidget = QtWidgets.QWidget(self.splitter)
self.layoutWidget.setObjectName(_fromUtf8("layoutWidget"))
self.gridLayout = QtWidgets.QGridLayout(self.layoutWidget)
self.gridLayout.setSpacing(0)
        self.gridLayout.setContentsMargins(0,0,0,0)  # Qt5 equivalent of the Qt4-only setMargin(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.graphicsView = GraphicsView(self.layoutWidget)
self.graphicsView.setObjectName(_fromUtf8("graphicsView"))
self.gridLayout.addWidget(self.graphicsView, 0, 0, 2, 1)
self.histogram = HistogramLUTWidget(self.layoutWidget)
self.histogram.setObjectName(_fromUtf8("histogram"))
self.gridLayout.addWidget(self.histogram, 0, 1, 1, 2)
self.roiBtn = QtWidgets.QPushButton(self.layoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.roiBtn.sizePolicy().hasHeightForWidth())
self.roiBtn.setSizePolicy(sizePolicy)
self.roiBtn.setCheckable(True)
self.roiBtn.setObjectName(_fromUtf8("roiBtn"))
self.gridLayout.addWidget(self.roiBtn, 1, 1, 1, 1)
self.menuBtn = QtWidgets.QPushButton(self.layoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(1)
sizePolicy.setHeightForWidth(self.menuBtn.sizePolicy().hasHeightForWidth())
self.menuBtn.setSizePolicy(sizePolicy)
self.menuBtn.setObjectName(_fromUtf8("menuBtn"))
self.gridLayout.addWidget(self.menuBtn, 1, 2, 1, 1)
self.roiPlot = PlotWidget(self.splitter)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.roiPlot.sizePolicy().hasHeightForWidth())
self.roiPlot.setSizePolicy(sizePolicy)
self.roiPlot.setMinimumSize(QtCore.QSize(0, 40))
self.roiPlot.setObjectName(_fromUtf8("roiPlot"))
self.gridLayout_3.addWidget(self.splitter, 0, 0, 1, 1)
self.normGroup = QtWidgets.QGroupBox(Form)
self.normGroup.setObjectName(_fromUtf8("normGroup"))
self.gridLayout_2 = QtWidgets.QGridLayout(self.normGroup)
self.gridLayout_2.setContentsMargins(0,0,0,0)
self.gridLayout_2.setSpacing(0)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.normSubtractRadio = QtWidgets.QRadioButton(self.normGroup)
self.normSubtractRadio.setObjectName(_fromUtf8("normSubtractRadio"))
self.gridLayout_2.addWidget(self.normSubtractRadio, 0, 2, 1, 1)
self.normDivideRadio = QtWidgets.QRadioButton(self.normGroup)
self.normDivideRadio.setChecked(False)
self.normDivideRadio.setObjectName(_fromUtf8("normDivideRadio"))
self.gridLayout_2.addWidget(self.normDivideRadio, 0, 1, 1, 1)
self.label_5 = QtWidgets.QLabel(self.normGroup)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_5.setFont(font)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout_2.addWidget(self.label_5, 0, 0, 1, 1)
self.label_3 = QtWidgets.QLabel(self.normGroup)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout_2.addWidget(self.label_3, 1, 0, 1, 1)
self.label_4 = QtWidgets.QLabel(self.normGroup)
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.label_4.setFont(font)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout_2.addWidget(self.label_4, 2, 0, 1, 1)
self.normROICheck = QtWidgets.QCheckBox(self.normGroup)
self.normROICheck.setObjectName(_fromUtf8("normROICheck"))
self.gridLayout_2.addWidget(self.normROICheck, 1, 1, 1, 1)
self.normXBlurSpin = QtWidgets.QDoubleSpinBox(self.normGroup)
self.normXBlurSpin.setObjectName(_fromUtf8("normXBlurSpin"))
self.gridLayout_2.addWidget(self.normXBlurSpin, 2, 2, 1, 1)
self.label_8 = QtWidgets.QLabel(self.normGroup)
self.label_8.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout_2.addWidget(self.label_8, 2, 1, 1, 1)
self.label_9 = QtWidgets.QLabel(self.normGroup)
self.label_9.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gridLayout_2.addWidget(self.label_9, 2, 3, 1, 1)
self.normYBlurSpin = QtWidgets.QDoubleSpinBox(self.normGroup)
self.normYBlurSpin.setObjectName(_fromUtf8("normYBlurSpin"))
self.gridLayout_2.addWidget(self.normYBlurSpin, 2, 4, 1, 1)
self.label_10 = QtWidgets.QLabel(self.normGroup)
self.label_10.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.gridLayout_2.addWidget(self.label_10, 2, 5, 1, 1)
self.normOffRadio = QtWidgets.QRadioButton(self.normGroup)
self.normOffRadio.setChecked(True)
self.normOffRadio.setObjectName(_fromUtf8("normOffRadio"))
self.gridLayout_2.addWidget(self.normOffRadio, 0, 3, 1, 1)
self.normTimeRangeCheck = QtWidgets.QCheckBox(self.normGroup)
self.normTimeRangeCheck.setObjectName(_fromUtf8("normTimeRangeCheck"))
self.gridLayout_2.addWidget(self.normTimeRangeCheck, 1, 3, 1, 1)
self.normFrameCheck = QtWidgets.QCheckBox(self.normGroup)
self.normFrameCheck.setObjectName(_fromUtf8("normFrameCheck"))
self.gridLayout_2.addWidget(self.normFrameCheck, 1, 2, 1, 1)
self.normTBlurSpin = QtWidgets.QDoubleSpinBox(self.normGroup)
self.normTBlurSpin.setObjectName(_fromUtf8("normTBlurSpin"))
self.gridLayout_2.addWidget(self.normTBlurSpin, 2, 6, 1, 1)
self.gridLayout_3.addWidget(self.normGroup, 1, 0, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.roiBtn.setText(_translate("Form", "ROI", None))
self.menuBtn.setText(_translate("Form", "Menu", None))
self.normGroup.setTitle(_translate("Form", "Normalization", None))
self.normSubtractRadio.setText(_translate("Form", "Subtract", None))
self.normDivideRadio.setText(_translate("Form", "Divide", None))
self.label_5.setText(_translate("Form", "Operation:", None))
self.label_3.setText(_translate("Form", "Mean:", None))
self.label_4.setText(_translate("Form", "Blur:", None))
self.normROICheck.setText(_translate("Form", "ROI", None))
self.label_8.setText(_translate("Form", "X", None))
self.label_9.setText(_translate("Form", "Y", None))
self.label_10.setText(_translate("Form", "T", None))
self.normOffRadio.setText(_translate("Form", "Off", None))
self.normTimeRangeCheck.setText(_translate("Form", "Time range", None))
self.normFrameCheck.setText(_translate("Form", "Frame", None))
from ..widgets.HistogramLUTWidget import HistogramLUTWidget
from ..widgets.GraphicsView import GraphicsView
from ..widgets.PlotWidget import PlotWidget
| mylxiaoyi/mypyqtgraph-qt5 | pyqtgraph/imageview/ImageViewTemplate_pyqt.py | Python | mit | 8,950 | 0.003017 |
import sqlite3
def cursor():
global conn
return conn.cursor()
def commit():
global conn
conn.commit()
def insert(table, data):
global conn
c = conn.cursor()
keys = [*data]
template_list = ','.join(['?'] * len(data))
query = "INSERT INTO {} ({}) VALUES ({})".format(table, ','.join(keys), template_list)
c.execute(query, tuple(data[k] for k in keys))
conn.commit()
def start():
global conn
c = conn.cursor()
c.execute("CREATE TABLE IF NOT EXISTS quotes (content TEXT)")
c.execute("CREATE TABLE IF NOT EXISTS alerts (target TEXT, time INTEGER, message TEXT)")
conn.commit()
conn = sqlite3.connect('persist.db')
start()
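# Example usage (illustrative sketch; the table and column names follow the
# schema created in start() above):
if __name__ == '__main__':
    insert('quotes', {'content': 'An example quote used for demonstration.'})
    for row in cursor().execute("SELECT content FROM quotes"):
        print(row[0])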
| flukiluke/eris | db.py | Python | mit | 686 | 0.010204 |
""" This module should trigger a linting error for camelcase function name. """
def camelCaseFunc():
""" This function has a bad name. """
| thusoy/grunt-pylint | test/fixtures/test_package/camelcasefunc.py | Python | mit | 144 | 0.006944 |
from keyman.interface import app
| sahabi/keyman | main.py | Python | mit | 33 | 0 |
import unittest
from openerp.tools import misc
class test_countingstream(unittest.TestCase):
def test_empty_stream(self):
s = misc.CountingStream(iter([]))
self.assertEqual(s.index, -1)
self.assertIsNone(next(s, None))
self.assertEqual(s.index, 0)
def test_single(self):
s = misc.CountingStream(xrange(1))
self.assertEqual(s.index, -1)
self.assertEqual(next(s, None), 0)
self.assertIsNone(next(s, None))
self.assertEqual(s.index, 1)
def test_full(self):
s = misc.CountingStream(xrange(42))
for _ in s:
pass
self.assertEqual(s.index, 42)
def test_repeated(self):
""" Once the CountingStream has stopped iterating, the index should not
increase anymore (the internal state should not be allowed to change)
"""
s = misc.CountingStream(iter([]))
self.assertIsNone(next(s, None))
self.assertEqual(s.index, 0)
self.assertIsNone(next(s, None))
self.assertEqual(s.index, 0)
if __name__ == '__main__':
unittest.main()
| vileopratama/vitech | src/openerp/addons/base/tests/test_misc.py | Python | mit | 1,108 | 0.000903 |
import sys
import os
import unittest
sys.path.append(os.path.abspath("."))
import pymzml
from pymzml.spec import PROTON
import pymzml.run as run
import test_file_paths
import numpy as np
class SpectrumMS2Test(unittest.TestCase):
"""
BSA test file
Peptide @
Scan: 2548
RT [min] 28.96722412109367
Selected_precursor [(443.711242675781, 0.0)]
"""
def setUp(self):
"""
"""
# self.paths = [
# os.path.join( DATA_FOLDER, file ) for file in DATA_FILES]
self.paths = test_file_paths.paths
path = self.paths[9]
self.Run = run.Reader(path)
self.spec = self.Run[2548]
def test_scan_time(self):
scan_time = self.spec.scan_time_in_minutes()
self.assertIsNotNone(scan_time)
self.assertIsInstance(scan_time, float)
self.assertEqual(round(scan_time, 4), round(28.96722412109367, 4))
def test_select_precursors(self):
selected_precursor = self.spec.selected_precursors
self.assertIsInstance(selected_precursor[0], dict)
self.assertIsInstance(selected_precursor[0]["mz"], float)
self.assertIsInstance(selected_precursor[0]["i"], float)
self.assertIsInstance(selected_precursor[0]["charge"], int)
self.assertEqual(
selected_precursor, [{"mz": 443.711242675781, "i": 0.0, "charge": 2}]
)
@unittest.skipIf(pymzml.spec.DECON_DEP is False, "ms_deisotope was not installed")
def test_deconvolute_peaks(self):
charge = 3
test_mz = 430.313
arr = np.array([(test_mz, 100), (test_mz + PROTON / charge, 49)])
spec = self.Run[2548]
spec.set_peaks(arr, "centroided")
decon = spec.peaks("deconvoluted")
self.assertEqual(len(decon), 1)
decon_mz = (test_mz * charge) - charge * PROTON
self.assertEqual(decon[0][0], decon_mz)
        self.assertEqual(decon[0][1], 149) # 149 since intensities are 100 and 49
self.assertEqual(decon[0][2], 3)
def test_remove_precursor_peak(self):
test_mz = 443.71124268 # precursor peak
self.spec.set_peaks(np.array([(test_mz, 200)]), "centroided")
self.spec.set_peaks(np.array([(test_mz, 200)]), "raw")
assert self.spec.has_peak(test_mz)
self.spec._transformed_mz_with_error = None
new_peaks = self.spec.remove_precursor_peak()
found_peaks = self.spec.has_peak(test_mz)
assert len(found_peaks) == 0
if __name__ == "__main__":
unittest.main(verbosity=3)
| StSchulze/pymzML | tests/ms2_spec_test.py | Python | mit | 2,583 | 0.003097 |
# -*- coding: utf-8 -*-
"""Base stuff for providers that handle filesystem directories."""
from xal.provider import ResourceProvider
from xal.dir.resource import Dir
class DirProvider(ResourceProvider):
"""Base class for filesystem directories."""
def __init__(self, resource_factory=Dir):
super(DirProvider, self).__init__(resource_factory=resource_factory)
@property
def home(self):
raise NotImplementedError()
@property
def sep(self):
if self.xal_session.sys.is_posix:
return '/'
elif self.xal_session.sys.is_windows:
return '\\'
def join(self, *args):
        # args is a tuple, which does not support item assignment; work on a list copy.
        modified_args = list(args)
        for key, value in enumerate(modified_args):
            modified_args[key] = value.strip(self.sep)
        return self.sep.join(modified_args)
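    # Example (illustrative): on a POSIX session, join('usr', 'local/', '/bin')
    # would return 'usr/local/bin', because each component is stripped of
    # separator characters before the parts are joined.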
def abspath(self, path):
raise NotImplementedError()
| benoitbryon/xal | xal/dir/provider.py | Python | bsd-3-clause | 895 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualHubsOperations(object):
"""VirtualHubsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualHub"
"""Retrieves the details of a VirtualHub.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualHub, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_11_01.models.VirtualHub
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
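    # Usage sketch (assumed client wiring, not part of this generated code):
    # a NetworkManagementClient created for api-version 2019-11-01 exposes this
    # operation group as ``client.virtual_hubs``, so a typical call looks like
    #   hub = client.virtual_hubs.get("my-resource-group", "my-hub")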
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
virtual_hub_parameters, # type: "_models.VirtualHub"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualHub"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(virtual_hub_parameters, 'VirtualHub')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualHub', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
virtual_hub_parameters, # type: "_models.VirtualHub"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualHub"]
"""Creates a VirtualHub resource if it doesn't exist else updates the existing VirtualHub.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param virtual_hub_parameters: Parameters supplied to create or update VirtualHub.
:type virtual_hub_parameters: ~azure.mgmt.network.v2019_11_01.models.VirtualHub
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualHub or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_11_01.models.VirtualHub]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
virtual_hub_parameters=virtual_hub_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
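    # Usage sketch (not part of the generated class; `network_client` and
    # `hub_params` are placeholders): callers normally drive the returned
    # LROPoller rather than calling _create_or_update_initial directly.
    #
    #     poller = network_client.virtual_hubs.begin_create_or_update(
    #         resource_group_name="my-rg",
    #         virtual_hub_name="my-hub",
    #         virtual_hub_parameters=hub_params,
    #     )
    #     hub = poller.result()  # blocks until the LRO reaches a terminal state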
def update_tags(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
virtual_hub_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualHub"
"""Updates VirtualHub tags.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param virtual_hub_parameters: Parameters supplied to update VirtualHub tags.
:type virtual_hub_parameters: ~azure.mgmt.network.v2019_11_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualHub, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_11_01.models.VirtualHub
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(virtual_hub_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a VirtualHub.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVirtualHubsResult"]
"""Lists all the VirtualHubs in a resource group.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVirtualHubsResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_11_01.models.ListVirtualHubsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVirtualHubsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVirtualHubsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVirtualHubsResult"]
"""Lists all the VirtualHubs in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVirtualHubsResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_11_01.models.ListVirtualHubsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVirtualHubsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVirtualHubsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualHubs'} # type: ignore
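    # Usage sketch (placeholders, not generated code): both list operations
    # return an ItemPaged iterator, so a caller simply iterates and the pager
    # follows the service's nextLink transparently.
    #
    #     for hub in network_client.virtual_hubs.list_by_resource_group("my-rg"):
    #         print(hub.name)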
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_11_01/operations/_virtual_hubs_operations.py | Python | mit | 26,953 | 0.004786 |
from django.conf import settings
from . import defaults
__title__ = 'fobi.contrib.plugins.form_elements.fields.' \
'select_multiple_with_max.conf'
__author__ = 'Artur Barseghyan <artur.barseghyan@gmail.com>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('get_setting',)
def get_setting(setting, override=None):
"""Get setting.
Get a setting from
`fobi.contrib.plugins.form_elements.fields.select_multiple_with_max`
conf module, falling back to the default.
If override is not None, it will be used instead of the setting.
:param setting: String with setting name
:param override: Value to use when no setting is available. Defaults
to None.
:return: Setting value.
"""
if override is not None:
return override
if hasattr(
settings,
'FOBI_FORM_ELEMENT_SELECT_MULTIPLE_WITH_MAX_{0}'.format(setting)
):
return getattr(
settings,
'FOBI_FORM_ELEMENT_SELECT_MULTIPLE_WITH_MAX_{0}'.format(setting)
)
else:
return getattr(defaults, setting)
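# Usage sketch (illustrative; ``MAX_CHOICES`` is a hypothetical setting name):
# the call below reads ``FOBI_FORM_ELEMENT_SELECT_MULTIPLE_WITH_MAX_MAX_CHOICES``
# from Django settings when it exists and otherwise falls back to ``defaults``.
#
#     max_choices = get_setting('MAX_CHOICES')
#     max_choices = get_setting('MAX_CHOICES', override=5)  # override short-circuits the lookup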
| mansonul/events | events/contrib/plugins/form_elements/fields/select_multiple_with_max/conf.py | Python | mit | 1,127 | 0 |
import os
files = os.listdir(os.path.join('src','effects'))
# Import every effect module in this package so its public names are
# re-exported from the package namespace (dunder files such as __init__.py
# are skipped).
for file in files:
    if file.endswith('.py') and not file.startswith('__'):
        exec('from .%s import *' % (file[:-3]))
| cycladesnz/chambersAndCreatures | src/effects/__init__.py | Python | gpl-2.0 | 175 | 0.005714 |
#!/usr/bin/env python
import wx
from wx.lib.agw import floatspin as fs
import numpy as np
class MyFrame(wx.Frame):
def __init__(self, *args, **kws):
super(self.__class__,self).__init__(*args, **kws)
nb1 = MyNB(self)
self.Show()
class MyFrame1(wx.Frame):
def __init__(self, *args, **kws):
super(self.__class__,self).__init__(*args, **kws)
self.curvalue = 2.3
self.minValue = 0.2
self.maxValue = 9.1
self.incValue = 0.1
self.facValue = 10
self.slider_min = self.minValue*self.facValue
self.slider_max = self.maxValue*self.facValue
self.slider_cur = self.curvalue*self.facValue
self.slider_num = int((self.slider_max - self.slider_min)/(self.incValue*self.facValue) + 1)
self.sliderTicRange = np.linspace(self.slider_min, self.slider_max, self.slider_num)
self.sliderValRange = np.linspace(self.minValue, self.maxValue, self.slider_num)
self.iniUI1()
#self.iniUI()
self.Show()
def iniUI1(self):
self.panel = wx.Panel(self)
self.slider = FloatSlider(self.panel, value = 1.0, minValue = 1, maxValue = 100)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(self.slider, proportion = 0, flag = wx.EXPAND)
self.panel.SetSizer(vbox)
self.Bind(wx.EVT_SLIDER, self.onSlider, self.slider)
def onSlider(self, event):
obj = event.GetEventObject()
print obj.GetValue()
def iniUI(self):
self.panel = wx.Panel(self)
self.panel.SetBackgroundColour((170, 238, 170))
self._slider = wx.Slider(self.panel, value = self.slider_cur,
minValue = self.slider_min, maxValue = self.slider_max,
style = wx.SL_HORIZONTAL)
self._min_label = wx.StaticText(self.panel, label = str(self.minValue))
self._max_label = wx.StaticText(self.panel, label = str(self.maxValue))
self._val_label = wx.StaticText(self.panel, label = str(self.curvalue))
self.hbox_top = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_top.Add(self._val_label, proportion = 0, flag = wx.ALIGN_CENTER)
self.hbox_down = wx.BoxSizer(wx.HORIZONTAL)
self.hbox_down.Add(self._min_label, proportion = 0, flag = wx.EXPAND | wx.ALIGN_CENTRE)
self.hbox_down.Add(self._slider, proportion = 2, flag = wx.EXPAND | wx.ALIGN_CENTRE | wx.LEFT | wx.RIGHT, border = 10)
self.hbox_down.Add(self._max_label, proportion = 0, flag = wx.EXPAND | wx.ALIGN_CENTRE)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.hbox_top, proportion = 0, flag = wx.ALIGN_CENTER | wx.LEFT | wx.RIGHT, border = 10)
self.vbox.Add(self.hbox_down, proportion = 0, flag = wx.EXPAND | wx.ALIGN_CENTER | wx.LEFT | wx.RIGHT, border = 10)
self.panel.SetSizer(self.vbox)
self.Bind(wx.EVT_SLIDER, self.onFSlider, self._slider)
def onFSlider(self, event):
obj = event.GetEventObject()
ticValue = obj.GetValue() - obj.GetMin()
curVal = self.sliderValRange[ticValue]
print ticValue, curVal
self._val_label.SetLabel(str(curVal))
class MyNB(wx.Notebook):
def __init__(self, parent, *args, **kws):
super(self.__class__, self).__init__(parent=parent, style = wx.NB_TOP, *args, **kws)
# panel #1
self.panel1 = MyPanel(self)
self.panel1.SetBackgroundColour(wx.Colour(0, 0, 255))
self.spinctrl = fs.FloatSpin(self.panel1, value = '0.1', min_val = 0.1, max_val = 0.9, digits = 2, increment = 0.01)#, agwStyle = fs.FS_READONLY)
# panel #2
self.panel2 = MyPanel(self)
self.panel2.SetBackgroundColour(wx.Colour(0, 255, 255))
self.btn2 = wx.Button(self.panel2, label = 'choose color')
# panel #3
self.panel3 = MyPanel(self)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
p1 = FloatSlider(self.panel3, value = 1.0, minValue = 0.1, maxValue = 9.1)
print p1.GetSize()
#p1 = wx.Panel(self.panel3)
#p1 = wx.Button(self.panel3, label = 'btn1')
#p1.SetBackgroundColour('red')
p2 = wx.Panel(self.panel3)
#p2 = wx.Button(self.panel3, label = 'btn2')
p2.SetBackgroundColour('blue')
p3 = wx.Panel(self.panel3)
#p3 = wx.Button(self.panel3, label = 'btn3')
p3.SetBackgroundColour('yellow')
p4 = wx.Panel(self.panel3)
#p4 = wx.Button(self.panel3, label = 'btn4')
p4.SetBackgroundColour('green')
hbox1.Add(p1, proportion = 1, flag = wx.EXPAND)
hbox1.Add(p2, proportion = 1, flag = wx.EXPAND)
hbox2.Add(p3, proportion = 1, flag = wx.EXPAND)
hbox2.Add(p4, proportion = 1, flag = wx.EXPAND)
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add(hbox1, proportion = 1, flag = wx.EXPAND)
vbox.Add(hbox2, proportion = 1, flag = wx.EXPAND)
self.panel3.SetSizer(vbox)
# #1 Tab
self.AddPage(self.panel1, 'First Tab')
# #2 Tab
self.AddPage(self.panel2, 'Second Tab')
# #3 Tab
self.AddPage(self.panel3, 'Third Tab')
# events
self.Bind(wx.EVT_BUTTON, self.onChooseColor, self.btn2)
def onChooseColor(self, event):
dlg = wx.ColourDialog(self)
dlg.GetColourData().SetChooseFull(True)
if dlg.ShowModal() == wx.ID_OK:
color = dlg.GetColourData().GetColour()
self.panel2.SetBackgroundColour(color)
print color.GetAsString(wx.C2S_HTML_SYNTAX)
dlg.Destroy()
class MyFrame2(wx.Frame):
def __init__(self, *args, **kws):
super(self.__class__,self).__init__(*args, **kws)
        slide = FloatSlider(self, value=0.2, minValue=0.1, maxValue=1.0, increment=0.01)
self.Show()
class MyPanel(wx.Panel):
def __init__(self, parent, *args, **kws):
super(self.__class__, self).__init__(parent=parent, *args, **kws)
"""
class FloatSlider(wx.Slider):
#def __init__(self, parent, *args, **kws):
# super(self.__class__, self).__init__(parent, *args, **kws)
def GetValue(self):
return float(wx.Slider.GetValue(self))/self.GetMax()
"""
class FloatSlider(wx.Slider):
    def __init__(self, parent, id = wx.ID_ANY, value = 0, minValue = 0, maxValue = 10, increment = 0.1,
size = wx.DefaultSize, style = wx.SL_HORIZONTAL, *args, **kws):
self._value = value
self._min = minValue
self._max = maxValue
self._inc = increment
        ival, imin, imax = [round(v / increment) for v in (value, minValue, maxValue)]
self._islider = super(FloatSlider, self)
self._islider.__init__(parent = parent, value = ival, minValue = imin, maxValue = imax, id = id, size = size, style = style, *args, **kws)
self.Bind(wx.EVT_SCROLL, self._OnScroll, self._islider)
def _OnScroll(self, event):
ival = self._islider.GetValue()
imin = self._islider.GetMin()
imax = self._islider.GetMax()
if ival == imin:
self._value = self._min
elif ival == imax:
self._value = self._max
else:
self._value = ival * self._inc
event.Skip()
print 'OnScroll: value=%f, ival=%d' % (self._value, ival)
def GetValue(self):
return self._value
def GetMin(self):
return self._min
def GetMax(self):
return self._max
def GetInc(self):
return self._inc
def SetValue(self, value):
        self._islider.SetValue(round(value / self._inc))
self._value = value
def SetMin(self, minval):
        self._islider.SetMin(round(minval / self._inc))
self._min = minval
def SetMax(self, maxval):
        self._islider.SetMax(round(maxval / self._inc))
self._max = maxval
def SetInc(self, inc):
self._islider.SetRange(round(self._min/inc), round(self._max/inc))
self._islider.SetValue(round(self._value/inc))
self._inc = inc
def SetRange(self, minval, maxval):
        self._islider.SetRange(round(minval / self._inc), round(maxval / self._inc))
self._min = minval
self._max = maxval
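# Usage sketch (assumes an existing wx.App and parent window): FloatSlider maps
# float values onto the integer ticks of the underlying wx.Slider by dividing
# through `increment`, so GetValue() reports floats even though the native
# control only stores integers.
#
#     slider = FloatSlider(panel, value=0.5, minValue=0.1, maxValue=1.0, increment=0.01)
#     slider.Bind(wx.EVT_SCROLL, lambda evt: on_change(slider.GetValue()))  # on_change is hypothetical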
def main():
app = wx.App()
#myframe = MyFrame(None)
#myframe = MyFrame1(None)
myframe = MyFrame2(None)
app.MainLoop()
if __name__ == '__main__':
main()
| Archman/felapps | tests/test2.py | Python | mit | 8,381 | 0.022193 |
# Programmer: Noah Osterhout
# Date: September 30th 2016 1:40PM EST
# Project: Kirby_Physics.py
#Ask what Problem they will be using
print()
print("This Program will find the missing Variable using the three known ones and PEMDAS")
print()
beetles_mem = input("What Beatles member will you be using? ")
gravity_global = -9.8
if beetles_mem == "John":
    john_time = int(input("What is the Time in seconds? "))
    new_john_time = john_time ** 2
    john_Vi = int(input("What is the Initial Velocity? "))
    #Calculate using John Formula: distance = Vi*t + 1/2*g*t^2
    john_formula = john_Vi * john_time + .5 * gravity_global * new_john_time
    print("The Distance would be", john_formula)
elif beetles_mem == "Paul":
paul_Vf = int(input("What is the Final Velocity? "))
paul_Vi = int(input("What is the Intial Velocity? "))
paul_time = int(input("What is the Time in seconds? "))
#Calculate using Paul Formula
paul_formula = .5 * (paul_Vf + paul_Vi) * paul_time
print("The Distance would be", paul_formula)
elif beetles_mem == "George":
george_Vi = int(input("What is the Intial Velocity? "))
george_time = int(input("What is the Time in seconds? "))
#Calculate using George Formula
george_formula = george_Vi + gravity_global * george_time
print("The Final Velocity is", george_formula)
elif beetles_mem == "Ringo":
ringo_Vi = int(input("What is the Initial Velocity? "))
new_ringo_Vi = ringo_Vi ** 2
ringo_dist = int(input("What is the Distance? "))
#Calculate using Ringo Formula
ringo_formula = new_ringo_Vi + 2 * gravity_global * ringo_dist
print("The Final Velocity is", ringo_formula, "EE 2")
elif beetles_mem == "Kirby":
    print("Kirby wishes he was a Beatles member")
else: print("ERROR! Unknown Beatles Member!")
| NoahFlowa/CTC_Projects | Osterhout_Python/Kirby_Physics.py | Python | mit | 1,794 | 0.043478 |
from webob import Response
from keystone import utils
from keystone.common import template, wsgi
class ExtensionsController(wsgi.Controller):
"""Controller for extensions related methods"""
def __init__(self, options):
super(ExtensionsController, self).__init__()
self.options = options
@utils.wrap_error
def get_extensions_info(self, req, path):
resp = Response()
if utils.is_xml_response(req):
resp_file = "%s.xml" % path
mime_type = "application/xml"
else:
resp_file = "%s.json" % path
mime_type = "application/json"
return template.static_file(resp, req, resp_file,
root=utils.get_app_root(), mimetype=mime_type)
| ntt-pf-lab/backup_keystone | keystone/controllers/extensions.py | Python | apache-2.0 | 752 | 0.00133 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from solution import Solution
from solution import TreeNode
def constructOne(s):
if s == '#':
return None
else:
return TreeNode(int(s))
def createTree(tree):
q = []
root = constructOne(tree[0]);
q.append(root);
idx = 1;
while q:
tn = q.pop(0)
if not tn:
continue
if idx == len(tree):
break
left = constructOne(tree[idx])
tn.left = left
q.append(left)
idx += 1
if idx == len(tree):
break
right = constructOne(tree[idx])
idx += 1
tn.right = right
q.append(right)
return root
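# The helpers above rebuild a binary tree from a level-order list in which '#'
# marks a missing child (LeetCode's serialized form): ['1', '#', '2', '3']
# yields root 1 with no left child, right child 2, and 3 as the left child of 2.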
# inpt = createTree(['1', '#', '2', '3'])
inpt = createTree(['1', '2', '3', '#' , '#', '4', '#', '#', '5'])
sol = Solution()
res = sol.inorderTraversal(inpt)
print(res)
| zhlinh/leetcode | 0094.Binary Tree Inorder Traversal/test.py | Python | apache-2.0 | 874 | 0.006865 |
from django.conf.urls.defaults import *
from django.views.generic.dates import YearArchiveView, MonthArchiveView, DayArchiveView, DateDetailView
from tinymce.widgets import TinyMCE
from tinymce.views import preview
from opstel.models import Entry
entry_info_dict = {'queryset':Entry.live.all(), 'date_field': 'pub_date', }
urlpatterns = patterns('',
# Pagination for the equivalent of archive_index generic view.
# The url is of the form http://host/page/4/
# In urls.py for example, ('^blog/page/(?P<page>\d)/$', get_archive_index),
url(r'^$', 'opstel.views.get_archive_index_first', ),
url(r'^page/(?P<page>\d)/$', 'opstel.views.get_archive_index', ),
#url(r'^preview/$', 'preview', name= "preview"),
url(r'^(?P<year>\d{4})/$', YearArchiveView.as_view(**entry_info_dict), name= 'opstel_entry_archive_year'),
url(r'^(?P<year>\d{4})/(?P<month>\w{3})/$', MonthArchiveView.as_view(**entry_info_dict), name= 'opstel_entry_archive_month'),
url(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{2})/$', DayArchiveView.as_view(**entry_info_dict), name= 'opstel_entry_archive_day'),
url(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{2})/(?P<slug>[-\w]+)/$', DateDetailView.as_view(**entry_info_dict), name= 'opstel_entry_detail'),
) | davogler/venus | opstel/urls/entries.py | Python | mit | 1,232 | 0.018669 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class PolicyAssignmentsOperations(object):
"""PolicyAssignmentsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: The API version to use for the operation. Constant value: "2016-12-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-12-01"
self.config = config
def delete(
self, scope, policy_assignment_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a policy assignment.
:param scope: The scope of the policy assignment.
:type scope: str
:param policy_assignment_name: The name of the policy assignment to
delete.
:type policy_assignment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyAssignment
<azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/{scope}/providers/Microsoft.Authorization/policyassignments/{policyAssignmentName}'
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'policyAssignmentName': self._serialize.url("policy_assignment_name", policy_assignment_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicyAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create(
self, scope, policy_assignment_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates a policy assignment.
Policy assignments are inherited by child resources. For example, when
you apply a policy to a resource group that policy is assigned to all
resources in the group.
:param scope: The scope of the policy assignment.
:type scope: str
:param policy_assignment_name: The name of the policy assignment.
:type policy_assignment_name: str
:param parameters: Parameters for the policy assignment.
:type parameters: :class:`PolicyAssignment
<azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyAssignment
<azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/{scope}/providers/Microsoft.Authorization/policyassignments/{policyAssignmentName}'
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'policyAssignmentName': self._serialize.url("policy_assignment_name", policy_assignment_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'PolicyAssignment')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('PolicyAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
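    # Usage sketch (illustrative; names are placeholders): assignments are
    # created against a scope string, e.g. a resource-group scope.
    #
    #     scope = '/subscriptions/{}/resourceGroups/{}'.format(sub_id, 'my-rg')
    #     assignment = policy_client.policy_assignments.create(
    #         scope, 'my-assignment',
    #         PolicyAssignment(policy_definition_id=definition.id))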
def get(
self, scope, policy_assignment_name, custom_headers=None, raw=False, **operation_config):
"""Gets a policy assignment.
:param scope: The scope of the policy assignment.
:type scope: str
:param policy_assignment_name: The name of the policy assignment to
get.
:type policy_assignment_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyAssignment
<azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/{scope}/providers/Microsoft.Authorization/policyassignments/{policyAssignmentName}'
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'policyAssignmentName': self._serialize.url("policy_assignment_name", policy_assignment_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicyAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_for_resource_group(
self, resource_group_name, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets policy assignments for the resource group.
:param resource_group_name: The name of the resource group that
contains policy assignments.
:type resource_group_name: str
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyAssignmentPaged
<azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignmentPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/policyAssignments'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str', skip_quote=True)
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PolicyAssignmentPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PolicyAssignmentPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_for_resource(
self, resource_group_name, resource_provider_namespace, parent_resource_path, resource_type, resource_name, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets policy assignments for a resource.
:param resource_group_name: The name of the resource group containing
the resource. The name is case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource
provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource path.
:type parent_resource_path: str
:param resource_type: The resource type.
:type resource_type: str
:param resource_name: The name of the resource with policy
assignments.
:type resource_name: str
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyAssignmentPaged
<azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignmentPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/policyassignments'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PolicyAssignmentPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PolicyAssignmentPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list(
self, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets all the policy assignments for a subscription.
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyAssignmentPaged
<azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignmentPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyassignments'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.PolicyAssignmentPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.PolicyAssignmentPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def delete_by_id(
self, policy_assignment_id, custom_headers=None, raw=False, **operation_config):
"""Deletes a policy assignment by ID.
        When providing a scope for the assignment, use
'/subscriptions/{subscription-id}/' for subscriptions,
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}'
for resource groups, and
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
for resources.
:param policy_assignment_id: The ID of the policy assignment to
delete. Use the format
'/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
:type policy_assignment_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyAssignment
<azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/{policyAssignmentId}'
path_format_arguments = {
'policyAssignmentId': self._serialize.url("policy_assignment_id", policy_assignment_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicyAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_by_id(
self, policy_assignment_id, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates a policy assignment by ID.
Policy assignments are inherited by child resources. For example, when
you apply a policy to a resource group that policy is assigned to all
        resources in the group. When providing a scope for the assignment, use
'/subscriptions/{subscription-id}/' for subscriptions,
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}'
for resource groups, and
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
for resources.
:param policy_assignment_id: The ID of the policy assignment to
create. Use the format
'/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
:type policy_assignment_id: str
:param parameters: Parameters for policy assignment.
:type parameters: :class:`PolicyAssignment
<azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyAssignment
<azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/{policyAssignmentId}'
path_format_arguments = {
'policyAssignmentId': self._serialize.url("policy_assignment_id", policy_assignment_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'PolicyAssignment')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('PolicyAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_by_id(
self, policy_assignment_id, custom_headers=None, raw=False, **operation_config):
"""Gets a policy assignment by ID.
        When providing a scope for the assignment, use
'/subscriptions/{subscription-id}/' for subscriptions,
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}'
for resource groups, and
'/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
for resources.
:param policy_assignment_id: The ID of the policy assignment to get.
Use the format
'/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
:type policy_assignment_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`PolicyAssignment
<azure.mgmt.resource.policy.v2016_12_01.models.PolicyAssignment>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/{policyAssignmentId}'
path_format_arguments = {
'policyAssignmentId': self._serialize.url("policy_assignment_id", policy_assignment_id, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PolicyAssignment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
| SUSE/azure-sdk-for-python | azure-mgmt-resource/azure/mgmt/resource/policy/v2016_12_01/operations/policy_assignments_operations.py | Python | mit | 30,831 | 0.002465 |
""" Physics test sandbox for the space race game!
Alistair Reid 2015
"""
import matplotlib.pyplot as pl
import matplotlib as mpl
import numpy as np
from numpy.linalg import norm
from time import time, sleep
import os
def integrate(states, props, inp, walls, bounds, dt):
""" Implementing 4th order Runge-Kutta for a time stationary DE.
"""
derivs = lambda y: physics(y, props, inp, walls, bounds)
k1 = derivs(states)
k2 = derivs(states + 0.5*k1*dt)
k3 = derivs(states + 0.5*k2*dt)
k4 = derivs(states + k3*dt)
states += (k1 + 2*k2 + 2*k3 + k4)/6. * dt
def physics(states, props, inp, walls, bounds):
    """Return d(state)/dt for every ship given control inputs, walls and bounds."""
# Unpack state, input and property vectors
P = states[:, :2]
Th = states[:, 2:3]
V = states[:, 3:5]
W = states[:, 5:6]
m = props[:, 0:1]
I = props[:, 1:2]
rad = props[:, 2:3]
cd_a = props[:, 3:4] # coeff drag * area
f = inp[:, :1] * np.hstack((np.cos(Th), np.sin(Th)))
trq = inp[:, 1:2]
n = P.shape[0]
# Physics model parameters (hand tuned to feel right)
rho = 0.1 # Air density (or absorb into cd_a?)
k_elastic = 4000. # spring normal force
spin_drag_ratio = 1.8 # spin drag to forward drag
eps = 1e-5 # avoid divide by zero warnings
mu = 0.05 # coefficient of friction (tangent force/normal force)
mu_wall = 0.01 # wall friction param
sigmoid = lambda x: -1 + 2./(1. + np.exp(-x))
# Compute drag
f -= cd_a * rho * V * norm(V, axis=1)[:, np.newaxis]
trq -= spin_drag_ratio*cd_a * rho * W * np.abs(W) * rad**2
# Inter-ship collisions
checks = shortlist_collisions(P, 1.) # Apply test spatial hashing
for i, j in checks:
dP = P[j] - P[i]
dist = norm(dP) + eps
diameter = rad[i] + rad[j]
if dist < diameter:
# Direct collision: linear spring normal force
f_magnitude = (diameter-dist)*k_elastic
f_norm = f_magnitude * dP
f[i] -= f_norm
f[j] += f_norm
# Spin effects (ask Al to draw a free body diagram)
perp = np.array([-dP[1], dP[0]])/dist # surface perpendicular
v_rel = rad[i]*W[i] + rad[j]*W[j] + np.dot(V[i] - V[j], perp)
fric = f_magnitude * mu * sigmoid(v_rel)
f_fric = fric * perp
f[i] += f_fric
f[j] -= f_fric
trq[i] -= fric * rad[i]
trq[j] -= fric * rad[j]
# Wall collisions --> single body collisions
wall_info = linear_interpolate(walls, bounds, P)
# import IPython
# IPython.embed()
# exit()
for i in range(n):
dist = wall_info[i][0] - rad[i]
if dist < 0:
normal = wall_info[i][1:3]
# Linear spring normal force
f_norm_mag = -dist*k_elastic
f[i] += f_norm_mag * normal
# surface tangential force
perp = [-normal[1], normal[0]] # points left 90 degrees
v_rel = W[i] * rad[i] - np.dot(V[i], perp)
fric = f_norm_mag * mu_wall * sigmoid(v_rel)
f[i] += fric*perp
trq[i] -= fric * rad[i]
# Compose the gradient vector
return np.hstack((V, W, f/m, trq/I))
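# Column layout reminder: each row of `states` is [x, y, th, vx, vy, w], so the
# gradient returned above is [vx, vy, w, fx/m, fy/m, trq/I] per ship.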
def shortlist_collisions(P, r):
# Use spatial hashing to shortlist possible collisions
n = P.shape[0]
all_cells = dict() # potential collisions
checks = set()
grid = r * 2. + 1e-5 # base off diameter
offsets = r*np.array([[1,1],[1,-1], [-1,1], [-1,-1]])
for my_id in range(n):
bins = [tuple(m) for m in np.floor((P[my_id] + offsets)/grid)]
for bin in bins:
if bin in all_cells:
for friend in all_cells[bin]:
checks.add((my_id, friend))
all_cells[bin].append(my_id)
else:
all_cells[bin] = [my_id]
return checks
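# Worked example of the hashing above (illustrative numbers): with r = 1 the
# cell size is ~2, so a ship at P = [25.3, 24.1] registers under the corner
# keys floor((P + r*offset)/2) = (13, 12), (13, 11), (12, 12) and (12, 11);
# two ships only become a candidate collision pair when they share a key.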
def main():
resources = os.getcwd()[:-8]+'/mapbuilder/testmap_%s.npy'
wnx = np.load(resources % 'wnormx')
wny = np.load(resources % 'wnormy')
    wall_norm = np.sqrt(wnx**2 + wny**2) + 1e-5  # avoid shadowing the imported `norm`
    wnx /= wall_norm
    wny /= wall_norm
wdist = np.load(resources % 'walldist')
mapscale = 10
walls = np.dstack((wdist/mapscale, wnx, wny))
map_img = np.load(resources % 'occupancy') # 'walldist')
all_shape = np.array(map_img.shape).astype(float) / mapscale
bounds = [0, all_shape[1], 0, all_shape[0]]
# map_img = 0.25*(map_img[::2, ::2] + map_img[1::2,::2] + \
# map_img[::2, 1::2] + map_img[1::2, 1::2])
spawn = np.array([25, 25])/2. # x, y
spawn_size = 6/2.
n = 30
masses = 1. + 2*np.random.random(n)
masses[0] = 1.
Is = 0.25*masses
radius = np.ones(n)
cda = np.ones(n)
properties = np.vstack((masses, Is, radius, cda)).T
colours = ['r', 'b', 'g', 'c', 'm', 'y']
    colours = (colours * int(np.ceil(float(n) / len(colours))))[:n]
colours[0] = 'k'
# x, y, th, vx, vy, w
x0 = 2*(np.random.random(n) - 0.5) * spawn_size + spawn[0]
y0 = 2*(np.random.random(n) - 0.5) * spawn_size + spawn[1]
th0 = np.random.random(n) * np.pi * 2
vx0 = np.random.random(n) * 2 - 1
vy0 = np.random.random(n) * 2 - 1
w0 = np.random.random(n) * 2 - 1
states0 = np.vstack((x0, y0, th0, vx0, vy0, w0)).T
# Set up our spaceships
fig = pl.figure()
ax = pl.subplot(111)
# Draw the backdrop:
mapview = pl.imshow(-map_img, extent=bounds, cmap=pl.cm.gray, origin='lower')
cx = np.linspace(bounds[0], bounds[1], map_img.shape[1])
cy = np.linspace(bounds[2], bounds[3], map_img.shape[0])
cX, cY = np.meshgrid(cx, cy)
pl.contour(cX, cY, map_img, 1)
pl.show(block=False)
fig.canvas.draw()
background = [fig.canvas.copy_from_bbox(ax.bbox)]
sprites = []
for s, col, r in zip(states0, colours, radius):
vis = draw_outline(ax, s, col, r)
sprites.append(vis)
ax.set_xlim(bounds[0:2])
ax.set_ylim(bounds[2:4])
ax.set_aspect('equal')
dt = 0.02
start_time = time()
t = 0.
states = states0
event_count = 0
frame_rate = 30.
frame_time = 1./frame_rate
next_draw = frame_time
keys = set()
def press(event):
keys.add(event.key)
def unpress(event):
        keys.discard(event.key)  # discard(): a release may arrive without a matching press
def redo_background(event):
for s in sprites:
s.set_visible(False)
fig.canvas.draw()
background[0] = fig.canvas.copy_from_bbox(ax.bbox)
for s in sprites:
s.set_visible(True)
# event.width, event.height accessible
fig.canvas.mpl_connect('key_press_event', press)
fig.canvas.mpl_connect('key_release_event', unpress)
fig.canvas.mpl_connect('resize_event', redo_background)
print('Press Q to exit')
while 'q' not in keys:
# Advance the game state
while t < next_draw:
inputs = np.zeros((n, 2))
inputs[:, 1] = 3.0 # try to turn
inputs[:, 0] = 100 # some forward thrust!
# give the user control of ship 0
if 'right' in keys:
inputs[0, 1] = -10.
elif 'left' in keys:
inputs[0, 1] = 10.
else:
inputs[0, 1] = 0.
if 'up' in keys:
inputs[0, 0] = 100
else:
inputs[0, 0] = 0
t += dt
integrate(states, properties, inputs, walls, bounds, dt)
# Draw at the desired framerate
this_time = time() - start_time
if this_time > next_draw:
next_draw += frame_time
# blit the background
fig.canvas.restore_region(background[0])
for state, col, r, sprite in zip(states, colours, radius, sprites):
draw_outline(ax, state, col, r, handle=sprite)
fig.canvas.blit(ax.bbox)
event_count += 1
fig.canvas.flush_events()
else:
sleep((next_draw - this_time)*0.25)
def draw_outline(ax, state, c, radius, handle=None, n=15):
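    """ Draw a ship as an n-point outline pointed along heading th, or
    update an existing plot handle so the caller can blit frames cheaply.
    """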
x, y, th, vx, vy, w = state
# m, I, radius, c = props
# base_theta = np.linspace(np.pi-2, np.pi+2, n-1)
# base_theta[0] = 0
# base_theta[-1] = 0
base_theta = np.linspace(0, 2*np.pi, n)
theta = base_theta + th + np.pi
theta[0] = th
theta[-1] = th
# size = np.sqrt(1. - np.abs(base_theta - np.pi)/np.pi)
size = 1
vx = np.cos(theta) * radius * size
vy = np.sin(theta) * radius * size
vx += x
vy += y
if handle:
handle.set_data(vx, vy)
ax.draw_artist(handle)
else:
handle, = ax.plot(vx, vy, c)
return handle
def linear_interpolate(img, bounds, pos):
""" Used for interpreting Dan-maps
Args:
img - n*m*k
bounds - (xmin, xmax, ymin, ymax)
pos - n*2 position vector of query
Returns:
interpolated vector
"""
h, w, ch = np.shape(img)
xmin, xmax, ymin, ymax = bounds
x, y = pos.T
ix = (x - xmin) / (xmax - xmin) * (w - 1.)
iy = (y - ymin) / (ymax - ymin) * (h - 1.)
ix = np.minimum(np.maximum(0, ix), w-2)
iy = np.minimum(np.maximum(0, iy), h-2)
L = ix.astype(int)
T = iy.astype(int)
alphax = (ix - L)[:,np.newaxis]
alphay = (iy - T)[:,np.newaxis]
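    # Bilinear blend of the four surrounding pixels, weighted by alphax/alphay.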
out = (1.-alphax)*(1.-alphay)*img[T, L] + \
(1.-alphax)*alphay*img[T+1, L] + \
alphax*(1-alphay)*img[T, L+1] + \
alphax*alphay*img[T+1, L+1]
return out
def old_shortlist_collisions(P, r):
# Use spatial hashing to shortlist possible collisions
# Requires radius = 1 even though I originally allowed it to vary.
n = P.shape[0]
all_cells = dict() # potential collisions
checks = set()
grid = r * 2. + 1e-5 # base off diameter
UP = [tuple(v) for v in np.floor((P+[r, r]) / grid)]
DOWN = [tuple(v) for v in np.floor((P+[r, -r]) / grid)]
LEFT = [tuple(v) for v in np.floor((P+[-r, r]) / grid)]
RIGHT = [tuple(v) for v in np.floor((P+[-r, -r]) / grid)]
indx = list(range(n))
for i, u, d, l, r in zip(indx, UP, DOWN, LEFT, RIGHT):
for my_id in (u, d, l, r):
if my_id in all_cells:
for friend in all_cells[my_id]:
checks.add((i, friend))
all_cells[my_id].append(i)
else:
all_cells[my_id] = [i]
return checks
if __name__ == '__main__':
main()
| lmccalman/spacerace | physics/engine.py | Python | mit | 10,367 | 0.002026 |
def getSublists(L, n):
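  """Return every contiguous sublist of L with length exactly n, in order."""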
sublists = []
for i in range(len(L)):
next_sublist = L[i:i+n]
if len(next_sublist) == n:
sublists.append(next_sublist)
return sublists
# Test Cases
L = [10, 4, 6, 8, 3, 4, 5, 7, 7, 2]
print getSublists(L, 4) == [[10, 4, 6, 8], [4, 6, 8, 3], [6, 8, 3, 4], [8, 3, 4, 5], [3, 4, 5, 7], [4, 5, 7, 7], [5, 7, 7, 2]]
L = [1, 1, 1, 1, 4]
print getSublists(L, 2) == [[1, 1], [1, 1], [1, 1], [1, 4]]
| NicholasAsimov/courses | 6.00.1x/final/p4-1.py | Python | mit | 459 | 0.004357 |
import csbuild
#csbuild.SetActiveToolchain("android")
@csbuild.project("AndroidTest_Basic", "AndroidTest_Basic")
def AndroidTest_Basic():
csbuild.Toolchain("android").SetCcCommand("gcc")
csbuild.Toolchain("android").SetCxxCommand("g++")
csbuild.Toolchain("android").SetPackageName("csbuild.UnitTest.AndroidBasic")
csbuild.Toolchain("android").SetActivityName("CSBUnitTestAndroidBasic")
csbuild.DisablePrecompile()
csbuild.SetOutput("AndroidTest_Basic", csbuild.ProjectType.Application)
csbuild.Toolchain("android").AddLibraries("android", "m", "log", "dl", "c")
csbuild.SetSupportedToolchains("msvc", "android")
| ShadauxCat/csbuild | UnitTests/Android/unit_test_android.py | Python | mit | 622 | 0.016077 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for asserts module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.converters import asserts
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.platform import test
class AssertsTest(converter_testing.TestCase):
def test_transform(self):
def test_fn(a):
assert a > 0
node, ctx = self.prepare(test_fn, {})
node = asserts.transform(node, ctx)
self.assertTrue(isinstance(node.body[0].value, gast.Call))
if __name__ == '__main__':
test.main()
| kobejean/tensorflow | tensorflow/python/autograph/converters/asserts_test.py | Python | apache-2.0 | 1,319 | 0.002274 |
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from rest_framework import response, schemas
from rest_framework.decorators import (
api_view,
renderer_classes,
)
from drf_yasg.renderers import (
OpenAPIRenderer,
SwaggerUIRenderer,
)
@api_view()
@renderer_classes([OpenAPIRenderer, SwaggerUIRenderer])
def schema_view(request):
generator = schemas.SchemaGenerator(title='Impact API')
return response.Response(generator.get_schema(request=request))
| masschallenge/impact-api | web/impact/impact/schema.py | Python | mit | 482 | 0 |
a = "1"
b = 1
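# 5 * a repeats the string ("11111") while 5 * b is integer arithmetic (5).
# "Arvud on ... ja ..." is Estonian for "The numbers are ... and ...".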
print("Arvud on " + 5 * a + " ja " + str(5 * b)) | captainhungrykaboom/MTAT.TK.006 | 6. märts - 12. märts ülesanded/harjutus ülesanne 6.py | Python | mit | 62 | 0.016129 |
from builtins import str
from qgis.PyQt.QtCore import QFileInfo
from qgis.PyQt.QtWidgets import QFileDialog
def update_directory_key(settings, settings_dir_key, fileName):
"""
modified from module RASTERCALC by Barry Rowlingson
"""
path = QFileInfo(fileName).absolutePath()
settings.setValue(settings_dir_key,
str(path))
def new_file_path(parent, show_msg, path, filter_text):
output_filename, __ = QFileDialog.getSaveFileName(
parent,
show_msg,
path,
filter_text
)
if not output_filename:
return ''
else:
return output_filename
def old_file_path(parent, show_msg, filter_extension, filter_text):
input_filename, __ = QFileDialog.getOpenFileName(parent,
parent.tr(show_msg),
filter_extension,
filter_text)
if not input_filename:
return ''
else:
return input_filename
| mauroalberti/gsf | pygsf/utils/qt_utils/filesystem.py | Python | gpl-3.0 | 1,060 | 0.003774 |
# Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection level utilities for Mongo."""
import warnings
from bson.code import Code
from bson.objectid import ObjectId
from bson.son import SON
from pymongo import (bulk,
common,
helpers,
message,
results)
from pymongo.command_cursor import CommandCursor
from pymongo.cursor import Cursor
from pymongo.errors import InvalidName, OperationFailure
from pymongo.helpers import _check_write_command_response
from pymongo.message import _INSERT, _UPDATE, _DELETE
from pymongo.operations import _WriteOp
from pymongo.read_preferences import ReadPreference
try:
from collections import OrderedDict
ordered_types = (SON, OrderedDict)
except ImportError:
ordered_types = SON
def _gen_index_name(keys):
"""Generate an index name from the set of fields it is over.
"""
return u"_".join([u"%s_%s" % item for item in keys])
class ReturnDocument(object):
"""An enum used with
:meth:`~pymongo.collection.Collection.find_one_and_replace` and
:meth:`~pymongo.collection.Collection.find_one_and_update`.
"""
BEFORE = False
"""Return the original document before it was updated/replaced, or
``None`` if no document matches the query.
"""
AFTER = True
"""Return the updated/replaced or inserted document."""
class Collection(common.BaseObject):
"""A Mongo collection.
"""
def __init__(self, database, name, create=False, codec_options=None,
read_preference=None, write_concern=None, **kwargs):
"""Get / create a Mongo collection.
Raises :class:`TypeError` if `name` is not an instance of
:class:`basestring` (:class:`str` in python 3). Raises
:class:`~pymongo.errors.InvalidName` if `name` is not a valid
collection name. Any additional keyword arguments will be used
as options passed to the create command. See
:meth:`~pymongo.database.Database.create_collection` for valid
options.
If `create` is ``True`` or additional keyword arguments are
present a create command will be sent. Otherwise, a create
command will not be sent and the collection will be created
implicitly on first use.
:Parameters:
- `database`: the database to get a collection from
- `name`: the name of the collection to get
- `create` (optional): if ``True``, force collection
creation even without options being set
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) database.codec_options is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) database.read_preference is used.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) database.write_concern is used.
- `**kwargs` (optional): additional keyword arguments will
be passed as options for the create collection command
.. versionchanged:: 2.9
Added the codec_options, read_preference, and write_concern options.
.. versionchanged:: 2.2
Removed deprecated argument: options
.. versionadded:: 2.1
uuid_subtype attribute
.. versionchanged:: 1.5
deprecating `options` in favor of kwargs
.. versionadded:: 1.5
the `create` parameter
.. mongodoc:: collections
"""
opts, mode, tags, wc_doc = helpers._get_common_options(
database, codec_options, read_preference, write_concern)
salms = database.secondary_acceptable_latency_ms
super(Collection, self).__init__(
codec_options=opts,
read_preference=mode,
tag_sets=tags,
secondary_acceptable_latency_ms=salms,
slave_okay=database.slave_okay,
safe=database.safe,
**wc_doc)
if not isinstance(name, basestring):
raise TypeError("name must be an instance "
"of %s" % (basestring.__name__,))
if not name or ".." in name:
raise InvalidName("collection names cannot be empty")
if "$" in name and not (name.startswith("oplog.$main") or
name.startswith("$cmd")):
raise InvalidName("collection names must not "
"contain '$': %r" % name)
if name[0] == "." or name[-1] == ".":
raise InvalidName("collection names must not start "
"or end with '.': %r" % name)
if "\x00" in name:
raise InvalidName("collection names must not contain the "
"null character")
self.__database = database
self.__name = unicode(name)
self.__full_name = u"%s.%s" % (self.__database.name, self.__name)
if create or kwargs:
self.__create(kwargs)
def __create(self, options):
"""Sends a create command with the given options.
"""
if options:
if "size" in options:
options["size"] = float(options["size"])
self.__database.command("create", self.__name,
read_preference=ReadPreference.PRIMARY,
**options)
else:
self.__database.command("create", self.__name,
read_preference=ReadPreference.PRIMARY)
def __getattr__(self, name):
"""Get a sub-collection of this collection by name.
Raises InvalidName if an invalid collection name is used.
:Parameters:
- `name`: the name of the collection to get
"""
return Collection(self.__database, u"%s.%s" % (self.__name, name))
def __getitem__(self, name):
return self.__getattr__(name)
def __repr__(self):
return "Collection(%r, %r)" % (self.__database, self.__name)
def __eq__(self, other):
if isinstance(other, Collection):
us = (self.__database, self.__name)
them = (other.__database, other.__name)
return us == them
return NotImplemented
def __ne__(self, other):
return not self == other
@property
def full_name(self):
"""The full name of this :class:`Collection`.
The full name is of the form `database_name.collection_name`.
.. versionchanged:: 1.3
``full_name`` is now a property rather than a method.
"""
return self.__full_name
@property
def name(self):
"""The name of this :class:`Collection`.
.. versionchanged:: 1.3
``name`` is now a property rather than a method.
"""
return self.__name
@property
def database(self):
"""The :class:`~pymongo.database.Database` that this
:class:`Collection` is a part of.
.. versionchanged:: 1.3
``database`` is now a property rather than a method.
"""
return self.__database
def with_options(
self, codec_options=None, read_preference=None, write_concern=None):
"""Get a clone of this collection changing the specified settings.
>>> from pymongo import ReadPreference
>>> coll1.read_preference == ReadPreference.PRIMARY
True
>>> coll2 = coll1.with_options(read_preference=ReadPreference.SECONDARY)
>>> coll1.read_preference == ReadPreference.PRIMARY
True
>>> coll2.read_preference == ReadPreference.SECONDARY
True
:Parameters:
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`Collection`
is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`Collection` is used. See :mod:`~pymongo.read_preferences`
for options.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`Collection`
is used.
.. versionadded:: 2.9
"""
opts, mode, tags, wc_doc = helpers._get_common_options(
self, codec_options, read_preference, write_concern)
coll = Collection(self.__database, self.__name, False, opts)
coll._write_concern = wc_doc
coll._read_pref = mode
coll._tag_sets = tags
return coll
def initialize_unordered_bulk_op(self):
"""Initialize an unordered batch of write operations.
Operations will be performed on the server in arbitrary order,
possibly in parallel. All operations will be attempted.
Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance.
See :ref:`unordered_bulk` for examples.
.. versionadded:: 2.7
"""
return bulk.BulkOperationBuilder(self, ordered=False)
def initialize_ordered_bulk_op(self):
"""Initialize an ordered batch of write operations.
Operations will be performed on the server serially, in the
order provided. If an error occurs all remaining operations
are aborted.
Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance.
See :ref:`ordered_bulk` for examples.
.. versionadded:: 2.7
"""
return bulk.BulkOperationBuilder(self, ordered=True)
def bulk_write(self, requests, ordered=True):
"""Send a batch of write operations to the server.
Requests are passed as a list of write operation instances (
:class:`~pymongo.operations.InsertOne`,
:class:`~pymongo.operations.UpdateOne`,
:class:`~pymongo.operations.UpdateMany`,
:class:`~pymongo.operations.ReplaceOne`,
:class:`~pymongo.operations.DeleteOne`, or
:class:`~pymongo.operations.DeleteMany`).
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634ef')}
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
>>> # DeleteMany, UpdateOne, and UpdateMany are also available.
...
>>> from pymongo import InsertOne, DeleteOne, ReplaceOne
>>> requests = [InsertOne({'y': 1}), DeleteOne({'x': 1}),
... ReplaceOne({'w': 1}, {'z': 1}, upsert=True)]
>>> result = db.test.bulk_write(requests)
>>> result.inserted_count
1
>>> result.deleted_count
1
>>> result.modified_count
0
>>> result.upserted_ids
{2: ObjectId('54f62ee28891e756a6e1abd5')}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f62e60fba5226811f634f0')}
{u'y': 1, u'_id': ObjectId('54f62ee2fba5226811f634f1')}
{u'z': 1, u'_id': ObjectId('54f62ee28891e756a6e1abd5')}
:Parameters:
- `requests`: A list of write operations (see examples above).
- `ordered` (optional): If ``True`` (the default) requests will be
performed on the server serially, in the order provided. If an error
occurs all remaining operations are aborted. If ``False`` requests
will be performed on the server in arbitrary order, possibly in
parallel, and all operations will be attempted.
:Returns:
An instance of :class:`~pymongo.results.BulkWriteResult`.
.. versionadded:: 2.9
"""
if not isinstance(requests, list):
raise TypeError("requests must be a list")
blk = bulk._Bulk(self, ordered)
for request in requests:
if not isinstance(request, _WriteOp):
raise TypeError("%r is not a valid request" % (request,))
request._add_to_bulk(blk)
bulk_api_result = blk.execute(self.write_concern)
if bulk_api_result is not None:
return results.BulkWriteResult(bulk_api_result, True)
return results.BulkWriteResult({}, False)
def save(self, to_save, manipulate=True,
safe=None, check_keys=True, **kwargs):
"""Save a document in this collection.
If `to_save` already has an ``"_id"`` then an :meth:`update`
(upsert) operation is performed and any existing document with
that ``"_id"`` is overwritten. Otherwise an :meth:`insert`
operation is performed. In this case if `manipulate` is ``True``
an ``"_id"`` will be added to `to_save` and this method returns
the ``"_id"`` of the saved document. If `manipulate` is ``False``
the ``"_id"`` will be added by the server but this method will
return ``None``.
Raises :class:`TypeError` if `to_save` is not an instance of
:class:`dict`.
Write concern options can be passed as keyword arguments, overriding
any global defaults. Valid options include w=<int/string>,
wtimeout=<int>, j=<bool>, or fsync=<bool>. See the parameter list below
for a detailed explanation of these options.
By default an acknowledgment is requested from the server that the
save was successful, raising :class:`~pymongo.errors.OperationFailure`
if an error occurred. **Passing w=0 disables write acknowledgement
and all other write concern options.**
:Parameters:
- `to_save`: the document to be saved
- `manipulate` (optional): manipulate the document before
saving it?
- `safe` (optional): **DEPRECATED** - Use `w` instead.
- `check_keys` (optional): check if keys start with '$' or
contain '.', raising :class:`~pymongo.errors.InvalidName`
in either case.
- `w` (optional): (integer or string) If this is a replica set, write
operations will block until they have been replicated to the
specified number or tagged set of servers. `w=<int>` always includes
the replica set primary (e.g. w=3 means write to the primary and wait
until replicated to **two** secondaries). **Passing w=0 disables
write acknowledgement and all other write concern options.**
- `wtimeout` (optional): (integer) Used in conjunction with `w`.
Specify a value in milliseconds to control how long to wait for
write propagation to complete. If replication does not complete in
the given timeframe, a timeout exception is raised.
- `j` (optional): If ``True`` block until write operations have been
committed to the journal. Ignored if the server is running without
journaling.
- `fsync` (optional): If ``True`` force the database to fsync all
files before returning. When used with `j` the server awaits the
next group commit before returning.
:Returns:
- The ``'_id'`` value of `to_save` or ``[None]`` if `manipulate` is
``False`` and `to_save` has no '_id' field.
.. versionadded:: 1.8
Support for passing `getLastError` options as keyword
arguments.
.. mongodoc:: insert
"""
if not isinstance(to_save, dict):
raise TypeError("cannot save object of type %s" % type(to_save))
if "_id" not in to_save:
return self.insert(to_save, manipulate, safe, check_keys, **kwargs)
else:
self.update({"_id": to_save["_id"]}, to_save, True,
manipulate, safe, check_keys=check_keys, **kwargs)
return to_save.get("_id", None)
def insert(self, doc_or_docs, manipulate=True,
safe=None, check_keys=True, continue_on_error=False, **kwargs):
"""Insert a document(s) into this collection.
If `manipulate` is ``True``, the document(s) are manipulated using
any :class:`~pymongo.son_manipulator.SONManipulator` instances
that have been added to this :class:`~pymongo.database.Database`.
In this case an ``"_id"`` will be added if the document(s) does
        not already contain one and the ``"_id"`` (or list of ``"_id"``
values for more than one document) will be returned.
If `manipulate` is ``False`` and the document(s) does not include
an ``"_id"`` one will be added by the server. The server
does not return the ``"_id"`` it created so ``None`` is returned.
Write concern options can be passed as keyword arguments, overriding
any global defaults. Valid options include w=<int/string>,
wtimeout=<int>, j=<bool>, or fsync=<bool>. See the parameter list below
for a detailed explanation of these options.
By default an acknowledgment is requested from the server that the
insert was successful, raising :class:`~pymongo.errors.OperationFailure`
if an error occurred. **Passing w=0 disables write acknowledgement
and all other write concern options.**
:Parameters:
- `doc_or_docs`: a document or list of documents to be
inserted
- `manipulate` (optional): If ``True`` manipulate the documents
before inserting.
- `safe` (optional): **DEPRECATED** - Use `w` instead.
- `check_keys` (optional): If ``True`` check if keys start with '$'
or contain '.', raising :class:`~pymongo.errors.InvalidName` in
either case.
- `continue_on_error` (optional): If ``True``, the database will not
stop processing a bulk insert if one fails (e.g. due to duplicate
IDs). This makes bulk insert behave similarly to a series of single
inserts, except lastError will be set if any insert fails, not just
the last one. If multiple errors occur, only the most recent will
be reported by :meth:`~pymongo.database.Database.error`.
- `w` (optional): (integer or string) If this is a replica set, write
operations will block until they have been replicated to the
specified number or tagged set of servers. `w=<int>` always includes
the replica set primary (e.g. w=3 means write to the primary and wait
until replicated to **two** secondaries). **Passing w=0 disables
write acknowledgement and all other write concern options.**
- `wtimeout` (optional): (integer) Used in conjunction with `w`.
Specify a value in milliseconds to control how long to wait for
write propagation to complete. If replication does not complete in
the given timeframe, a timeout exception is raised.
- `j` (optional): If ``True`` block until write operations have been
committed to the journal. Ignored if the server is running without
journaling.
- `fsync` (optional): If ``True`` force the database to fsync all
files before returning. When used with `j` the server awaits the
next group commit before returning.
:Returns:
- The ``'_id'`` value (or list of '_id' values) of `doc_or_docs` or
``[None]`` if manipulate is ``False`` and the documents passed
as `doc_or_docs` do not include an '_id' field.
.. note:: `continue_on_error` requires server version **>= 1.9.1**
.. versionadded:: 2.1
Support for continue_on_error.
.. versionadded:: 1.8
Support for passing `getLastError` options as keyword
arguments.
.. versionchanged:: 1.1
Bulk insert works with an iterable sequence of documents.
.. mongodoc:: insert
"""
client = self.database.connection
# Batch inserts require us to know the connected primary's
# max_bson_size, max_message_size, and max_write_batch_size.
# We have to be connected to the primary to know that.
client._ensure_connected(True)
docs = doc_or_docs
return_one = False
if isinstance(docs, dict):
return_one = True
docs = [docs]
ids = []
if manipulate:
def gen():
db = self.__database
for doc in docs:
# Apply user-configured SON manipulators. This order of
# operations is required for backwards compatibility,
# see PYTHON-709.
doc = db._apply_incoming_manipulators(doc, self)
if '_id' not in doc:
doc['_id'] = ObjectId()
doc = db._apply_incoming_copying_manipulators(doc, self)
ids.append(doc['_id'])
yield doc
else:
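            # No SON manipulation: just record any existing _id (may be None).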
def gen():
for doc in docs:
ids.append(doc.get('_id'))
yield doc
safe, options = self._get_write_mode(safe, **kwargs)
if client.max_wire_version > 1 and safe:
# Insert command
command = SON([('insert', self.name),
('ordered', not continue_on_error)])
if options:
command['writeConcern'] = options
results = message._do_batched_write_command(
self.database.name + ".$cmd", _INSERT, command,
gen(), check_keys, self.uuid_subtype, client)
_check_write_command_response(results)
else:
# Legacy batched OP_INSERT
message._do_batched_insert(self.__full_name, gen(), check_keys,
safe, options, continue_on_error,
self.uuid_subtype, client)
if return_one:
return ids[0]
else:
return ids
def insert_one(self, document):
"""Insert a single document.
>>> db.test.count({'x': 1})
0
>>> result = db.test.insert_one({'x': 1})
>>> result.inserted_id
ObjectId('54f112defba522406c9cc208')
>>> db.test.find_one({'x': 1})
{u'x': 1, u'_id': ObjectId('54f112defba522406c9cc208')}
:Parameters:
- `document`: The document to insert. Must be a mapping
type. If the document does not have an _id field one will be
added automatically.
:Returns:
- An instance of :class:`~pymongo.results.InsertOneResult`.
.. versionadded:: 2.9
"""
common.validate_is_dict("document", document)
ids = self.insert(document)
return results.InsertOneResult(ids, self._get_write_mode()[0])
def insert_many(self, documents, ordered=True):
"""Insert a list of documents.
>>> db.test.count()
0
>>> result = db.test.insert_many([{'x': i} for i in range(2)])
>>> result.inserted_ids
[ObjectId('54f113fffba522406c9cc20e'), ObjectId('54f113fffba522406c9cc20f')]
>>> db.test.count()
2
:Parameters:
- `documents`: A list of documents to insert.
- `ordered` (optional): If ``True`` (the default) documents will be
inserted on the server serially, in the order provided. If an error
occurs all remaining inserts are aborted. If ``False``, documents
will be inserted on the server in arbitrary order, possibly in
parallel, and all document inserts will be attempted.
:Returns:
An instance of :class:`~pymongo.results.InsertManyResult`.
.. versionadded:: 2.9
"""
if not isinstance(documents, list) or not documents:
raise TypeError("documents must be a non-empty list")
inserted_ids = []
def gen():
"""A generator that validates documents and handles _ids."""
for document in documents:
common.validate_is_dict("document", document)
if "_id" not in document:
document["_id"] = ObjectId()
inserted_ids.append(document["_id"])
yield (_INSERT, document)
blk = bulk._Bulk(self, ordered)
blk.ops = [doc for doc in gen()]
blk.execute(self.write_concern)
return results.InsertManyResult(inserted_ids,
self._get_write_mode()[0])
def update(self, spec, document, upsert=False, manipulate=False,
safe=None, multi=False, check_keys=True, **kwargs):
"""Update a document(s) in this collection.
Raises :class:`TypeError` if either `spec` or `document` is
not an instance of ``dict`` or `upsert` is not an instance of
``bool``.
Write concern options can be passed as keyword arguments, overriding
any global defaults. Valid options include w=<int/string>,
wtimeout=<int>, j=<bool>, or fsync=<bool>. See the parameter list below
for a detailed explanation of these options.
By default an acknowledgment is requested from the server that the
update was successful, raising :class:`~pymongo.errors.OperationFailure`
if an error occurred. **Passing w=0 disables write acknowledgement
and all other write concern options.**
There are many useful `update modifiers`_ which can be used
when performing updates. For example, here we use the
``"$set"`` modifier to modify some fields in a matching
document:
.. doctest::
>>> db.test.insert({"x": "y", "a": "b"})
ObjectId('...')
>>> import pprint
>>> pprint.pprint(list(db.test.find()))
[{u'_id': ObjectId('...'), u'a': u'b', u'x': u'y'}]
>>> db.test.update({"x": "y"}, {"$set": {"a": "c"}})
{...}
>>> pprint.pprint(list(db.test.find()))
[{u'_id': ObjectId('...'), u'a': u'c', u'x': u'y'}]
:Parameters:
- `spec`: a ``dict`` or :class:`~bson.son.SON` instance
specifying elements which must be present for a document
to be updated
- `document`: a ``dict`` or :class:`~bson.son.SON`
instance specifying the document to be used for the update
or (in the case of an upsert) insert - see docs on MongoDB
`update modifiers`_
- `upsert` (optional): perform an upsert if ``True``
- `manipulate` (optional): manipulate the document before
updating? If ``True`` all instances of
:mod:`~pymongo.son_manipulator.SONManipulator` added to
this :class:`~pymongo.database.Database` will be applied
to the document before performing the update.
- `check_keys` (optional): check if keys in `document` start
with '$' or contain '.', raising
:class:`~pymongo.errors.InvalidName`. Only applies to
document replacement, not modification through $
operators.
- `safe` (optional): **DEPRECATED** - Use `w` instead.
- `multi` (optional): update all documents that match
`spec`, rather than just the first matching document. The
default value for `multi` is currently ``False``, but this
might eventually change to ``True``. It is recommended
that you specify this argument explicitly for all update
operations in order to prepare your code for that change.
- `w` (optional): (integer or string) If this is a replica set, write
operations will block until they have been replicated to the
specified number or tagged set of servers. `w=<int>` always includes
the replica set primary (e.g. w=3 means write to the primary and wait
until replicated to **two** secondaries). **Passing w=0 disables
write acknowledgement and all other write concern options.**
- `wtimeout` (optional): (integer) Used in conjunction with `w`.
Specify a value in milliseconds to control how long to wait for
write propagation to complete. If replication does not complete in
the given timeframe, a timeout exception is raised.
- `j` (optional): If ``True`` block until write operations have been
committed to the journal. Ignored if the server is running without
journaling.
- `fsync` (optional): If ``True`` force the database to fsync all
files before returning. When used with `j` the server awaits the
next group commit before returning.
:Returns:
- A document (dict) describing the effect of the update or ``None``
if write acknowledgement is disabled.
.. versionadded:: 1.8
Support for passing `getLastError` options as keyword
arguments.
.. versionchanged:: 1.4
Return the response to *lastError* if `safe` is ``True``.
.. versionadded:: 1.1.1
The `multi` parameter.
.. _update modifiers: http://www.mongodb.org/display/DOCS/Updating
.. mongodoc:: update
"""
if not isinstance(spec, dict):
raise TypeError("spec must be an instance of dict")
if not isinstance(document, dict):
raise TypeError("document must be an instance of dict")
if not isinstance(upsert, bool):
raise TypeError("upsert must be an instance of bool")
client = self.database.connection
# Need to connect to know the wire version, and may want to connect
# before applying SON manipulators.
client._ensure_connected(True)
if manipulate:
document = self.__database._fix_incoming(document, self)
safe, options = self._get_write_mode(safe, **kwargs)
if document:
# If a top level key begins with '$' this is a modify operation
# and we should skip key validation. It doesn't matter which key
# we check here. Passing a document with a mix of top level keys
# starting with and without a '$' is invalid and the server will
# raise an appropriate exception.
first = (document.iterkeys()).next()
if first.startswith('$'):
check_keys = False
if client.max_wire_version > 1 and safe:
# Update command
command = SON([('update', self.name)])
if options:
command['writeConcern'] = options
docs = [SON([('q', spec), ('u', document),
('multi', multi), ('upsert', upsert)])]
results = message._do_batched_write_command(
self.database.name + '.$cmd', _UPDATE, command,
docs, check_keys, self.uuid_subtype, client)
_check_write_command_response(results)
_, result = results[0]
# Add the updatedExisting field for compatibility
if result.get('n') and 'upserted' not in result:
result['updatedExisting'] = True
else:
result['updatedExisting'] = False
# MongoDB >= 2.6.0 returns the upsert _id in an array
# element. Break it out for backward compatibility.
if isinstance(result.get('upserted'), list):
result['upserted'] = result['upserted'][0]['_id']
return result
else:
# Legacy OP_UPDATE
return client._send_message(
message.update(self.__full_name, upsert, multi,
spec, document, safe, options,
check_keys, self.uuid_subtype), safe)
def update_one(self, filter, update, upsert=False):
"""Update a single document matching the filter.
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 4, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. versionadded:: 2.9
"""
common.validate_ok_for_update(update)
result = self.update(
filter, update, upsert, multi=False, check_keys=False)
return results.UpdateResult(result, self._get_write_mode()[0])
def update_many(self, filter, update, upsert=False):
"""Update one or more documents that match the filter.
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> result = db.test.update_many({'x': 1}, {'$inc': {'x': 3}})
>>> result.matched_count
3
>>> result.modified_count
3
>>> for doc in db.test.find():
... print(doc)
...
{u'x': 4, u'_id': 0}
{u'x': 4, u'_id': 1}
{u'x': 4, u'_id': 2}
:Parameters:
- `filter`: A query that matches the documents to update.
- `update`: The modifications to apply.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. versionadded:: 2.9
"""
common.validate_ok_for_update(update)
result = self.update(
filter, update, upsert, multi=True, check_keys=False)
return results.UpdateResult(result, self._get_write_mode()[0])
def drop(self):
"""Alias for :meth:`~pymongo.database.Database.drop_collection`.
The following two calls are equivalent:
>>> db.foo.drop()
>>> db.drop_collection("foo")
.. versionadded:: 1.8
"""
self.__database.drop_collection(self.__name)
def remove(self, spec_or_id=None, safe=None, multi=True, **kwargs):
"""Remove a document(s) from this collection.
.. warning:: Calls to :meth:`remove` should be performed with
care, as removed data cannot be restored.
If `spec_or_id` is ``None``, all documents in this collection
will be removed. This is not equivalent to calling
:meth:`~pymongo.database.Database.drop_collection`, however,
as indexes will not be removed.
Write concern options can be passed as keyword arguments, overriding
any global defaults. Valid options include w=<int/string>,
wtimeout=<int>, j=<bool>, or fsync=<bool>. See the parameter list below
for a detailed explanation of these options.
By default an acknowledgment is requested from the server that the
remove was successful, raising :class:`~pymongo.errors.OperationFailure`
if an error occurred. **Passing w=0 disables write acknowledgement
and all other write concern options.**
:Parameters:
- `spec_or_id` (optional): a dictionary specifying the
documents to be removed OR any other type specifying the
value of ``"_id"`` for the document to be removed
- `safe` (optional): **DEPRECATED** - Use `w` instead.
- `multi` (optional): If ``True`` (the default) remove all documents
matching `spec_or_id`, otherwise remove only the first matching
document.
- `w` (optional): (integer or string) If this is a replica set, write
operations will block until they have been replicated to the
specified number or tagged set of servers. `w=<int>` always includes
the replica set primary (e.g. w=3 means write to the primary and wait
until replicated to **two** secondaries). **Passing w=0 disables
write acknowledgement and all other write concern options.**
- `wtimeout` (optional): (integer) Used in conjunction with `w`.
Specify a value in milliseconds to control how long to wait for
write propagation to complete. If replication does not complete in
the given timeframe, a timeout exception is raised.
- `j` (optional): If ``True`` block until write operations have been
committed to the journal. Ignored if the server is running without
journaling.
- `fsync` (optional): If ``True`` force the database to fsync all
files before returning. When used with `j` the server awaits the
next group commit before returning.
:Returns:
- A document (dict) describing the effect of the remove or ``None``
if write acknowledgement is disabled.
.. versionadded:: 1.8
Support for passing `getLastError` options as keyword arguments.
.. versionchanged:: 1.7 Accept any type other than a ``dict``
instance for removal by ``"_id"``, not just
:class:`~bson.objectid.ObjectId` instances.
.. versionchanged:: 1.4
Return the response to *lastError* if `safe` is ``True``.
.. versionchanged:: 1.2
The `spec_or_id` parameter is now optional. If it is
not specified *all* documents in the collection will be
removed.
.. versionadded:: 1.1
The `safe` parameter.
.. mongodoc:: remove
"""
if spec_or_id is None:
spec_or_id = {}
if not isinstance(spec_or_id, dict):
spec_or_id = {"_id": spec_or_id}
safe, options = self._get_write_mode(safe, **kwargs)
client = self.database.connection
# Need to connect to know the wire version.
client._ensure_connected(True)
if client.max_wire_version > 1 and safe:
# Delete command
command = SON([('delete', self.name)])
if options:
command['writeConcern'] = options
docs = [SON([('q', spec_or_id), ('limit', int(not multi))])]
results = message._do_batched_write_command(
self.database.name + '.$cmd', _DELETE, command,
docs, False, self.uuid_subtype, client)
_check_write_command_response(results)
_, result = results[0]
return result
else:
# Legacy OP_DELETE
return client._send_message(
message.delete(self.__full_name, spec_or_id, safe,
options, self.uuid_subtype, int(not multi)), safe)
def delete_one(self, filter):
"""Delete a single document matching the filter.
>>> db.test.count({'x': 1})
3
>>> result = db.test.delete_one({'x': 1})
>>> result.deleted_count
1
>>> db.test.count({'x': 1})
2
:Parameters:
- `filter`: A query that matches the document to delete.
:Returns:
- An instance of :class:`~pymongo.results.DeleteResult`.
.. versionadded:: 2.9
"""
result = self.remove(filter, multi=False)
return results.DeleteResult(result, self._get_write_mode()[0])
def delete_many(self, filter):
"""Delete one or more documents matching the filter.
>>> db.test.count({'x': 1})
3
>>> result = db.test.delete_many({'x': 1})
>>> result.deleted_count
3
>>> db.test.count({'x': 1})
0
:Parameters:
- `filter`: A query that matches the documents to delete.
:Returns:
- An instance of :class:`~pymongo.results.DeleteResult`.
.. versionadded:: 2.9
"""
result = self.remove(filter, multi=True)
return results.DeleteResult(result, self._get_write_mode()[0])
def replace_one(self, filter, replacement, upsert=False):
"""Replace a single document matching the filter.
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')}
>>> result = db.test.replace_one({'x': 1}, {'y': 1})
>>> result.matched_count
1
>>> result.modified_count
1
>>> for doc in db.test.find({}):
... print(doc)
...
{u'y': 1, u'_id': ObjectId('54f4c5befba5220aa4d6dee7')}
The *upsert* option can be used to insert a new document if a matching
document does not exist.
>>> result = db.test.replace_one({'x': 1}, {'x': 1}, True)
>>> result.matched_count
0
>>> result.modified_count
0
>>> result.upserted_id
ObjectId('54f11e5c8891e756a6e1abd4')
>>> db.test.find_one({'x': 1})
{u'x': 1, u'_id': ObjectId('54f11e5c8891e756a6e1abd4')}
:Parameters:
- `filter`: A query that matches the document to replace.
- `replacement`: The new document.
- `upsert` (optional): If ``True``, perform an insert if no documents
match the filter.
:Returns:
- An instance of :class:`~pymongo.results.UpdateResult`.
.. versionadded:: 2.9
"""
common.validate_ok_for_replace(replacement)
result = self.update(filter, replacement, upsert, multi=False)
return results.UpdateResult(result, self._get_write_mode()[0])
def find_one(self, spec_or_id=None, *args, **kwargs):
"""Get a single document from the database.
All arguments to :meth:`find` are also valid arguments for
:meth:`find_one`, although any `limit` argument will be
ignored. Returns a single document, or ``None`` if no matching
document is found.
:Parameters:
- `spec_or_id` (optional): a dictionary specifying
the query to be performed OR any other type to be used as
the value for a query for ``"_id"``.
- `*args` (optional): any additional positional arguments
are the same as the arguments to :meth:`find`.
- `**kwargs` (optional): any additional keyword arguments
are the same as the arguments to :meth:`find`.
- `max_time_ms` (optional): a value for max_time_ms may be
specified as part of `**kwargs`, e.g.
>>> find_one(max_time_ms=100)
.. versionchanged:: 1.7
Allow passing any of the arguments that are valid for
:meth:`find`.
.. versionchanged:: 1.7 Accept any type other than a ``dict``
instance as an ``"_id"`` query, not just
:class:`~bson.objectid.ObjectId` instances.
"""
if spec_or_id is not None and not isinstance(spec_or_id, dict):
spec_or_id = {"_id": spec_or_id}
max_time_ms = kwargs.pop("max_time_ms", None)
cursor = self.find(spec_or_id,
*args, **kwargs).max_time_ms(max_time_ms)
for result in cursor.limit(-1):
return result
return None
def find(self, *args, **kwargs):
"""Query the database.
The `spec` argument is a prototype document that all results
must match. For example:
>>> db.test.find({"hello": "world"})
only matches documents that have a key "hello" with value
"world". Matches can have other keys *in addition* to
"hello". The `fields` argument is used to specify a subset of
fields that should be included in the result documents. By
limiting results to a certain subset of fields you can cut
down on network traffic and decoding time.
Raises :class:`TypeError` if any of the arguments are of
improper type. Returns an instance of
:class:`~pymongo.cursor.Cursor` corresponding to this query.
:Parameters:
- `spec` (optional): a SON object specifying elements which
must be present for a document to be included in the
result set
- `fields` (optional): a list of field names that should be
returned in the result set or a dict specifying the fields
to include or exclude. If `fields` is a list "_id" will
always be returned. Use a dict to exclude fields from
the result (e.g. fields={'_id': False}).
- `skip` (optional): the number of documents to omit (from
the start of the result set) when returning the results
- `limit` (optional): the maximum number of results to
return
- `timeout` (optional): if True (the default), any returned
cursor is closed by the server after 10 minutes of
inactivity. If set to False, the returned cursor will never
time out on the server. Care should be taken to ensure that
cursors with timeout turned off are properly closed.
- `snapshot` (optional): if True, snapshot mode will be used
for this query. Snapshot mode assures no duplicates are
returned, or objects missed, which were present at both
the start and end of the query's execution. For details,
see the `snapshot documentation
<http://dochub.mongodb.org/core/snapshot>`_.
- `tailable` (optional): the result of this find call will
be a tailable cursor - tailable cursors aren't closed when
the last data is retrieved but are kept open and the
cursors location marks the final document's position. if
more data is received iteration of the cursor will
continue from the last document received. For details, see
the `tailable cursor documentation
<http://www.mongodb.org/display/DOCS/Tailable+Cursors>`_.
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for this query. See
:meth:`~pymongo.cursor.Cursor.sort` for details.
- `max_scan` (optional): limit the number of documents
examined when performing the query
- `as_class` (optional): class to use for documents in the
query result (default is
:attr:`~pymongo.mongo_client.MongoClient.document_class`)
- `slave_okay` (optional): if True, allows this query to
be run against a replica secondary.
- `await_data` (optional): if True, the server will block for
some extra time before returning, waiting for more data to
return. Ignored if `tailable` is False.
- `partial` (optional): if True, mongos will return partial
results if some shards are down instead of returning an error.
- `manipulate`: (optional): If True (the default), apply any
outgoing SON manipulators before returning.
- `read_preference` (optional): The read preference for
this query.
- `tag_sets` (optional): The tag sets for this query.
- `secondary_acceptable_latency_ms` (optional): Any replica-set
member whose ping time is within secondary_acceptable_latency_ms of
the nearest member may accept reads. Default 15 milliseconds.
**Ignored by mongos** and must be configured on the command line.
See the localThreshold_ option for more information.
- `exhaust` (optional): If ``True`` create an "exhaust" cursor.
MongoDB will stream batched results to the client without waiting
for the client to request each batch, reducing latency.
- `compile_re` (optional): if ``False``, don't attempt to compile
BSON regex objects into Python regexes. Return instances of
:class:`~bson.regex.Regex` instead.
- `oplog_replay` (optional): If True, set the oplogReplay query
flag.
- `modifiers` (optional): A dict specifying the MongoDB `query
modifiers`_ that should be used for this query. For example::
>>> db.test.find(modifiers={"$maxTimeMS": 500})
- `network_timeout` (optional): specify a timeout to use for
this query, which will override the
:class:`~pymongo.mongo_client.MongoClient`-level default
- `filter` (optional): a SON object specifying elements which
must be present for a document to be included in the
result set. Takes precedence over `spec`.
- `projection` (optional): a list of field names that should be
returned in the result set or a dict specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a dict to exclude fields from
the result (e.g. projection={'_id': False}). Takes precedence
over `fields`.
- `no_cursor_timeout` (optional): if False (the default), any
returned cursor is closed by the server after 10 minutes of
inactivity. If set to True, the returned cursor will never
time out on the server. Care should be taken to ensure that
cursors with no_cursor_timeout turned on are properly closed.
Takes precedence over `timeout`.
- `allow_partial_results` (optional): if True, mongos will return
partial results if some shards are down instead of returning an
error. Takes precedence over `partial`.
- `cursor_type` (optional): the type of cursor to return. Takes
precedence over `tailable`, `await_data` and `exhaust`. The valid
options are defined by :class:`~pymongo.cursor.CursorType`:
- :attr:`~pymongo.cursor.CursorType.NON_TAILABLE` - the result of
this find call will return a standard cursor over the result set.
- :attr:`~pymongo.cursor.CursorType.TAILABLE` - the result of this
find call will be a tailable cursor - tailable cursors are only
for use with capped collections. They are not closed when the
last data is retrieved but are kept open and the cursor location
marks the final document position. If more data is received
iteration of the cursor will continue from the last document
received. For details, see the `tailable cursor documentation
<http://www.mongodb.org/display/DOCS/Tailable+Cursors>`_.
- :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` - the result
of this find call will be a tailable cursor with the await flag
set. The server will wait for a few seconds after returning the
full result set so that it can capture and return additional data
added during the query.
- :attr:`~pymongo.cursor.CursorType.EXHAUST` - the result of this
find call will be an exhaust cursor. MongoDB will stream batched
results to the client without waiting for the client to request
each batch, reducing latency. See notes on compatibility below.
.. note:: There are a number of caveats to using the `exhaust`
parameter:
1. The `exhaust` and `limit` options are incompatible and can
not be used together.
2. The `exhaust` option is not supported by mongos and can not be
used with a sharded cluster.
3. A :class:`~pymongo.cursor.Cursor` instance created with the
`exhaust` option requires an exclusive :class:`~socket.socket`
connection to MongoDB. If the :class:`~pymongo.cursor.Cursor` is
discarded without being completely iterated the underlying
:class:`~socket.socket` connection will be closed and discarded
without being returned to the connection pool.
4. A :class:`~pymongo.cursor.Cursor` instance created with the
`exhaust` option in a :doc:`request </examples/requests>` **must**
be completely iterated before executing any other operation.
5. The `network_timeout` option is ignored when using the
`exhaust` option.
.. note:: The `manipulate` and `compile_re` parameters may default to
False in future releases.
.. note:: The `max_scan` parameter requires server
version **>= 1.5.1**
.. versionadded:: 2.9
The ``filter``, ``projection``, ``no_cursor_timeout``,
``allow_partial_results``, ``cursor_type``, ``modifiers`` parameters.
.. versionadded:: 2.7
The ``compile_re`` parameter.
.. versionadded:: 2.3
The `tag_sets` and `secondary_acceptable_latency_ms` parameters.
.. versionadded:: 1.11+
The `await_data`, `partial`, and `manipulate` parameters.
.. versionadded:: 1.8
The `network_timeout` parameter.
.. versionadded:: 1.7
The `sort`, `max_scan` and `as_class` parameters.
.. versionchanged:: 1.7
The `fields` parameter can now be a dict or any iterable in
addition to a list.
.. versionadded:: 1.1
The `tailable` parameter.
.. mongodoc:: find
.. _query modifiers:
http://docs.mongodb.org/manual/reference/operator/query-modifier
"""
if not 'slave_okay' in kwargs:
kwargs['slave_okay'] = self.slave_okay
if not 'read_preference' in kwargs:
kwargs['read_preference'] = self.read_preference
if not 'tag_sets' in kwargs:
kwargs['tag_sets'] = self.tag_sets
if not 'secondary_acceptable_latency_ms' in kwargs:
kwargs['secondary_acceptable_latency_ms'] = (
self.secondary_acceptable_latency_ms)
return Cursor(self, *args, **kwargs)
def parallel_scan(self, num_cursors, **kwargs):
"""Scan this entire collection in parallel.
Returns a list of up to ``num_cursors`` cursors that can be iterated
concurrently. As long as the collection is not modified during
scanning, each document appears once in one of the cursors' result
sets.
For example, to process each document in a collection using some
thread-safe ``process_document()`` function::
def process_cursor(cursor):
for document in cursor:
# Some thread-safe processing function:
process_document(document)
# Get up to 4 cursors.
cursors = collection.parallel_scan(4)
threads = [
threading.Thread(target=process_cursor, args=(cursor,))
for cursor in cursors]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# All documents have now been processed.
With :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`
or :class:`~pymongo.master_slave_connection.MasterSlaveConnection`,
if the `read_preference` attribute of this instance is not set to
:attr:`pymongo.read_preferences.ReadPreference.PRIMARY` or the
(deprecated) `slave_okay` attribute of this instance is set to `True`
the command will be sent to a secondary or slave.
:Parameters:
- `num_cursors`: the number of cursors to return
.. note:: Requires server version **>= 2.5.5**.
"""
use_master = not self.slave_okay and not self.read_preference
compile_re = kwargs.get('compile_re', False)
command_kwargs = {
'numCursors': num_cursors,
'read_preference': self.read_preference,
'tag_sets': self.tag_sets,
'secondary_acceptable_latency_ms': (
self.secondary_acceptable_latency_ms),
'slave_okay': self.slave_okay,
'_use_master': use_master}
command_kwargs.update(kwargs)
result, conn_id = self.__database._command(
"parallelCollectionScan", self.__name, **command_kwargs)
return [CommandCursor(self,
cursor['cursor'],
conn_id,
compile_re) for cursor in result['cursors']]
def count(self):
"""Get the number of documents in this collection.
To get the number of documents matching a specific query use
:meth:`pymongo.cursor.Cursor.count`.
"""
return self.find().count()
def create_index(self, key_or_list, cache_for=300, **kwargs):
"""Creates an index on this collection.
Takes either a single key or a list of (key, direction) pairs.
The key(s) must be an instance of :class:`basestring`
(:class:`str` in python 3), and the direction(s) should be one of
(:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
:data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`,
:data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`,
:data:`~pymongo.TEXT`).
To create a simple ascending index on the key ``'mike'`` we just
use a string argument::
>>> my_collection.create_index("mike")
For a compound index on ``'mike'`` descending and ``'eliot'``
ascending we need to use a list of tuples::
>>> my_collection.create_index([("mike", pymongo.DESCENDING),
... ("eliot", pymongo.ASCENDING)])
All optional index creation parameters should be passed as
keyword arguments to this method. For example::
>>> my_collection.create_index([("mike", pymongo.DESCENDING)],
... background=True)
Valid options include, but are not limited to:
- `name`: custom name to use for this index - if none is
given, a name will be generated
- `unique`: if ``True`` creates a unique constraint on the index
- `background`: if ``True`` this index should be created in the
background
- `sparse`: if ``True``, omit from the index any documents that lack
the indexed field
- `bucketSize` or `bucket_size`: for use with geoHaystack indexes.
Number of documents to group together within a certain proximity
to a given longitude and latitude.
- `min`: minimum value for keys in a :data:`~pymongo.GEO2D`
index
- `max`: maximum value for keys in a :data:`~pymongo.GEO2D`
index
- `expireAfterSeconds`: <int> Used to create an expiring (TTL)
collection. MongoDB will automatically delete documents from
this collection after <int> seconds. The indexed field must
be a UTC datetime or the data will not expire.
- `dropDups` or `drop_dups` (**deprecated**): if ``True`` duplicate
values are dropped during index creation when creating a unique
index
See the MongoDB documentation for a full list of supported options by
server version.
.. warning:: `dropDups` / `drop_dups` is no longer supported by
MongoDB starting with server version 2.7.5. The option is silently
ignored by the server and unique index builds using the option will
fail if a duplicate value is detected.
.. note:: `expireAfterSeconds` requires server version **>= 2.1.2**
:Parameters:
- `key_or_list`: a single key or a list of (key, direction)
pairs specifying the index to create
- `cache_for` (optional): time window (in seconds) during which
this index will be recognized by subsequent calls to
:meth:`ensure_index` - see documentation for
:meth:`ensure_index` for details
- `**kwargs` (optional): any additional index creation
options (see the above list) should be passed as keyword
arguments
- `ttl` (deprecated): Use `cache_for` instead.
.. versionchanged:: 2.3
The `ttl` parameter has been deprecated to avoid confusion with
TTL collections. Use `cache_for` instead.
.. versionchanged:: 2.2
Removed deprecated argument: deprecated_unique
.. versionchanged:: 1.5.1
Accept kwargs to support all index creation options.
.. versionadded:: 1.5
The `name` parameter.
.. seealso:: :meth:`ensure_index`
.. mongodoc:: indexes
"""
if 'ttl' in kwargs:
cache_for = kwargs.pop('ttl')
warnings.warn("ttl is deprecated. Please use cache_for instead.",
DeprecationWarning, stacklevel=2)
# The types supported by datetime.timedelta. 2to3 removes long.
if not isinstance(cache_for, (int, long, float)):
raise TypeError("cache_for must be an integer or float.")
keys = helpers._index_list(key_or_list)
index_doc = helpers._index_document(keys)
name = "name" in kwargs and kwargs["name"] or _gen_index_name(keys)
index = {"key": index_doc, "name": name}
if "drop_dups" in kwargs:
kwargs["dropDups"] = kwargs.pop("drop_dups")
if "bucket_size" in kwargs:
kwargs["bucketSize"] = kwargs.pop("bucket_size")
index.update(kwargs)
try:
self.__database.command('createIndexes', self.name,
read_preference=ReadPreference.PRIMARY,
indexes=[index])
except OperationFailure, exc:
if exc.code in common.COMMAND_NOT_FOUND_CODES:
index["ns"] = self.__full_name
self.__database.system.indexes.insert(index, manipulate=False,
check_keys=False,
**self._get_wc_override())
else:
raise
self.__database.connection._cache_index(self.__database.name,
self.__name, name, cache_for)
return name
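    # Illustrative sketch (not part of PyMongo): creating a TTL index with
    # `expireAfterSeconds` on a hypothetical collection of session documents
    # whose "createdAt" field holds a UTC datetime.
    #
    #   >>> sessions.create_index("createdAt", expireAfterSeconds=3600)
    #   u'createdAt_1'
    #
    # The server then removes each document roughly an hour after its
    # "createdAt" value.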
def ensure_index(self, key_or_list, cache_for=300, **kwargs):
"""Ensures that an index exists on this collection.
Takes either a single key or a list of (key, direction) pairs.
The key(s) must be an instance of :class:`basestring`
(:class:`str` in python 3), and the direction(s) should be one of
(:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
:data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`,
:data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`,
        :data:`~pymongo.TEXT`).
See :meth:`create_index` for detailed examples.
Unlike :meth:`create_index`, which attempts to create an index
unconditionally, :meth:`ensure_index` takes advantage of some
caching within the driver such that it only attempts to create
indexes that might not already exist. When an index is created
(or ensured) by PyMongo it is "remembered" for `cache_for`
seconds. Repeated calls to :meth:`ensure_index` within that
time limit will be lightweight - they will not attempt to
actually create the index.
Care must be taken when the database is being accessed through
multiple clients at once. If an index is created using
this client and deleted using another, any call to
:meth:`ensure_index` within the cache window will fail to
re-create the missing index.
Returns the specified or generated index name used if
:meth:`ensure_index` attempts to create the index. Returns
``None`` if the index is already cached.
All optional index creation parameters should be passed as
keyword arguments to this method. Valid options include, but are not
limited to:
- `name`: custom name to use for this index - if none is
given, a name will be generated
- `unique`: if ``True`` creates a unique constraint on the index
- `background`: if ``True`` this index should be created in the
background
- `sparse`: if ``True``, omit from the index any documents that lack
the indexed field
- `bucketSize` or `bucket_size`: for use with geoHaystack indexes.
Number of documents to group together within a certain proximity
to a given longitude and latitude.
- `min`: minimum value for keys in a :data:`~pymongo.GEO2D`
index
- `max`: maximum value for keys in a :data:`~pymongo.GEO2D`
index
- `expireAfterSeconds`: <int> Used to create an expiring (TTL)
collection. MongoDB will automatically delete documents from
this collection after <int> seconds. The indexed field must
be a UTC datetime or the data will not expire.
- `dropDups` or `drop_dups` (**deprecated**): if ``True`` duplicate
values are dropped during index creation when creating a unique
index
See the MongoDB documentation for a full list of supported options by
server version.
.. warning:: `dropDups` / `drop_dups` is no longer supported by
MongoDB starting with server version 2.7.5. The option is silently
ignored by the server and unique index builds using the option will
fail if a duplicate value is detected.
.. note:: `expireAfterSeconds` requires server version **>= 2.1.2**
:Parameters:
- `key_or_list`: a single key or a list of (key, direction)
pairs specifying the index to create
- `cache_for` (optional): time window (in seconds) during which
this index will be recognized by subsequent calls to
:meth:`ensure_index`
- `**kwargs` (optional): any additional index creation
options (see the above list) should be passed as keyword
arguments
- `ttl` (deprecated): Use `cache_for` instead.
.. versionchanged:: 2.3
The `ttl` parameter has been deprecated to avoid confusion with
TTL collections. Use `cache_for` instead.
.. versionchanged:: 2.2
Removed deprecated argument: deprecated_unique
.. versionchanged:: 1.5.1
Accept kwargs to support all index creation options.
.. versionadded:: 1.5
The `name` parameter.
.. seealso:: :meth:`create_index`
"""
if "name" in kwargs:
name = kwargs["name"]
else:
keys = helpers._index_list(key_or_list)
name = kwargs["name"] = _gen_index_name(keys)
# Note that there is a race condition here. One thread could
# check if the index is cached and be preempted before creating
# and caching the index. This means multiple threads attempting
# to create the same index concurrently could send the index
# to the server two or more times. This has no practical impact
# other than wasted round trips.
if not self.__database.connection._cached(self.__database.name,
self.__name, name):
return self.create_index(key_or_list, cache_for, **kwargs)
return None
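    # Illustrative sketch (not part of PyMongo): with the driver-side cache,
    # only the first ensure_index call in the cache window reaches the server.
    # The collection and key names are hypothetical.
    #
    #   >>> coll.ensure_index("mike")   # index built, name cached
    #   u'mike_1'
    #   >>> coll.ensure_index("mike")   # still cached: returns None, no command sent
    #   >>> coll.create_index("mike")   # create_index always sends the command
    #   u'mike_1'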
def drop_indexes(self):
"""Drops all indexes on this collection.
        Can be used on non-existent collections or collections with no indexes.
Raises OperationFailure on an error.
"""
self.__database.connection._purge_index(self.__database.name,
self.__name)
self.drop_index(u"*")
def drop_index(self, index_or_name):
"""Drops the specified index on this collection.
        Can be used on non-existent collections or collections with no
indexes. Raises OperationFailure on an error (e.g. trying to
drop an index that does not exist). `index_or_name`
can be either an index name (as returned by `create_index`),
or an index specifier (as passed to `create_index`). An index
specifier should be a list of (key, direction) pairs. Raises
TypeError if index is not an instance of (str, unicode, list).
.. warning::
if a custom name was used on index creation (by
passing the `name` parameter to :meth:`create_index` or
:meth:`ensure_index`) the index **must** be dropped by name.
:Parameters:
- `index_or_name`: index (or name of index) to drop
"""
name = index_or_name
if isinstance(index_or_name, list):
name = _gen_index_name(index_or_name)
if not isinstance(name, basestring):
raise TypeError("index_or_name must be an index name or list")
self.__database.connection._purge_index(self.__database.name,
self.__name, name)
self.__database.command("dropIndexes", self.__name,
read_preference=ReadPreference.PRIMARY,
index=name,
allowable_errors=["ns not found"])
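    # Illustrative sketch (not part of PyMongo): an index may be dropped by the
    # specifier used to create it or by its generated name, but an index created
    # with a custom `name` must be dropped by that name. Names are hypothetical.
    #
    #   >>> coll.create_index([("mike", pymongo.DESCENDING)])
    #   u'mike_-1'
    #   >>> coll.drop_index([("mike", pymongo.DESCENDING)])   # by specifier
    #   >>> coll.create_index("eliot", name="eliot_idx")
    #   u'eliot_idx'
    #   >>> coll.drop_index("eliot_idx")                       # must use the name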
def reindex(self):
"""Rebuilds all indexes on this collection.
.. warning:: reindex blocks all other operations (indexes
are built in the foreground) and will be slow for large
collections.
.. versionadded:: 1.11+
"""
return self.__database.command("reIndex", self.__name,
read_preference=ReadPreference.PRIMARY)
def index_information(self):
"""Get information on this collection's indexes.
Returns a dictionary where the keys are index names (as
returned by create_index()) and the values are dictionaries
containing information about each index. The dictionary is
guaranteed to contain at least a single key, ``"key"`` which
is a list of (key, direction) pairs specifying the index (as
passed to create_index()). It will also contain any other
metadata about the indexes, except for the ``"ns"`` and
``"name"`` keys, which are cleaned. Example output might look
like this:
>>> db.test.ensure_index("x", unique=True)
u'x_1'
>>> db.test.index_information()
{u'_id_': {u'key': [(u'_id', 1)]},
u'x_1': {u'unique': True, u'key': [(u'x', 1)]}}
.. versionchanged:: 1.7
The values in the resultant dictionary are now dictionaries
themselves, whose ``"key"`` item contains the list that was
the value in previous versions of PyMongo.
"""
client = self.database.connection
client._ensure_connected(True)
slave_okay = not client._rs_client and not client.is_mongos
if client.max_wire_version > 2:
res, addr = self.__database._command(
"listIndexes", self.__name, as_class=SON,
cursor={}, slave_okay=slave_okay,
read_preference=ReadPreference.PRIMARY)
# MongoDB 2.8rc2
if "indexes" in res:
raw = res["indexes"]
# >= MongoDB 2.8rc3
else:
raw = CommandCursor(self, res["cursor"], addr)
else:
raw = self.__database.system.indexes.find({"ns": self.__full_name},
{"ns": 0}, as_class=SON,
slave_okay=slave_okay,
_must_use_master=True)
info = {}
for index in raw:
index["key"] = index["key"].items()
index = dict(index)
info[index.pop("name")] = index
return info
def options(self):
"""Get the options set on this collection.
Returns a dictionary of options and their values - see
:meth:`~pymongo.database.Database.create_collection` for more
information on the possible options. Returns an empty
dictionary if the collection has not been created yet.
"""
client = self.database.connection
client._ensure_connected(True)
result = None
slave_okay = not client._rs_client and not client.is_mongos
if client.max_wire_version > 2:
res, addr = self.__database._command(
"listCollections",
cursor={},
filter={"name": self.__name},
read_preference=ReadPreference.PRIMARY,
slave_okay=slave_okay)
# MongoDB 2.8rc2
if "collections" in res:
results = res["collections"]
# >= MongoDB 2.8rc3
else:
results = CommandCursor(self, res["cursor"], addr)
for doc in results:
result = doc
break
else:
result = self.__database.system.namespaces.find_one(
{"name": self.__full_name},
slave_okay=slave_okay,
_must_use_master=True)
if not result:
return {}
options = result.get("options", {})
if "create" in options:
del options["create"]
return options
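    # Illustrative sketch (not part of PyMongo): options() echoes the creation
    # options of the collection, e.g. for a hypothetical capped collection
    # (the exact keys returned can vary with the server version).
    #
    #   >>> log = db.create_collection("log", capped=True, size=4096)
    #   >>> db.log.options()
    #   {u'capped': True, u'size': 4096}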
def aggregate(self, pipeline, **kwargs):
"""Perform an aggregation using the aggregation framework on this
collection.
With :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`
or :class:`~pymongo.master_slave_connection.MasterSlaveConnection`,
if the `read_preference` attribute of this instance is not set to
:attr:`pymongo.read_preferences.ReadPreference.PRIMARY` or the
(deprecated) `slave_okay` attribute of this instance is set to `True`
the `aggregate command`_ will be sent to a secondary or slave.
:Parameters:
- `pipeline`: a single command or list of aggregation commands
- `**kwargs`: send arbitrary parameters to the aggregate command
.. note:: Requires server version **>= 2.1.0**.
With server version **>= 2.5.1**, pass
``cursor={}`` to retrieve unlimited aggregation results
with a :class:`~pymongo.command_cursor.CommandCursor`::
pipeline = [{'$project': {'name': {'$toUpper': '$name'}}}]
cursor = collection.aggregate(pipeline, cursor={})
for doc in cursor:
print doc
.. versionchanged:: 2.9
The :meth:`aggregate` helper always returns a
:class:`~pymongo.command_cursor.CommandCursor` when the cursor
option is passed, regardless of MongoDB server version.
.. versionchanged:: 2.7
When the cursor option is used, return
:class:`~pymongo.command_cursor.CommandCursor` instead of
:class:`~pymongo.cursor.Cursor`.
.. versionchanged:: 2.6
Added cursor support.
.. versionadded:: 2.3
.. _aggregate command:
http://docs.mongodb.org/manual/applications/aggregation
"""
if not isinstance(pipeline, (dict, list, tuple)):
raise TypeError("pipeline must be a dict, list or tuple")
if isinstance(pipeline, dict):
pipeline = [pipeline]
use_master = not self.slave_okay and not self.read_preference
command_kwargs = {
'pipeline': pipeline,
'codec_options': self.codec_options,
'read_preference': self.read_preference,
'tag_sets': self.tag_sets,
'secondary_acceptable_latency_ms': (
self.secondary_acceptable_latency_ms),
'slave_okay': self.slave_okay,
'_use_master': use_master}
command_kwargs.update(kwargs)
# If the server version can't support 'cursor'.
if self.database.connection.max_wire_version < 1:
command_kwargs.pop('cursor', None)
result, conn_id = self.__database._command(
"aggregate", self.__name, **command_kwargs)
if "cursor" in kwargs:
if 'cursor' in result:
cursor = result['cursor']
else:
# Pre-MongoDB 2.6. Fake a cursor.
cursor = {
"id": 0,
"firstBatch": result["result"],
"ns": self.full_name,
}
return CommandCursor(
self,
cursor,
conn_id,
command_kwargs.get('compile_re', True))
else:
return result
# TODO key and condition ought to be optional, but deprecation
# could be painful as argument order would have to change.
def group(self, key, condition, initial, reduce, finalize=None, **kwargs):
"""Perform a query similar to an SQL *group by* operation.
Returns an array of grouped items.
The `key` parameter can be:
- ``None`` to use the entire document as a key.
- A :class:`list` of keys (each a :class:`basestring`
(:class:`str` in python 3)) to group by.
- A :class:`basestring` (:class:`str` in python 3), or
:class:`~bson.code.Code` instance containing a JavaScript
function to be applied to each document, returning the key
to group by.
With :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`
or :class:`~pymongo.master_slave_connection.MasterSlaveConnection`,
if the `read_preference` attribute of this instance is not set to
:attr:`pymongo.read_preferences.ReadPreference.PRIMARY` or
:attr:`pymongo.read_preferences.ReadPreference.PRIMARY_PREFERRED`, or
the (deprecated) `slave_okay` attribute of this instance is set to
`True`, the group command will be sent to a secondary or slave.
:Parameters:
- `key`: fields to group by (see above description)
- `condition`: specification of rows to be
considered (as a :meth:`find` query specification)
- `initial`: initial value of the aggregation counter object
- `reduce`: aggregation function as a JavaScript string
- `finalize`: function to be called on each object in output list.
.. versionchanged:: 2.2
Removed deprecated argument: command
.. versionchanged:: 1.4
The `key` argument can now be ``None`` or a JavaScript function,
in addition to a :class:`list` of keys.
.. versionchanged:: 1.3
The `command` argument now defaults to ``True`` and is deprecated.
"""
group = {}
if isinstance(key, basestring):
group["$keyf"] = Code(key)
elif key is not None:
group = {"key": helpers._fields_list_to_dict(key)}
group["ns"] = self.__name
group["$reduce"] = Code(reduce)
group["cond"] = condition
group["initial"] = initial
if finalize is not None:
group["finalize"] = Code(finalize)
use_master = not self.slave_okay and not self.read_preference
return self.__database.command("group", group,
codec_options=self.codec_options,
read_preference=self.read_preference,
tag_sets=self.tag_sets,
secondary_acceptable_latency_ms=(
self.secondary_acceptable_latency_ms),
slave_okay=self.slave_okay,
_use_master=use_master,
**kwargs)["retval"]
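    # Illustrative sketch (not part of PyMongo): counting documents per value of
    # a hypothetical field "a" with a JavaScript reduce function (the exact
    # numeric types in the result depend on the server).
    #
    #   >>> db.things.group(
    #   ...     key=["a"],
    #   ...     condition={"a": {"$lt": 3}},
    #   ...     initial={"count": 0},
    #   ...     reduce="function(obj, prev) { prev.count += 1; }")
    #   [{u'a': 1.0, u'count': 2.0}, {u'a': 2.0, u'count': 1.0}]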
def rename(self, new_name, **kwargs):
"""Rename this collection.
If operating in auth mode, client must be authorized as an
admin to perform this operation. Raises :class:`TypeError` if
`new_name` is not an instance of :class:`basestring`
(:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidName`
if `new_name` is not a valid collection name.
:Parameters:
- `new_name`: new name for this collection
- `**kwargs` (optional): any additional rename options
should be passed as keyword arguments
(i.e. ``dropTarget=True``)
.. versionadded:: 1.7
support for accepting keyword arguments for rename options
"""
if not isinstance(new_name, basestring):
raise TypeError("new_name must be an instance "
"of %s" % (basestring.__name__,))
if not new_name or ".." in new_name:
raise InvalidName("collection names cannot be empty")
if new_name[0] == "." or new_name[-1] == ".":
raise InvalidName("collecion names must not start or end with '.'")
if "$" in new_name and not new_name.startswith("oplog.$main"):
raise InvalidName("collection names must not contain '$'")
new_name = "%s.%s" % (self.__database.name, new_name)
client = self.__database.connection
client.admin.command("renameCollection", self.__full_name,
read_preference=ReadPreference.PRIMARY,
to=new_name, **kwargs)
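    # Illustrative sketch (not part of PyMongo): renaming onto an existing
    # collection requires dropTarget, which is forwarded to the server as a
    # rename option. Collection names are hypothetical.
    #
    #   >>> db.old_name.rename("archive")
    #   >>> db.staging.rename("archive", dropTarget=True)  # replaces "archive"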
def distinct(self, key):
"""Get a list of distinct values for `key` among all documents
in this collection.
Raises :class:`TypeError` if `key` is not an instance of
:class:`basestring` (:class:`str` in python 3).
To get the distinct values for a key in the result set of a
query use :meth:`~pymongo.cursor.Cursor.distinct`.
:Parameters:
- `key`: name of key for which we want to get the distinct values
.. note:: Requires server version **>= 1.1.0**
.. versionadded:: 1.1.1
"""
return self.find().distinct(key)
def map_reduce(self, map, reduce, out, full_response=False, **kwargs):
"""Perform a map/reduce operation on this collection.
If `full_response` is ``False`` (default) returns a
:class:`~pymongo.collection.Collection` instance containing
the results of the operation. Otherwise, returns the full
response from the server to the `map reduce command`_.
:Parameters:
- `map`: map function (as a JavaScript string)
- `reduce`: reduce function (as a JavaScript string)
- `out`: output collection name or `out object` (dict). See
the `map reduce command`_ documentation for available options.
Note: `out` options are order sensitive. :class:`~bson.son.SON`
can be used to specify multiple options.
e.g. SON([('replace', <collection name>), ('db', <database name>)])
- `full_response` (optional): if ``True``, return full response to
this command - otherwise just return the result collection
- `**kwargs` (optional): additional arguments to the
`map reduce command`_ may be passed as keyword arguments to this
helper method, e.g.::
>>> db.test.map_reduce(map, reduce, "myresults", limit=2)
.. note:: Requires server version **>= 1.1.1**
.. seealso:: :doc:`/examples/aggregation`
.. versionchanged:: 2.2
Removed deprecated arguments: merge_output and reduce_output
.. versionchanged:: 1.11+
DEPRECATED The merge_output and reduce_output parameters.
.. versionadded:: 1.2
.. _map reduce command: http://www.mongodb.org/display/DOCS/MapReduce
.. mongodoc:: mapreduce
"""
if not isinstance(out, (basestring, dict)):
raise TypeError("'out' must be an instance of "
"%s or dict" % (basestring.__name__,))
if isinstance(out, dict) and out.get('inline'):
must_use_master = False
else:
must_use_master = True
response = self.__database.command("mapreduce", self.__name,
codec_options=self.codec_options,
map=map, reduce=reduce,
read_preference=self.read_preference,
tag_sets=self.tag_sets,
secondary_acceptable_latency_ms=(
self.secondary_acceptable_latency_ms),
out=out, _use_master=must_use_master,
**kwargs)
if full_response or not response.get('result'):
return response
elif isinstance(response['result'], dict):
dbase = response['result']['db']
coll = response['result']['collection']
return self.__database.connection[dbase][coll]
else:
return self.__database[response["result"]]
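    # Illustrative sketch (not part of PyMongo): a word-count style map/reduce
    # into a hypothetical "mr_results" collection. The map and reduce bodies are
    # plain JavaScript strings (bson.code.Code instances also work).
    #
    #   >>> mapper = "function () { emit(this.tag, 1); }"
    #   >>> reducer = "function (key, values) { return Array.sum(values); }"
    #   >>> out_coll = db.items.map_reduce(mapper, reducer, "mr_results")
    #   >>> for doc in out_coll.find():
    #   ...     print doc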
def inline_map_reduce(self, map, reduce, full_response=False, **kwargs):
"""Perform an inline map/reduce operation on this collection.
Perform the map/reduce operation on the server in RAM. A result
collection is not created. The result set is returned as a list
of documents.
If `full_response` is ``False`` (default) returns the
result documents in a list. Otherwise, returns the full
response from the server to the `map reduce command`_.
With :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`
or :class:`~pymongo.master_slave_connection.MasterSlaveConnection`,
if the `read_preference` attribute of this instance is not set to
:attr:`pymongo.read_preferences.ReadPreference.PRIMARY` or
:attr:`pymongo.read_preferences.ReadPreference.PRIMARY_PREFERRED`, or
the (deprecated) `slave_okay` attribute of this instance is set to
`True`, the inline map reduce will be run on a secondary or slave.
:Parameters:
- `map`: map function (as a JavaScript string)
- `reduce`: reduce function (as a JavaScript string)
- `full_response` (optional): if ``True``, return full response to
this command - otherwise just return the result collection
- `**kwargs` (optional): additional arguments to the
`map reduce command`_ may be passed as keyword arguments to this
helper method, e.g.::
>>> db.test.inline_map_reduce(map, reduce, limit=2)
.. note:: Requires server version **>= 1.7.4**
.. versionadded:: 1.10
"""
use_master = not self.slave_okay and not self.read_preference
res = self.__database.command("mapreduce", self.__name,
codec_options=self.codec_options,
read_preference=self.read_preference,
tag_sets=self.tag_sets,
secondary_acceptable_latency_ms=(
self.secondary_acceptable_latency_ms),
slave_okay=self.slave_okay,
_use_master=use_master,
map=map, reduce=reduce,
out={"inline": 1}, **kwargs)
if full_response:
return res
else:
return res.get("results")
def find_and_modify(self, query={}, update=None,
upsert=False, sort=None, full_response=False,
manipulate=False, fields=None, **kwargs):
"""Update and return an object.
This is a thin wrapper around the findAndModify_ command. The
positional arguments are designed to match the first three arguments
to :meth:`update` however most options should be passed as named
parameters. Either `update` or `remove` arguments are required, all
others are optional.
Returns either the object before or after modification based on `new`
parameter. If no objects match the `query` and `upsert` is false,
returns ``None``. If upserting and `new` is false, returns ``{}``.
If the full_response parameter is ``True``, the return value will be
the entire response object from the server, including the 'ok' and
'lastErrorObject' fields, rather than just the modified object.
This is useful mainly because the 'lastErrorObject' document holds
information about the command's execution.
:Parameters:
- `query`: filter for the update (default ``{}``)
- `update`: see second argument to :meth:`update` (no default)
- `upsert`: insert if object doesn't exist (default ``False``)
- `sort`: a list of (key, direction) pairs specifying the sort
order for this query. See :meth:`~pymongo.cursor.Cursor.sort`
for details.
- `full_response`: return the entire response object from the
server (default ``False``)
- `remove`: remove rather than updating (default ``False``)
- `new`: return updated rather than original object
(default ``False``)
- `fields`: (optional): see second argument to :meth:`find` (default all)
- `manipulate`: (optional): If ``True``, apply any outgoing SON
manipulators before returning. Ignored when `full_response`
is set to True. Defaults to ``False``.
- `**kwargs`: any other options the findAndModify_ command
supports can be passed here.
.. mongodoc:: findAndModify
.. _findAndModify: http://dochub.mongodb.org/core/findAndModify
.. note:: Requires server version **>= 1.3.0**
.. versionchanged:: 2.9
Made fields a named parameter.
.. versionchanged:: 2.8
Added the optional manipulate parameter
.. versionchanged:: 2.5
Added the optional full_response parameter
.. versionchanged:: 2.4
Deprecated the use of mapping types for the sort parameter
.. versionadded:: 1.10
"""
if (not update and not kwargs.get('remove', None)):
raise ValueError("Must either update or remove")
if (update and kwargs.get('remove', None)):
raise ValueError("Can't do both update and remove")
# No need to include empty args
if query:
kwargs['query'] = query
if update:
kwargs['update'] = update
if upsert:
kwargs['upsert'] = upsert
if fields:
kwargs['fields'] = fields
if sort:
# Accept a list of tuples to match Cursor's sort parameter.
if isinstance(sort, list):
kwargs['sort'] = helpers._index_document(sort)
# Accept OrderedDict, SON, and dict with len == 1 so we
# don't break existing code already using find_and_modify.
elif (isinstance(sort, ordered_types) or
isinstance(sort, dict) and len(sort) == 1):
warnings.warn("Passing mapping types for `sort` is deprecated,"
" use a list of (key, direction) pairs instead",
DeprecationWarning, stacklevel=2)
kwargs['sort'] = sort
else:
raise TypeError("sort must be a list of (key, direction) "
"pairs, a dict of len 1, or an instance of "
"SON or OrderedDict")
no_obj_error = "No matching object found"
out = self.__database.command("findAndModify", self.__name,
allowable_errors=[no_obj_error],
read_preference=ReadPreference.PRIMARY,
codec_options=self.codec_options,
**kwargs)
if not out['ok']:
if out["errmsg"] == no_obj_error:
return None
else:
# Should never get here b/c of allowable_errors
raise ValueError("Unexpected Error: %s" % (out,))
if full_response:
return out
else:
document = out.get('value')
if manipulate:
document = self.__database._fix_outgoing(document, self)
return document
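    # Illustrative sketch (not part of PyMongo): atomically claim the highest
    # priority unprocessed job and return the updated document. Collection and
    # field names are hypothetical.
    #
    #   >>> db.jobs.find_and_modify(
    #   ...     query={"state": "new"},
    #   ...     update={"$set": {"state": "running"}},
    #   ...     sort=[("priority", pymongo.DESCENDING)],
    #   ...     new=True)
    #   {u'_id': ObjectId('...'), u'state': u'running', u'priority': 5}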
def find_one_and_delete(self, filter,
projection=None, sort=None, **kwargs):
"""Finds a single document and deletes it, returning the document.
>>> db.test.count({'x': 1})
2
>>> db.test.find_one_and_delete({'x': 1})
{u'x': 1, u'_id': ObjectId('54f4e12bfba5220aa4d6dee8')}
>>> db.test.count({'x': 1})
1
If multiple documents match *filter*, a *sort* can be applied.
>>> for doc in db.test.find({'x': 1}):
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> db.test.find_one_and_delete(
... {'x': 1}, sort=[('_id', pymongo.DESCENDING)])
{u'x': 1, u'_id': 2}
The *projection* option can be used to limit the fields returned.
>>> db.test.find_one_and_delete({'x': 1}, projection={'_id': False})
{u'x': 1}
:Parameters:
- `filter`: A query that matches the document to delete.
- `projection` (optional): a list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a mapping to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is deleted.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionadded:: 2.9
"""
common.validate_is_dict("filter", filter)
kwargs['remove'] = True
return self.find_and_modify(filter, fields=projection, sort=sort,
**kwargs)
def find_one_and_replace(self, filter, replacement,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE, **kwargs):
"""Finds a single document and replaces it, returning either the
original or the replaced document.
The :meth:`find_one_and_replace` method differs from
:meth:`find_one_and_update` by replacing the document matched by
*filter*, rather than modifying the existing document.
>>> for doc in db.test.find({}):
... print(doc)
...
{u'x': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
>>> db.test.find_one_and_replace({'x': 1}, {'y': 1})
{u'x': 1, u'_id': 0}
>>> for doc in db.test.find({}):
... print(doc)
...
{u'y': 1, u'_id': 0}
{u'x': 1, u'_id': 1}
{u'x': 1, u'_id': 2}
:Parameters:
- `filter`: A query that matches the document to replace.
- `replacement`: The replacement document.
- `projection` (optional): A list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a mapping to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is replaced.
- `upsert` (optional): When ``True``, inserts a new document if no
document matches the query. Defaults to ``False``.
- `return_document`: If
:attr:`ReturnDocument.BEFORE` (the default),
returns the original document before it was replaced, or ``None``
if no document matches. If
:attr:`ReturnDocument.AFTER`, returns the replaced
or inserted document.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionadded:: 2.9
"""
common.validate_ok_for_replace(replacement)
kwargs['update'] = replacement
new = return_document == ReturnDocument.AFTER
return self.find_and_modify(filter, fields=projection,
sort=sort, upsert=upsert,
new=new,
**kwargs)
def find_one_and_update(self, filter, update,
projection=None, sort=None, upsert=False,
return_document=ReturnDocument.BEFORE, **kwargs):
"""Finds a single document and updates it, returning either the
original or the updated document.
>>> db.test.find_one_and_update(
... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}})
        {u'_id': 665, u'done': False, u'count': 25}
By default :meth:`find_one_and_update` returns the original version of
the document before the update was applied. To return the updated
version of the document instead, use the *return_document* option.
>>> from pymongo import ReturnDocument
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... return_document=ReturnDocument.AFTER)
{u'_id': u'userid', u'seq': 1}
You can limit the fields returned with the *projection* option.
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... projection={'seq': True, '_id': False},
... return_document=ReturnDocument.AFTER)
{u'seq': 2}
The *upsert* option can be used to create the document if it doesn't
already exist.
>>> db.example.delete_many({}).deleted_count
1
>>> db.example.find_one_and_update(
... {'_id': 'userid'},
... {'$inc': {'seq': 1}},
... projection={'seq': True, '_id': False},
... upsert=True,
... return_document=ReturnDocument.AFTER)
{u'seq': 1}
If multiple documents match *filter*, a *sort* can be applied.
>>> for doc in db.test.find({'done': True}):
... print(doc)
...
{u'_id': 665, u'done': True, u'result': {u'count': 26}}
{u'_id': 701, u'done': True, u'result': {u'count': 17}}
>>> db.test.find_one_and_update(
... {'done': True},
... {'$set': {'final': True}},
... sort=[('_id', pymongo.DESCENDING)])
{u'_id': 701, u'done': True, u'result': {u'count': 17}}
:Parameters:
- `filter`: A query that matches the document to update.
- `update`: The update operations to apply.
- `projection` (optional): A list of field names that should be
returned in the result document or a mapping specifying the fields
to include or exclude. If `projection` is a list "_id" will
always be returned. Use a dict to exclude fields from
the result (e.g. projection={'_id': False}).
- `sort` (optional): a list of (key, direction) pairs
specifying the sort order for the query. If multiple documents
match the query, they are sorted and the first is updated.
- `upsert` (optional): When ``True``, inserts a new document if no
document matches the query. Defaults to ``False``.
- `return_document`: If
:attr:`ReturnDocument.BEFORE` (the default),
returns the original document before it was updated, or ``None``
if no document matches. If
:attr:`ReturnDocument.AFTER`, returns the updated
or inserted document.
- `**kwargs` (optional): additional command arguments can be passed
as keyword arguments (for example maxTimeMS can be used with
recent server versions).
.. versionadded:: 2.9
"""
common.validate_ok_for_update(update)
kwargs['update'] = update
new = return_document == ReturnDocument.AFTER
return self.find_and_modify(filter, fields=projection,
sort=sort, upsert=upsert,
new=new,
**kwargs)
def __iter__(self):
return self
def next(self):
raise TypeError("'Collection' object is not iterable")
def __call__(self, *args, **kwargs):
"""This is only here so that some API misusages are easier to debug.
"""
if "." not in self.__name:
raise TypeError("'Collection' object is not callable. If you "
"meant to call the '%s' method on a 'Database' "
"object it is failing because no such method "
"exists." %
self.__name)
raise TypeError("'Collection' object is not callable. If you meant to "
"call the '%s' method on a 'Collection' object it is "
"failing because no such method exists." %
self.__name.split(".")[-1])
| neharejanjeva/techstitution | venv/lib/python2.7/site-packages/pymongo/collection.py | Python | cc0-1.0 | 104,519 | 0.000287 |
#
# [The "BSD license"]
# Copyright (c) 2012 Terence Parr
# Copyright (c) 2012 Sam Harwell
# Copyright (c) 2014 Eric Vergnaud
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#/
# When we hit an accept state in either the DFA or the ATN, we
# have to notify the character stream to start buffering characters
# via {@link IntStream#mark} and record the current state. The current sim state
# includes the current index into the input, the current line,
# and current character position in that line. Note that the Lexer is
# tracking the starting line and characterization of the token. These
# variables track the "state" of the simulator when it hits an accept state.
#
# <p>We track these variables separately for the DFA and ATN simulation
# because the DFA simulation often has to fail over to the ATN
# simulation. If the ATN simulation fails, we need the DFA to fall
# back to its previously accepted state, if any. If the ATN succeeds,
# then the ATN does the accept and the DFA simulator that invoked it
# can simply return the predicted token type.</p>
#/
from antlr4.PredictionContext import PredictionContextCache, SingletonPredictionContext, PredictionContext
from antlr4.InputStream import InputStream
from antlr4.Token import Token
from antlr4.atn.ATN import ATN
from antlr4.atn.ATNConfig import LexerATNConfig
from antlr4.atn.ATNSimulator import ATNSimulator
from antlr4.atn.ATNConfigSet import ATNConfigSet, OrderedATNConfigSet
from antlr4.atn.ATNState import RuleStopState, ATNState
from antlr4.atn.LexerActionExecutor import LexerActionExecutor
from antlr4.atn.Transition import Transition
from antlr4.dfa.DFAState import DFAState
from antlr4.error.Errors import LexerNoViableAltException, UnsupportedOperationException
class SimState(object):
def __init__(self):
self.reset()
def reset(self):
self.index = -1
self.line = 0
self.column = -1
self.dfaState = None
# need forward declaration
Lexer = None
LexerATNSimulator = None
class LexerATNSimulator(ATNSimulator):
debug = False
dfa_debug = False
MIN_DFA_EDGE = 0
MAX_DFA_EDGE = 127 # forces unicode to stay in ATN
ERROR = None
match_calls = 0
def __init__(self, recog:Lexer, atn:ATN, decisionToDFA:list, sharedContextCache:PredictionContextCache):
super().__init__(atn, sharedContextCache)
self.decisionToDFA = decisionToDFA
self.recog = recog
# The current token's starting index into the character stream.
# Shared across DFA to ATN simulation in case the ATN fails and the
# DFA did not have a previous accept state. In this case, we use the
# ATN-generated exception object.
self.startIndex = -1
# line number 1..n within the input#/
self.line = 1
# The index of the character relative to the beginning of the line 0..n-1#/
self.column = 0
from antlr4.Lexer import Lexer
self.mode = Lexer.DEFAULT_MODE
# Used during DFA/ATN exec to record the most recent accept configuration info
self.prevAccept = SimState()
def copyState(self, simulator:LexerATNSimulator ):
self.column = simulator.column
self.line = simulator.line
self.mode = simulator.mode
self.startIndex = simulator.startIndex
def match(self, input:InputStream , mode:int):
self.match_calls += 1
self.mode = mode
mark = input.mark()
try:
self.startIndex = input.index
self.prevAccept.reset()
dfa = self.decisionToDFA[mode]
if dfa.s0 is None:
return self.matchATN(input)
else:
return self.execATN(input, dfa.s0)
finally:
input.release(mark)
def reset(self):
self.prevAccept.reset()
self.startIndex = -1
self.line = 1
self.column = 0
self.mode = Lexer.DEFAULT_MODE
def matchATN(self, input:InputStream):
startState = self.atn.modeToStartState[self.mode]
if self.debug:
print("matchATN mode " + str(self.mode) + " start: " + str(startState))
old_mode = self.mode
s0_closure = self.computeStartState(input, startState)
suppressEdge = s0_closure.hasSemanticContext
s0_closure.hasSemanticContext = False
next = self.addDFAState(s0_closure)
if not suppressEdge:
self.decisionToDFA[self.mode].s0 = next
predict = self.execATN(input, next)
if self.debug:
print("DFA after matchATN: " + str(self.decisionToDFA[old_mode].toLexerString()))
return predict
def execATN(self, input:InputStream, ds0:DFAState):
if self.debug:
print("start state closure=" + str(ds0.configs))
if ds0.isAcceptState:
# allow zero-length tokens
self.captureSimState(self.prevAccept, input, ds0)
t = input.LA(1)
s = ds0 # s is current/from DFA state
while True: # while more work
if self.debug:
print("execATN loop starting closure: %s\n", s.configs)
# As we move src->trg, src->trg, we keep track of the previous trg to
# avoid looking up the DFA state again, which is expensive.
# If the previous target was already part of the DFA, we might
# be able to avoid doing a reach operation upon t. If s!=null,
# it means that semantic predicates didn't prevent us from
# creating a DFA state. Once we know s!=null, we check to see if
# the DFA state has an edge already for t. If so, we can just reuse
# it's configuration set; there's no point in re-computing it.
# This is kind of like doing DFA simulation within the ATN
# simulation because DFA simulation is really just a way to avoid
# computing reach/closure sets. Technically, once we know that
# we have a previously added DFA state, we could jump over to
# the DFA simulator. But, that would mean popping back and forth
# a lot and making things more complicated algorithmically.
# This optimization makes a lot of sense for loops within DFA.
# A character will take us back to an existing DFA state
# that already has lots of edges out of it. e.g., .* in comments.
# print("Target for:" + str(s) + " and:" + str(t))
target = self.getExistingTargetState(s, t)
# print("Existing:" + str(target))
if target is None:
target = self.computeTargetState(input, s, t)
# print("Computed:" + str(target))
if target == self.ERROR:
break
# If this is a consumable input element, make sure to consume before
# capturing the accept state so the input index, line, and char
# position accurately reflect the state of the interpreter at the
# end of the token.
if t != Token.EOF:
self.consume(input)
if target.isAcceptState:
self.captureSimState(self.prevAccept, input, target)
if t == Token.EOF:
break
t = input.LA(1)
s = target # flip; current DFA target becomes new src/from state
return self.failOrAccept(self.prevAccept, input, s.configs, t)
# Get an existing target state for an edge in the DFA. If the target state
# for the edge has not yet been computed or is otherwise not available,
# this method returns {@code null}.
#
# @param s The current DFA state
# @param t The next input symbol
# @return The existing target DFA state for the given input symbol
# {@code t}, or {@code null} if the target state for this edge is not
# already cached
def getExistingTargetState(self, s:DFAState, t:int):
if s.edges is None or t < self.MIN_DFA_EDGE or t > self.MAX_DFA_EDGE:
return None
target = s.edges[t - self.MIN_DFA_EDGE]
if self.debug and target is not None:
print("reuse state "+s.stateNumber+ " edge to "+target.stateNumber)
return target
# Compute a target state for an edge in the DFA, and attempt to add the
# computed state and corresponding edge to the DFA.
#
# @param input The input stream
# @param s The current DFA state
# @param t The next input symbol
#
# @return The computed target DFA state for the given input symbol
# {@code t}. If {@code t} does not lead to a valid DFA state, this method
# returns {@link #ERROR}.
def computeTargetState(self, input:InputStream, s:DFAState, t:int):
reach = OrderedATNConfigSet()
# if we don't find an existing DFA state
# Fill reach starting from closure, following t transitions
self.getReachableConfigSet(input, s.configs, reach, t)
if len(reach)==0: # we got nowhere on t from s
if not reach.hasSemanticContext:
# we got nowhere on t, don't throw out this knowledge; it'd
# cause a failover from DFA later.
self. addDFAEdge(s, t, self.ERROR)
# stop when we can't match any more char
return self.ERROR
# Add an edge from s to target DFA found/created for reach
return self.addDFAEdge(s, t, cfgs=reach)
def failOrAccept(self, prevAccept:SimState , input:InputStream, reach:ATNConfigSet, t:int):
if self.prevAccept.dfaState is not None:
lexerActionExecutor = prevAccept.dfaState.lexerActionExecutor
self.accept(input, lexerActionExecutor, self.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
return prevAccept.dfaState.prediction
else:
# if no accept and EOF is first char, return EOF
if t==Token.EOF and input.index==self.startIndex:
return Token.EOF
raise LexerNoViableAltException(self.recog, input, self.startIndex, reach)
# Given a starting configuration set, figure out all ATN configurations
# we can reach upon input {@code t}. Parameter {@code reach} is a return
# parameter.
def getReachableConfigSet(self, input:InputStream, closure:ATNConfigSet, reach:ATNConfigSet, t:int):
# this is used to skip processing for configs which have a lower priority
# than a config that already reached an accept state for the same rule
skipAlt = ATN.INVALID_ALT_NUMBER
for cfg in closure:
currentAltReachedAcceptState = ( cfg.alt == skipAlt )
if currentAltReachedAcceptState and cfg.passedThroughNonGreedyDecision:
continue
if self.debug:
print("testing %s at %s\n", self.getTokenName(t), cfg.toString(self.recog, True))
for trans in cfg.state.transitions: # for each transition
target = self.getReachableTarget(trans, t)
if target is not None:
lexerActionExecutor = cfg.lexerActionExecutor
if lexerActionExecutor is not None:
lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.index - self.startIndex)
treatEofAsEpsilon = (t == Token.EOF)
config = LexerATNConfig(state=target, lexerActionExecutor=lexerActionExecutor, config=cfg)
if self.closure(input, config, reach, currentAltReachedAcceptState, True, treatEofAsEpsilon):
# any remaining configs for this alt have a lower priority than
# the one that just reached an accept state.
skipAlt = cfg.alt
def accept(self, input:InputStream, lexerActionExecutor:LexerActionExecutor, startIndex:int, index:int, line:int, charPos:int):
if self.debug:
print("ACTION %s\n", lexerActionExecutor)
# seek to after last char in token
input.seek(index)
self.line = line
self.column = charPos
if lexerActionExecutor is not None and self.recog is not None:
lexerActionExecutor.execute(self.recog, input, startIndex)
def getReachableTarget(self, trans:Transition, t:int):
if trans.matches(t, 0, 0xFFFE):
return trans.target
else:
return None
def computeStartState(self, input:InputStream, p:ATNState):
initialContext = PredictionContext.EMPTY
configs = OrderedATNConfigSet()
for i in range(0,len(p.transitions)):
target = p.transitions[i].target
c = LexerATNConfig(state=target, alt=i+1, context=initialContext)
self.closure(input, c, configs, False, False, False)
return configs
# Since the alternatives within any lexer decision are ordered by
# preference, this method stops pursuing the closure as soon as an accept
# state is reached. After the first accept state is reached by depth-first
# search from {@code config}, all other (potentially reachable) states for
# this rule would have a lower priority.
#
# @return {@code true} if an accept state is reached, otherwise
# {@code false}.
def closure(self, input:InputStream, config:LexerATNConfig, configs:ATNConfigSet, currentAltReachedAcceptState:bool,
speculative:bool, treatEofAsEpsilon:bool):
if self.debug:
print("closure("+config.toString(self.recog, True)+")")
if isinstance( config.state, RuleStopState ):
if self.debug:
if self.recog is not None:
print("closure at %s rule stop %s\n", self.recog.getRuleNames()[config.state.ruleIndex], config)
else:
print("closure at rule stop %s\n", config)
if config.context is None or config.context.hasEmptyPath():
if config.context is None or config.context.isEmpty():
configs.add(config)
return True
else:
configs.add(LexerATNConfig(state=config.state, config=config, context=PredictionContext.EMPTY))
currentAltReachedAcceptState = True
if config.context is not None and not config.context.isEmpty():
for i in range(0,len(config.context)):
if config.context.getReturnState(i) != PredictionContext.EMPTY_RETURN_STATE:
newContext = config.context.getParent(i) # "pop" return state
returnState = self.atn.states[config.context.getReturnState(i)]
c = LexerATNConfig(state=returnState, config=config, context=newContext)
currentAltReachedAcceptState = self.closure(input, c, configs,
currentAltReachedAcceptState, speculative, treatEofAsEpsilon)
return currentAltReachedAcceptState
# optimization
if not config.state.epsilonOnlyTransitions:
if not currentAltReachedAcceptState or not config.passedThroughNonGreedyDecision:
configs.add(config)
for t in config.state.transitions:
c = self.getEpsilonTarget(input, config, t, configs, speculative, treatEofAsEpsilon)
if c is not None:
currentAltReachedAcceptState = self.closure(input, c, configs, currentAltReachedAcceptState, speculative, treatEofAsEpsilon)
return currentAltReachedAcceptState
# side-effect: can alter configs.hasSemanticContext
def getEpsilonTarget(self, input:InputStream, config:LexerATNConfig, t:Transition, configs:ATNConfigSet,
speculative:bool, treatEofAsEpsilon:bool):
c = None
if t.serializationType==Transition.RULE:
newContext = SingletonPredictionContext.create(config.context, t.followState.stateNumber)
c = LexerATNConfig(state=t.target, config=config, context=newContext)
elif t.serializationType==Transition.PRECEDENCE:
raise UnsupportedOperationException("Precedence predicates are not supported in lexers.")
elif t.serializationType==Transition.PREDICATE:
# Track traversing semantic predicates. If we traverse,
# we cannot add a DFA state for this "reach" computation
# because the DFA would not test the predicate again in the
# future. Rather than creating collections of semantic predicates
# like v3 and testing them on prediction, v4 will test them on the
# fly all the time using the ATN not the DFA. This is slower but
# semantically it's not used that often. One of the key elements to
# this predicate mechanism is not adding DFA states that see
# predicates immediately afterwards in the ATN. For example,
# a : ID {p1}? | ID {p2}? ;
# should create the start state for rule 'a' (to save start state
# competition), but should not create target of ID state. The
# collection of ATN states the following ID references includes
# states reached by traversing predicates. Since this is when we
            # test them, we cannot cache the DFA state target of ID.
if self.debug:
print("EVAL rule "+ str(t.ruleIndex) + ":" + str(t.predIndex))
configs.hasSemanticContext = True
if self.evaluatePredicate(input, t.ruleIndex, t.predIndex, speculative):
c = LexerATNConfig(state=t.target, config=config)
elif t.serializationType==Transition.ACTION:
if config.context is None or config.context.hasEmptyPath():
# execute actions anywhere in the start rule for a token.
#
# TODO: if the entry rule is invoked recursively, some
# actions may be executed during the recursive call. The
# problem can appear when hasEmptyPath() is true but
# isEmpty() is false. In this case, the config needs to be
# split into two contexts - one with just the empty path
# and another with everything but the empty path.
# Unfortunately, the current algorithm does not allow
# getEpsilonTarget to return two configurations, so
# additional modifications are needed before we can support
# the split operation.
lexerActionExecutor = LexerActionExecutor.append(config.lexerActionExecutor,
self.atn.lexerActions[t.actionIndex])
c = LexerATNConfig(state=t.target, config=config, lexerActionExecutor=lexerActionExecutor)
else:
# ignore actions in referenced rules
c = LexerATNConfig(state=t.target, config=config)
elif t.serializationType==Transition.EPSILON:
c = LexerATNConfig(state=t.target, config=config)
elif t.serializationType in [ Transition.ATOM, Transition.RANGE, Transition.SET ]:
if treatEofAsEpsilon:
if t.matches(Token.EOF, 0, 0xFFFF):
c = LexerATNConfig(state=t.target, config=config)
return c
# Evaluate a predicate specified in the lexer.
#
# <p>If {@code speculative} is {@code true}, this method was called before
# {@link #consume} for the matched character. This method should call
# {@link #consume} before evaluating the predicate to ensure position
# sensitive values, including {@link Lexer#getText}, {@link Lexer#getLine},
# and {@link Lexer#getcolumn}, properly reflect the current
# lexer state. This method should restore {@code input} and the simulator
# to the original state before returning (i.e. undo the actions made by the
    # call to {@link #consume}).</p>
#
# @param input The input stream.
# @param ruleIndex The rule containing the predicate.
# @param predIndex The index of the predicate within the rule.
# @param speculative {@code true} if the current index in {@code input} is
# one character before the predicate's location.
#
# @return {@code true} if the specified predicate evaluates to
# {@code true}.
#/
def evaluatePredicate(self, input:InputStream, ruleIndex:int, predIndex:int, speculative:bool):
# assume true if no recognizer was provided
if self.recog is None:
return True
if not speculative:
return self.recog.sempred(None, ruleIndex, predIndex)
savedcolumn = self.column
savedLine = self.line
index = input.index
marker = input.mark()
try:
self.consume(input)
return self.recog.sempred(None, ruleIndex, predIndex)
finally:
self.column = savedcolumn
self.line = savedLine
input.seek(index)
input.release(marker)
def captureSimState(self, settings:SimState, input:InputStream, dfaState:DFAState):
settings.index = input.index
settings.line = self.line
settings.column = self.column
settings.dfaState = dfaState
def addDFAEdge(self, from_:DFAState, tk:int, to:DFAState=None, cfgs:ATNConfigSet=None) -> DFAState:
if to is None and cfgs is not None:
# leading to this call, ATNConfigSet.hasSemanticContext is used as a
# marker indicating dynamic predicate evaluation makes this edge
# dependent on the specific input sequence, so the static edge in the
# DFA should be omitted. The target DFAState is still created since
# execATN has the ability to resynchronize with the DFA state cache
# following the predicate evaluation step.
#
# TJP notes: next time through the DFA, we see a pred again and eval.
# If that gets us to a previously created (but dangling) DFA
# state, we can continue in pure DFA mode from there.
#/
suppressEdge = cfgs.hasSemanticContext
cfgs.hasSemanticContext = False
to = self.addDFAState(cfgs)
if suppressEdge:
return to
# add the edge
if tk < self.MIN_DFA_EDGE or tk > self.MAX_DFA_EDGE:
# Only track edges within the DFA bounds
return to
if self.debug:
print("EDGE " + str(from_) + " -> " + str(to) + " upon "+ chr(tk))
if from_.edges is None:
# make room for tokens 1..n and -1 masquerading as index 0
from_.edges = [ None ] * (self.MAX_DFA_EDGE - self.MIN_DFA_EDGE + 1)
from_.edges[tk - self.MIN_DFA_EDGE] = to # connect
return to
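    # Illustrative note (not part of the runtime): with MIN_DFA_EDGE = 0 and
    # MAX_DFA_EDGE = 127 the per-state edge table covers exactly the ASCII
    # range, for example:
    #
    #   edges = [None] * (127 - 0 + 1)        # 128 slots
    #   edges[ord('a') - 0] = target_state    # edge taken on input 'a'
    #
    # Symbols outside [MIN_DFA_EDGE, MAX_DFA_EDGE] (including EOF, -1) are never
    # cached as DFA edges and are re-simulated through the ATN on each match.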
# Add a new DFA state if there isn't one with this set of
# configurations already. This method also detects the first
# configuration containing an ATN rule stop state. Later, when
# traversing the DFA, we will know which rule to accept.
def addDFAState(self, configs:ATNConfigSet) -> DFAState:
proposed = DFAState(configs=configs)
firstConfigWithRuleStopState = None
for c in configs:
if isinstance(c.state, RuleStopState):
firstConfigWithRuleStopState = c
break
if firstConfigWithRuleStopState is not None:
proposed.isAcceptState = True
proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor
proposed.prediction = self.atn.ruleToTokenType[firstConfigWithRuleStopState.state.ruleIndex]
dfa = self.decisionToDFA[self.mode]
existing = dfa.states.get(proposed, None)
if existing is not None:
return existing
newState = proposed
newState.stateNumber = len(dfa.states)
configs.setReadonly(True)
newState.configs = configs
dfa.states[newState] = newState
return newState
def getDFA(self, mode:int):
return self.decisionToDFA[mode]
# Get the text matched so far for the current token.
def getText(self, input:InputStream):
# index is first lookahead char, don't include.
return input.getText(self.startIndex, input.index-1)
def consume(self, input:InputStream):
curChar = input.LA(1)
if curChar==ord('\n'):
self.line += 1
self.column = 0
else:
self.column += 1
input.consume()
def getTokenName(self, t:int):
if t==-1:
return "EOF"
else:
return "'" + chr(t) + "'"
LexerATNSimulator.ERROR = DFAState(0x7FFFFFFF, ATNConfigSet())
del Lexer | sidhart/antlr4 | runtime/Python3/src/antlr4/atn/LexerATNSimulator.py | Python | bsd-3-clause | 26,465 | 0.007255 |
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# Copyright 2012 Canonical
# Author: Marco Trevisan (Treviño)
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
from __future__ import absolute_import
import logging
from time import sleep
from autopilot.input import Mouse
from autopilot.keybindings import KeybindingsHelper
from unity.emulators import UnityIntrospectionObject
logger = logging.getLogger(__name__)
class PanelController(UnityIntrospectionObject):
"""The PanelController class."""
def get_panel_for_monitor(self, monitor_num):
"""Return an instance of panel for the specified monitor, or None."""
panels = self.get_children_by_type(UnityPanel, monitor=monitor_num)
assert(len(panels) == 1)
return panels[0]
def get_active_panel(self):
"""Return the active panel, or None."""
panels = self.get_children_by_type(UnityPanel, active=True)
assert(len(panels) == 1)
return panels[0]
def get_active_indicator(self):
for panel in self.get_panels:
active = panel.get_active_indicator()
if active:
return active
return None
@property
def get_panels(self):
"""Return the available panels, or None."""
return self.get_children_by_type(UnityPanel)
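# A hedged usage sketch: inside an autopilot test case that exposes the Unity
# introspection tree (the `unity` fixture below is an assumption used only for
# illustration), the controller is typically driven like this:
#
#     controller = unity.panels
#     panel = controller.get_panel_for_monitor(0)
#     active_entry = controller.get_active_indicator()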
class UnityPanel(UnityIntrospectionObject, KeybindingsHelper):
"""An individual panel for a monitor."""
def __init__(self, *args, **kwargs):
super(UnityPanel, self).__init__(*args, **kwargs)
self._mouse = Mouse.create()
def __get_menu_view(self):
"""Return the menu view."""
menus = self.get_children_by_type(MenuView)
assert(len(menus) == 1)
return menus[0]
def __get_window_buttons(self):
"""Return the window buttons view."""
buttons = self.menus.get_children_by_type(WindowButtons)
assert(len(buttons) == 1)
return buttons[0]
def __get_grab_area(self):
"""Return the panel grab area."""
grab_areas = self.menus.get_children_by_type(GrabArea)
assert(len(grab_areas) == 1)
return grab_areas[0]
def __get_indicators_view(self):
"""Return the menu view."""
indicators = self.get_children_by_type(Indicators)
assert(len(indicators) == 1)
return indicators[0]
def move_mouse_below_the_panel(self):
"""Places the mouse to bottom of this panel."""
(x, y, w, h) = self.geometry
target_x = x + w / 2
target_y = y + h + 10
logger.debug("Moving mouse away from panel.")
self._mouse.move(target_x, target_y)
def move_mouse_over_menus(self):
"""Move the mouse over the menu area for this panel."""
(x, y, w, h) = self.menus.geometry
target_x = x + w / 2
target_y = y + h / 2
# The menu view has bigger geometry than the real layout
menu_entries = self.menus.get_entries()
if len(menu_entries) > 0:
first_x = menu_entries[0].x
last_x = menu_entries[-1].x + menu_entries[-1].width / 2
target_x = first_x + (last_x - first_x) / 2
logger.debug("Moving mouse to center of menu area.")
self._mouse.move(target_x, target_y)
def move_mouse_over_grab_area(self):
"""Move the mouse over the grab area for this panel."""
(x, y, w, h) = self.grab_area.geometry
target_x = x + w / 2
target_y = y + h / 2
logger.debug("Moving mouse to center of grab area.")
self._mouse.move(target_x, target_y)
def move_mouse_over_window_buttons(self):
"""Move the mouse over the center of the window buttons area for this panel."""
(x, y, w, h) = self.window_buttons.geometry
target_x = x + w / 2
target_y = y + h / 2
logger.debug("Moving mouse to center of the window buttons.")
self._mouse.move(target_x, target_y)
def move_mouse_over_indicators(self):
"""Move the mouse over the center of the indicators area for this panel."""
(x, y, w, h) = self.indicators.geometry
target_x = x + w / 2
target_y = y + h / 2
logger.debug("Moving mouse to center of the indicators area.")
self._mouse.move(target_x, target_y)
def get_indicator_entries(self, visible_only=True, include_hidden_menus=False):
"""Returns a list of entries for this panel including both menus and indicators"""
entries = []
if include_hidden_menus or self.menus_shown:
entries = self.menus.get_entries()
entries += self.indicators.get_ordered_entries(visible_only)
return entries
def get_active_indicator(self):
"""Returns the indicator entry that is currently active"""
entries = self.get_indicator_entries(False, True)
entries = filter(lambda e: e.active == True, entries)
assert(len(entries) <= 1)
return entries[0] if entries else None
def get_indicator_entry(self, entry_id):
"""Returns the indicator entry for the given ID or None"""
entries = self.get_indicator_entries(False, True)
entries = filter(lambda e: e.entry_id == entry_id, entries)
assert(len(entries) <= 1)
return entries[0] if entries else None
@property
def title(self):
return self.menus.panel_title
@property
def desktop_is_active(self):
return self.menus.desktop_active
@property
def menus_shown(self):
return self.active and self.menus.draw_menus
@property
def window_buttons_shown(self):
return self.menus.draw_window_buttons
@property
def window_buttons(self):
return self.__get_window_buttons()
@property
def menus(self):
return self.__get_menu_view()
@property
def grab_area(self):
return self.__get_grab_area()
@property
def indicators(self):
return self.__get_indicators_view()
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the current panel."""
return (self.x, self.y, self.width, self.height)
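# Illustrative sequence only (the entry id and surrounding test fixture are
# assumptions): the mouse helpers above are meant to be combined, e.g.
#
#     panel = controller.get_active_panel()
#     panel.move_mouse_over_menus()
#     entry = panel.get_indicator_entry('appmenu')
#     if entry is not None:
#         entry.mouse_click()
#     panel.move_mouse_below_the_panel()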
class MenuView(UnityIntrospectionObject):
"""The Menu View class."""
def get_entries(self):
"""Return a list of menu entries"""
entries = self.get_children_by_type(IndicatorEntry)
        # We need to filter out empty entries, which are separators - those
        # are not valid, visible, working entries.
        # For instance, gedit adds some of those, breaking our tests.
entries = [e for e in entries if (e.label != "")]
return entries
def get_menu_by_label(self, entry_label):
"""Return the first indicator entry found with the given label"""
indicators = self.get_children_by_type(IndicatorEntry, label=entry_label)
return indicators[0] if indicators else None
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the current menu view."""
return (self.x, self.y, self.width, self.height)
class WindowButtons(UnityIntrospectionObject):
"""The window buttons class"""
def get_buttons(self, visible_only=True):
"""Return a list of window buttons"""
if visible_only:
return self.get_children_by_type(WindowButton, visible=True)
else:
return self.get_children_by_type(WindowButton)
def get_button(self, type):
buttons = self.get_children_by_type(WindowButton, type=type)
assert(len(buttons) == 1)
return buttons[0]
@property
def visible(self):
return len(self.get_buttons()) != 0
@property
def close(self):
return self.get_button("Close")
@property
def minimize(self):
return self.get_button("Minimize")
@property
def unmaximize(self):
return self.get_button("Unmaximize")
@property
def maximize(self):
return self.get_button("Maximize")
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the current panel."""
return (self.x, self.y, self.width, self.height)
class WindowButton(UnityIntrospectionObject):
"""The Window WindowButton class."""
def __init__(self, *args, **kwargs):
super(WindowButton, self).__init__(*args, **kwargs)
self._mouse = Mouse.create()
def mouse_move_to(self):
target_x = self.x + self.width / 2
target_y = self.y + self.height / 2
self._mouse.move(target_x, target_y, rate=20, time_between_events=0.005)
def mouse_click(self):
self.mouse_move_to()
sleep(.2)
self._mouse.click(press_duration=.1)
sleep(.01)
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the window button."""
return (self.x, self.y, self.width, self.height)
class GrabArea(UnityIntrospectionObject):
"""The grab area class"""
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the grab area."""
return (self.x, self.y, self.width, self.height)
class Indicators(UnityIntrospectionObject):
"""The Indicators View class."""
def get_ordered_entries(self, visible_only=True):
"""Return a list of indicators, ordered by their priority"""
if visible_only:
entries = self.get_children_by_type(IndicatorEntry, visible=True)
else:
entries = self.get_children_by_type(IndicatorEntry)
return sorted(entries, key=lambda entry: entry.priority)
def get_indicator_by_name_hint(self, name_hint):
"""Return the IndicatorEntry with the name_hint"""
indicators = self.get_children_by_type(IndicatorEntry, name_hint=name_hint)
assert(len(indicators) == 1)
return indicators[0]
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the indicators area."""
return (self.x, self.y, self.width, self.height)
class IndicatorEntry(UnityIntrospectionObject):
"""The IndicatorEntry View class."""
def __init__(self, *args, **kwargs):
super(IndicatorEntry, self).__init__(*args, **kwargs)
self._mouse = Mouse.create()
def mouse_move_to(self):
target_x = self.x + self.width / 2
target_y = self.y + self.height / 2
self._mouse.move(target_x, target_y, rate=20, time_between_events=0.005)
def mouse_click(self, button=1):
self.mouse_move_to()
sleep(.2)
assert(self.visible)
self._mouse.click(press_duration=.1)
sleep(.01)
@property
def geometry(self):
"""Returns a tuple of (x,y,w,h) for the indicator entry."""
return (self.x, self.y, self.width, self.height)
@property
def menu_geometry(self):
"""Returns a tuple of (x,y,w,h) for the opened menu geometry."""
return (self.menu_x, self.menu_y, self.menu_width, self.menu_height)
def __repr__(self):
with self.no_automatic_refreshing():
return "<IndicatorEntry 0x%x (%s)>" % (id(self), self.label)
class Tray(UnityIntrospectionObject):
"""A panel tray object."""
| foer/linuxmuster-client-unity | tests/autopilot/unity/emulators/panel.py | Python | gpl-3.0 | 11,333 | 0.000794 |
#!/usr/bin/python
import json
from urllib import urlopen
import requests
import getpass
from string import Template
import sys
import os
import subprocess
class RunError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def run(command, **kwargs):
fail_hard = kwargs.pop("fail_hard", True)
# output to /dev/null by default:
kwargs.setdefault("stdout", open('/dev/null', 'w'))
kwargs.setdefault("stderr", open('/dev/null', 'w'))
command = Template(command).substitute(os.environ)
if "TRACE" in os.environ:
if 'cwd' in kwargs:
print("[cwd=%s] %s"%(kwargs['cwd'], command))
else: print(command)
try:
process = subprocess.Popen(command.split(' '), **kwargs)
process.wait()
except KeyboardInterrupt:
process.terminate()
raise
if process.returncode != 0 and fail_hard:
raise RunError("Failed: "+command)
return process.returncode
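# Minimal sketch (values are placeholders) of the ${VAR} expansion run() performs
# before splitting the command on spaces and handing it to subprocess.Popen; note
# that Template.substitute() raises KeyError if a referenced variable is unset:
#
#     os.environ.setdefault("CLONE_URL", "https://github.com/bitcoin/bitcoin.git")
#     os.environ.setdefault("BUILD_DIR", "/tmp/build")
#     expanded = Template("git clone ${CLONE_URL} ${BUILD_DIR}").substitute(os.environ)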
def checkout_pull(clone_url, commit, out):
# Init
build_dir=os.environ["BUILD_DIR"]
run("umount ${CHROOT_COPY}/proc", fail_hard=False)
run("rsync --delete -apv ${CHROOT_MASTER}/ ${CHROOT_COPY}")
run("rm -rf ${CHROOT_COPY}${SCRIPTS_DIR}")
run("cp -a ${SCRIPTS_DIR} ${CHROOT_COPY}${SCRIPTS_DIR}")
# Merge onto upstream/master
run("rm -rf ${BUILD_DIR}")
run("mkdir -p ${BUILD_DIR}")
run("git clone ${CLONE_URL} ${BUILD_DIR}")
run("git remote add pull "+clone_url, cwd=build_dir, stdout=out, stderr=out)
run("git fetch pull", cwd=build_dir, stdout=out, stderr=out)
if run("git merge "+ commit, fail_hard=False, cwd=build_dir, stdout=out, stderr=out) != 0:
return False
run("chown -R ${BUILD_USER}:${BUILD_GROUP} ${BUILD_DIR}", stdout=out, stderr=out)
run("mount --bind /proc ${CHROOT_COPY}/proc")
return True
def commentOn(commentUrl, success, inMerge, needTests, linkUrl):
common_message = """
This test script verifies pulls every time they are updated. It, however, dies sometimes and fails to test properly. If you are waiting on a test, please check timestamps to verify that the test.log is moving at http://jenkins.bluematt.me/pull-tester/current/
Contact BlueMatt on freenode if something looks broken."""
# Remove old CoinsBazarPullTester comments (I'm being lazy and not paginating here)
recentcomments = requests.get(commentUrl+"?sort=created&direction=desc",
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
for comment in recentcomments:
if comment["user"]["login"] == os.environ["GITHUB_USER"] and common_message in comment["body"]:
requests.delete(comment["url"],
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"]))
if success == True:
if needTests:
message = "Automatic sanity-testing: PLEASE ADD TEST-CASES, though technically passed. See " + linkUrl + " for binaries and test log."
else:
message = "Automatic sanity-testing: PASSED, see " + linkUrl + " for binaries and test log."
post_data = { "body" : message + common_message}
elif inMerge:
post_data = { "body" : "Automatic sanity-testing: FAILED MERGE, see " + linkUrl + " for test log." + """
This pull does not merge cleanly onto current master""" + common_message}
else:
post_data = { "body" : "Automatic sanity-testing: FAILED BUILD/TEST, see " + linkUrl + " for binaries and test log." + """
This could happen for one of several reasons:
1. It changes build scripts in a way that made them incompatible with the automated testing scripts (please tweak those patches in qa/pull-tester)
2. It adds/modifies tests which test network rules (thanks for doing that), which conflicts with a patch applied at test time
3. It does not build on either Linux i386 or Win32 (via MinGW cross compile)
4. The test suite fails on either Linux i386 or Win32
5. The block test-cases failed (lookup the first bNN identifier which failed in https://github.com/TheBlueMatt/test-scripts/blob/master/FullBlockTestGenerator.java)
If you believe this to be in error, please ping BlueMatt on freenode or TheBlueMatt here.
""" + common_message}
resp = requests.post(commentUrl, json.dumps(post_data), auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"]))
def testpull(number, comment_url, clone_url, commit):
print("Testing pull %d: %s : %s"%(number, clone_url,commit))
dir = os.environ["RESULTS_DIR"] + "/" + commit + "/"
print(" ouput to %s"%dir)
if os.path.exists(dir):
os.system("rm -r " + dir)
os.makedirs(dir)
currentdir = os.environ["RESULTS_DIR"] + "/current"
os.system("rm -r "+currentdir)
os.system("ln -s " + dir + " " + currentdir)
out = open(dir + "test.log", 'w+')
resultsurl = os.environ["RESULTS_URL"] + commit
checkedout = checkout_pull(clone_url, commit, out)
if checkedout != True:
print("Failed to test pull - sending comment to: " + comment_url)
commentOn(comment_url, False, True, False, resultsurl)
open(os.environ["TESTED_DB"], "a").write(commit + "\n")
return
run("rm -rf ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False);
run("mkdir -p ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False);
run("chown -R ${BUILD_USER}:${BUILD_GROUP} ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False)
script = os.environ["BUILD_PATH"]+"/qa/pull-tester/pull-tester.sh"
script += " ${BUILD_PATH} ${MINGW_DEPS_DIR} ${SCRIPTS_DIR}/CoinsBazardComparisonTool_jar/CoinsBazardComparisonTool.jar 0 6 ${OUT_DIR}"
returncode = run("chroot ${CHROOT_COPY} sudo -u ${BUILD_USER} -H timeout ${TEST_TIMEOUT} "+script,
fail_hard=False, stdout=out, stderr=out)
run("mv ${CHROOT_COPY}/${OUT_DIR} " + dir)
run("mv ${BUILD_DIR} " + dir)
if returncode == 42:
print("Successfully tested pull (needs tests) - sending comment to: " + comment_url)
commentOn(comment_url, True, False, True, resultsurl)
elif returncode != 0:
print("Failed to test pull - sending comment to: " + comment_url)
commentOn(comment_url, False, False, False, resultsurl)
else:
print("Successfully tested pull - sending comment to: " + comment_url)
commentOn(comment_url, True, False, False, resultsurl)
open(os.environ["TESTED_DB"], "a").write(commit + "\n")
def environ_default(setting, value):
if not setting in os.environ:
os.environ[setting] = value
if getpass.getuser() != "root":
print("Run me as root!")
sys.exit(1)
if "GITHUB_USER" not in os.environ or "GITHUB_AUTH_TOKEN" not in os.environ:
print("GITHUB_USER and/or GITHUB_AUTH_TOKEN environment variables not set")
sys.exit(1)
environ_default("CLONE_URL", "https://github.com/bitcoin/bitcoin.git")
environ_default("MINGW_DEPS_DIR", "/mnt/w32deps")
environ_default("SCRIPTS_DIR", "/mnt/test-scripts")
environ_default("CHROOT_COPY", "/mnt/chroot-tmp")
environ_default("CHROOT_MASTER", "/mnt/chroot")
environ_default("OUT_DIR", "/mnt/out")
environ_default("BUILD_PATH", "/mnt/bitcoin")
os.environ["BUILD_DIR"] = os.environ["CHROOT_COPY"] + os.environ["BUILD_PATH"]
environ_default("RESULTS_DIR", "/mnt/www/pull-tester")
environ_default("RESULTS_URL", "http://jenkins.bluematt.me/pull-tester/")
environ_default("GITHUB_REPO", "bitcoin/bitcoin")
environ_default("TESTED_DB", "/mnt/commits-tested.txt")
environ_default("BUILD_USER", "matt")
environ_default("BUILD_GROUP", "matt")
environ_default("TEST_TIMEOUT", str(60*60*2))
print("Optional usage: pull-tester.py 2112")
f = open(os.environ["TESTED_DB"])
tested = set( line.rstrip() for line in f.readlines() )
f.close()
if len(sys.argv) > 1:
pull = requests.get("https://api.github.com/repos/"+os.environ["GITHUB_REPO"]+"/pulls/"+sys.argv[1],
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
testpull(pull["number"], pull["_links"]["comments"]["href"],
pull["head"]["repo"]["clone_url"], pull["head"]["sha"])
else:
for page in range(1,100):
result = requests.get("https://api.github.com/repos/"+os.environ["GITHUB_REPO"]+"/pulls?state=open&page=%d"%(page,),
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
if len(result) == 0: break;
for pull in result:
if pull["head"]["sha"] in tested:
print("Pull %d already tested"%(pull["number"],))
continue
testpull(pull["number"], pull["_links"]["comments"]["href"],
pull["head"]["repo"]["clone_url"], pull["head"]["sha"])
| aqavi-paracha/coinsbazar | qa/pull-tester/pull-tester.py | Python | mit | 8,761 | 0.007191 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Nexenta Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`nexenta.volume` -- Driver to store volumes on Nexenta Appliance
=====================================================================
.. automodule:: nexenta.volume
.. moduleauthor:: Yuriy Taraday <yorik.sar@gmail.com>
"""
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova.volume import driver
from nova.volume import nexenta
from nova.volume.nexenta import jsonrpc
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
nexenta_opts = [
cfg.StrOpt('nexenta_host',
default='',
help='IP address of Nexenta SA'),
cfg.IntOpt('nexenta_rest_port',
default=2000,
help='HTTP port to connect to Nexenta REST API server'),
cfg.StrOpt('nexenta_rest_protocol',
default='auto',
help='Use http or https for REST connection (default auto)'),
cfg.StrOpt('nexenta_user',
default='admin',
help='User name to connect to Nexenta SA'),
cfg.StrOpt('nexenta_password',
default='nexenta',
help='Password to connect to Nexenta SA'),
cfg.IntOpt('nexenta_iscsi_target_portal_port',
default=3260,
help='Nexenta target portal port'),
cfg.StrOpt('nexenta_volume',
default='nova',
help='pool on SA that will hold all volumes'),
cfg.StrOpt('nexenta_target_prefix',
default='iqn.1986-03.com.sun:02:nova-',
help='IQN prefix for iSCSI targets'),
cfg.StrOpt('nexenta_target_group_prefix',
default='nova/',
help='prefix for iSCSI target groups on SA'),
cfg.StrOpt('nexenta_blocksize',
default='',
help='block size for volumes (blank=default,8KB)'),
cfg.BoolOpt('nexenta_sparse',
default=False,
help='flag to create sparse volumes'),
]
FLAGS.register_opts(nexenta_opts)
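# The options above are regular Nova flags, so they can be overridden in
# nova.conf; an illustrative (non-authoritative) snippet:
#
#     [DEFAULT]
#     volume_driver = nova.volume.nexenta.volume.NexentaDriver
#     nexenta_host = 192.168.0.10
#     nexenta_user = admin
#     nexenta_password = secret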
class NexentaDriver(driver.ISCSIDriver): # pylint: disable=R0921
"""Executes volume driver commands on Nexenta Appliance."""
def __init__(self):
super(NexentaDriver, self).__init__()
def do_setup(self, context):
protocol = FLAGS.nexenta_rest_protocol
auto = protocol == 'auto'
if auto:
protocol = 'http'
self.nms = jsonrpc.NexentaJSONProxy(
'%s://%s:%s/rest/nms/' % (protocol, FLAGS.nexenta_host,
FLAGS.nexenta_rest_port),
FLAGS.nexenta_user, FLAGS.nexenta_password, auto=auto)
def check_for_setup_error(self):
"""Verify that the volume for our zvols exists.
:raise: :py:exc:`LookupError`
"""
if not self.nms.volume.object_exists(FLAGS.nexenta_volume):
raise LookupError(_("Volume %s does not exist in Nexenta SA"),
FLAGS.nexenta_volume)
@staticmethod
def _get_zvol_name(volume_name):
"""Return zvol name that corresponds given volume name."""
return '%s/%s' % (FLAGS.nexenta_volume, volume_name)
@staticmethod
def _get_target_name(volume_name):
"""Return iSCSI target name to access volume."""
return '%s%s' % (FLAGS.nexenta_target_prefix, volume_name)
@staticmethod
def _get_target_group_name(volume_name):
"""Return Nexenta iSCSI target group name for volume."""
return '%s%s' % (FLAGS.nexenta_target_group_prefix, volume_name)
def create_volume(self, volume):
"""Create a zvol on appliance.
:param volume: volume reference
"""
self.nms.zvol.create(
self._get_zvol_name(volume['name']),
'%sG' % (volume['size'],),
FLAGS.nexenta_blocksize, FLAGS.nexenta_sparse)
def delete_volume(self, volume):
"""Destroy a zvol on appliance.
:param volume: volume reference
"""
try:
self.nms.zvol.destroy(self._get_zvol_name(volume['name']), '')
except nexenta.NexentaException as exc:
if "zvol has children" in exc.args[1]:
raise exception.VolumeIsBusy
else:
raise
def create_snapshot(self, snapshot):
"""Create snapshot of existing zvol on appliance.
        :param snapshot: snapshot reference
"""
self.nms.zvol.create_snapshot(
self._get_zvol_name(snapshot['volume_name']),
snapshot['name'], '')
def create_volume_from_snapshot(self, volume, snapshot):
"""Create new volume from other's snapshot on appliance.
:param volume: reference of volume to be created
:param snapshot: reference of source snapshot
"""
self.nms.zvol.clone(
'%s@%s' % (self._get_zvol_name(snapshot['volume_name']),
snapshot['name']),
self._get_zvol_name(volume['name']))
def delete_snapshot(self, snapshot):
"""Delete volume's snapshot on appliance.
        :param snapshot: snapshot reference
"""
try:
self.nms.snapshot.destroy(
'%s@%s' % (self._get_zvol_name(snapshot['volume_name']),
snapshot['name']),
'')
except nexenta.NexentaException as exc:
if "snapshot has dependent clones" in exc.args[1]:
raise exception.SnapshotIsBusy
else:
raise
def local_path(self, volume):
"""Return local path to existing local volume.
We never have local volumes, so it raises NotImplementedError.
:raise: :py:exc:`NotImplementedError`
"""
LOG.error(_("Call to local_path should not happen."
" Verify that use_local_volumes flag is turned off."))
raise NotImplementedError
def _do_export(self, _ctx, volume, ensure=False):
"""Do all steps to get zvol exported as LUN 0 at separate target.
:param volume: reference of volume to be exported
:param ensure: if True, ignore errors caused by already existing
resources
:return: iscsiadm-formatted provider location string
"""
zvol_name = self._get_zvol_name(volume['name'])
target_name = self._get_target_name(volume['name'])
target_group_name = self._get_target_group_name(volume['name'])
try:
self.nms.iscsitarget.create_target({'target_name': target_name})
except nexenta.NexentaException as exc:
if not ensure or 'already configured' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored target creation error "%s"'
' while ensuring export'), exc)
try:
self.nms.stmf.create_targetgroup(target_group_name)
except nexenta.NexentaException as exc:
if not ensure or 'already exists' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored target group creation error "%s"'
' while ensuring export'), exc)
try:
self.nms.stmf.add_targetgroup_member(target_group_name,
target_name)
except nexenta.NexentaException as exc:
if not ensure or 'already exists' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored target group member addition error "%s"'
' while ensuring export'), exc)
try:
self.nms.scsidisk.create_lu(zvol_name, {})
except nexenta.NexentaException as exc:
if not ensure or 'in use' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored LU creation error "%s"'
' while ensuring export'), exc)
try:
self.nms.scsidisk.add_lun_mapping_entry(zvol_name, {
'target_group': target_group_name,
'lun': '0'})
except nexenta.NexentaException as exc:
if not ensure or 'view entry exists' not in exc.args[1]:
raise
else:
LOG.info(_('Ignored LUN mapping entry addition error "%s"'
' while ensuring export'), exc)
return '%s:%s,1 %s' % (FLAGS.nexenta_host,
FLAGS.nexenta_iscsi_target_portal_port,
target_name)
def create_export(self, _ctx, volume):
"""Create new export for zvol.
:param volume: reference of volume to be exported
:return: iscsiadm-formatted provider location string
"""
loc = self._do_export(_ctx, volume, ensure=False)
return {'provider_location': loc}
def ensure_export(self, _ctx, volume):
"""Recreate parts of export if necessary.
:param volume: reference of volume to be exported
"""
self._do_export(_ctx, volume, ensure=True)
def remove_export(self, _ctx, volume):
"""Destroy all resources created to export zvol.
:param volume: reference of volume to be unexported
"""
zvol_name = self._get_zvol_name(volume['name'])
target_name = self._get_target_name(volume['name'])
target_group_name = self._get_target_group_name(volume['name'])
self.nms.scsidisk.delete_lu(zvol_name)
try:
self.nms.stmf.destroy_targetgroup(target_group_name)
except nexenta.NexentaException as exc:
# We assume that target group is already gone
LOG.warn(_('Got error trying to destroy target group'
' %(target_group)s, assuming it is already gone: %(exc)s'),
{'target_group': target_group_name, 'exc': exc})
try:
self.nms.iscsitarget.delete_target(target_name)
except nexenta.NexentaException as exc:
# We assume that target is gone as well
LOG.warn(_('Got error trying to delete target %(target)s,'
' assuming it is already gone: %(exc)s'),
{'target': target_name, 'exc': exc})
| tylertian/Openstack | openstack F/nova/nova/volume/nexenta/volume.py | Python | apache-2.0 | 11,026 | 0.001179 |
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['Texture', 'TextureSource', 'ShotTexture']
from pyasm.search import *
from pyasm.biz import Project
class Texture(SObject):
SEARCH_TYPE = "prod/texture"
def get_relation(my, name):
from asset import Asset
relations = {}
relations['asset'] = Asset
relations['texture'] = Texture
return relations[name]
def get_icon_context(my, context=None):
return "publish"
# static functions
def create(cls, asset, code=None, category=None, description=None, sobject_context=None):
sobject = SearchType.create( cls.SEARCH_TYPE )
asset_code = asset.get_code()
#asset_code = asset.get_code()
sobject.set_value("asset_code", asset.get_code())
if sobject_context != None:
sobject.set_value("asset_context", sobject_context)
if code != None:
sobject.set_value("code", code)
if category != None:
sobject.set_value("category", category)
if description != None:
sobject.set_value("description", description)
sobject.commit()
return sobject
create = classmethod(create)
def get(cls, texture_code, parent_code, project_code=None, is_multi=False):
'''TODO: use search_type, id for the parent search'''
if not project_code:
project_code = Project.get_project_code()
search = Search( cls.SEARCH_TYPE, project_code )
#search.set_show_retired(True)
if texture_code:
search.add_filter('code', texture_code)
search.add_filter('asset_code', parent_code)
search_type = search.get_search_type()
key = "%s|%s|%s" % (search_type, texture_code, parent_code)
sobj = cls.get_by_search(search, key, is_multi=is_multi)
return sobj
get = classmethod(get)
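# Hedged usage sketch (the asset object is assumed to come from an existing
# search elsewhere in the pipeline):
#
#     texture = Texture.create(asset, code='TEX001', category='diffuse',
#                              description='base color map')
#     same_texture = Texture.get('TEX001', asset.get_code())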
class TextureSource(Texture):
SEARCH_TYPE = "prod/texture_source"
def create(cls, asset_code, code=None, category=None, description=None, sobject_context=None):
sobject = SearchType.create( cls.SEARCH_TYPE )
sobject.set_value("asset_code", asset_code)
if sobject_context != None:
sobject.set_value("asset_context", sobject_context)
if code != None:
sobject.set_value("code", code)
if category != None:
sobject.set_value("category", category)
if description != None:
sobject.set_value("description", description)
sobject.commit()
return sobject
create = classmethod(create)
class ShotTexture(Texture):
SEARCH_TYPE = "prod/shot_texture"
def get_shot_code(my):
shot_code = ''
search_type = my.get_value('search_type')
search = Search( search_type )
search.add_filter( 'id', my.get_value('search_id') )
parent = search.get_sobject()
if not parent:
return shot_code
if search_type.startswith('prod/shot_instance'):
shot_code = parent.get_value('shot_code')
else:
shot_code = parent.get_value('code')
return shot_code
# static functions
def create(cls, sobject, code=None, category=None, description=None, sobject_context=None):
texture = SearchType.create( cls.SEARCH_TYPE )
texture.set_value("search_type", sobject.get_search_type() )
texture.set_value("search_id", sobject.get_id())
#texture.set_value("shot_code", shot_code)
if sobject_context != None:
texture.set_value("asset_context", sobject_context)
if code != None:
texture.set_value("code", code)
if category != None:
texture.set_value("category", category)
if description != None:
texture.set_value("description", description)
texture.commit()
return texture
create = classmethod(create)
def get(cls, texture_code, parent_code, project_code=None, is_multi=False):
if not project_code:
project_code = Project.get_project_code()
search = Search( cls.SEARCH_TYPE, project_code )
#search.set_show_retired(True)
if texture_code:
search.add_filter('code', texture_code)
# backward compatible with using shot code
if isinstance(parent_code, basestring):
from pyasm.prod.biz import Shot
parent = Shot.get_by_code(parent_code)
else:
parent = parent_code
if not parent:
if is_multi:
return []
else:
return None
search.add_filter('search_type', parent.get_search_type())
search.add_filter('search_id', parent.get_id())
parent_key = SearchKey.get_by_sobject(parent)
search_type = search.get_search_type()
key = "%s|%s|%s" % (search_type, texture_code, parent_key)
sobj = cls.get_by_search(search, key, is_multi=is_multi)
return sobj
get = classmethod(get)
| sadanandb/pmt | src/pyasm/prod/biz/texture.py | Python | epl-1.0 | 5,425 | 0.00977 |
import re
import optparse
from django.core.management.base import BaseCommand
from dbmail.models import MailTemplate
from dbmail.defaults import BACKEND
from dbmail import db_sender
def send_test_msg(pk, email, user=None, **kwargs):
template = MailTemplate.objects.get(pk=pk)
slug = template.slug
    var_list = re.findall(r'\{\{\s?(\w+)\s?\}\}', template.message)
context = {}
for var in var_list:
context[var] = '%s' % var.upper().replace('_', '-')
return db_sender(slug, email, user, context, **kwargs)
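# For illustration, the scan above turns a template body such as
# "Hello {{ first_name }}, your code is {{ code }}" into the context
# {'first_name': 'FIRST-NAME', 'code': 'CODE'} before calling db_sender:
#
#     >>> re.findall(r'\{\{\s?(\w+)\s?\}\}', "Hello {{ first_name }}!")
#     ['first_name']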
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
optparse.make_option('--email', dest='email', help='Recipients'),
optparse.make_option('--pk', dest='pk', help='DBMail template id'),
optparse.make_option('--without-celery', action='store_true',
default=False, dest='celery',
help='Send direct message'),
optparse.make_option('--provider', dest='provider', help='Provider'),
optparse.make_option(
'--backend', dest='backend', help='Backend', default='mail'),
)
@staticmethod
def get_kwargs(options):
kwargs = {
'use_celery': not options['celery'],
'backend': BACKEND['mail']}
if options['provider']:
kwargs['provider'] = options['provider']
if options['backend']:
kwargs['backend'] = BACKEND[options['backend']]
return kwargs
def handle(self, *args, **options):
send_test_msg(
options['pk'], options['email'], **self.get_kwargs(options)
)
print "Done. Message was sent."
| Shekharrajak/django-db-mailer | dbmail/management/commands/dbmail_test_send.py | Python | gpl-2.0 | 1,678 | 0.004172 |
from django.contrib import admin
class CandidateAdmin(admin.ModelAdmin):
fieldsets = [
(None, {
'fields': ['email', 'first_name', 'last_name', 'gender', 'cv']
}),
('Contact Information', {
'classes': ('collapse',),
'fields': ['mobile_phone']
}),
('Address Information', {
'classes': ('collapse',),
'fields': ['address', 'city']
}),
('Additional Information', {
'classes': ('collapse',),
'fields': ['qualification', 'institute', 'experienced']
})
]
def get_fieldsets(self, request, obj=None):
if obj is None:
self.fieldsets[0][1]['fields'] = ['email', 'first_name',
'last_name', 'gender', 'cv']
else:
self.fieldsets[0][1]['fields'] = ['email', 'first_name',
'last_name', 'gender', 'cv',
'status']
return self.fieldsets
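# Registration is assumed to happen elsewhere in this app; a minimal sketch
# (the Candidate import path is an assumption) would be:
#
#     from interview.models import Candidate
#     admin.site.register(Candidate, CandidateAdmin)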
| QC-Technologies/HRMS | interview/admin/candidate.py | Python | gpl-3.0 | 1,067 | 0 |
# Stripped down configuration file for ipython-notebook in Topographica.
c = get_config()
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# NotebookApp will inherit config from: BaseIPythonApplication, Application
# The IP address the notebook server will listen on.
# c.NotebookApp.ip = '127.0.0.1'
# The base URL for the notebook server
# c.NotebookApp.base_project_url = '/'
# The port the notebook server will listen on.
# c.NotebookApp.port = 8888
# Whether to prevent editing/execution of notebooks.
# c.NotebookApp.read_only = False
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = False
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = u''
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from IPython.lib import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = u''
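# For example (illustrative hash only), the generated value is pasted in as:
# c.NotebookApp.password = u'sha1:67c9e60bb8b6:9ffede0825894254b2e042ea597d771089e11aed'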
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = u''
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: KernelApp, BaseIPythonApplication,
# Application, InteractiveShellApp
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# A file to be run
# c.IPKernelApp.file_to_run = ''
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# dotted module name of an IPython extension to load.
c.IPKernelApp.extra_extension = 'topo.misc.ipython'
#------------------------------------------------------------------------------
# InlineBackend configuration
#------------------------------------------------------------------------------
# An object to store configuration of the inline backend.
# The image format for figures with the inline backend.
# c.InlineBackend.figure_format = 'png'
# Close all figures at the end of each cell.
#
# When True, ensures that each cell starts with no active figures, but it also
# means that one must keep track of references in order to edit or redraw
# figures in subsequent cells. This mode is ideal for the notebook, where
# residual plots from other cells might be surprising.
#
# When False, one must call figure() to create new figures. This means that
# gcf() and getfigs() can reference figures created in other cells, and the
# active figure can continue to be edited with pylab/pyplot methods that
# reference the current active figure. This mode facilitates iterative editing
# of figures, and behaves most consistently with other matplotlib backends, but
# figure barriers between cells must be explicit.
# c.InlineBackend.close_figures = True
# Subset of matplotlib rcParams that should be different for the inline backend.
# c.InlineBackend.rc = {'font.size': 10, 'savefig.dpi': 72, 'figure.figsize': (6.0, 4.0), 'figure.subplot.bottom': 0.125}
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebok mapping and HTTP error handling
# MappingKernelManager will inherit config from: MultiKernelManager
# The max raw message size accepted from the browser over a WebSocket
# connection.
# c.MappingKernelManager.max_msg_size = 65536
# Kernel heartbeat interval in seconds.
# c.MappingKernelManager.time_to_dead = 3.0
# Delay (in seconds) before sending first heartbeat.
# c.MappingKernelManager.first_beat = 5.0
#------------------------------------------------------------------------------
# NotebookManager configuration
#------------------------------------------------------------------------------
# Automatically create a Python script when saving the notebook.
#
# For easier use of import, %run and %load across notebooks, a <notebook-
# name>.py script will be created next to any <notebook-name>.ipynb on each
# save. This can also be set with the short `--script` flag.
# c.NotebookManager.save_script = False
| Tasignotas/topographica_mirror | platform/ipython/profile_topo/ipython_notebook_config.py | Python | bsd-3-clause | 4,652 | 0.003869 |
#!/usr/bin/env python
import os
import sys
from setuptools import setup
SETUP_DIR = os.path.dirname(__file__)
README = os.path.join(SETUP_DIR, "README.rst")
NEEDS_PYTEST = {"pytest", "test", "ptr"}.intersection(sys.argv)
PYTEST_RUNNER = ["pytest-runner", "pytest-cov"] if NEEDS_PYTEST else []
setup(
name="cwl-upgrader",
version="1.2.2",
description="Common Workflow Language standalone document upgrader",
long_description=open(README).read(),
author="Common Workflow Language contributors",
author_email="common-workflow-language@googlegroups.com",
url="https://github.com/common-workflow-language/cwl-upgrader",
download_url="https://github.com/common-workflow-language/cwl-upgrader",
license="Apache 2.0",
packages=["cwlupgrader", "cwlupgrader.tests"],
include_package_data=True,
package_dir={"cwlupgrader.tests": "tests"},
package_data={"cwlupgrader.tests": ["../testdata/**/*.cwl"]},
install_requires=[
"setuptools",
"ruamel.yaml >= 0.14.12, < 0.17.21",
"schema_salad",
],
entry_points={"console_scripts": ["cwl-upgrader = cwlupgrader.main:main"]},
python_requires=">=3.6, <4",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
zip_safe=True,
setup_requires=PYTEST_RUNNER,
tests_require=["pytest < 7.1.0"],
test_suite="tests",
)
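# Illustrative usage once installed; the console script name comes from the
# entry_points declared above:
#
#     pip install cwl-upgrader
#     cwl-upgrader path/to/workflow.cwl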
| common-workflow-language/cwl-upgrader | setup.py | Python | apache-2.0 | 2,104 | 0 |
def hello_again():
print("hello again")
| pdorrell/emacs-site-lisp | test/test-project/src/subdir_with_files/spaced dir name/hello.py | Python | gpl-2.0 | 45 | 0 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements an interface to the Henkelmann et al.'s excellent
Fortran code for calculating a Bader charge analysis.
This module depends on a compiled bader executable available in the path.
Please download the library at http://theory.cm.utexas.edu/vasp/bader/ and
follow the instructions to compile the executable.
If you use this module, please cite the following:
G. Henkelman, A. Arnaldsson, and H. Jonsson, "A fast and robust algorithm for
Bader decomposition of charge density", Comput. Mater. Sci. 36, 354-360 (2006).
"""
import glob
import os
import shutil
import subprocess
import warnings
import numpy as np
from monty.dev import requires
from monty.io import zopen
from monty.os.path import which
from monty.tempfile import ScratchDir
from pymatgen.io.cube import Cube
from pymatgen.io.vasp.inputs import Potcar
from pymatgen.io.vasp.outputs import Chgcar
__author__ = "shyuepingong"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Beta"
__date__ = "4/5/13"
BADEREXE = which("bader") or which("bader.exe")
class BaderAnalysis:
"""
Bader analysis for Cube files and VASP outputs.
.. attribute: data
Atomic data parsed from bader analysis. Essentially a list of dicts
of the form::
[
{
"atomic_vol": 8.769,
"min_dist": 0.8753,
"charge": 7.4168,
"y": 1.1598,
"x": 0.0079,
"z": 0.8348
},
...
]
.. attribute: vacuum_volume
Vacuum volume of the Bader analysis.
.. attribute: vacuum_charge
Vacuum charge of the Bader analysis.
.. attribute: nelectrons
Number of electrons of the Bader analysis.
.. attribute: chgcar
Chgcar object associated with input CHGCAR file.
.. attribute: atomic_densities
        list of charge densities for each atom, each centered on that atom.
        Excess zeros are removed from the array to reduce its size.
        Each charge density is a dict holding the charge density map,
        the shift vector applied to center the data, and the original dimension of the charge density map
charge:
{
"data": charge density array
"shift": shift used to center the atomic charge density
"dim": dimension of the original charge density map
}
"""
@requires(
which("bader") or which("bader.exe"),
"BaderAnalysis requires the executable bader to be in the path."
" Please download the library at http://theory.cm.utexas"
".edu/vasp/bader/ and compile the executable.",
)
def __init__(
self,
chgcar_filename=None,
potcar_filename=None,
chgref_filename=None,
parse_atomic_densities=False,
cube_filename=None,
):
"""
Initializes the Bader caller.
Args:
chgcar_filename (str): The filename of the CHGCAR.
parse_atomic_densities (bool): Optional. turns on atomic partition of the charge density
charge densities are atom centered
"""
if not BADEREXE:
raise RuntimeError(
"BaderAnalysis requires the executable bader to be in the path."
" Please download the library at http://theory.cm.utexas"
".edu/vasp/bader/ and compile the executable."
)
if not (cube_filename or chgcar_filename):
raise ValueError("You must provide a file! Either a cube file or a CHGCAR")
if cube_filename and chgcar_filename:
raise ValueError("You cannot parse a cube and a CHGCAR at the same time!")
self.parse_atomic_densities = parse_atomic_densities
if chgcar_filename:
fpath = os.path.abspath(chgcar_filename)
self.is_vasp = True
self.chgcar = Chgcar.from_file(chgcar_filename)
self.structure = self.chgcar.structure
self.potcar = Potcar.from_file(potcar_filename) if potcar_filename is not None else None
self.natoms = self.chgcar.poscar.natoms
chgrefpath = os.path.abspath(chgref_filename) if chgref_filename else None
self.reference_used = bool(chgref_filename)
# List of nelects for each atom from potcar
potcar_indices = []
for i, v in enumerate(self.natoms):
potcar_indices += [i] * v
self.nelects = (
[self.potcar[potcar_indices[i]].nelectrons for i in range(len(self.structure))] if self.potcar else []
)
else:
fpath = os.path.abspath(cube_filename)
self.is_vasp = False
self.cube = Cube(fpath)
self.structure = self.cube.structure
self.nelects = None
chgrefpath = os.path.abspath(chgref_filename) if chgref_filename else None
self.reference_used = bool(chgref_filename)
tmpfile = "CHGCAR" if chgcar_filename else "CUBE"
with ScratchDir("."):
with zopen(fpath, "rt") as f_in:
with open(tmpfile, "wt") as f_out:
shutil.copyfileobj(f_in, f_out)
args = [BADEREXE, tmpfile]
if chgref_filename:
with zopen(chgrefpath, "rt") as f_in:
with open("CHGCAR_ref", "wt") as f_out:
shutil.copyfileobj(f_in, f_out)
args += ["-ref", "CHGCAR_ref"]
if parse_atomic_densities:
args += ["-p", "all_atom"]
with subprocess.Popen(args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, close_fds=True) as rs:
stdout, stderr = rs.communicate()
if rs.returncode != 0:
raise RuntimeError(
"bader exited with return code %d. Please check your bader installation." % rs.returncode
)
try:
self.version = float(stdout.split()[5])
except ValueError:
self.version = -1 # Unknown
if self.version < 1.0:
warnings.warn(
"Your installed version of Bader is outdated, calculation of vacuum charge may be incorrect."
)
data = []
with open("ACF.dat") as f:
raw = f.readlines()
headers = ("x", "y", "z", "charge", "min_dist", "atomic_vol")
raw.pop(0)
raw.pop(0)
while True:
l = raw.pop(0).strip()
if l.startswith("-"):
break
vals = map(float, l.split()[1:])
data.append(dict(zip(headers, vals)))
for l in raw:
toks = l.strip().split(":")
if toks[0] == "VACUUM CHARGE":
self.vacuum_charge = float(toks[1])
elif toks[0] == "VACUUM VOLUME":
self.vacuum_volume = float(toks[1])
elif toks[0] == "NUMBER OF ELECTRONS":
self.nelectrons = float(toks[1])
self.data = data
if self.parse_atomic_densities:
                # convert the charge density for each atom spit out by Bader into Chgcar objects for easy parsing
atom_chgcars = [
Chgcar.from_file(f"BvAt{str(i).zfill(4)}.dat") for i in range(1, len(self.chgcar.structure) + 1)
]
atomic_densities = []
# For each atom in the structure
for atom, loc, chg in zip(
self.chgcar.structure,
self.chgcar.structure.frac_coords,
atom_chgcars,
):
# Find the index of the atom in the charge density atom
index = np.round(np.multiply(loc, chg.dim))
data = chg.data["total"]
# Find the shift vector in the array
shift = (np.divide(chg.dim, 2) - index).astype(int)
# Shift the data so that the atomic charge density to the center for easier manipulation
shifted_data = np.roll(data, shift, axis=(0, 1, 2))
# Slices a central window from the data array
def slice_from_center(data, xwidth, ywidth, zwidth):
x, y, z = data.shape
startx = x // 2 - (xwidth // 2)
starty = y // 2 - (ywidth // 2)
startz = z // 2 - (zwidth // 2)
return data[
startx : startx + xwidth,
starty : starty + ywidth,
startz : startz + zwidth,
]
# Finds the central encompassing volume which holds all the data within a precision
def find_encompassing_vol(data, prec=1e-3):
total = np.sum(data)
for i in range(np.max(data.shape)):
sliced_data = slice_from_center(data, i, i, i)
if total - np.sum(sliced_data) < 0.1:
return sliced_data
return None
d = {
"data": find_encompassing_vol(shifted_data),
"shift": shift,
"dim": self.chgcar.dim,
}
atomic_densities.append(d)
self.atomic_densities = atomic_densities
def get_charge(self, atom_index):
"""
Convenience method to get the charge on a particular atom. If the cube file
is a spin-density file, then this will return the spin density per atom with
positive being spin up and negative being spin down.
Args:
atom_index:
Index of atom.
Returns:
Charge associated with atom from the Bader analysis.
"""
return self.data[atom_index]["charge"]
def get_charge_transfer(self, atom_index, nelect=None):
"""
Returns the charge transferred for a particular atom. If the arg nelect
is not supplied, then POTCAR must be supplied to determine nelectrons.
Args:
atom_index:
Index of atom.
nelect:
number of electrons associated with an isolated atom at this index.
For most DFT codes this corresponds to the number of valence electrons
associated with the pseudopotential
Returns:
Charge transfer associated with atom from the Bader analysis.
Given by final charge on atom - nelectrons for
associated atom.
"""
if not self.nelects and nelect is None:
raise ValueError("No NELECT info! Need POTCAR for VASP or nelect argument for cube file")
return self.data[atom_index]["charge"] - (nelect if nelect is not None else self.nelects[atom_index])
def get_charge_decorated_structure(self):
"""
Returns an charge decorated structure
Note, this assumes that the Bader analysis was correctly performed on a file
with electron densities
"""
charges = [-self.get_charge(i) for i in range(len(self.structure))]
struc = self.structure.copy()
struc.add_site_property("charge", charges)
return struc
def get_oxidation_state_decorated_structure(self, nelects=None):
"""
Returns an oxidation state decorated structure based on bader analysis results.
Note, this assumes that the Bader analysis was correctly performed on a file
with electron densities
"""
charges = [
-self.get_charge_transfer(i, None if not nelects else nelects[i]) for i in range(len(self.structure))
]
struc = self.structure.copy()
struc.add_oxidation_state_by_site(charges)
return struc
def get_decorated_structure(self, property_name, average=False):
"""
Get a property-decorated structure from the Bader analysis.
This is distinct from getting charge decorated structure, which assumes
the "standard" Bader analysis of electron densities followed by converting
electron count to charge. The expected way to use this is to call Bader on
a non-charge density file such as a spin density file, electrostatic potential
file, etc., while using the charge density file as the reference (chgref_filename)
so that the partitioning is determined via the charge, but averaging or integrating
is done for another property.
User warning: Bader analysis cannot automatically determine what property is
inside of the file. So if you want to use this for a non-conventional property
        like spin, you must ensure that the file is for the appropriate
        property and that you have an appropriate reference file.
Args:
property_name: name of the property to assign to the structure, note that
if name is "spin" this is handled as a special case, and the appropriate
spin properties are set on the species in the structure
average: whether or not to return the average of this property, rather
than the total, by dividing by the atomic volume.
Returns:
structure with site properties assigned via Bader Analysis
"""
vals = [self.get_charge(i) for i in range(len(self.structure))]
struc = self.structure.copy()
if average:
vals = np.divide(vals, [d["atomic_vol"] for d in self.data])
struc.add_site_property(property_name, vals)
if property_name == "spin":
struc.add_spin_by_site(vals)
return struc
@property
def summary(self):
"""
:return: Dict summary of key analysis, e.g., atomic volume, charge, etc.
"""
summary = {
"min_dist": [d["min_dist"] for d in self.data],
"charge": [d["charge"] for d in self.data],
"atomic_volume": [d["atomic_vol"] for d in self.data],
"vacuum_charge": self.vacuum_charge,
"vacuum_volume": self.vacuum_volume,
"reference_used": self.reference_used,
"bader_version": self.version,
}
if self.parse_atomic_densities:
summary["charge_densities"] = self.atomic_densities
if self.potcar:
charge_transfer = [self.get_charge_transfer(i) for i in range(len(self.data))]
summary["charge_transfer"] = charge_transfer
return summary
@classmethod
def from_path(cls, path, suffix=""):
"""
Convenient constructor that takes in the path name of VASP run
to perform Bader analysis.
Args:
path (str): Name of directory where VASP output files are
stored.
suffix (str): specific suffix to look for (e.g. '.relax1'
for 'CHGCAR.relax1.gz').
"""
def _get_filepath(filename):
name_pattern = filename + suffix + "*" if filename != "POTCAR" else filename + "*"
paths = glob.glob(os.path.join(path, name_pattern))
fpath = None
if len(paths) >= 1:
# using reverse=True because, if multiple files are present,
# they likely have suffixes 'static', 'relax', 'relax2', etc.
# and this would give 'static' over 'relax2' over 'relax'
# however, better to use 'suffix' kwarg to avoid this!
paths.sort(reverse=True)
warning_msg = (
"Multiple files detected, using %s" % os.path.basename(paths[0]) if len(paths) > 1 else None
)
fpath = paths[0]
else:
warning_msg = "Could not find %s" % filename
if filename in ["AECCAR0", "AECCAR2"]:
warning_msg += ", cannot calculate charge transfer."
elif filename == "POTCAR":
warning_msg += ", interpret Bader results with caution."
if warning_msg:
warnings.warn(warning_msg)
return fpath
chgcar_filename = _get_filepath("CHGCAR")
if chgcar_filename is None:
raise OSError("Could not find CHGCAR!")
potcar_filename = _get_filepath("POTCAR")
aeccar0 = _get_filepath("AECCAR0")
aeccar2 = _get_filepath("AECCAR2")
if aeccar0 and aeccar2:
# `chgsum.pl AECCAR0 AECCAR2` equivalent to obtain chgref_file
chgref = Chgcar.from_file(aeccar0) + Chgcar.from_file(aeccar2)
chgref_filename = "CHGREF"
chgref.write_file(chgref_filename)
else:
chgref_filename = None
return cls(
chgcar_filename=chgcar_filename,
potcar_filename=potcar_filename,
chgref_filename=chgref_filename,
)
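# A hedged usage sketch (paths are placeholders); this requires the `bader`
# executable on PATH and a completed VASP run in the given directory:
#
#     analysis = BaderAnalysis.from_path("/path/to/vasp_run", suffix=".relax2")
#     charge_on_first_atom = analysis.get_charge(0)
#     structure = analysis.get_oxidation_state_decorated_structure()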
def get_filepath(filename, warning, path, suffix):
"""
Args:
filename: Filename
warning: Warning message
path: Path to search
suffix: Suffixes to search.
"""
paths = glob.glob(os.path.join(path, filename + suffix + "*"))
if not paths:
warnings.warn(warning)
return None
if len(paths) > 1:
# using reverse=True because, if multiple files are present,
# they likely have suffixes 'static', 'relax', 'relax2', etc.
# and this would give 'static' over 'relax2' over 'relax'
# however, better to use 'suffix' kwarg to avoid this!
paths.sort(reverse=True)
warnings.warn(f"Multiple files detected, using {os.path.basename(path)}")
path = paths[0]
return path
def bader_analysis_from_path(path, suffix=""):
"""
Convenience method to run Bader analysis on a folder containing
typical VASP output files.
This method will:
1. Look for files CHGCAR, AECCAR0, AECCAR2, POTCAR or their gzipped
counterparts.
2. If AECCAR* files are present, constructs a temporary reference
file as AECCAR0 + AECCAR2
3. Runs Bader analysis twice: once for charge, and a second time
for the charge difference (magnetization density).
:param path: path to folder to search in
:param suffix: specific suffix to look for (e.g. '.relax1' for 'CHGCAR.relax1.gz'
:return: summary dict
"""
def _get_filepath(filename, warning, path=path, suffix=suffix):
paths = glob.glob(os.path.join(path, filename + suffix + "*"))
if not paths:
warnings.warn(warning)
return None
if len(paths) > 1:
# using reverse=True because, if multiple files are present,
# they likely have suffixes 'static', 'relax', 'relax2', etc.
# and this would give 'static' over 'relax2' over 'relax'
# however, better to use 'suffix' kwarg to avoid this!
paths.sort(reverse=True)
warnings.warn(f"Multiple files detected, using {os.path.basename(path)}")
path = paths[0]
return path
chgcar_path = _get_filepath("CHGCAR", "Could not find CHGCAR!")
chgcar = Chgcar.from_file(chgcar_path)
aeccar0_path = _get_filepath("AECCAR0", "Could not find AECCAR0, interpret Bader results with caution.")
aeccar0 = Chgcar.from_file(aeccar0_path) if aeccar0_path else None
aeccar2_path = _get_filepath("AECCAR2", "Could not find AECCAR2, interpret Bader results with caution.")
aeccar2 = Chgcar.from_file(aeccar2_path) if aeccar2_path else None
potcar_path = _get_filepath("POTCAR", "Could not find POTCAR, cannot calculate charge transfer.")
potcar = Potcar.from_file(potcar_path) if potcar_path else None
return bader_analysis_from_objects(chgcar, potcar, aeccar0, aeccar2)
def bader_analysis_from_objects(chgcar, potcar=None, aeccar0=None, aeccar2=None):
"""
Convenience method to run Bader analysis from a set
of pymatgen Chgcar and Potcar objects.
This method will:
1. If aeccar objects are present, constructs a temporary reference
file as AECCAR0 + AECCAR2
2. Runs Bader analysis twice: once for charge, and a second time
for the charge difference (magnetization density).
:param chgcar: Chgcar object
:param potcar: (optional) Potcar object
:param aeccar0: (optional) Chgcar object from aeccar0 file
:param aeccar2: (optional) Chgcar object from aeccar2 file
:return: summary dict
"""
with ScratchDir(".") as temp_dir:
if aeccar0 and aeccar2:
# construct reference file
chgref = aeccar0.linear_add(aeccar2)
chgref_path = os.path.join(temp_dir, "CHGCAR_ref")
chgref.write_file(chgref_path)
else:
chgref_path = None
chgcar.write_file("CHGCAR")
chgcar_path = os.path.join(temp_dir, "CHGCAR")
if potcar:
potcar.write_file("POTCAR")
potcar_path = os.path.join(temp_dir, "POTCAR")
else:
potcar_path = None
ba = BaderAnalysis(
chgcar_filename=chgcar_path,
potcar_filename=potcar_path,
chgref_filename=chgref_path,
)
summary = {
"min_dist": [d["min_dist"] for d in ba.data],
"charge": [d["charge"] for d in ba.data],
"atomic_volume": [d["atomic_vol"] for d in ba.data],
"vacuum_charge": ba.vacuum_charge,
"vacuum_volume": ba.vacuum_volume,
"reference_used": bool(chgref_path),
"bader_version": ba.version,
}
if potcar:
charge_transfer = [ba.get_charge_transfer(i) for i in range(len(ba.data))]
summary["charge_transfer"] = charge_transfer
if chgcar.is_spin_polarized:
# write a CHGCAR containing magnetization density only
chgcar.data["total"] = chgcar.data["diff"]
chgcar.is_spin_polarized = False
chgcar.write_file("CHGCAR_mag")
chgcar_mag_path = os.path.join(temp_dir, "CHGCAR_mag")
ba = BaderAnalysis(
chgcar_filename=chgcar_mag_path,
potcar_filename=potcar_path,
chgref_filename=chgref_path,
)
summary["magmom"] = [d["charge"] for d in ba.data]
return summary
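# Sketch of driving the helper above from pymatgen objects (file names are
# placeholders):
#
#     chgcar = Chgcar.from_file("CHGCAR")
#     potcar = Potcar.from_file("POTCAR")
#     summary = bader_analysis_from_objects(chgcar, potcar)
#     print(summary["charge"])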
| vorwerkc/pymatgen | pymatgen/command_line/bader_caller.py | Python | mit | 22,902 | 0.002358 |
import subprocess
import os
"""
What are the differences and similarities between ffmpeg, libav, and avconv?
https://stackoverflow.com/questions/9477115
ffmpeg encoders, ranked from higher to lower quality:
libopus > libvorbis >= libfdk_aac > aac > libmp3lame
libfdk_aac, due to licensing restrictions, needs to be compiled by the end user
on MacOS brew install ffmpeg --with-fdk-aac will do just that. Other OS?
https://trac.ffmpeg.org/wiki/Encode/AAC
"""
def song(input_song, output_song, folder, avconv=False, verbose=False):
"""Do the audio format conversion."""
if not input_song == output_song:
print('Converting {0} to {1}'.format(
input_song, output_song.split('.')[-1]))
if avconv:
exit_code = convert_with_avconv(input_song, output_song, folder, verbose)
else:
exit_code = convert_with_ffmpeg(input_song, output_song, folder, verbose)
return exit_code
return 0
def convert_with_avconv(input_song, output_song, folder, verbose):
"""Convert the audio file using avconv."""
if verbose:
level = 'debug'
else:
level = '0'
command = ['avconv',
'-loglevel', level,
'-i', os.path.join(folder, input_song),
'-ab', '192k',
os.path.join(folder, output_song)]
return subprocess.call(command)
def convert_with_ffmpeg(input_song, output_song, folder, verbose):
"""Convert the audio file using FFmpeg."""
ffmpeg_pre = 'ffmpeg -y '
if not verbose:
ffmpeg_pre += '-hide_banner -nostats -v panic '
input_ext = input_song.split('.')[-1]
output_ext = output_song.split('.')[-1]
if input_ext == 'm4a':
if output_ext == 'mp3':
ffmpeg_params = '-codec:v copy -codec:a libmp3lame -q:a 2 '
elif output_ext == 'webm':
ffmpeg_params = '-c:a libopus -vbr on -b:a 192k -vn '
elif input_ext == 'webm':
if output_ext == 'mp3':
ffmpeg_params = ' -ab 192k -ar 44100 -vn '
elif output_ext == 'm4a':
ffmpeg_params = '-cutoff 20000 -c:a libfdk_aac -b:a 192k -vn '
command = '{0}-i {1} {2}{3}'.format(
ffmpeg_pre, os.path.join(folder, input_song), ffmpeg_params, os.path.join(folder, output_song)).split(' ')
return subprocess.call(command)
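

# Example usage sketch: the folder and file names below are placeholders, and the
# call only succeeds if ffmpeg (or avconv, when avconv=True) is installed and the
# input/output extension pair is one of the combinations handled above.
if __name__ == '__main__':
    code = song('track.m4a', 'track.mp3', '/tmp/music', avconv=False, verbose=True)
    print('conversion exit code: {0}'.format(code))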
| AndreaMordenti/spotydowny | core/convert.py | Python | mit | 2,314 | 0.001729 |
from __future__ import absolute_import
import os.path
import requests
import wtforms
from wtforms import validators
from ..forms import TextDatasetForm
from origae import utils
from origae.utils.forms import validate_required_iff, validate_greater_than
class TextClassificationDatasetForm(TextDatasetForm):
"""
Defines the form used to create a new TextClassificationDatasetJob
"""
backend = wtforms.SelectField('DB backend',
choices=[
('lmdb', 'LMDB'),
('hdf5', 'HDF5')
],
default='lmdb',
)
def validate_backend(form, field):
if field.data == 'lmdb':
form.compression.data = 'none'
elif field.data == 'tfrecords':
form.compression.data = 'none'
elif field.data == 'hdf5':
form.encoding.data = 'none'
compression = utils.forms.SelectField(
'DB compression',
choices=[
('none', 'None'),
('gzip', 'GZIP'),
],
default='none',
tooltip=('Compressing the dataset may significantly decrease the size '
'of your database files, but it may increase read and write times.'),
)
# Use a SelectField instead of a HiddenField so that the default value
# is used when nothing is provided (through the REST API)
method = wtforms.SelectField(u'Dataset type',
choices=[
('folder', 'Folder'),
('textfile', 'Textfiles'),
],
default='folder',
)
def validate_folder_path(form, field):
if not field.data:
pass
elif utils.is_url(field.data):
# make sure the URL exists
try:
r = requests.get(field.data,
allow_redirects=False,
timeout=utils.HTTP_TIMEOUT)
if r.status_code not in [requests.codes.ok, requests.codes.moved, requests.codes.found]:
raise validators.ValidationError('URL not found')
except Exception as e:
raise validators.ValidationError('Caught %s while checking URL: %s' % (type(e).__name__, e))
else:
return True
else:
# make sure the filesystem path exists
# and make sure the filesystem path is absolute
if not os.path.exists(field.data) or not os.path.isdir(field.data):
raise validators.ValidationError('Folder does not exist')
elif not os.path.isabs(field.data):
raise validators.ValidationError('Filesystem path is not absolute')
else:
return True
#
# Method - folder
#
folder_train = utils.forms.StringField(
u'Training Images',
validators=[
validate_required_iff(method='folder'),
validate_folder_path,
],
tooltip=('Indicate a folder which holds subfolders full of images. '
'Each subfolder should be named according to the desired label for the images that it holds. '
'Can also be a URL for an apache/nginx auto-indexed folder.'),
)
folder_pct_val = utils.forms.IntegerField(
u'% for validation',
default=25,
validators=[
validate_required_iff(method='folder'),
validators.NumberRange(min=0, max=100)
],
tooltip=('You can choose to set apart a certain percentage of images '
'from the training images for the validation set.'),
)
folder_pct_test = utils.forms.IntegerField(
u'% for testing',
default=0,
validators=[
validate_required_iff(method='folder'),
validators.NumberRange(min=0, max=100)
],
tooltip=('You can choose to set apart a certain percentage of images '
'from the training images for the test set.'),
)
folder_train_min_per_class = utils.forms.IntegerField(
u'Minimum samples per class',
default=2,
validators=[
validators.Optional(),
validators.NumberRange(min=1),
],
tooltip=('You can choose to specify a minimum number of samples per class. '
'If a class has fewer samples than the specified amount it will be ignored. '
'Leave blank to ignore this feature.'),
)
folder_train_max_per_class = utils.forms.IntegerField(
u'Maximum samples per class',
validators=[
validators.Optional(),
validators.NumberRange(min=1),
validate_greater_than('folder_train_min_per_class'),
],
tooltip=('You can choose to specify a maximum number of samples per class. '
'If a class has more samples than the specified amount extra samples will be ignored. '
'Leave blank to ignore this feature.'),
)
has_val_folder = wtforms.BooleanField(
'Separate validation images folder',
default=False,
validators=[
validate_required_iff(method='folder')
]
)
folder_val = wtforms.StringField(
u'Validation Images',
validators=[
validate_required_iff(
method='folder',
has_val_folder=True),
]
)
folder_val_min_per_class = utils.forms.IntegerField(
u'Minimum samples per class',
default=2,
validators=[
validators.Optional(),
validators.NumberRange(min=1),
],
tooltip=('You can choose to specify a minimum number of samples per class. '
'If a class has fewer samples than the specified amount it will be ignored. '
'Leave blank to ignore this feature.'),
)
folder_val_max_per_class = utils.forms.IntegerField(
u'Maximum samples per class',
validators=[
validators.Optional(),
validators.NumberRange(min=1),
validate_greater_than('folder_val_min_per_class'),
],
tooltip=('You can choose to specify a maximum number of samples per class. '
'If a class has more samples than the specified amount extra samples will be ignored. '
'Leave blank to ignore this feature.'),
)
has_test_folder = wtforms.BooleanField(
'Separate test images folder',
default=False,
validators=[
validate_required_iff(method='folder')
]
)
folder_test = wtforms.StringField(
u'Test Images',
validators=[
validate_required_iff(
method='folder',
has_test_folder=True),
validate_folder_path,
]
)
folder_test_min_per_class = utils.forms.IntegerField(
u'Minimum samples per class',
default=2,
validators=[
validators.Optional(),
validators.NumberRange(min=1)
],
tooltip=('You can choose to specify a minimum number of samples per class. '
'If a class has fewer samples than the specified amount it will be ignored. '
'Leave blank to ignore this feature.'),
)
folder_test_max_per_class = utils.forms.IntegerField(
u'Maximum samples per class',
validators=[
validators.Optional(),
validators.NumberRange(min=1),
validate_greater_than('folder_test_min_per_class'),
],
tooltip=('You can choose to specify a maximum number of samples per class. '
'If a class has more samples than the specified amount extra samples will be ignored. '
'Leave blank to ignore this feature.'),
)
#
# Method - textfile
#
textfile_use_local_files = wtforms.BooleanField(
u'Use local files',
default=False,
)
textfile_train_images = utils.forms.FileField(
u'Training images',
validators=[
validate_required_iff(method='textfile',
textfile_use_local_files=False)
]
)
textfile_local_train_images = wtforms.StringField(
u'Training images',
validators=[
validate_required_iff(method='textfile',
textfile_use_local_files=True)
]
)
textfile_train_folder = wtforms.StringField(u'Training images folder')
def validate_textfile_train_folder(form, field):
if form.method.data != 'textfile':
field.errors[:] = []
raise validators.StopValidation()
if not field.data.strip():
# allow null
return True
if not os.path.exists(field.data) or not os.path.isdir(field.data):
raise validators.ValidationError('folder does not exist')
return True
textfile_use_val = wtforms.BooleanField(u'Validation set',
default=True,
validators=[
validate_required_iff(method='textfile')
]
)
textfile_val_images = utils.forms.FileField(u'Validation images',
validators=[
validate_required_iff(
method='textfile',
textfile_use_val=True,
textfile_use_local_files=False)
]
)
textfile_local_val_images = wtforms.StringField(u'Validation images',
validators=[
validate_required_iff(
method='textfile',
textfile_use_val=True,
textfile_use_local_files=True)
]
)
textfile_val_folder = wtforms.StringField(u'Validation images folder')
def validate_textfile_val_folder(form, field):
if form.method.data != 'textfile' or not form.textfile_use_val.data:
field.errors[:] = []
raise validators.StopValidation()
if not field.data.strip():
# allow null
return True
if not os.path.exists(field.data) or not os.path.isdir(field.data):
raise validators.ValidationError('folder does not exist')
return True
textfile_use_test = wtforms.BooleanField(u'Test set',
default=False,
validators=[
validate_required_iff(method='textfile')
]
)
textfile_test_images = utils.forms.FileField(u'Test images',
validators=[
validate_required_iff(
method='textfile',
textfile_use_test=True,
textfile_use_local_files=False)
]
)
textfile_local_test_images = wtforms.StringField(u'Test images',
validators=[
validate_required_iff(
method='textfile',
textfile_use_test=True,
textfile_use_local_files=True)
]
)
textfile_test_folder = wtforms.StringField(u'Test images folder')
def validate_textfile_test_folder(form, field):
if form.method.data != 'textfile' or not form.textfile_use_test.data:
field.errors[:] = []
raise validators.StopValidation()
if not field.data.strip():
# allow null
return True
if not os.path.exists(field.data) or not os.path.isdir(field.data):
raise validators.ValidationError('folder does not exist')
return True
# Can't use a BooleanField here because HTML doesn't submit anything
# for an unchecked checkbox. Since we want to use a REST API and have
# this default to True when nothing is supplied, we have to use a
# SelectField
textfile_shuffle = utils.forms.SelectField(
'Shuffle lines',
choices=[
(1, 'Yes'),
(0, 'No'),
],
coerce=int,
default=1,
tooltip="Shuffle the list[s] of images before creating the database."
)
textfile_labels_file = utils.forms.FileField(
u'Labels',
validators=[
validate_required_iff(method='textfile',
textfile_use_local_files=False)
],
tooltip=("The 'i'th line of the file should give the string label "
"associated with the '(i-1)'th numeric label. (E.g. the string label "
"for the numeric label 0 is supposed to be on line 1.)"),
)
textfile_local_labels_file = utils.forms.StringField(
u'Labels',
validators=[
validate_required_iff(method='textfile',
textfile_use_local_files=True)
],
tooltip=("The 'i'th line of the file should give the string label "
"associated with the '(i-1)'th numeric label. (E.g. the string label "
"for the numeric label 0 is supposed to be on line 1.)"),
)
| winnerineast/Origae-6 | origae/dataset/text/classification/forms.py | Python | gpl-3.0 | 14,708 | 0.001904 |
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import logging.handlers
log = logging.getLogger('imc')
console = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console.setFormatter(formatter)
def enable_file_logging(filename="imcsdk.log"):
file_handler = logging.handlers.RotatingFileHandler(
filename, maxBytes=10*1024*1024, backupCount=5)
log.addHandler(file_handler)
def set_log_level(level=logging.DEBUG):
"""
Allows setting log level
Args:
        level: logging level - import logging and pass enums from it (INFO/DEBUG/ERROR/etc.)
Returns:
None
Example:
from imcsdk import set_log_level
import logging
set_log_level(logging.INFO)
"""
log.setLevel(level)
console.setLevel(level)
set_log_level(logging.DEBUG)
log.addHandler(console)
if os.path.exists('/tmp/imcsdk_debug'):
enable_file_logging()
__author__ = 'Cisco Systems'
__email__ = 'ucs-python@cisco.com'
__version__ = '0.9.3.1'
| ragupta-git/ImcSdk | imcsdk/__init__.py | Python | apache-2.0 | 1,616 | 0.001238 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, André Paramés <git@andreparames.com>
# Based on the Git module by Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = u'''
---
module: bzr
author: "André Paramés (@andreparames)"
version_added: "1.1"
short_description: Deploy software (or files) from bzr branches
description:
- Manage I(bzr) branches to deploy files or software.
options:
name:
required: true
aliases: [ 'parent' ]
description:
- SSH or HTTP protocol address of the parent branch.
dest:
required: true
description:
- Absolute path of where the branch should be cloned to.
version:
required: false
default: "head"
description:
- What version of the branch to clone. This can be the
bzr revno or revid.
force:
required: false
default: "no"
choices: [ 'yes', 'no' ]
description:
- If C(yes), any modified files in the working
tree will be discarded. Before 1.9 the default
value was "yes".
executable:
required: false
default: null
version_added: "1.4"
description:
- Path to bzr executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
'''
EXAMPLES = '''
# Example bzr checkout from Ansible Playbooks
- bzr:
name: 'bzr+ssh://foosball.example.org/path/to/branch'
dest: /srv/checkout
version: 22
'''
import os
import re
class Bzr(object):
def __init__(self, module, parent, dest, version, bzr_path):
self.module = module
self.parent = parent
self.dest = dest
self.version = version
self.bzr_path = bzr_path
def _command(self, args_list, cwd=None, **kwargs):
(rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs)
return (rc, out, err)
def get_version(self):
'''samples the version of the bzr branch'''
cmd = "%s revno" % self.bzr_path
rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
revno = stdout.strip()
return revno
def clone(self):
'''makes a new bzr branch if it does not already exist'''
dest_dirname = os.path.dirname(self.dest)
try:
os.makedirs(dest_dirname)
except:
pass
if self.version.lower() != 'head':
args_list = ["branch", "-r", self.version, self.parent, self.dest]
else:
args_list = ["branch", self.parent, self.dest]
return self._command(args_list, check_rc=True, cwd=dest_dirname)
def has_local_mods(self):
cmd = "%s status -S" % self.bzr_path
rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
lines = stdout.splitlines()
        lines = [c for c in lines if not re.search('^\\?\\?.*$', c)]
return len(lines) > 0
def reset(self, force):
'''
Resets the index and working tree to head.
Discards any changes to tracked files in the working
tree since that commit.
'''
if not force and self.has_local_mods():
self.module.fail_json(msg="Local modifications exist in branch (force=no).")
return self._command(["revert"], check_rc=True, cwd=self.dest)
def fetch(self):
'''updates branch from remote sources'''
if self.version.lower() != 'head':
(rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest)
else:
(rc, out, err) = self._command(["pull"], cwd=self.dest)
if rc != 0:
self.module.fail_json(msg="Failed to pull")
return (rc, out, err)
def switch_version(self):
'''once pulled, switch to a particular revno or revid'''
if self.version.lower() != 'head':
args_list = ["revert", "-r", self.version]
else:
args_list = ["revert"]
return self._command(args_list, check_rc=True, cwd=self.dest)
# ===========================================
def main():
module = AnsibleModule(
argument_spec = dict(
dest=dict(required=True, type='path'),
name=dict(required=True, aliases=['parent']),
version=dict(default='head'),
force=dict(default='no', type='bool'),
executable=dict(default=None),
)
)
dest = module.params['dest']
parent = module.params['name']
version = module.params['version']
force = module.params['force']
bzr_path = module.params['executable'] or module.get_bin_path('bzr', True)
bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf')
rc, out, err, status = (0, None, None, None)
bzr = Bzr(module, parent, dest, version, bzr_path)
# if there is no bzr configuration, do a branch operation
# else pull and switch the version
before = None
local_mods = False
if not os.path.exists(bzrconfig):
(rc, out, err) = bzr.clone()
else:
# else do a pull
local_mods = bzr.has_local_mods()
before = bzr.get_version()
(rc, out, err) = bzr.reset(force)
if rc != 0:
module.fail_json(msg=err)
(rc, out, err) = bzr.fetch()
if rc != 0:
module.fail_json(msg=err)
# switch to version specified regardless of whether
# we cloned or pulled
(rc, out, err) = bzr.switch_version()
# determine if we changed anything
after = bzr.get_version()
changed = False
if before != after or local_mods:
changed = True
module.exit_json(changed=changed, before=before, after=after)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| Rajeshkumar90/ansible-modules-extras | source_control/bzr.py | Python | gpl-3.0 | 6,658 | 0.001954 |
# -*- encoding: utf-8 -*-
from imaplib import ParseFlags
# mockimaplib: A very simple mock server module for imap client APIs
# Copyright (C) 2014 Alan Etkin <spametki@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or(at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/lgpl.html>
"""
mockimaplib allows you to test applications connecting to a dummy imap
service. For more details on the api subset implemented,
refer to the imaplib docs.
The client should configure a dictionary to map imap string queries to sets
of entries stored in a message dummy storage dictionary. The module includes
a small set of default message records (SPAM and MESSAGES), two mailboxes
(Draft and INBOX) and a list of query/resultset entries (RESULTS).
Usage:
>>> import mockimaplib
>>> connection = mockimaplib.IMAP4_SSL(<host>)
>>> connection.login(<user>, <password>)
None
>>> connection.select("INBOX")
("OK", ... <mailbox length>)
# fetch commands specifying single uid or message id
# will try to get messages recorded in SPAM
>>> connection.uid(...)
<search query or fetch result>
# returns a string list of matching message ids
>>> connection.search(<query>)
("OK", ... "1 2 ... n")
"""
MESSAGES = (
'MIME-Version: 1.0\r\nReceived: by 10.140.91.199 with HTTP; Mon, 27 Jan 2014 13:52:30 -0800 (PST)\r\nDate: Mon, 27 Jan 2014 19:52:30 -0200\r\nDelivered-To: nurse@example.com\r\nMessage-ID: <10101010101010010000010101010001010101001010010000001@mail.example.com>\r\nSubject: spam1\r\nFrom: Mr. Gumby <gumby@example.com>\r\nTo: The nurse <nurse@example.com>\r\nContent-Type: text/plain; charset=ISO-8859-1\r\n\r\nNurse!\r\n\r\n\r\n',
'MIME-Version: 1.0\r\nReceived: by 10.140.91.199 with HTTP; Mon, 27 Jan 2014 13:52:47 -0800 (PST)\r\nDate: Mon, 27 Jan 2014 19:52:47 -0200\r\nDelivered-To: nurse@example.com\r\nMessage-ID: <101010101010100100000101010100010101010010100100000010@mail.example.com>\r\nSubject: spam2\r\nFrom: Mr. Gumby <gumby@example.com>\r\nTo: The nurse <nurse@example.com>\r\nContent-Type: text/plain; charset=ISO-8859-1\r\n\r\nNurse, nurse!',
'MIME-Version: 1.0\r\nReceived: by 10.140.91.199 with HTTP; Mon, 27 Jan 2014 13:54:54 -0800 (PST)\r\nDate: Mon, 27 Jan 2014 19:54:54 -0200\r\nDelivered-To: nurse@example.com\r\nMessage-ID: <1010101010101001000001010101000101010100101001000000101@mail.example.com>\r\nSubject: spamalot1\r\nFrom: Mr. Gumby <gumby@example.com>\r\nTo: The nurse <nurse@example.com>\r\nContent-Type: text/plain; charset=ISO-8859-1\r\n\r\nNurse!\r\n\r\n\r\n',
'MIME-Version: 1.0\r\n\r\nReceived: by 10.140.91.199 with HTTP; Mon, 27 Jan 2014 13:54:54 -0800 (PST)\r\nDate: Mon, 27 Jan 2014 19:54:54 -0200\r\nDelivered-To: nurse@example.com\r\nMessage-ID: <101010101010100100000101010100010101010010100100000010101@mail.example.com>\r\nSubject: spamalot2\r\nFrom: Mr. Gumby <gumby@example.com>\r\nTo: The nurse <nurse@example.com>\r\nContent-Type: text/plain; charset=ISO-8859-1\r\n\r\nNurse! ... Nurse! ... Nurse!\r\n\r\n\r\n')
SPAM = {
"INBOX": [
{"uid": "483209",
"headers": MESSAGES[0],
"complete": MESSAGES[0],
"flags": ""},
{"uid": "483211",
"headers": MESSAGES[1],
"complete": MESSAGES[1],
"flags": ""},
{"uid": "483225",
"headers": MESSAGES[2],
"complete": MESSAGES[2],
"flags": ""}],
"Draft":[
{"uid": "483432",
"headers": MESSAGES[3],
"complete": MESSAGES[3],
"flags": ""},]
}
RESULTS = {
# <query string>: [<str uid> | <long id>, ...]
"INBOX": {
"(ALL)": (1, 2, 3),
"(1:3)": (1, 2, 3)},
"Draft": {
"(1:1)": (1,)},
}
class Connection(object):
"""Dummy connection object for the imap client.
By default, uses the module SPAM and RESULT
sets (use Connection.setup for custom values)"""
def login(self, user, password):
pass
def __init__(self):
self._readonly = False
self._mailbox = None
self.setup()
def list(self):
return ('OK', ['(\\HasNoChildren) "/" "%s"' % key for key in self.spam])
def select(self, tablename, readonly=False):
self._readonly = readonly
"""args: mailbox, boolean
        result[1][0] -> int last message id / mailbox length
result[0] = 'OK'
"""
self._mailbox = tablename
        return ('OK', (len(self.spam[self._mailbox]), None))
def uid(self, command, uid, arg):
""" args:
command: "search" | "fetch"
uid: None | uid
parts: "(ALL)" | "(RFC822 FLAGS)" | "(RFC822.HEADER FLAGS)"
"search", None, "(ALL)" -> ("OK", ("uid_1 uid_2 ... uid_<mailbox length>", None))
"search", None, "<query>" -> ("OK", ("uid_1 uid_2 ... uid_n", None))
"fetch", uid, parts -> ("OK", (("<id> ...", "<raw message as specified in parts>"), "<flags>")
[0] [1][0][0] [1][0][1] [1][1]
"""
if command == "search":
return self._search(arg)
elif command == "fetch":
return self._fetch(uid, arg)
def _search(self, query):
return ("OK", (" ".join([str(item["uid"]) for item in self._get_messages(query)]), None))
def _fetch(self, value, arg):
try:
message = self.spam[self._mailbox][value - 1]
message_id = value
except TypeError:
for x, item in enumerate(self.spam[self._mailbox]):
if item["uid"] == value:
message = item
message_id = x + 1
break
parts = "headers"
if arg in ("(ALL)", "(RFC822 FLAGS)"):
parts = "complete"
return ("OK", (("%s " % message_id, message[parts]), message["flags"]))
def _get_messages(self, query):
if query.strip().isdigit():
return [self.spam[self._mailbox][int(query.strip()) - 1],]
elif query[1:-1].strip().isdigit():
return [self.spam[self._mailbox][int(query[1:-1].strip()) -1],]
elif query[1:-1].replace("UID", "").strip().isdigit():
for item in self.spam[self._mailbox]:
if item["uid"] == query[1:-1].replace("UID", "").strip():
return [item,]
messages = []
try:
for m in self.results[self._mailbox][query]:
try:
self.spam[self._mailbox][m - 1]["id"] = m
messages.append(self.spam[self._mailbox][m - 1])
except TypeError:
for x, item in enumerate(self.spam[self._mailbox]):
if item["uid"] == m:
item["id"] = x + 1
messages.append(item)
break
except IndexError:
# message removed
pass
return messages
except KeyError:
raise ValueError("The client issued an unexpected query: %s" % query)
def setup(self, spam={}, results={}):
"""adds custom message and query databases or sets
the values to the module defaults.
"""
self.spam = spam
self.results = results
if not spam:
for key in SPAM:
self.spam[key] = []
for d in SPAM[key]:
self.spam[key].append(d.copy())
if not results:
for key in RESULTS:
self.results[key] = RESULTS[key].copy()
def search(self, first, query):
""" args:
first: None
query: string with mailbox query (flags, date, uid, id, ...)
example: '2:15723 BEFORE 27-Jan-2014 FROM "gumby"'
result[1][0] -> "id_1 id_2 ... id_n"
"""
messages = self._get_messages(query)
ids = " ".join([str(item["id"]) for item in messages])
return ("OK", (ids, None))
def append(self, mailbox, flags, struct_time, message):
"""
result, data = self.connection.append(mailbox, flags, struct_time, message)
if result == "OK":
uid = int(re.findall("\d+", str(data))[-1])
"""
last = self.spam[mailbox][-1]
try:
uid = int(last["uid"]) +1
except ValueError:
alluids = []
for _mailbox in self.spam.keys():
for item in self.spam[_mailbox]:
try:
alluids.append(int(item["uid"]))
except:
pass
if len(alluids) > 0:
uid = max(alluids) + 1
else:
uid = 1
flags = "FLAGS " + flags
item = {"uid": str(uid), "headers": message, "complete": message, "flags": flags}
self.spam[mailbox].append(item)
return ("OK", "spam spam %s spam" % uid)
def store(self, *args):
"""
implements some flag commands
args: ("<id>", "<+|->FLAGS", "(\\Flag1 \\Flag2 ... \\Flagn)")
"""
        message = self.spam[self._mailbox][int(args[0]) - 1]
old_flags = ParseFlags(message["flags"])
flags = ParseFlags("FLAGS" + args[2])
if args[1].strip().startswith("+"):
message["flags"] = "FLAGS (%s)" % " ".join(set(flags + old_flags))
elif args[1].strip().startswith("-"):
message["flags"] = "FLAGS (%s)" % " ".join([flag for flag in old_flags if not flag in flags])
def expunge(self):
"""implements removal of deleted flag messages"""
for x, item in enumerate(self.spam[self._mailbox]):
if "\\Deleted" in item["flags"]:
self.spam[self._mailbox].pop(x)
class IMAP4(object):
""">>> connection = IMAP4() # creates the dummy imap4 client object"""
def __new__(self, *args, **kwargs):
# args: (server, port)
return Connection()
IMAP4_SSL = IMAP4
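

# Example run-through against the default SPAM/RESULTS fixtures defined above; the
# host, user and password arguments are placeholders because the mock ignores them.
if __name__ == '__main__':
    connection = IMAP4_SSL('localhost')
    connection.login('user', 'password')
    print(connection.select('INBOX'))        # ('OK', (3, None))
    print(connection.search(None, '(ALL)'))  # ('OK', ('1 2 3', None))
    print(connection.uid('fetch', '483209', '(RFC822 FLAGS)'))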
| SpaceKatt/CSPLN | apps/scaffolding/win/web2py/gluon/contrib/mockimaplib.py | Python | gpl-3.0 | 10,569 | 0.002933 |
#######################################################################
#
# Author: Gabi Roeger
# Modified by: Silvia Richter (silvia.richter@nicta.com.au)
# (C) Copyright 2008: Gabi Roeger and NICTA
#
# This file is part of LAMA.
#
# LAMA is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the license, or (at your option) any later version.
#
# LAMA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
#######################################################################
import string
import conditions
def parse_expression(exp):
if isinstance(exp, list):
functionsymbol = exp[0]
return PrimitiveNumericExpression(functionsymbol,
[conditions.parse_term(arg) for arg in exp[1:]])
elif exp.replace(".","").isdigit():
return NumericConstant(string.atof(exp))
else:
return PrimitiveNumericExpression(exp,[])
def parse_assignment(alist):
assert len(alist) == 3
op = alist[0]
head = parse_expression(alist[1])
exp = parse_expression(alist[2])
if op == "=":
return Assign(head, exp)
elif op == "increase":
return Increase(head, exp)
else:
assert False, "Assignment operator not supported."
class FunctionalExpression(object):
def __init__(self, parts):
self.parts = tuple(parts)
def dump(self, indent=" "):
print "%s%s" % (indent, self._dump())
for part in self.parts:
part.dump(indent + " ")
def _dump(self):
return self.__class__.__name__
def instantiate(self, var_mapping, init_facts):
raise ValueError("Cannot instantiate condition: not normalized")
class NumericConstant(FunctionalExpression):
parts = ()
def __init__(self, value):
self.value = value
def __eq__(self, other):
return (self.__class__ == other.__class__ and self.value == other.value)
def __str__(self):
return "%s %s" % (self.__class__.__name__, self.value)
def _dump(self):
return str(self)
def instantiate(self, var_mapping, init_facts):
return self
class PrimitiveNumericExpression(FunctionalExpression):
parts = ()
def __init__(self, symbol, args):
self.symbol = symbol
self.args = tuple(args)
def __eq__(self, other):
if not (self.__class__ == other.__class__ and self.symbol == other.symbol
and len(self.args) == len(other.args)):
return False
else:
for s,o in zip(self.args, other.args):
if not s == o:
return False
return True
def __str__(self):
return "%s %s(%s)" % ("PNE", self.symbol, ", ".join(map(str, self.args)))
def dump(self, indent=" "):
print "%s%s" % (indent, self._dump())
for arg in self.args:
arg.dump(indent + " ")
def _dump(self):
return str(self)
def instantiate(self, var_mapping, init_facts):
args = [conditions.ObjectTerm(var_mapping.get(arg.name, arg.name)) for arg in self.args]
pne = PrimitiveNumericExpression(self.symbol, args)
assert not self.symbol == "total-cost"
# We know this expression is constant. Substitute it by corresponding
# initialization from task.
for fact in init_facts:
if isinstance(fact, FunctionAssignment):
if fact.fluent == pne:
return fact.expression
assert False, "Could not find instantiation for PNE!"
class FunctionAssignment(object):
def __init__(self, fluent, expression):
self.fluent = fluent
self.expression = expression
def __str__(self):
return "%s %s %s" % (self.__class__.__name__, self.fluent, self.expression)
def dump(self, indent=" "):
print "%s%s" % (indent, self._dump())
self.fluent.dump(indent + " ")
self.expression.dump(indent + " ")
def _dump(self):
return self.__class__.__name__
def instantiate(self, var_mapping, init_facts):
if not (isinstance(self.expression, PrimitiveNumericExpression) or
isinstance(self.expression, NumericConstant)):
raise ValueError("Cannot instantiate assignment: not normalized")
# We know that this assignment is a cost effect of an action (for initial state
# assignments, "instantiate" is not called). Hence, we know that the fluent is
# the 0-ary "total-cost" which does not need to be instantiated
assert self.fluent.symbol == "total-cost"
fluent = self.fluent
expression = self.expression.instantiate(var_mapping, init_facts)
return self.__class__(fluent, expression)
class Assign(FunctionAssignment):
def __str__(self):
return "%s := %s" % (self.fluent, self.expression)
class Increase(FunctionAssignment):
pass
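

# Small usage sketch (Python 2, matching the module above): builds a numeric
# constant and an "increase" assignment for the 0-ary total-cost function and
# dumps them; the argument lists mirror the parsed PDDL structures expected here.
if __name__ == "__main__":
    parse_expression("3.5").dump()
    parse_assignment(["increase", ["total-cost"], "1"]).dump()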
| PlanTool/plantool | wrappingPlanners/Deterministic/LAMA/seq-sat-lama/lama/translate/pddl/f_expression.py | Python | gpl-2.0 | 5,321 | 0.008081 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import web
from web import form as webform
import httpconfig
class Form(object):
"""Form class"""
def __init__(self, names=[]):
self._form = self.createForm(names)
self.httpConfig = httpconfig.HttpConfig(web.ctx.env["DOCUMENT_ROOT"])
@property
def form(self):
return self._form
def createForm(self, names=[]):
# Text area for sending path data
pathDataArea = webform.Textarea("", rows=30, cols=90, value="", id="pathData", hidden=True)
form = webform.Form(pathDataArea)
return form
| inbloom/legacy-projects | lri-middleware/path_builder/form.py | Python | apache-2.0 | 1,107 | 0.001807 |
# Copyright (c) 2014,Vienna University of Technology,
# Department of Geodesy and Geoinformation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Vienna University of Technology,
# Department of Geodesy and Geoinformation nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL VIENNA UNIVERSITY OF TECHNOLOGY,
# DEPARTMENT OF GEODESY AND GEOINFORMATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Created on Mar 7, 2014
Plot anomalies around climatology using colors
@author: Christoph Paulik christoph.paulik@geo.tuwien.ac.at
'''
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pandas as pd
import pytesmo.time_series.anomaly as anom
def plot_clim_anom(df, clim=None, axes=None, markersize=0.75,
mfc='0.3', mec='0.3', clim_color='0.0',
clim_linewidth=0.5, clim_linestyle='-',
pos_anom_color='#799ADA', neg_anom_color='#FD8086',
anom_linewidth=0.2, add_titles=True):
"""
Takes a pandas DataFrame and calculates the climatology and anomaly
and plots them in a nice way for each column
Parameters
----------
df : pandas.DataFrame
clim : pandas.DataFrame, optional
if given these climatologies will be used
if not given then climatologies will be calculated
this DataFrame must have the same number of columns as df
and also the column names.
each climatology must have doy as index.
axes : list of matplotlib.Axes, optional
list of axes on which each column should be plotted
if not given a standard layout is generated
markersize : float, optional
size of the markers for the datapoints
mfc : matplotlib color, optional
markerfacecolor, color of the marker face
mec : matplotlib color, optional
markeredgecolor
clim_color : matplotlib color, optional
color of the climatology
clim_linewidth : float, optional
linewidth of the climatology
clim_linestyle : string, optional
linestyle of the climatology
pos_anom_color : matplotlib color, optional
color of the positive anomaly
neg_anom_color : matplotlib color, optional
color of the negative anomaly
anom_linewidth : float, optional
linewidth of the anomaly lines
add_titles : boolean, optional
if set each subplot will have it's column name as title
Default : True
Returns
-------
Figure : matplotlib.Figure
if no axes were given
axes : list of matploblib.Axes
if no axes were given
"""
if type(df) == pd.Series:
df = pd.DataFrame(df)
nr_columns = len(df.columns)
# make own axis if necessary
if axes is None:
own_axis = True
gs = gridspec.GridSpec(nr_columns, 1, right=0.8)
fig = plt.figure(num=None, figsize=(6, 2 * nr_columns),
dpi=150, facecolor='w', edgecolor='k')
last_axis = fig.add_subplot(gs[nr_columns - 1])
axes = []
for i, grid in enumerate(gs):
if i < nr_columns - 1:
ax = fig.add_subplot(grid, sharex=last_axis)
axes.append(ax)
ax.xaxis.set_visible(False)
axes.append(last_axis)
else:
own_axis = False
for i, column in enumerate(df):
Ser = df[column]
ax = axes[i]
if clim is None:
clima = anom.calc_climatology(Ser)
else:
clima = clim[column]
anomaly = anom.calc_anomaly(Ser, climatology=clima, return_clim=True)
anomaly[Ser.name] = Ser
anomaly = anomaly.dropna()
pos_anom = anomaly[Ser.name].values > anomaly['climatology'].values
neg_anom = anomaly[Ser.name].values < anomaly['climatology'].values
ax.plot(anomaly.index, anomaly[Ser.name].values, 'o',
markersize=markersize, mfc=mfc, mec=mec)
ax.plot(anomaly.index, anomaly['climatology'].values,
linestyle=clim_linestyle,
color=clim_color,
linewidth=clim_linewidth)
ax.fill_between(anomaly.index,
anomaly[Ser.name].values,
anomaly['climatology'].values, interpolate=True,
where=pos_anom, color=pos_anom_color,
linewidth=anom_linewidth)
ax.fill_between(anomaly.index,
anomaly[Ser.name].values,
anomaly['climatology'].values, interpolate=True,
where=neg_anom, color=neg_anom_color,
linewidth=anom_linewidth)
if add_titles:
ax.set_title(column)
if own_axis:
return fig, axes
else:
return None, None
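

# Example usage sketch: builds a two-year synthetic daily series so the
# climatology/anomaly split has something to work with; the column name and the
# seasonal-plus-noise signal are arbitrary choices for illustration only.
if __name__ == "__main__":
    import numpy as np
    index = pd.date_range("2007-01-01", "2008-12-31", freq="D")
    signal = np.sin(np.arange(index.size) * 2 * np.pi / 365.25) + 0.3 * np.random.randn(index.size)
    example_df = pd.DataFrame({"soil moisture": signal}, index=index)
    fig, axes = plot_clim_anom(example_df)
    plt.show()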
| christophreimer/pytesmo | pytesmo/time_series/plotting.py | Python | bsd-3-clause | 6,133 | 0.000163 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'boardOptions.ui'
#
# Created: Fri Oct 4 12:27:03 2013
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
from commons.i18n import *
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_BoardOptions(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.setWindowModality(QtCore.Qt.WindowModal)
Dialog.resize(450, 300)
Dialog.setMaximumSize(QtCore.QSize(450, 300))
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
self.buttonBox.setGeometry(QtCore.QRect(60, 260, 251, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.widget = QtGui.QWidget(Dialog)
self.widget.setGeometry(QtCore.QRect(50, 30, 350, 191))
self.widget.setObjectName(_fromUtf8("widget"))
self.verticalLayout = QtGui.QVBoxLayout(self.widget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label = QtGui.QLabel(self.widget)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout.addWidget(self.label)
self.comboBox = QtGui.QComboBox(self.widget)
self.comboBox.setObjectName(_fromUtf8("comboBox"))
self.horizontalLayout.addWidget(self.comboBox)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.label_2 = QtGui.QLabel(self.widget)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout_2.addWidget(self.label_2)
self.comboBox_2 = QtGui.QComboBox(self.widget)
self.comboBox_2.setObjectName(_fromUtf8("comboBox_2"))
self.horizontalLayout_2.addWidget(self.comboBox_2)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.label_3 = QtGui.QLabel(self.widget)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout_3.addWidget(self.label_3)
self.comboBox_3 = QtGui.QComboBox(self.widget)
self.comboBox_3.setObjectName(_fromUtf8("comboBox_3"))
self.horizontalLayout_3.addWidget(self.comboBox_3)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", i18n('Options Board'), None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Dialog", i18n("Balls"), None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("Dialog", i18n("Size"), None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("Dialog", i18n("Head"), None, QtGui.QApplication.UnicodeUTF8))
| mateoqac/unqTip | gui/views/boardOption.py | Python | gpl-3.0 | 3,726 | 0.002952 |
from compare_mlp import calculate_distance_pairs, load_models,get_filenames, plot_distance_pairs, plot_distances_from_target
import unittest
class DistanceTestCase(unittest.TestCase):
def setUp(self):
self.afiles = load_models(get_filenames("best_model_mlp", "zero_blur_a.pkl"))
self.bfiles = load_models(get_filenames("best_model_mlp", "rand.pkl"))
def testDistanceBetweenZeroAndRandModels(self):
distances = calculate_distance_pairs(self.afiles, self.bfiles)
plot_distance_pairs(distances)
def testDistanceBetweenZeroModelsAndZeroTarget(self):
plot_distances_from_target(self.afiles[-1], self.afiles)
def testDistanceBetweenRandModelsAndRandTarget(self):
plot_distances_from_target(self.bfiles[-1], self.bfiles)
# def testDistanceBetweenRandModelsAndItself(self):
# distances = calculate_distance_pairs(self.bfiles, self.bfiles)
# plot_distance_pairs(distances)
if __name__ == '__main__':
unittest.main() | laputian/dml | mlp_test/test_compare_mlp_unit.py | Python | mit | 1,003 | 0.004985 |
import db
import db.exceptions
import sqlalchemy
import string
import random
KEY_LENGTH = 40
def generate(owner_id):
"""Generate new key for a specified user.
Doesn't check if user exists.
Args:
owner_id: ID of a user that will be associated with a key.
Returns:
Value of the new key.
"""
with db.engine.connect() as connection:
value = _generate_key(KEY_LENGTH)
connection.execute(sqlalchemy.text("""
INSERT INTO api_key (value, owner)
VALUES (:value, :owner)
"""), {
"value": value,
"owner": owner_id
})
return value
def get_active(owner_id):
"""Get active keys for a user.
Doesn't check if user exists.
Args:
owner_id: ID of a user who owns the key.
Returns:
List of active API keys.
"""
with db.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT value
FROM api_key
WHERE owner = :owner
"""), {"owner": owner_id})
return [row["value"] for row in result.fetchall()]
def revoke(value):
"""Revoke key with a given value."""
with db.engine.connect() as connection:
connection.execute(sqlalchemy.text("""
UPDATE api_key
SET is_active = FALSE
WHERE value = :value
"""), {"value": value})
def revoke_all(owner_id):
"""Revoke all keys owned by a user."""
with db.engine.connect() as connection:
connection.execute(sqlalchemy.text("""
UPDATE api_key
SET is_active = FALSE
WHERE owner = :owner
"""), {"owner": owner_id})
def is_active(value):
"""Check if key is active.
Args:
value: Value of a key.
Returns:
True if key is active, False if it's not.
Raises:
NoDataFoundException: Specified key was not found.
"""
with db.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT is_active
FROM api_key
WHERE value = :value
"""), {"value": value})
row = result.fetchone()
if not row:
raise db.exceptions.NoDataFoundException("Can't find specified API key.")
return row["is_active"]
def _generate_key(length):
"""Generates random string with a specified length."""
return ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits)
for _ in range(length))
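

# Example usage sketch: assumes db.engine has already been initialised and that the
# owner id below exists in the user table, so this only illustrates the intended
# call sequence of the helpers above.
if __name__ == "__main__":
    example_owner_id = 1  # placeholder user id
    new_key = generate(example_owner_id)
    print("active keys: %s" % get_active(example_owner_id))
    revoke(new_key)
    print("still active after revoke: %s" % is_active(new_key))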
| metabrainz/acousticbrainz-server | db/api_key.py | Python | gpl-2.0 | 2,579 | 0.000775 |
from __future__ import absolute_import, division, print_function
import numpy as np
import os
import logging
def _read_amira(src_file):
"""
Reads all information contained within standard AmiraMesh data sets.
Separate the header information from the image/volume, data.
Parameters
----------
src_file : str
The path and file name pointing to the AmiraMesh file to be loaded.
Returns
-------
am_header : list of strings
This list contains all of the raw information contained in the
AmiraMesh file header. Contains all of the raw header information
am_data : str
A compiled string containing all of the image array data, that was
stored in the source AmiraMesh data file. Contains the raw image data
"""
am_header = []
am_data = []
with open(os.path.normpath(src_file), 'r') as input_file:
while True:
line = input_file.readline()
am_header.append(line)
if (line == '# Data section follows\n'):
input_file.readline()
break
am_data = input_file.read()
return am_header, am_data
def _amira_data_to_numpy(am_data, header_dict, flip_z=True):
"""
Transform output of `_read_amira` to a numpy array of the dtype listed in
the AmiraMesh header dictionary. The standard format for Avizo Binary
files is IEEE binary. Big or little endian-ness is stipulated in the header
    information, and is assessed and taken into account by this function as
well, during the conversion process.
Parameters
----------
am_data : str
String object containing all of the image array data, formatted as IEEE
binary. Current dType options include:
float
short
ushort
byte
header_dict : dict
Metadata dictionary containing all relevant attributes pertaining to
the image array. This metadata dictionary is the output from the
function `_create_md_dict`.
flip_z : bool, optional.
Defaults to True
This option is included because the .am data sets evaluated thus far
have opposite z-axis indexing than numpy arrays. This switch currently
defaults to "True" in order to ensure that z-axis indexing remains
consistent with data processed using Avizo.
Setting this switch to "True" will flip the z-axis during processing,
and a value of "False" will keep the array is initially assigned during
the array reshaping step.
Returns
-------
output : ndarray
Numpy ndarray containing the image data converted from the AmiraMesh
file. This data array is ready for further processing using the NSLS-II
function library, or other operations able to operate on numpy arrays.
"""
Zdim = header_dict['array_dimensions']['z_dimension']
Ydim = header_dict['array_dimensions']['y_dimension']
Xdim = header_dict['array_dimensions']['x_dimension']
# Strip out null characters from the string of binary values
# Dictionary of the encoding types for AmiraMesh files
am_format_dict = {'BINARY-LITTLE-ENDIAN': '<',
'BINARY': '>',
'ASCII': 'unknown'}
# Dictionary of the data types encountered so far in AmiraMesh files
am_dtype_dict = {'float': 'f4',
'short': 'h4',
'ushort': 'H4',
'byte': 'b'}
# Had to split out the stripping of new line characters and conversion
# of the original string data based on whether source data is BINARY
# format or ASCII format. These format types require different stripping
# tools and different string conversion tools.
if header_dict['data_format'] == 'BINARY-LITTLE-ENDIAN':
data_strip = am_data.strip('\n')
flt_values = np.fromstring(
data_strip, (am_format_dict[header_dict['data_format']] +
am_dtype_dict[header_dict['data_type']]))
if header_dict['data_format'] == 'ASCII':
data_strip = am_data.translate(None, '\n')
string_list = data_strip.split(" ")
string_list = string_list[0:(len(string_list)-2)]
flt_values = np.array(
string_list).astype(am_dtype_dict[header_dict['data_type']])
# Resize the 1D array to the correct ndarray dimensions
# Note that resize is in-place whereas reshape is not
flt_values.resize(Zdim, Ydim, Xdim)
output = flt_values
if flip_z:
        output = flt_values[::-1, :, :]
return output
def _clean_amira_header(header_list):
"""
Strip the string list of all "empty" characters,including new line
characters ('\n') and empty lines. Splits each header line (which
originally is stored as a single string) into individual words, numbers or
characters, using spaces between words as the separating operator. The
output of this function is used to generate the metadata dictionary for
the image data set.
Parameters
----------
header_list : list of strings
This is the header output from the function _read_amira()
Returns
-------
clean_header : list of strings
This header list has been stripped and sorted and is now ready for
populating the metadata dictionary for the image data set.
"""
clean_header = []
for row in header_list:
split_header = filter(None, [word.translate(None, ',"')
for word in row.strip('\n').split()])
clean_header.append(split_header)
return clean_header
def _create_md_dict(clean_header):
"""
Populates the a dictionary with all information pertinent to the image
data set that was originally stored in the AmiraMesh file.
Parameters
----------
clean_header : list of strings
        This is the output from the _clean_amira_header function.
"""
# Avizo specific metadata
md_dict = {'software_src': clean_header[0][1],
'data_format': clean_header[0][2],
'data_format_version': clean_header[0][3]}
if md_dict['data_format'] == '3D':
md_dict['data_format'] = clean_header[0][3]
md_dict['data_format_version'] = clean_header[0][4]
for header_line in clean_header:
hl = header_line
if 'define' in hl:
hl = hl
md_dict['array_dimensions'] = {
'x_dimension': int(hl[hl.index('define') + 2]),
'y_dimension': int(hl[hl.index('define') + 3]),
'z_dimension': int(hl[hl.index('define') + 4])}
elif 'Content' in hl:
md_dict['data_type'] = hl[hl.index('Content') + 2]
elif 'CoordType' in hl:
md_dict['coord_type'] = hl[hl.index('CoordType') + 1]
elif 'BoundingBox' in hl:
hl = hl
md_dict['bounding_box'] = {
'x_min': float(hl[hl.index('BoundingBox') + 1]),
'x_max': float(hl[hl.index('BoundingBox') + 2]),
'y_min': float(hl[hl.index('BoundingBox') + 3]),
'y_max': float(hl[hl.index('BoundingBox') + 4]),
'z_min': float(hl[hl.index('BoundingBox') + 5]),
'z_max': float(hl[hl.index('BoundingBox') + 6])}
# Parameter definition for voxel resolution calculations
bbox = [md_dict['bounding_box']['x_min'],
md_dict['bounding_box']['x_max'],
md_dict['bounding_box']['y_min'],
md_dict['bounding_box']['y_max'],
md_dict['bounding_box']['z_min'],
md_dict['bounding_box']['z_max']]
dims = [md_dict['array_dimensions']['x_dimension'],
md_dict['array_dimensions']['y_dimension'],
md_dict['array_dimensions']['z_dimension']]
# Voxel resolution calculation
resolution_list = []
for index in np.arange(len(dims)):
if dims[index] > 1:
resolution_list.append(
(bbox[(2*index+1)] - bbox[(2*index)]) /
(dims[index] - 1))
else:
resolution_list.append(0)
# isotropy determination (isotropic res, or anisotropic res)
if (resolution_list[1]/resolution_list[0] > 0.99 and
resolution_list[2]/resolution_list[0] > 0.99 and
resolution_list[1]/resolution_list[0] < 1.01 and
resolution_list[2]/resolution_list[0] < 1.01):
md_dict['resolution'] = {'zyx_value': resolution_list[0],
'type': 'isotropic'}
else:
md_dict['resolution'] = {
'zyx_value': (resolution_list[2],
resolution_list[1],
resolution_list[0]),
'type': 'anisotropic'}
elif 'Units' in hl:
try:
units = str(hl[hl.index('Units') + 2])
md_dict['units'] = units
except:
logging.debug('Units value undefined in source data set. '
'Reverting to default units value of pixels')
md_dict['units'] = 'pixels'
elif 'Coordinates' in hl:
coords = str(hl[hl.index('Coordinates') + 1])
md_dict['coordinates'] = coords
return md_dict
def load_amiramesh(file_path):
"""
Load and convert an AmiraMesh binary file to a numpy array.
Parameters
----------
file_path : str
The path and file name of the AmiraMesh file to be loaded.
Returns
-------
md_dict : dict
Dictionary containing all pertinent header information associated with
the data set.
np_array : ndarray
An ndarray containing the image data set to be loaded. Values contained
in the resulting volume are set to be of float data type by default.
"""
header, data = _read_amira(file_path)
clean_header = _clean_amira_header(header)
md_dict = _create_md_dict(clean_header)
np_array = _amira_data_to_numpy(data, md_dict)
return md_dict, np_array
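

# Example usage sketch: the .am path below is a placeholder; any standard AmiraMesh
# file written by Avizo should work. Prints the parsed resolution metadata and the
# shape of the reconstructed volume.
if __name__ == "__main__":
    example_md, example_volume = load_amiramesh("example_volume.am")
    print(example_md['resolution'])
    print(example_volume.shape)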
| danielballan/scikit-xray | skbeam/io/avizo_io.py | Python | bsd-3-clause | 10,337 | 0.000097 |
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
name: lines
author: Daniel Hokka Zakrisson (!UNKNOWN) <daniel@hozac.com>
version_added: "0.9"
short_description: read lines from command
description:
- Run one or more commands and split the output into lines, returning them as a list
options:
_terms:
description: command(s) to run
required: True
notes:
- Like all lookups, this runs on the Ansible controller and is unaffected by other keywords such as 'become'.
If you need to use different permissions, you must change the command or run Ansible as another user.
- Alternatively, you can use a shell/command task that runs against localhost and registers the result.
"""
EXAMPLES = """
- name: We could read the file directly, but this shows output from command
ansible.builtin.debug: msg="{{ item }} is an output line from running cat on /etc/motd"
with_lines: cat /etc/motd
- name: More useful example of looping over a command result
ansible.builtin.shell: "/usr/bin/frobnicate {{ item }}"
with_lines:
- "/usr/bin/frobnications_per_host --param {{ inventory_hostname }}"
"""
RETURN = """
_list:
description:
- lines of stdout from command
type: list
elements: str
"""
import subprocess
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_text
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
ret = []
for term in terms:
p = subprocess.Popen(term, cwd=self._loader.get_basedir(), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode == 0:
ret.extend([to_text(l) for l in stdout.splitlines()])
else:
raise AnsibleError("lookup_plugin.lines(%s) returned %d" % (term, p.returncode))
return ret
| mattclay/ansible | lib/ansible/plugins/lookup/lines.py | Python | gpl-3.0 | 2,214 | 0.005872 |
# -*- Mode: Python -*-
VERSION_STRING = "$Id: thread_channel.py,v 1.3 2002/03/19 22:49:40 amk Exp $"
# This will probably only work on Unix.
# The disadvantage to this technique is that it wastes file
# descriptors (especially when compared to select_trigger.py)
# May be possible to do it on Win32, using TCP localhost sockets.
# [does winsock support 'socketpair'?]
import asyncore_25 as asyncore
import asynchat_25 as asynchat
import fcntl
import FCNTL
import os
import socket
import string
import thread
# this channel slaves off of another one. it starts a thread which
# pumps its output through the 'write' side of the pipe. The 'read'
# side of the pipe will then notify us when data is ready. We push
# this data on the owning data channel's output queue.
class thread_channel (asyncore.file_dispatcher):
buffer_size = 8192
def __init__ (self, channel, function, *args):
self.parent = channel
self.function = function
self.args = args
self.pipe = rfd, wfd = os.pipe()
asyncore.file_dispatcher.__init__ (self, rfd)
def start (self):
rfd, wfd = self.pipe
# The read side of the pipe is set to non-blocking I/O; it is
# 'owned' by medusa.
flags = fcntl.fcntl (rfd, FCNTL.F_GETFL, 0)
fcntl.fcntl (rfd, FCNTL.F_SETFL, flags | FCNTL.O_NDELAY)
# The write side of the pipe is left in blocking mode; it is
# 'owned' by the thread. However, we wrap it up as a file object.
# [who wants to 'write()' to a number?]
of = os.fdopen (wfd, 'w')
thread.start_new_thread (
self.function,
# put the output file in front of the other arguments
(of,) + self.args
)
def writable (self):
return 0
def readable (self):
return 1
def handle_read (self):
data = self.recv (self.buffer_size)
self.parent.push (data)
def handle_close (self):
# Depending on your intentions, you may want to close
# the parent channel here.
self.close()
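# A minimal usage sketch (illustrative only -- the test code below is the real
# demo). The owning channel is assumed to be an asyncore/asynchat channel with
# a 'push' method, and the worker function just writes to the file object it
# is handed:
#
#   def worker (output_file, label):
#       output_file.write ('%s finished\r\n' % label)
#       output_file.close()
#
#   tc = thread_channel (owning_channel, worker, 'job-1')
#   tc.start()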
# Yeah, it's bad when the test code is bigger than the library code.
if __name__ == '__main__':
import time
def thread_function (output_file, i, n):
print 'entering thread_function'
while n:
time.sleep (5)
output_file.write ('%2d.%2d %s\r\n' % (i, n, output_file))
output_file.flush()
n = n - 1
output_file.close()
print 'exiting thread_function'
class thread_parent (asynchat.async_chat):
def __init__ (self, conn, addr):
self.addr = addr
asynchat.async_chat.__init__ (self, conn)
self.set_terminator ('\r\n')
self.buffer = ''
self.count = 0
def collect_incoming_data (self, data):
self.buffer = self.buffer + data
def found_terminator (self):
data, self.buffer = self.buffer, ''
n = string.atoi (string.split (data)[0])
tc = thread_channel (self, thread_function, self.count, n)
self.count = self.count + 1
tc.start()
class thread_server (asyncore.dispatcher):
def __init__ (self, family=socket.AF_INET, address=('127.0.0.1', 9003)):
asyncore.dispatcher.__init__ (self)
self.create_socket (family, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind (address)
self.listen (5)
def handle_accept (self):
conn, addr = self.accept()
tp = thread_parent (conn, addr)
thread_server()
#asyncore.loop(1.0, use_poll=1)
asyncore.loop ()
| zxl200406/minos | supervisor/supervisor/medusa/thread/thread_channel.py | Python | apache-2.0 | 3,713 | 0.01185 |
'''
Training file with functions for
1) Taking in the inputs
2) Defining the model
3) Reading the input and generating batches
4) Defining the loss, learning rate and optimization functions
5) Running multiple epochs on training and testing
'''
import argparse
from read_input import *
from model import *
import tensorflow as tf
import time
def run_epoch(session, model, train_op, data, max_batches, args):
'''
Run the model under given session for max_batches based on args
:param model: model on which the operations take place
:param session: session for tensorflow
:param train_op: training output variable name, pass as tf.no_op() for validation and testing
:param data: train, validation or testing data
:param max_batches: maximum number of batches that can be called
:param args: arguments provided by user in main
:return: perplexity
'''
# to run a session you need the list of tensors/graph nodes and the feed dict
# for us its the cost, final_state, and optimizer
# you feed in the (x,y) pairs, and you also propagate the state across the batches
state = np.zeros((args.batch_size,model.lstm_layer.state_size))
tot_cost = 0.0
start_time = time.time()
iters = 0
for i in range(max_batches):
x, y = data.next()
cur_cost, curr_state, _ = session.run([model.cost,model.final_state,train_op],
feed_dict={model.input_layer: x, model.targets: y, model.initial_state: state})
tot_cost += cur_cost
state = curr_state
iters += args.batch_len
if i % (max_batches//50) == 0:
print 'iteration %.3f perplexity: %.3f speed: %.0f wps' %\
(i, np.exp(tot_cost/iters), iters*args.batch_size/(time.time()-start_time))
return np.exp(tot_cost/iters)
# TODO: Add model saving and loading
def main():
# parse arguments
parser = argparse.ArgumentParser()
    parser.add_argument('--filename', type=str, default='./data/tinyshakespeare/input.txt', help='path to the input data file')
parser.add_argument('--split_ratio', type =list, default=[0.9,0.05,0.05], help='split ratio for train, validation and test')
parser.add_argument('--batch_size', type=int, default=1, help='batch size for data')
parser.add_argument('--batch_len', type=int, default=1, help='number of time steps to unroll')
parser.add_argument('--cell', type=str, default='lstm', help='the cell type to use, currently only LSTM')
parser.add_argument('--num_layers', type=int, default=1, help='depth of hidden units in the model')
parser.add_argument('--hidden_units', type=int, default=32, help='number of hidden units in the cell')
parser.add_argument('--num_epochs', type=int, default=50, help='max number of epochs to run the training')
parser.add_argument('--lr_rate', type=float, default=2e-5, help='learning rate')
parser.add_argument('--lr_decay', type=float, default=0.97, help='learning rate decay')
    parser.add_argument('--drop_prob', type=float, default=0, help='dropout probability')
parser.add_argument('--grad_clip', type=float, default=5.0, help='clip gradients at this value')
    parser.add_argument('--stateful', type=bool, default=True, help='preserve the LSTM state across batches')
args = parser.parse_args()
# load data
if args.filename[-3:] == 'zip':
data = load_zip_data(args.filename)
elif args.filename[-3:] == 'txt':
data = load_csv_file(args.filename)
else:
raise NotImplementedError("File extension not supported")
train, val ,test = train_test_split(data, args.split_ratio)
batch_train = BatchGenerator(train,args.batch_size,args.batch_len)
batch_train.create_batches()
max_batches_train = batch_train.epoch_size
    # Note: characters first seen at test time are not in the training vocabulary and will cause problems
args.data_dim = batch_train.vocab_size
batch_val = BatchGenerator(val,args.batch_size,args.batch_len)
batch_val.create_batches()
max_batches_val = batch_val.epoch_size
batch_test = BatchGenerator(test,args.batch_size,args.batch_len)
batch_test.create_batches()
max_batches_test = batch_test.epoch_size
print max_batches_train, max_batches_val, max_batches_test
# Initialize session and graph
with tf.Graph().as_default(), tf.Session() as session:
initializer = tf.random_uniform_initializer(-0.1,0.1)
with tf.variable_scope("model",reuse=None,initializer=initializer):
train_model = Model(args, is_training=True, is_inference=False)
with tf.variable_scope("model",reuse=True,initializer=initializer):
val_model = Model(args, is_training=False, is_inference=False)
test_model = Model(args, is_training=False, is_inference=False)
tf.initialize_all_variables().run()
for i in range(args.num_epochs):
# TODO: Add parameter for max_max_epochs
lr_decay = args.lr_decay ** max(i-10.0,0.0)
train_model.assign_lr(session, args.lr_rate*lr_decay)
# run a complete epoch and return appropriate variables
train_perplexity = run_epoch(session, train_model, train_model.train_op, batch_train, max_batches_train, args)
print 'Epoch %d, Train Perplexity: %.3f' %(i+1, train_perplexity)
val_perplexity = run_epoch(session, val_model, tf.no_op(), batch_val, max_batches_val, args)
print 'Epoch %d, Val Perplexity: %.3f' %(i+1, val_perplexity)
test_perplexity = run_epoch(session, test_model, tf.no_op(), batch_test, max_batches_test, args)
print 'Test Perplexity: %.3f' % test_perplexity
if __name__ == "__main__":
main()
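# Example command line (illustrative values only; the flags are the ones
# defined via argparse in main() above, the path and hyperparameters are not
# prescriptive):
#
#   python train.py --filename ./data/tinyshakespeare/input.txt \
#       --batch_size 32 --batch_len 50 --hidden_units 128 \
#       --num_layers 2 --num_epochs 50 --lr_rate 2e-3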
| Kaushikpatnaik/LSTMChar2Char | train.py | Python | mit | 5,431 | 0.019518 |
import sqlite3
def main():
conn = sqlite3.connect("../database")
cursor = conn.cursor()
# I claim this gives the current score. Another formulation is
# select trackid, score, max(scoreid) from scores group by trackid;
# cursor.execute("""select trackid, score from scores
# group by trackid order by scoreid""")
# cursor.execute("""select scores.trackid, score, path from scores, tracks
# where scores.trackid = tracks.trackid
# group by scores.trackid order by scoreid""")
cursor.execute("""select score, path from tracks
where score is not null and missing is not 1""")
results = cursor.fetchall()
for result in results:
print(str(result[0]) + "\t" + result[1])
if __name__ == '__main__':
main()
| erbridge/NQr | src/export.py | Python | bsd-3-clause | 834 | 0 |
#!/usr/bin/python
# -*- coding: iso-8859-15 -*-
#
# audi_codes.py
#
# Copyright (C) Ben Van Mechelen 2008-2009 <me@benvm.be>
#
# This file is part of Garmon
#
# Garmon is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
from gettext import gettext as _
DTC_CODES_MANUFACTURER = {
"P1101": _("O2 Sensor Circ.,Bank1-Sensor1Voltage too Low/Air Leak"),
"P1102": _("O2 Sensor Heating Circ.,Bank1-Sensor1 Short to B+"),
"P1103": _("O2 Sensor Heating Circ.,Bank1-Sensor1 Output too Low"),
"P1104": _("Bank1-Sensor2 Voltage too Low/Air Leak"),
"P1105": _("O2 Sensor Heating Circ.,Bank1-Sensor2 Short to B+"),
"P1106": _("O2 Sensor Circ.,Bank2-Sensor1 Voltage too Low/Air Leak"),
"P1107": _("O2 Sensor Heating Circ.,Bank2-Sensor1 Short to B+"),
"P1108": _("O2 Sensor Heating Circ.,Bank2-Sensor1 Output too Low"),
"P1109": _("O2 Sensor Circ.,Bank2-Sensor2 Voltage too Low/Air Leak"),
"P1110": _("O2 Sensor Heating Circ.,Bank2-Sensor2 Short to B+"),
"P1111": _("O2 Control (Bank 1) System too lean"),
"P1112": _("O2 Control (Bank 1) System too rich"),
"P1113": _("Bank1-Sensor1 Internal Resistance too High"),
"P1114": _("Bank1-Sensor2 Internal Resistant too High"),
"P1115": _("O2 Sensor Heater Circ.,Bank1-Sensor1 Short to Ground"),
"P1116": _("O2 Sensor Heater Circ.,Bank1-Sensor1 Open"),
"P1117": _("O2 Sensor Heater Circ.,Bank1-Sensor2 Short to Ground"),
"P1118": _("O2 Sensor Heater Circ.,Bank1-Sensor2 Open"),
"P1119": _("O2 Sensor Heater Circ.,Bank2-Sensor1 Short to Ground"),
"P1120": _("O2 Sensor Heater Circ.,Bank2-Sensor1 Open"),
"P1121": _("O2 Sensor Heater Circ.,Bank2-Sensor2 Short to Ground"),
"P1122": _("O2 Sensor Heater Circ.,Bank2-Sensor2 Open"),
"P1123": _("Long Term Fuel Trim Add.Air.,Bank1 System too Rich"),
"P1124": _("Long Term Fuel Trim Add.Air.,Bank1 System too Lean"),
"P1125": _("Long Term Fuel Trim Add.Air.,Bank2 System too Rich"),
"P1126": _("Long Term Fuel Trim Add.Air.,Bank2 System too Lean"),
"P1127": _("Long Term Fuel Trim mult.,Bank1 System too Rich"),
"P1128": _("Long Term Fuel Trim mult.,Bank1 System too Lean"),
"P1129": _("Long Term Fuel Trim mult.,Bank2 System too Rich"),
"P1130": _("Long Term Fuel Trim mult.,Bank2 System too Lean"),
"P1131": _("Bank2-Sensor1 Internal Rsistance too High"),
"P1132": _("O2 Sensor Heating Circ.,Bank1+2-Sensor1 Short to B+"),
"P1133": _("O2 Sensor Heating Circ.,Bank1+2-Sensor1 Electrical Malfunction"),
"P1134": _("O2 Sensor Heating Circ.,Bank1+2-Sensor2 Short to B+"),
"P1135": _("O2 Sensor Heating Circ.,Bank1+2-Sensor2 Electrical Malfunction"),
"P1136": _("Long Term Fuel Trim Add.Fuel,Bank1 System too Lean"),
"P1137": _("Long Term Fuel Trim Add.Fuel,Bank1 System too Rich"),
"P1138": _("Long Term Fuel Trim Add.Fuel,Bank2 System too Lean"),
"P1139": _("Long Term Fuel Trim Add.Fuel,Bank2 System too Rich"),
"P1140": _("Bank2-Sensor2 Internal Resistance too High"),
"P1141": _("Load Calculation Cross Check Range/Performance"),
"P1142": _("Load Calculation Cross Check Lower Limit Exceeded"),
"P1143": _("Load Calculation Cross Check Upper Limit Exceeded"),
"P1144": _("Mass or Volume Air Flow Circ Open/Short to Ground"),
"P1145": _("Mass or Volume Air Flow Circ Short to B+"),
"P1146": _("Mass or Volume Air Flow Circ Supply Malfunction"),
"P1147": _("O2 Control (Bank 2) System too lean"),
"P1148": _("O2 Control (Bank 2) System too rich"),
"P1149": _("O2 Control (Bank 1) Out of range"),
"P1150": _("O2 Control (Bank 2) Out of range"),
"P1151": _("Bank1, Long Term Fuel Trim, Range 1 Leanness Lower Limit Exceeded"),
"P1152": _("Bank1, Long Term Fuel Trim, Range 2 Leanness Lower Limit Exceeded"),
"P1154": _("Manifold Switch Over Malfunction"),
"P1155": _("Manifold Abs.Pressure Sensor Circ. Short to B+"),
"P1156": _("Manifold Abs.Pressure Sensor Circ. Open/Short to Ground"),
"P1157": _("Manifold Abs.Pressure Sensor Circ. Power Supply Malfunction"),
"P1158": _("Manifold Abs.Pressure Sensor Circ. Range/Performance"),
"P1160": _("Manifold Temp.Sensor Circ. Short to Ground"),
"P1161": _("Manifold Temp.Sensor Circ. Open/Short to B+"),
"P1162": _("Fuel Temp.Sensor Circ. Short to Ground"),
"P1163": _("Fuel Temp.Sensor Circ. Open/Short to B+"),
"P1164": _("Fuel Temperature Sensor Range/Performance/Incorrect Signal"),
"P1165": _("Bank1, Long Term Fuel Trim, Range 1 Rich Limit Exceeded"),
"P1166": _("Bank1, Long Term Fuel Trim, Range 2 Rich Limit Exceeded"),
"P1171": _("Throttle Actuation Potentiometer Sign.2 Range/Performance"),
"P1172": _("Throttle Actuation Potentiometer Sign.2 Signal too Low"),
"P1173": _("Throttle Actuation Potentiometer Sign.2 Signal too High"),
"P1174": _("Fuel Trim, Bank 1 Different injection times"),
"P1176": _("O2 Correction Behind Catalyst,B1 Limit Attained"),
"P1177": _("O2 Correction Behind Catalyst,B2 Limit Attained"),
"P1178": _("Linear 02 Sensor / Pump Current Open Circuit"),
"P1179": _("Linear 02 Sensor / Pump Current Short to ground"),
"P1180": _("Linear 02 Sensor / Pump Current Short to B+"),
"P1181": _("Linear 02 Sensor / Reference Voltage Open Circuit"),
"P1182": _("Linear 02 Sensor / Reference Voltage Short to ground"),
"P1183": _("Linear 02 Sensor / Reference Voltage Short to B+"),
"P1184": _("Linear 02 Sensor / Common Ground Wire Open Circuit"),
"P1185": _("Linear 02 Sensor / Common Ground Wire Short to ground"),
"P1186": _("Linear 02 Sensor / Common Ground Wire Short to B+"),
"P1187": _("Linear 02 Sensor / Compens. Resistor Open Circuit"),
"P1188": _("Linear 02 Sensor / Compens. Resistor Short to ground"),
"P1189": _("Linear 02 Sensor / Compens. Resistor Short to B+"),
"P1190": _("Linear 02 Sensor / Reference Voltage Incorrect Signal"),
"P1196": _("O2 Sensor Heater Circ.,Bank1-Sensor1 Electrical Malfunction"),
"P1197": _("O2 Sensor Heater Circ.,Bank2-Sensor1 Electrical Malfunction"),
"P1198": _("O2 Sensor Heater Circ.,Bank1-Sensor2 Electrical Malfunction"),
"P1199": _("O2 Sensor Heater Circ.,Bank2-Sensor2 Electrical Malfunction"),
"P1201": _("Cyl.1-Fuel Inj.Circ. Electrical Malfunction"),
"P1202": _("Cyl.2-Fuel Inj.Circ. Electrical Malfunction"),
"P1203": _("Cyl.3-Fuel Inj.Circ. Electrical Malfunction"),
"P1204": _("Cyl.4-Fuel Inj.Circ. Electrical Malfunction"),
"P1205": _("Cyl.5-Fuel Inj.Circ. Electrical Malfunction"),
"P1206": _("Cyl.6-Fuel Inj.Circ. Electrical Malfunction"),
"P1207": _("Cyl.7-Fuel Inj.Circ. Electrical Malfunction"),
"P1208": _("Cyl.8-Fuel Inj.Circ. Electrical Malfunction"),
"P1209": _("Intake valves for cylinder shut-off Short circuit to ground"),
"P1210": _("Intake valves for cylinder shut-off Short to B+"),
"P1211": _("Intake valves for cylinder shut-off Open circuit"),
"P1213": _("Cyl.1-Fuel Inj.Circ. Short to B+"),
"P1214": _("Cyl.2-Fuel Inj.Circ. Short to B+"),
"P1215": _("Cyl.3-Fuel Inj.Circ. Short to B+"),
"P1216": _("Cyl.4-Fuel Inj.Circ. Short to B+"),
"P1217": _("Cyl.5-Fuel Inj.Circ. Short to B+"),
"P1218": _("Cyl.6-Fuel Inj.Circ. Short to B+"),
"P1219": _("Cyl.7-Fuel Inj.Circ. Short to B+"),
"P1220": _("Cyl.8-Fuel Inj.Circ. Short to B+"),
"P1221": _("Cylinder shut-off exhaust valves Short circuit to ground"),
"P1222": _("Cylinder shut-off exhaust valves Short to B+"),
"P1223": _("Cylinder shut-off exhaust valves Open circuit"),
"P1225": _("Cyl.1-Fuel Inj.Circ. Short to Ground"),
"P1226": _("Cyl.2-Fuel Inj.Circ. Short to Ground"),
"P1227": _("Cyl.3-Fuel Inj.Circ. Short to Ground"),
"P1228": _("Cyl.4-Fuel Inj.Circ. Short to Ground"),
"P1229": _("Cyl.5-Fuel Inj.Circ. Short to Ground"),
"P1230": _("Cyl.6-Fuel Inj.Circ. Short to Ground"),
"P1231": _("Cyl.7-Fuel Inj.Circ. Short to Ground"),
"P1232": _("Cyl.8-Fuel Inj.Circ. Short to Ground"),
"P1237": _("Cyl.1-Fuel Inj.Circ. Open Circ."),
"P1238": _("Cyl.2-Fuel Inj.Circ. Open Circ."),
"P1239": _("Cyl.3-Fuel Inj.Circ. Open Circ."),
"P1240": _("Cyl.4-Fuel Inj.Circ. Open Circ."),
"P1241": _("Cyl.5-Fuel Inj.Circ. Open Circ."),
"P1242": _("Cyl.6-Fuel Inj.Circ. Open Circ."),
"P1243": _("Cyl.7-Fuel Inj.Circ. Open Circ."),
"P1244": _("Cyl.8-Fuel Inj.Circ. Open Circ."),
"P1245": _("Needle Lift Sensor Circ. Short to Ground"),
"P1246": _("Needle Lift Sensor Circ. Range/Performance"),
"P1247": _("Needle Lift Sensor Circ. Open/Short to B+"),
"P1248": _("Injection Start Control Deviation"),
"P1249": _("Fuel consumption signal Electrical Fault in Circuit"),
"P1250": _("Fuel Level Too Low"),
"P1251": _("Start of Injection Solenoid Circ Short to B+"),
"P1252": _("Start of Injection Solenoid Circ Open/Short to Ground"),
"P1253": _("Fuel consumption signal Short to ground"),
"P1254": _("Fuel consumption signal Short to B+"),
"P1255": _("Engine Coolant Temp.Circ Short to Ground"),
"P1256": _("Engine Coolant Temp.Circ Open/Short to B+"),
"P1257": _("Engine Coolant System Valve Open"),
"P1258": _("Engine Coolant System Valve Short to B+"),
"P1259": _("Engine Coolant System Valve Short to Ground"),
"P1280": _("Fuel Inj.Air Contr.Valve Circ. Flow too Low"),
"P1283": _("Fuel Inj.Air Contr.Valve Circ. Electrical Malfunction"),
"P1284": _("Fuel Inj.Air Contr.Valve Circ. Open"),
"P1285": _("Fuel Inj.Air Contr.Valve Circ. Short to Ground"),
"P1286": _("Fuel Inj.Air Contr.Valve Circ. Short to B+"),
"P1287": _("Turbocharger bypass valve open"),
"P1288": _("Turbocharger bypass valve short to B+"),
"P1289": _("Turbocharger bypass valve short to ground"),
"P1296": _("Cooling system malfunction"),
"P1297": _("Connection turbocharger - throttle valve pressure hose"),
"P1300": _("Misfire detected Reason: Fuel level too low"),
"P1319": _("Knock Sensor 1 Circ. Short to Ground"),
"P1320": _("Knock Sensor 2 Circ. Short to Ground"),
"P1321": _("Knock Sensor 3 Circ. Low Input"),
"P1322": _("Knock Sensor 3 Circ. High Input"),
"P1323": _("Knock Sensor 4 Circ. Low Input"),
"P1324": _("Knock Sensor 4 Circ. High Input"),
"P1325": _("Cyl.1-Knock Contr. Limit Attained"),
"P1326": _("Cyl.2-Knock Contr. Limit Attained"),
"P1327": _("Cyl.3-Knock Contr. Limit Attained"),
"P1328": _("Cyl.4-Knock Contr. Limit Attained"),
"P1329": _("Cyl.5-Knock Contr. Limit Attained"),
"P1330": _("Cyl.6-Knock Contr. Limit Attained"),
"P1331": _("Cyl.7-Knock Contr. Limit Attained"),
"P1332": _("Cyl.8-Knock Contr. Limit Attained"),
"P1335": _("Engine Torque Monitoring 2 Control Limint Exceeded"),
"P1336": _("Engine Torque Monitoring Adaptation at limit"),
"P1337": _("Camshaft Pos.Sensor,Bank1 Short to Ground"),
"P1338": _("Camshaft Pos.Sensor,Bank1 Open Circ./Short to B+"),
"P1339": _("Crankshaft Pos./Engine Speed Sensor Cross Connected"),
"P1340": _("Crankshaft-/Camshaft Pos.Sens.Signals Out of Sequence"),
"P1341": _("Ignition Coil Power Output Stage 1 Short to Ground"),
"P1342": _("Ignition Coil Power Output Stage 1 Short to B+"),
"P1343": _("Ignition Coil Power Output Stage 2 Short to Ground"),
"P1344": _("Ignition Coil Power Output Stage 2 Short to B+"),
"P1345": _("Ignition Coil Power Output Stage 3 Short to Ground"),
"P1346": _("Ignition Coil Power Output Stage 3 Short to B+"),
"P1347": _("Bank2,Crankshaft-/Camshaft os.Sens.Sign. Out of Sequence"),
"P1348": _("Ignition Coil Power Output Stage 1 Open Circuit"),
"P1349": _("Ignition Coil Power Output Stage 2 Open Circuit"),
"P1350": _("Ignition Coil Power Output Stage 3 Open Circuit"),
"P1354": _("Modulation Piston Displ.Sensor Circ. Malfunction"),
"P1355": _("Cyl. 1, ignition circuit Open Circuit"),
"P1356": _("Cyl. 1, ignition circuit Short to B+"),
"P1357": _("Cyl. 1, ignition circuit Short to ground"),
"P1358": _("Cyl. 2, ignition circuit Open Circuit"),
"P1359": _("Cyl. 2, ignition circuit Short Circuit to B+"),
"P1360": _("Cyl. 2, ignition circuit Short Circuit to Ground"),
"P1361": _("Cyl. 3, ignition circuit Open Circuit"),
"P1362": _("Cyl. 3, ignition circuit Short Circuit to B+"),
"P1363": _("Cyl. 3, ignition circuit Short Circuit to ground"),
"P1364": _("Cyl. 4 ignition circuit Open Circuit"),
"P1365": _("Cyl. 4 ignition circuit Short circuit to B+"),
"P1366": _("Cyl. 4 ignition circuit Short circuit to ground"),
"P1367": _("Cyl. 5, ignition circuit Open Circuit"),
"P1368": _("Cyl. 5, ignition circuit Short Circuit to B+"),
"P1369": _("Cyl. 5, ignition circuit short to ground"),
"P1370": _("Cyl. 6, ignition circuit Open Circuit"),
"P1371": _("Cyl. 6, ignition circuit Short Circuit to B+"),
"P1372": _("Cyl. 6, ignition circuit short to ground"),
"P1373": _("Cyl. 7, ignition circuit Open Circuit"),
"P1374": _("Cyl. 7, ignition circuit Short Circuit to B+"),
"P1375": _("Cyl. 7, ignition circuit short to ground"),
"P1376": _("Cyl. 8, ignition circuit Open Circuit"),
"P1377": _("Cyl. 8, ignition circuit Short Circuit to B+"),
"P1378": _("Cyl. 8, ignition circuit short to ground"),
"P1386": _("Internal Control Module Knock Control Circ.Error"),
"P1387": _("Internal Contr. Module altitude sensor error"),
"P1388": _("Internal Contr. Module drive by wire error"),
"P1391": _("Camshaft Pos.Sensor,Bank2 Short to Ground"),
"P1392": _("Camshaft Pos.Sensor,Bank2 Open Circ./Short to B+"),
"P1393": _("Ignition Coil Power Output Stage 1 Electrical Malfunction"),
"P1394": _("Ignition Coil Power Output Stage 2 Electrical Malfunction"),
"P1395": _("Ignition Coil Power Output Stage 3 Electrical Malfunction"),
"P1396": _("Engine Speed Sensor Missing Tooth"),
"P1397": _("Engine speed wheel Adaptation limit reached"),
"P1398": _("Engine RPM signal, TD Short to ground"),
"P1399": _("Engine RPM signal, TD Short Circuit to B+"),
"P1400": _("EGR Valve Circ Electrical Malfunction"),
"P1401": _("EGR Valve Circ Short to Ground"),
"P1402": _("EGR Valve Circ Short to B+"),
"P1403": _("EGR Flow Deviation"),
"P1404": _("EGR Flow Basic Setting not carried out"),
"P1406": _("EGR Temp.Sensor Range/Performance"),
"P1407": _("EGR Temp.Sensor Signal too Low"),
"P1408": _("EGR Temp.Sensor Signal too High"),
"P1409": _("Tank Ventilation Valve Circ. Electrical Malfunction"),
"P1410": _("Tank Ventilation Valve Circ. Short to B+"),
"P1411": _("Sec.Air Inj.Sys.,Bank2 Flow too Flow"),
"P1412": _("EGR Different.Pressure Sensor Signal too Low"),
"P1413": _("EGR Different.Pressure Sensor Signal too High"),
"P1414": _("Sec.Air Inj.Sys.,Bank2 Leak Detected"),
"P1417": _("Fuel Level Sensor Circ Signal too Low"),
"P1418": _("Fuel Level Sensor Circ Signal too High"),
"P1420": _("Sec.Air Inj.Valve Circ Electrical Malfunction"),
"P1421": _("Sec.Air Inj.Valve Circ Short to Ground"),
"P1422": _("Sec.Air Inj.Sys.Contr.Valve Circ Short to B+"),
"P1423": _("Sec.Air Inj.Sys.,Bank1 Flow too Low"),
"P1424": _("Sec.Air Inj.Sys.,Bank1 Leak Detected"),
"P1425": _("Tank Vent.Valve Short to Ground"),
"P1426": _("Tank Vent.Valve Open"),
"P1432": _("Sec.Air Inj.Valve Open"),
"P1433": _("Sec.Air Inj.Sys.Pump Relay Circ. open"),
"P1434": _("Sec.Air Inj.Sys.Pump Relay Circ. Short to B+"),
"P1435": _("Sec.Air Inj.Sys.Pump Relay Circ. Short to ground"),
"P1436": _("Sec.Air Inj.Sys.Pump Relay Circ. Electrical Malfunction"),
"P1439": _("EGR Potentiometer Error in Basic Seting"),
"P1440": _("EGR Valve Power Stage Open"),
"P1441": _("EGR Valve Circ Open/Short to Ground"),
"P1442": _("EGR Valve Position Sensor Signal too high"),
"P1443": _("EGR Valve Position Sensor Signal too low"),
"P1444": _("EGR Valve Position Sensor range/performance"),
"P1445": _("Catalyst Temp.Sensor 2 Circ. Range/Performance"),
"P1446": _("Catalyst Temp.Circ Short to Ground"),
"P1447": _("Catalyst Temp.Circ Open/Short to B+"),
"P1448": _("Catalyst Temp.Sensor 2 Circ. Short to Ground"),
"P1449": _("Catalyst Temp.Sensor 2 Circ. Open/Short to B+"),
"P1450": _("Sec.Air Inj.Sys.Circ Short to B+"),
"P1451": _("Sec.Air Inj.Sys.Circ Short to Ground"),
"P1452": _("Sec.Air Inj.Sys. Open Circ."),
"P1453": _("Exhaust gas temperature sensor 1 open/short to B+"),
"P1454": _("Exhaust gas temperature sensor short 1 to ground"),
"P1455": _("Exhaust gas temperature sensor 1 range/performance"),
"P1456": _("Exhaust gas temperature control bank 1 limit attained"),
"P1457": _("Exhaust gas temperature sensor 2 open/short to B+"),
"P1458": _("Exhaust gas temperature sensor 2 short to ground"),
"P1459": _("Exhaust gas temperature sensor 2 range/performance"),
"P1460": _("Exhaust gas temperature control bank 2 limit attained"),
"P1461": _("Exhaust gas temperature control bank 1 Range/Performance"),
"P1462": _("Exhaust gas temperature control bank 2 Range/Performance"),
"P1465": _("Additive Pump Short Circuit to B+"),
"P1466": _("Additive Pump Open/Short to Ground"),
"P1467": _("EVAP Canister Purge Solenoid Valve Short Circuit to B+"),
"P1468": _("EVAP Canister Purge Solenoid Valve Short Circuit to Ground"),
"P1469": _("EVAP Canister Purge Solenoid Valve Open Circuit"),
"P1470": _("EVAP Emission Contr.LDP Circ Electrical Malfunction"),
"P1471": _("EVAP Emission Contr.LDP Circ Short to B+"),
"P1472": _("EVAP Emission Contr.LDP Circ Short to Ground"),
"P1473": _("EVAP Emission Contr.LDP Circ Open Circ."),
"P1474": _("EVAP Canister Purge Solenoid Valve electrical malfunction"),
"P1475": _("EVAP Emission Contr.LDP Circ Malfunction/Signal Circ.Open"),
"P1476": _("EVAP Emission Contr.LDP Circ Malfunction/Insufficient Vacuum"),
"P1477": _("EVAP Emission Contr.LDP Circ Malfunction"),
"P1478": _("EVAP Emission Contr.LDP Circ Clamped Tube Detected"),
"P1500": _("Fuel Pump Relay Circ. Electrical Malfunction"),
"P1501": _("Fuel Pump Relay Circ. Short to Ground"),
"P1502": _("Fuel Pump Relay Circ. Short to B+"),
"P1503": _("Load signal from Alternator Term. DF Range/performance/Incorrect Signal"),
"P1504": _("Intake Air Sys.Bypass Leak Detected"),
"P1505": _("Closed Throttle Pos. Does Not Close/Open Circ"),
"P1506": _("Closed Throttle Pos.Switch Does Not Open/Short to Ground"),
"P1507": _("Idle Sys.Learned Value Lower Limit Attained"),
"P1508": _("Idle Sys.Learned Value Upper Limit Attained"),
"P1509": _("Idle Air Control Circ. Electrical Malfunction"),
"P1510": _("Idle Air Control Circ. Short to B+"),
"P1511": _("Intake Manifold Changeover Valve circuit electrical malfunction"),
"P1512": _("Intake Manifold Changeover Valve circuit Short to B+"),
"P1513": _("Intake Manifold Changeover Valve2 circuit Short to B+"),
"P1514": _("Intake Manifold Changeover Valve2 circuit Short to ground"),
"P1515": _("Intake Manifold Changeover Valve circuit Short to Ground"),
"P1516": _("Intake Manifold Changeover Valve circuit Open"),
"P1517": _("Main Relay Circ. Electrical Malfunction"),
"P1518": _("Main Relay Circ. Short to B+"),
"P1519": _("Intake Camshaft Contr.,Bank1 Malfunction"),
"P1520": _("Intake Manifold Changeover Valve2 circuit Open"),
"P1521": _("Intake Manifold Changeover Valve2 circuit electrical malfunction"),
"P1522": _("Intake Camshaft Contr.,Bank2 Malfunction"),
"P1523": _("Crash Signal from Airbag Control Unit range/performance"),
"P1525": _("Intake Camshaft Contr.Circ.,Bank1 Electrical Malfunction"),
"P1526": _("Intake Camshaft Contr.Circ.,Bank1 Short to B+"),
"P1527": _("Intake Camshaft Contr.Circ.,Bank1 Short to Ground"),
"P1528": _("Intake Camshaft Contr.Circ.,Bank1 Open"),
"P1529": _("Camshaft Control Circuit Short to B+"),
"P1530": _("Camshaft Control Circuit Short to ground"),
"P1531": _("Camshaft Control Circuit open"),
"P1533": _("Intake Camshaft Contr.Circ.,Bank2 Electrical Malfunction"),
"P1534": _("Intake Camshaft Contr.Circ.,Bank2 Short to B+"),
"P1535": _("Intake Camshaft Contr.Circ.,Bank2 Short to Ground"),
"P1536": _("Intake Camshaft Contr.Circ.,Bank2 Open"),
"P1537": _("Engine Shutoff Solenoid Malfunction"),
"P1538": _("Engine Shutoff Solenoid Open/Short to Ground"),
"P1539": _("Clutch Vacuum Vent Valve Switch Incorrect signal"),
"P1540": _("Vehicle Speed Sensor High Input"),
"P1541": _("Fuel Pump Relay Circ Open"),
"P1542": _("Throttle Actuation Potentiometer Range/Performance"),
"P1543": _("Throttle Actuation Potentiometer Signal too Low"),
"P1544": _("Throttle Actuation Potentiometer Signal too High"),
"P1545": _("Throttle Pos.Contr Malfunction"),
"P1546": _("Boost Pressure Contr.Valve Short to B+"),
"P1547": _("Boost Pressure Contr.Valve Short to Ground"),
"P1548": _("Boost Pressure Contr.Valve Open"),
"P1549": _("Boost Pressure Contr.Valve Short to Ground"),
"P1550": _("Charge Pressure Deviation"),
"P1551": _("Barometric Pressure Sensor Circ. Short to B+"),
"P1552": _("Barometric Pressure Sensor Circ. Open/Short to Ground"),
"P1553": _("Barometric/manifold pressure signal ratio out of range"),
"P1554": _("Idle Speed Contr.Throttle Pos. Basic Setting Conditions not met"),
"P1555": _("Charge Pressure Upper Limit exceeded"),
"P1556": _("Charge Pressure Contr. Negative Deviation"),
"P1557": _("Charge Pressure Contr. Positive Deviation"),
"P1558": _("Throttle Actuator Electrical Malfunction"),
"P1559": _("Idle Speed Contr.Throttle Pos. Adaptation Malfunction"),
"P1560": _("Maximum Engine Speed Exceeded"),
"P1561": _("Quantity Adjuster Deviation"),
"P1562": _("Quantity Adjuster Upper Limit Attained"),
"P1563": _("Quantity Adjuster Lower Limit Attained"),
"P1564": _("Idle Speed Contr.Throttle Pos. Low Voltage During Adaptation"),
"P1565": _("Idle Speed Control Throttle Position lower limit not attained"),
"P1566": _("Load signal from A/C compressor range/performance"),
"P1567": _("Load signal from A/C compressor no signal"),
"P1568": _("Idle Speed Contr.Throttle Pos. mechanical Malfunction"),
"P1569": _("Cruise control switch Incorrect signal"),
"P1570": _("Contr.Module Locked"),
"P1571": _("Left Eng. Mount Solenoid Valve Short to B+"),
"P1572": _("Left Eng. Mount Solenoid Valve Short to ground"),
"P1573": _("Left Eng. Mount Solenoid Valve Open circuit"),
"P1574": _("Left Eng. Mount Solenoid Valve Electrical fault in circuit"),
"P1575": _("Right Eng. Mount Solenoid Valve Short to B+"),
"P1576": _("Right Eng. Mount Solenoid Valve Short to ground"),
"P1577": _("Right Eng. Mount Solenoid Valve Open circuit"),
"P1578": _("Right Eng. Mount Solenoid Valve Electrical fault in circuit"),
"P1579": _("Idle Speed Contr.Throttle Pos. Adaptation not started"),
"P1580": _("Throttle Actuator B1 Malfunction"),
"P1581": _("Idle Speed Contr.Throttle Pos. Basic Setting Not Carried Out"),
"P1582": _("Idle Adaptation at Limit"),
"P1583": _("Transmission mount valves Short to B+"),
"P1584": _("Transmission mount valves Short to ground"),
"P1585": _("Transmission mount valves Open circuit"),
"P1586": _("Engine mount solenoid valves Short to B+"),
"P1587": _("Engine mount solenoid valves Short to ground"),
"P1588": _("Engine mount solenoid valves Open circuit"),
"P1600": _("Power Supply (B+) Terminal 15 Low Voltage"),
"P1602": _("Power Supply (B+) Terminal 30 Low Voltage"),
"P1603": _("Internal Control Module Malfunction"),
"P1604": _("Internal Control Module Driver Error"),
"P1605": _("Rough Road/Acceleration Sensor Electrical Malfunction"),
"P1606": _("Rough Road Spec Engine Torque ABS-ECU Electrical Malfunction"),
"P1607": _("Vehicle speed signal Error message from instrument cluster"),
"P1608": _("Steering angle signal Error message from steering angle sensor"),
"P1609": _("Crash shut-down activated"),
"P1611": _("MIL Call-up Circ./Transm.Contr.Module Short to Ground"),
"P1612": _("Electronic Control Module Incorrect Coding"),
"P1613": _("MIL Call-up Circ Open/Short to B+"),
"P1614": _("MIL Call-up Circ./Transm.Contr.Module Range/Performance"),
"P1615": _("Engine Oil Temperature Sensor Circuit range/performance"),
"P1616": _("Glow Plug/Heater Indicator Circ. Short to B+"),
"P1617": _("Glow Plug/Heater Indicator Circ. Open/Short to Ground"),
"P1618": _("Glow Plug/Heater Relay Circ. Short to B+"),
"P1619": _("Glow Plug/Heater Relay Circ. Open/Short to Ground"),
"P1620": _("Engine coolant temperature signal open/short to B+"),
"P1621": _("Engine coolant temperature signal short to ground"),
"P1622": _("Engine coolant temperature signal range/performance"),
"P1623": _("Data Bus Powertrain No Communication"),
"P1624": _("MIL Request Sign.active"),
"P1625": _("Data-Bus Powertrain Unplausible Message from Transm.Contr."),
"P1626": _("Data-Bus Powertrain Missing Message from Transm.Contr."),
"P1627": _("Data-Bus Powertrain missing message from fuel injection pump"),
"P1628": _("Data-Bus Powertrain missing message from steering sensor"),
"P1629": _("Data-Bus Powertrain missing message from distance control"),
"P1630": _("Accelera.Pedal Pos.Sensor 1 Signal too Low"),
"P1631": _("Accelera.Pedal Pos.Sensor 1 Signal too High"),
"P1632": _("Accelera.Pedal Pos.Sensor 1 Power Supply Malfunction"),
"P1633": _("Accelera.Pedal Pos.Sensor 2 Signal too Low"),
"P1634": _("Accelera.Pedal Pos.Sensor 2 Signal too High"),
"P1635": _("Data Bus Powertrain missing message f.air condition control"),
"P1636": _("Data Bus Powertrain missing message from Airbag control"),
"P1637": _("Data Bus Powertrain missing message f.central electr.control"),
"P1638": _("Data Bus Powertrain missing message from clutch control"),
"P1639": _("Accelera.Pedal Pos.Sensor 1+2 Range/Performance"),
"P1640": _("Internal Contr.Module (EEPROM) Error"),
"P1641": _("Please check DTC Memory of Air Condition ECU"),
"P1642": _("Please check DTC Memory of Airbag ECU"),
"P1643": _("Please check DTC Memory of central electric ECU"),
"P1644": _("Please check DTC Memory of clutch ECU"),
"P1645": _("Data Bus Powertrain missing message f.all wheel drive contr."),
"P1646": _("Please Check DTC Memory of all wheel drive ECU"),
"P1647": _("Please check coding of ECUs in Data Bus Powertrain"),
"P1648": _("Data Bus Powertrain Malfunction"),
"P1649": _("Data Bus Powertrain Missing message from ABS Control Module"),
"P1650": _("Data Bus Powertrain Missing message fr.instrument panel ECU"),
"P1651": _("Data Bus Powertrain missing messages"),
"P1652": _("Please check DTC Memory of transmission ECU"),
"P1653": _("Please check DTC Memory of ABS Control Module"),
"P1654": _("Please check DTC Memory of control panel ECU"),
"P1655": _("Please check DTC Memory of ADR Control Module"),
"P1656": _("A/C clutch relay circuit short to ground"),
"P1657": _("A/C clutch relay circuit short to B+"),
"P1658": _("Data Bus Powertrain Incorrect signal from ADR Control Module"),
"P1676": _("Drive by Wire-MIL Circ. Electrical Malfunction"),
"P1677": _("Drive by Wire-MIL Circ. Short to B+"),
"P1678": _("Drive by Wire-MIL Circ. Short to Ground"),
"P1679": _("Drive by Wire-MIL Circ. Open"),
"P1681": _("Contr.Unit Programming, Programming not Finished"),
"P1684": _("Contr.Unit Programming Communication Error"),
"P1686": _("Contr.Unit Error Programming Error"),
"P1690": _("Malfunction Indication Light Malfunction"),
"P1691": _("Malfunction Indication Light Open"),
"P1692": _("Malfunction Indication Light Short to Ground"),
"P1693": _("Malfunction Indication Light Short to B+"),
"P1694": _("Malfunction Indication Light Open/Short to Ground"),
"P1704": _("Kick Down Switch Malfunction"),
"P1705": _("Gear/Ratio Monitoring Adaptation limit reached"),
"P1711": _("Wheel Speed Signal 1 Range/Performance"),
"P1716": _("Wheel Speed Signal 2 Range/Performance"),
"P1721": _("Wheel Speed Signal 3 Range/Performance"),
"P1723": _("Starter Interlock Circ. Open"),
"P1724": _("Starter Interlock Circ. Short to Ground"),
"P1726": _("Wheel Speed Signal 4 Range/Performance"),
"P1728": _("Different Wheel Speed Signals Range/Performance"),
"P1729": _("Starter Interlock Circ. Short to B+"),
"P1733": _("Tiptronic Switch Down Circ. Short to Ground"),
"P1739": _("Tiptronic Switch up Circ. Short to Ground"),
"P1740": _("Clutch temperature control"),
"P1741": _("Clutch pressure adaptation at limit"),
"P1742": _("Clutch torque adaptation at limit"),
"P1743": _("Clutch slip control signal too high"),
"P1744": _("Tiptronic Switch Recognition Circ. Short to Ground"),
"P1745": _("Transm.Contr.Unit Relay Short to B+"),
"P1746": _("Transm.Contr.Unit Relay Malfunction"),
"P1747": _("Transm.Contr.Unit Relay Open/Short to Ground"),
"P1748": _("Transm.Contr.Unit Self-Check"),
"P1749": _("Transm.Contr.Unit Incorrect Coded"),
"P1750": _("Power Supply Voltage Low Voltage"),
"P1751": _("Power Supply Voltage High Voltage"),
"P1752": _("Power Supply Malfunction"),
"P1760": _("Shift Lock Malfunction"),
"P1761": _("Shift Lock Short to Ground"),
"P1762": _("Shift Lock Short to B+"),
"P1763": _("Shift Lock Open"),
"P1764": _("Transmission temperature control"),
"P1765": _("Hydraulic Pressure Sensor 2 adaptation at limit"),
"P1766": _("Throttle Angle Signal Stuck Off"),
"P1767": _("Throttle Angle Signal Stuck On"),
"P1768": _("Hydraulic Pressure Sensor 2 Too High"),
"P1769": _("Hydraulic Pressure Sensor 2 Too Low"),
"P1770": _("Load Signal Range/Performance"),
"P1771": _("Load Signal Stuck Off"),
"P1772": _("Load Signal Stuck On"),
"P1773": _("Hydraulic Pressure Sensor 1 Too High"),
"P1774": _("Hydraulic Pressure Sensor 1 Too Low"),
"P1775": _("Hydraulic Pressure Sensor 1 adaptation at limit"),
"P1776": _("Hydraulic Pressure Sensor 1 range/performance"),
"P1777": _("Hydraulic Pressure Sensor 2 range/performance"),
"P1778": _("Solenoid EV7 Electrical Malfunction"),
"P1781": _("Engine Torque Reduction Open/Short to Ground"),
"P1782": _("Engine Torque Reduction Short to B+"),
"P1784": _("Shift up/down Wire Open/Short to Ground"),
"P1785": _("Shift up/down Wire Short to B+"),
"P1786": _("Reversing Light Circ. Open"),
"P1787": _("Reversing Light Circ. Short to Ground"),
"P1788": _("Reversing Light Circ. Short to B+"),
"P1789": _("Idle Speed Intervention Circ. Error Message from Engine Contr."),
"P1790": _("Transmission Range Display Circ. Open"),
"P1791": _("Transmission Range Display Circ. Short to Ground"),
"P1792": _("Transmission Range Display Circ. Short to B+"),
"P1793": _("Output Speed Sensor 2 Circ. No Signal"),
"P1795": _("Vehicle Speed Signal Circ. Open"),
"P1796": _("Vehicle Speed Signal Circ. Short to Ground"),
"P1797": _("Vehicle Speed Signal Circ. Short to B+"),
"P1798": _("Output Speed Sensor 2 Circ. Range/Performance"),
"P1799": _("Output Speed Sensor 2 Circ. Rpm too High"),
"P1813": _("Pressure Contr.Solenoid 1 Electrical"),
"P1814": _("Pressure Contr.Solenoid 1 Open/Short to Ground"),
"P1815": _("Pressure Contr.Solenoid 1 Short to B+"),
"P1818": _("Pressure Contr.Solenoid 2 Electrical"),
"P1819": _("Pressure Contr.Solenoid 2 Open/Short to Ground"),
"P1820": _("Pressure Contr.Solenoid 2 Short to B+"),
"P1823": _("Pressure Contr.Solenoid 3 Electrical"),
"P1824": _("Pressure Contr.Solenoid 3 Open/Short to Ground"),
"P1825": _("Pressure Contr.Solenoid 3 Short to B+"),
"P1828": _("Pressure Contr.Solenoid 4 Electrical"),
"P1829": _("Pressure Contr.Solenoid 4 Open/Short to Ground"),
"P1830": _("Pressure Contr.Solenoid 4 Short to B+"),
"P1834": _("Pressure Contr.Solenoid 5 Open/Short to Ground"),
"P1835": _("Pressure Contr.Solenoid 5 Short to B+"),
"P1841": _("Engine/Transmission Control Modules Versions do not match"),
"P1842": _("Please check DTC Memory of instrument panel ECU"),
"P1843": _("Please check DTC Memory of ADR Control Module"),
"P1844": _("Please check DTC Memory of central electric control ECU"),
"P1847": _("Please check DTC Memory of brake system ECU"),
"P1848": _("Please check DTC Memory of engine ECU"),
"P1849": _("Please check DTC Memory of transmission ECU"),
"P1850": _("Data-Bus Powertrain Missing Message from Engine Contr."),
"P1851": _("Data-Bus Powertrain Missing Message from Brake Contr."),
"P1852": _("Data-Bus Powertrain Unplausible Message from Engine Contr."),
"P1853": _("Data-Bus Powertrain Unplausible Message from Brake Contr."),
"P1854": _("Data-Bus Powertrain Hardware Defective"),
"P1855": _("Data-Bus Powertrain Software version Contr."),
"P1856": _("Throttle/Pedal Pos.Sensor A Circ. Error Message from Engine Contr."),
"P1857": _("Load Signal Error Message from Engine Contr."),
"P1858": _("Engine Speed Input Circ. Error Message from Engine Contr."),
"P1859": _("Brake Switch Circ. Error Message from Engine Contr."),
"P1860": _("Kick Down Switch Error Message from Engine Contr."),
"P1861": _("Throttle Position (TP) sensor Error Message from ECM"),
"P1862": _("Data Bus Powertrain Missing message from instr. panel ECU"),
"P1863": _("Data Bus Powertrain Missing Message from St. Angle Sensor"),
"P1864": _("Data Bus Powertrain Missing message from ADR control module"),
"P1865": _("Data Bus Powertrain Missing message from central electronics"),
"P1866": _("Data Bus Powertrain Missing messages"),
}
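# Usage sketch (illustrative, not part of the original table): callers can map
# a manufacturer-specific DTC to its description and fall back gracefully for
# codes that are not listed.
#
#   description = DTC_CODES_MANUFACTURER.get("P1411", _("Unknown DTC"))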
| bwhitelock/garmon-ng | garmon/audi_codes.py | Python | gpl-3.0 | 34,868 | 0.000488 |
# This module is part of the GeoTag-X project builder.
# Copyright (C) 2015 UNITAR.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from question import Question
class TestQuestion(unittest.TestCase):
def test_valid_keys(self):
self.assertTrue(Question.iskey("A")[0], "Single-character")
self.assertTrue(Question.iskey("thisIsALongKey")[0], "Multi-character")
self.assertTrue(Question.iskey("--")[0], "Hyphens")
self.assertTrue(Question.iskey("--key")[0], "Leading hyphens")
self.assertTrue(Question.iskey("_")[0], "Underscores")
self.assertTrue(Question.iskey("__key")[0], "Leading underscores")
self.assertTrue(Question.iskey("_now-y0u_4re-pushing-1t")[0], "Mixed characters")
self.assertTrue(Question.iskey("_end")[0], "Not a reserved keyword")
def test_illegal_keys(self):
self.assertFalse(Question.iskey("")[0], "Empty string")
self.assertFalse(Question.iskey(" ")[0], "Whitespace only")
self.assertFalse(Question.iskey(" key")[0], "Leading whitespace")
self.assertFalse(Question.iskey("end\t")[0], "Traling tabulation")
self.assertFalse(Question.iskey("*$/\\")[0], "Non-alphanumeric characters")
self.assertFalse(Question.iskey("end")[0], "Reserved keyword")
self.assertFalse(Question.iskey("photoVisible")[0], "Reserved keyword")
self.assertFalse(Question.iskey(32768)[0], "Not a string")
self.assertFalse(Question.iskey("\n")[0], "Illegal escape character")
if __name__ == "__main__":
unittest.main()
| geotagx/geotagx-project-template | src/test_question.py | Python | agpl-3.0 | 2,076 | 0.010597 |
from django.apps import AppConfig
class Leaderboard2Config(AppConfig):
name = 'leaderboard'
| FRC-RS/FRS | leaderboard/apps.py | Python | mit | 98 | 0 |
"""Test Home Assistant json utility functions."""
from json import JSONEncoder
import os
import sys
from tempfile import mkdtemp
import unittest
from unittest.mock import Mock
import pytest
from homeassistant.exceptions import HomeAssistantError
from homeassistant.util.json import SerializationError, load_json, save_json
# Test data that can be saved as JSON
TEST_JSON_A = {"a": 1, "B": "two"}
TEST_JSON_B = {"a": "one", "B": 2}
# Test data that can not be saved as JSON (keys must be strings)
TEST_BAD_OBJECT = {("A",): 1}
# Test data that can not be loaded as JSON
TEST_BAD_SERIALIED = "THIS IS NOT JSON\n"
TMP_DIR = None
def setup():
"""Set up for tests."""
global TMP_DIR
TMP_DIR = mkdtemp()
def teardown():
"""Clean up after tests."""
for fname in os.listdir(TMP_DIR):
os.remove(os.path.join(TMP_DIR, fname))
os.rmdir(TMP_DIR)
def _path_for(leaf_name):
return os.path.join(TMP_DIR, leaf_name + ".json")
def test_save_and_load():
"""Test saving and loading back."""
fname = _path_for("test1")
save_json(fname, TEST_JSON_A)
data = load_json(fname)
assert data == TEST_JSON_A
# Skipped on Windows
@unittest.skipIf(
sys.platform.startswith("win"), "private permissions not supported on Windows"
)
def test_save_and_load_private():
"""Test we can load private files and that they are protected."""
fname = _path_for("test2")
save_json(fname, TEST_JSON_A, private=True)
data = load_json(fname)
assert data == TEST_JSON_A
stats = os.stat(fname)
assert stats.st_mode & 0o77 == 0
def test_overwrite_and_reload():
"""Test that we can overwrite an existing file and read back."""
fname = _path_for("test3")
save_json(fname, TEST_JSON_A)
save_json(fname, TEST_JSON_B)
data = load_json(fname)
assert data == TEST_JSON_B
def test_save_bad_data():
"""Test error from trying to save unserialisable data."""
fname = _path_for("test4")
with pytest.raises(SerializationError):
save_json(fname, TEST_BAD_OBJECT)
def test_load_bad_data():
"""Test error from trying to load unserialisable data."""
fname = _path_for("test5")
with open(fname, "w") as fh:
fh.write(TEST_BAD_SERIALIED)
with pytest.raises(HomeAssistantError):
load_json(fname)
def test_custom_encoder():
"""Test serializing with a custom encoder."""
class MockJSONEncoder(JSONEncoder):
"""Mock JSON encoder."""
def default(self, o):
"""Mock JSON encode method."""
return "9"
fname = _path_for("test6")
save_json(fname, Mock(), encoder=MockJSONEncoder)
data = load_json(fname)
assert data == "9"
| leppa/home-assistant | tests/util/test_json.py | Python | apache-2.0 | 2,695 | 0.000371 |
# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ctypes.util
import errno
import fcntl
import getpass
import locale
import logging
import os
import random
import subprocess
import sys
import textwrap
import time
from struct import unpack, pack
from termios import TIOCGWINSZ
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils._text import to_bytes, to_text, to_native
from ansible.module_utils.six import with_metaclass, text_type
from ansible.utils.color import stringc
from ansible.utils.singleton import Singleton
from ansible.utils.unsafe_proxy import wrap_var
try:
# Python 2
input = raw_input
except NameError:
# Python 3, we already have raw_input
pass
_LIBC = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
# Set argtypes, to avoid segfault if the wrong type is provided,
# restype is assumed to be c_int
_LIBC.wcwidth.argtypes = (ctypes.c_wchar,)
_LIBC.wcswidth.argtypes = (ctypes.c_wchar_p, ctypes.c_int)
# Max for c_int
_MAX_INT = 2 ** (ctypes.sizeof(ctypes.c_int) * 8 - 1) - 1
_LOCALE_INITIALIZED = False
_LOCALE_INITIALIZATION_ERR = None
def initialize_locale():
"""Set the locale to the users default setting
and set ``_LOCALE_INITIALIZED`` to indicate whether
``get_text_width`` may run into trouble
"""
global _LOCALE_INITIALIZED, _LOCALE_INITIALIZATION_ERR
if _LOCALE_INITIALIZED is False:
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error as e:
_LOCALE_INITIALIZATION_ERR = e
else:
_LOCALE_INITIALIZED = True
def get_text_width(text):
"""Function that utilizes ``wcswidth`` or ``wcwidth`` to determine the
number of columns used to display a text string.
We try first with ``wcswidth``, and fallback to iterating each
character and using wcwidth individually, falling back to a value of 0
for non-printable wide characters
    On Py2, this depends on ``locale.setlocale(locale.LC_ALL, '')``,
    which in the case of Ansible is done in ``bin/ansible``
"""
if not isinstance(text, text_type):
raise TypeError('get_text_width requires text, not %s' % type(text))
if _LOCALE_INITIALIZATION_ERR:
Display().warning(
'An error occurred while calling ansible.utils.display.initialize_locale '
'(%s). This may result in incorrectly calculated text widths that can '
'cause Display to print incorrect line lengths' % _LOCALE_INITIALIZATION_ERR
)
elif not _LOCALE_INITIALIZED:
Display().warning(
'ansible.utils.display.initialize_locale has not been called, '
'this may result in incorrectly calculated text widths that can '
'cause Display to print incorrect line lengths'
)
try:
width = _LIBC.wcswidth(text, _MAX_INT)
except ctypes.ArgumentError:
width = -1
if width != -1:
return width
width = 0
counter = 0
for c in text:
counter += 1
if c in (u'\x08', u'\x7f', u'\x94', u'\x1b'):
            # A few characters result in a subtraction of length:
            # BS, DEL, CCH, ESC
            # ESC is slightly different: it is non printable, but as the start
            # of an escape sequence the whole sequence contributes a single
            # non printable length
width -= 1
counter -= 1
continue
try:
w = _LIBC.wcwidth(c)
except ctypes.ArgumentError:
w = -1
if w == -1:
# -1 signifies a non-printable character
# use 0 here as a best effort
w = 0
width += w
if width == 0 and counter and not _LOCALE_INITIALIZED:
raise EnvironmentError(
'ansible.utils.display.initialize_locale has not been called, '
'and get_text_width could not calculate text width of %r' % text
)
# It doesn't make sense to have a negative printable width
return width if width >= 0 else 0
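# Illustrative example (not part of the original module): wide CJK characters
# occupy two terminal columns, so get_text_width() can differ from len().
# Assumes initialize_locale() has been called so wcswidth behaves as expected.
#
#   initialize_locale()
#   get_text_width(u'hello')   # -> 5
#   get_text_width(u'コード')  # -> 6 (three double-width characters)
#   len(u'コード')             # -> 3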
class FilterBlackList(logging.Filter):
def __init__(self, blacklist):
self.blacklist = [logging.Filter(name) for name in blacklist]
def filter(self, record):
return not any(f.filter(record) for f in self.blacklist)
class FilterUserInjector(logging.Filter):
"""
This is a filter which injects the current user as the 'user' attribute on each record. We need to add this filter
to all logger handlers so that 3rd party libraries won't print an exception due to user not being defined.
"""
try:
username = getpass.getuser()
except KeyError:
# people like to make containers w/o actual valid passwd/shadow and use host uids
username = 'uid=%s' % os.getuid()
def filter(self, record):
record.user = FilterUserInjector.username
return True
logger = None
# TODO: make this a callback event instead
if getattr(C, 'DEFAULT_LOG_PATH'):
path = C.DEFAULT_LOG_PATH
if path and (os.path.exists(path) and os.access(path, os.W_OK)) or os.access(os.path.dirname(path), os.W_OK):
# NOTE: level is kept at INFO to avoid security disclosures caused by certain libraries when using DEBUG
logging.basicConfig(filename=path, level=logging.INFO, # DO NOT set to logging.DEBUG
format='%(asctime)s p=%(process)d u=%(user)s n=%(name)s | %(message)s')
logger = logging.getLogger('ansible')
for handler in logging.root.handlers:
handler.addFilter(FilterBlackList(getattr(C, 'DEFAULT_LOG_FILTER', [])))
handler.addFilter(FilterUserInjector())
else:
print("[WARNING]: log file at %s is not writeable and we cannot create it, aborting\n" % path, file=sys.stderr)
# map color to log levels
color_to_log_level = {C.COLOR_ERROR: logging.ERROR,
C.COLOR_WARN: logging.WARNING,
C.COLOR_OK: logging.INFO,
C.COLOR_SKIP: logging.WARNING,
C.COLOR_UNREACHABLE: logging.ERROR,
C.COLOR_DEBUG: logging.DEBUG,
C.COLOR_CHANGED: logging.INFO,
C.COLOR_DEPRECATE: logging.WARNING,
C.COLOR_VERBOSE: logging.INFO}
b_COW_PATHS = (
b"/usr/bin/cowsay",
b"/usr/games/cowsay",
b"/usr/local/bin/cowsay", # BSD path for cowsay
b"/opt/local/bin/cowsay", # MacPorts path for cowsay
)
class Display(with_metaclass(Singleton, object)):
def __init__(self, verbosity=0):
self.columns = None
self.verbosity = verbosity
# list of all deprecation messages to prevent duplicate display
self._deprecations = {}
self._warns = {}
self._errors = {}
self.b_cowsay = None
self.noncow = C.ANSIBLE_COW_SELECTION
self.set_cowsay_info()
if self.b_cowsay:
try:
cmd = subprocess.Popen([self.b_cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
self.cows_available = set([to_text(c) for c in out.split()])
if C.ANSIBLE_COW_ACCEPTLIST and any(C.ANSIBLE_COW_ACCEPTLIST):
self.cows_available = set(C.ANSIBLE_COW_ACCEPTLIST).intersection(self.cows_available)
except Exception:
# could not execute cowsay for some reason
self.b_cowsay = False
self._set_column_width()
def set_cowsay_info(self):
if C.ANSIBLE_NOCOWS:
return
if C.ANSIBLE_COW_PATH:
self.b_cowsay = C.ANSIBLE_COW_PATH
else:
for b_cow_path in b_COW_PATHS:
if os.path.exists(b_cow_path):
self.b_cowsay = b_cow_path
def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False, newline=True):
""" Display a message to the user
Note: msg *must* be a unicode string to prevent UnicodeError tracebacks.
"""
nocolor = msg
if not log_only:
has_newline = msg.endswith(u'\n')
if has_newline:
msg2 = msg[:-1]
else:
msg2 = msg
if color:
msg2 = stringc(msg2, color)
if has_newline or newline:
msg2 = msg2 + u'\n'
msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr))
if sys.version_info >= (3,):
# Convert back to text string on python3
# We first convert to a byte string so that we get rid of
# characters that are invalid in the user's locale
msg2 = to_text(msg2, self._output_encoding(stderr=stderr), errors='replace')
# Note: After Display() class is refactored need to update the log capture
# code in 'bin/ansible-connection' (and other relevant places).
if not stderr:
fileobj = sys.stdout
else:
fileobj = sys.stderr
fileobj.write(msg2)
try:
fileobj.flush()
except IOError as e:
# Ignore EPIPE in case fileobj has been prematurely closed, eg.
# when piping to "head -n1"
if e.errno != errno.EPIPE:
raise
if logger and not screen_only:
# We first convert to a byte string so that we get rid of
# color and characters that are invalid in the user's locale
msg2 = to_bytes(nocolor.lstrip(u'\n'))
if sys.version_info >= (3,):
# Convert back to text string on python3
msg2 = to_text(msg2, self._output_encoding(stderr=stderr))
lvl = logging.INFO
if color:
# set logger level based on color (not great)
try:
lvl = color_to_log_level[color]
except KeyError:
# this should not happen, but JIC
raise AnsibleAssertionError('Invalid color supplied to display: %s' % color)
# actually log
logger.log(lvl, msg2)
def v(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=0)
def vv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=1)
def vvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=2)
def vvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=3)
def vvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=4)
def vvvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=5)
def debug(self, msg, host=None):
if C.DEFAULT_DEBUG:
if host is None:
self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color=C.COLOR_DEBUG)
else:
self.display("%6d %0.5f [%s]: %s" % (os.getpid(), time.time(), host, msg), color=C.COLOR_DEBUG)
def verbose(self, msg, host=None, caplevel=2):
to_stderr = C.VERBOSE_TO_STDERR
if self.verbosity > caplevel:
if host is None:
self.display(msg, color=C.COLOR_VERBOSE, stderr=to_stderr)
else:
self.display("<%s> %s" % (host, msg), color=C.COLOR_VERBOSE, stderr=to_stderr)
def get_deprecation_message(self, msg, version=None, removed=False, date=None, collection_name=None):
''' used to print out a deprecation message.'''
msg = msg.strip()
if msg and msg[-1] not in ['!', '?', '.']:
msg += '.'
if collection_name == 'ansible.builtin':
collection_name = 'ansible-core'
if removed:
header = '[DEPRECATED]: {0}'.format(msg)
removal_fragment = 'This feature was removed'
help_text = 'Please update your playbooks.'
else:
header = '[DEPRECATION WARNING]: {0}'.format(msg)
removal_fragment = 'This feature will be removed'
# FUTURE: make this a standalone warning so it only shows up once?
help_text = 'Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.'
if collection_name:
from_fragment = 'from {0}'.format(collection_name)
else:
from_fragment = ''
if date:
when = 'in a release after {0}.'.format(date)
elif version:
when = 'in version {0}.'.format(version)
else:
when = 'in a future release.'
message_text = ' '.join(f for f in [header, removal_fragment, from_fragment, when, help_text] if f)
return message_text
def deprecated(self, msg, version=None, removed=False, date=None, collection_name=None):
if not removed and not C.DEPRECATION_WARNINGS:
return
message_text = self.get_deprecation_message(msg, version=version, removed=removed, date=date, collection_name=collection_name)
if removed:
raise AnsibleError(message_text)
wrapped = textwrap.wrap(message_text, self.columns, drop_whitespace=False)
message_text = "\n".join(wrapped) + "\n"
if message_text not in self._deprecations:
self.display(message_text.strip(), color=C.COLOR_DEPRECATE, stderr=True)
self._deprecations[message_text] = 1
def warning(self, msg, formatted=False):
if not formatted:
new_msg = "[WARNING]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = "\n".join(wrapped) + "\n"
else:
new_msg = "\n[WARNING]: \n%s" % msg
if new_msg not in self._warns:
self.display(new_msg, color=C.COLOR_WARN, stderr=True)
self._warns[new_msg] = 1
def system_warning(self, msg):
if C.SYSTEM_WARNINGS:
self.warning(msg)
def banner(self, msg, color=None, cows=True):
'''
        Prints a header-looking line, using cowsay when available, otherwise padding the message with stars up to the terminal width (minimum of 3 stars)
'''
msg = to_text(msg)
if self.b_cowsay and cows:
try:
self.banner_cowsay(msg)
return
except OSError:
self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.")
msg = msg.strip()
try:
star_len = self.columns - get_text_width(msg)
except EnvironmentError:
star_len = self.columns - len(msg)
if star_len <= 3:
star_len = 3
stars = u"*" * star_len
self.display(u"\n%s %s" % (msg, stars), color=color)
def banner_cowsay(self, msg, color=None):
if u": [" in msg:
msg = msg.replace(u"[", u"")
if msg.endswith(u"]"):
msg = msg[:-1]
runcmd = [self.b_cowsay, b"-W", b"60"]
if self.noncow:
thecow = self.noncow
if thecow == 'random':
thecow = random.choice(list(self.cows_available))
runcmd.append(b'-f')
runcmd.append(to_bytes(thecow))
runcmd.append(to_bytes(msg))
cmd = subprocess.Popen(runcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
self.display(u"%s\n" % to_text(out), color=color)
def error(self, msg, wrap_text=True):
if wrap_text:
new_msg = u"\n[ERROR]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = u"\n".join(wrapped) + u"\n"
else:
new_msg = u"ERROR! %s" % msg
if new_msg not in self._errors:
self.display(new_msg, color=C.COLOR_ERROR, stderr=True)
self._errors[new_msg] = 1
@staticmethod
def prompt(msg, private=False):
prompt_string = to_bytes(msg, encoding=Display._output_encoding())
if sys.version_info >= (3,):
# Convert back into text on python3. We do this double conversion
# to get rid of characters that are illegal in the user's locale
prompt_string = to_text(prompt_string)
if private:
return getpass.getpass(prompt_string)
else:
return input(prompt_string)
def do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
result = None
if sys.__stdin__.isatty():
do_prompt = self.prompt
if prompt and default is not None:
msg = "%s [%s]: " % (prompt, default)
elif prompt:
msg = "%s: " % prompt
else:
msg = 'input for %s: ' % varname
if confirm:
while True:
result = do_prompt(msg, private)
second = do_prompt("confirm " + msg, private)
if result == second:
break
self.display("***** VALUES ENTERED DO NOT MATCH ****")
else:
result = do_prompt(msg, private)
else:
result = None
self.warning("Not prompting as we are not in interactive mode")
# if result is false and default is not None
if not result and default is not None:
result = default
if encrypt:
# Circular import because encrypt needs a display class
from ansible.utils.encrypt import do_encrypt
result = do_encrypt(result, encrypt, salt_size, salt)
# handle utf-8 chars
result = to_text(result, errors='surrogate_or_strict')
if unsafe:
result = wrap_var(result)
return result
@staticmethod
def _output_encoding(stderr=False):
encoding = locale.getpreferredencoding()
# https://bugs.python.org/issue6202
# Python2 hardcodes an obsolete value on Mac. Use MacOSX defaults
# instead.
if encoding in ('mac-roman',):
encoding = 'utf-8'
return encoding
def _set_column_width(self):
if os.isatty(1):
tty_size = unpack('HHHH', fcntl.ioctl(1, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[1]
else:
tty_size = 0
self.columns = max(79, tty_size - 1)
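# Minimal usage sketch (editor's addition; assumes the Display() constructor
# defined earlier in this file, with its default verbosity argument):
#
#   display = Display(verbosity=2)
#   display.banner("PLAY [all]")      # cowsay banner or a starred header line
#   display.vv("loaded inventory")    # shown, since verbosity 2 > caplevel 1
#   display.warning("something odd")  # wrapped, colored, de-duplicated
#   display.deprecated("old option", version='2.14')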
| s-hertel/ansible | lib/ansible/utils/display.py | Python | gpl-3.0 | 19,334 | 0.001914 |
import pygame
import time
import random
pygame.init()
display_width = 800
display_height = 600
black = (0,0,0)
white = (255,255,255)
red = (255,0,0)
block_color = (53,115,255)
car_width = 73
gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('A bit Racey')
clock = pygame.time.Clock()
carImg = pygame.image.load('racecar.png')
def things_dodged(count):
font = pygame.font.SysFont(None, 25)
text = font.render("Dodged: "+str(count), True, black)
gameDisplay.blit(text,(0,0))
def things(thingx, thingy, thingw, thingh, color):
pygame.draw.rect(gameDisplay, color, [thingx, thingy, thingw, thingh])
def car(x,y):
gameDisplay.blit(carImg,(x,y))
def text_objects(text, font):
textSurface = font.render(text, True, black)
return textSurface, textSurface.get_rect()
def message_display(text):
largeText = pygame.font.Font('freesansbold.ttf',115)
TextSurf, TextRect = text_objects(text, largeText)
TextRect.center = ((display_width/2),(display_height/2))
gameDisplay.blit(TextSurf, TextRect)
pygame.display.update()
time.sleep(2)
game_loop()
def crash():
message_display('You Crashed')
def game_intro():
intro = True
while intro:
for event in pygame.event.get():
print(event)
if event.type == pygame.QUIT:
pygame.quit()
quit()
gameDisplay.fill(white)
largeText = pygame.font.Font('freesansbold.ttf',115)
TextSurf, TextRect = text_objects("A bit Racey", largeText)
TextRect.center = ((display_width/2),(display_height/2))
gameDisplay.blit(TextSurf, TextRect)
pygame.display.update()
clock.tick(15)
def game_loop():
x = (display_width * 0.45)
y = (display_height * 0.8)
x_change = 0
thing_startx = random.randrange(0, display_width)
thing_starty = -600
thing_speed = 4
thing_width = 100
thing_height = 100
thingCount = 1
dodged = 0
gameExit = False
while not gameExit:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
x_change = -5
if event.key == pygame.K_RIGHT:
x_change = 5
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
x_change = 0
x += x_change
gameDisplay.fill(white)
# things(thingx, thingy, thingw, thingh, color)
things(thing_startx, thing_starty, thing_width, thing_height, block_color)
thing_starty += thing_speed
car(x,y)
things_dodged(dodged)
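        # Crash when the car leaves the screen on either side.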
if x > display_width - car_width or x < 0:
crash()
if thing_starty > display_height:
thing_starty = 0 - thing_height
thing_startx = random.randrange(0,display_width)
dodged += 1
thing_speed += 1
thing_width += (dodged * 1.2)
if y < thing_starty+thing_height:
print('y crossover')
            if (x > thing_startx and x < thing_startx + thing_width) or (x + car_width > thing_startx and x + car_width < thing_startx + thing_width):
print('x crossover')
crash()
pygame.display.update()
clock.tick(60)
game_intro()
game_loop()
pygame.quit()
quit()
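# Editor's note -- an alternative collision test (a sketch, not used above):
# pygame's Rect objects express the same "crossover" checks more directly.
# Assuming the same position/size variables as in game_loop():
#
#   car_rect = pygame.Rect(x, y, car_width, carImg.get_height())
#   thing_rect = pygame.Rect(thing_startx, thing_starty, thing_width, thing_height)
#   if car_rect.colliderect(thing_rect):
#       crash()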
| danstoner/python_experiments | playing_with_pygame/pygame-tutorial-series/part10.py | Python | gpl-2.0 | 3,658 | 0.020503 |
#!/usr/bin/env python
""" Evaluate the performance of detector
get the statistical quantify for the hypotheis test
like False Alarm Rate.
"""
from __future__ import print_function, division, absolute_import
import copy, os
import collections
from ..Detector import MEM_FS
from ..Detector import BotDetector
from ..util import update_not_none, plt, np, DataRecorder
from ..util import zdump, zload, Load, get_detect_metric
from ..util import DataEndException
import itertools
import pandas
from .Detect import Detect
class BotnetDetectionEval(Detect):
"""plot ROC curve for the hypothesis test"""
def init_parser(self, parser):
super(BotnetDetectionEval, self).init_parser(parser)
parser.add_argument('--roc_thresholds', default=None, type=Load,
help=("any valid python expression. Thresholds used for get "
"roc curve"))
parser.add_argument('--label_col_name', default=None, type=str,
help="name of the label column")
parser.add_argument('--ip_col_names', default=None,
type=lambda x: x.split(','),
help="name of the ip columns")
@staticmethod
def parse_label(label):
return 'Botnet' in label
def get_ground_truth(self):
label_col_name = self.desc['label_col_name']
ip_col_names = self.desc['ip_col_names']
detect_rg = self.desc.get('detect_rg')
rg_type = self.desc['win_type']
assert len(ip_col_names) <= 2, "at most two IP columns are allowed."
fetch_columns = [label_col_name] + ip_col_names
data_records = self.detector.data_file.data.get_rows(fetch_columns,
rg=detect_rg,
rg_type=rg_type)
ground_truth_bot_ips = set()
all_ips = set()
for row in data_records:
if self.parse_label(row[0]): # is botflow
ground_truth_bot_ips.add(row[1])
ground_truth_bot_ips.add(row[2])
all_ips.add(row[1])
all_ips.add(row[2])
return {
'ground_truth_bot_ips': ground_truth_bot_ips,
'all_ips': all_ips,
}
@staticmethod
def get_detected_ips(label_info, detection):
ips = set()
for i, d in enumerate(detection):
if not d:
continue
ips |= set(label_info['win_ips'][i])
return ips
def eval(self):
thresholds = self.desc['roc_thresholds']
ground_truth = self.get_ground_truth()
self.logger.debug('# of ips in this time frame: %d.' %
(len(ground_truth['all_ips'])))
self.logger.debug('# of bot ips in this time frame: %d.' %
(len(ground_truth['ground_truth_bot_ips'])))
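        # Scale the recorded divergence values by their maximum so the ROC
        # thresholds below operate on a common [0, 1] range.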
divs = self.detector.record_data['entropy']
divs = np.array(divs, dtype=float) / np.max(divs)
bot_detector_desc = copy.deepcopy(self.desc)
bot_detector_desc.update({
'threshold': 0,
'anomaly_detector': self.detector,
})
bot_detector = BotDetector.SoBotDet(bot_detector_desc)
data_recorder = DataRecorder()
res = np.zeros((len(thresholds), 2))
for i, threshold in enumerate(thresholds):
bot_detector.desc['threshold'] = threshold
self.logger.info('Start to detect with threshold %s ' % (threshold))
result = bot_detector.detect(None, anomaly_detect=False)
tp, fn, tn, fp, sensitivity, specificity = \
get_detect_metric(ground_truth['ground_truth_bot_ips'],
result['detected_bot_ips'],
ground_truth['all_ips'])
tpr = tp * 1.0 / (tp + fn) if (tp + fn) > 0 else float('nan')
fpr = fp * 1.0 / (fp + tn) if (fp + tn) > 0 else float('nan')
data_recorder.add(threshold=threshold, tp=tp, tn=tn, fp=fp, fn=fn,
tpr=tpr, fpr=fpr,
detect_result=result)
data_frame = data_recorder.to_pandas_dataframe()
data_frame.set_index(['threshold'], drop=False)
return {
'metric': data_frame,
'ground_truth_bot_ips': ground_truth['ground_truth_bot_ips'],
'all_ips': ground_truth['all_ips'],
}
def run(self):
self.desc = copy.deepcopy(self.args.config['DETECTOR_DESC'])
update_not_none(self.desc, self.args.__dict__)
self.detect()
return self.eval()
class TimeBasedBotnetDetectionEval(BotnetDetectionEval):
"""Calculate corrected metrics (tTP, tFN, tFP, tTN) for botnet detection.
Please refer to the following paper for the details:
Garcia, Sebastian, et al. 'An empirical comparison of botnet detection
methods.' Computers & Security 45 (2014): 100-123.
"""
def init_parser(self, parser):
super(TimeBasedBotnetDetectionEval, self).init_parser(parser)
parser.add_argument('--timeframe_size', default=None, type=float,
help=("--timeframe_size [float] the size of each time frame."
"Metrics (tTP, tFN, tFP, tTN) will be calculated for "
"each time frame."))
        def parse_tuple(s):
            return tuple(float(val) for val in s.split(','))
parser.add_argument('--timeframe_rg', default=None, type=parse_tuple,
help=("comma-separated strings, the first one is start time, "
"the second one is end time. Data in the range will be "
"divided to timeframes for evaluation."))
parser.add_argument('--timeframe_decay_ratio', default=None, type=float,
help="parameter in the exp correction function.")
parser.add_argument('--output_prefix', default=None,
help='prefix for output file')
def get_roc_curve(self, stats):
thresholds = self.desc['roc_thresholds']
if 'threshold' not in stats.columns:
return
data_recorder = DataRecorder()
for threshold in thresholds:
threshold_stats = stats[stats.threshold==threshold]
sum_stats = threshold_stats.sum()
FPR = sum_stats.tFP / (sum_stats.tFP + sum_stats.tTN)
TPR = sum_stats.tTP / (sum_stats.tTP + sum_stats.tFN)
precision = sum_stats.tTP / (sum_stats.tTP + sum_stats.tFP)
f1_score = 2 * precision * TPR / (precision + TPR)
data_recorder.add(threshold=threshold,
FPR=FPR,
TPR=TPR,
precision=precision,
f1_score=f1_score)
return data_recorder.to_pandas_dataframe()
def run(self):
timeframe_rg = self.desc['timeframe_rg']
thresholds = self.desc['roc_thresholds']
assert len(timeframe_rg) == 2, "unknown format of timeframe_rg"
timeframe_size = self.desc['timeframe_size']
timeframe_decay_ratio = self.desc['timeframe_decay_ratio']
cur_time = timeframe_rg[0]
data_recorder = DataRecorder()
timeframe_idx = 0
while cur_time < timeframe_rg[1]:
self.desc['detect_rg'] = [cur_time, cur_time + timeframe_size]
self.detect()
try:
eval_result = self.eval()
except DataEndException:
self.logger.warning('Has read end of the data in evaluation!')
break
metric = eval_result['metric']
bot_ips = eval_result['ground_truth_bot_ips']
bot_ip_num = float(len(bot_ips))
normal_ip_num = float(len(eval_result['all_ips'])) - bot_ip_num
correct_value = np.exp(-1 * timeframe_decay_ratio * timeframe_idx) + 1
tTP = metric.tp * correct_value / bot_ip_num # UPDATE HERE
tFN = metric.fn * correct_value / bot_ip_num
tFP = metric.fp * 1.0 / normal_ip_num
tTN = metric.tn * 1.0 / normal_ip_num
for idx, threshold in enumerate(thresholds):
data_recorder.add(threshold=threshold,
timeframe_idx=timeframe_idx,
tTP=tTP[idx],
tFN=tFN[idx],
tFP=tFP[idx],
tTN=tTN[idx])
cur_time += timeframe_size
timeframe_idx += 1
output_prefix = self.desc.get('output_prefix', 'output_prefix')
timeframe_results = data_recorder.to_pandas_dataframe()
timeframe_results.to_csv(output_prefix + '_time_frame.csv', sep=',')
roc = self.get_roc_curve(data_recorder.to_pandas_dataframe())
if roc is not None:
roc.to_csv(output_prefix + '_roc.csv', sep=',')
return roc
def plot(self, data_recorder):
pass
| hbhzwj/GAD | gad/Experiment/EvalForBotnetDetection.py | Python | gpl-3.0 | 9,139 | 0.001532 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that keys are consistently accepted or rejected in all languages."""
import itertools
from typing import Iterable, Tuple
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import tink
from tink import aead
from tink import daead
from tink import hybrid
from tink import mac
from tink import prf
from tink import signature
from tink.proto import common_pb2
from tink.proto import ecdsa_pb2
from tink.proto import tink_pb2
from util import supported_key_types
from util import testing_servers
# Test cases that succeed in a language but should fail
SUCCEEDS_BUT_SHOULD_FAIL = [
# TODO(b/160130470): In CC and Python Hybrid templates are not checked for
# valid AEAD params. (These params *are* checked when the key is used.)
('EciesAeadHkdfPrivateKey(NIST_P256,UNCOMPRESSED,SHA256,AesEaxKey(15,11))',
'cc'),
('EciesAeadHkdfPrivateKey(NIST_P256,UNCOMPRESSED,SHA256,AesEaxKey(15,11))',
'python'),
]
# Test cases that fail in a language but should succeed
FAILS_BUT_SHOULD_SUCCEED = [
# TODO(b/160134058) Java and Go do not accept templates with CURVE25519.
('EciesAeadHkdfPrivateKey(CURVE25519,UNCOMPRESSED,SHA1,AesGcmKey(16))',
'java'),
('EciesAeadHkdfPrivateKey(CURVE25519,UNCOMPRESSED,SHA1,AesGcmKey(16))',
'go'),
('EciesAeadHkdfPrivateKey(CURVE25519,UNCOMPRESSED,SHA224,AesGcmKey(16))',
'java'),
('EciesAeadHkdfPrivateKey(CURVE25519,UNCOMPRESSED,SHA224,AesGcmKey(16))',
'go'),
('EciesAeadHkdfPrivateKey(CURVE25519,UNCOMPRESSED,SHA256,AesGcmKey(16))',
'java'),
('EciesAeadHkdfPrivateKey(CURVE25519,UNCOMPRESSED,SHA256,AesGcmKey(16))',
'go'),
('EciesAeadHkdfPrivateKey(CURVE25519,UNCOMPRESSED,SHA384,AesGcmKey(16))',
'java'),
('EciesAeadHkdfPrivateKey(CURVE25519,UNCOMPRESSED,SHA384,AesGcmKey(16))',
'go'),
('EciesAeadHkdfPrivateKey(CURVE25519,UNCOMPRESSED,SHA512,AesGcmKey(16))',
'java'),
('EciesAeadHkdfPrivateKey(CURVE25519,UNCOMPRESSED,SHA512,AesGcmKey(16))',
'go'),
]
HASH_TYPES = [
common_pb2.UNKNOWN_HASH, common_pb2.SHA1, common_pb2.SHA224,
common_pb2.SHA256, common_pb2.SHA384, common_pb2.SHA512
]
CURVE_TYPES = [
common_pb2.UNKNOWN_CURVE,
common_pb2.NIST_P256,
common_pb2.NIST_P384,
common_pb2.NIST_P521,
common_pb2.CURVE25519
]
EC_POINT_FORMATS = [
common_pb2.UNKNOWN_FORMAT,
common_pb2.UNCOMPRESSED,
common_pb2.COMPRESSED,
common_pb2.DO_NOT_USE_CRUNCHY_UNCOMPRESSED
]
SIGNATURE_ENCODINGS = [
ecdsa_pb2.UNKNOWN_ENCODING,
ecdsa_pb2.IEEE_P1363,
ecdsa_pb2.DER
]
TestCasesType = Iterable[Tuple[str, tink_pb2.KeyTemplate]]
def aes_eax_test_cases() -> TestCasesType:
for key_size in [15, 16, 24, 32, 64, 96]:
for iv_size in [11, 12, 16, 17, 24, 32]:
yield ('AesEaxKey(%d,%d)' % (key_size, iv_size),
aead.aead_key_templates.create_aes_eax_key_template(
key_size, iv_size))
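# For illustration (editor's addition): the first tuple yielded above is
#   ('AesEaxKey(15,11)', <KeyTemplate with key_size=15, iv_size=11>),
# a size combination the languages are expected to reject consistently.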
def aes_gcm_test_cases() -> TestCasesType:
for key_size in [15, 16, 24, 32, 64, 96]:
yield ('AesGcmKey(%d)' % key_size,
aead.aead_key_templates.create_aes_gcm_key_template(key_size))
def aes_gcm_siv_test_cases() -> TestCasesType:
for key_size in [15, 16, 24, 32, 64, 96]:
yield ('AesGcmSivKey(%d)' % key_size,
aead.aead_key_templates.create_aes_gcm_siv_key_template(key_size))
def aes_ctr_hmac_aead_test_cases() -> TestCasesType:
def _test_case(aes_key_size=16, iv_size=16, hmac_key_size=16,
tag_size=16, hash_type=common_pb2.SHA256):
return ('AesCtrHmacAeadKey(%d,%d,%d,%d,%s)' %
(aes_key_size, iv_size, hmac_key_size, tag_size,
common_pb2.HashType.Name(hash_type)),
aead.aead_key_templates.create_aes_ctr_hmac_aead_key_template(
aes_key_size=aes_key_size,
iv_size=iv_size,
hmac_key_size=hmac_key_size,
tag_size=tag_size,
hash_type=hash_type))
for aes_key_size in [15, 16, 24, 32, 64, 96]:
for iv_size in [11, 12, 16, 17, 24, 32]:
yield _test_case(aes_key_size=aes_key_size, iv_size=iv_size)
for hmac_key_size in [15, 16, 24, 32, 64, 96]:
for tag_size in [9, 10, 16, 20, 21, 24, 32, 33, 64, 65]:
for hash_type in HASH_TYPES:
yield _test_case(hmac_key_size=hmac_key_size, tag_size=tag_size,
hash_type=hash_type)
def hmac_test_cases() -> TestCasesType:
def _test_case(key_size=32, tag_size=16, hash_type=common_pb2.SHA256):
return ('HmacKey(%d,%d,%s)' % (key_size, tag_size,
common_pb2.HashType.Name(hash_type)),
mac.mac_key_templates.create_hmac_key_template(
key_size, tag_size, hash_type))
for key_size in [15, 16, 24, 32, 64, 96]:
yield _test_case(key_size=key_size)
for tag_size in [9, 10, 16, 20, 21, 24, 32, 33, 64, 65]:
for hash_type in HASH_TYPES:
yield _test_case(tag_size=tag_size, hash_type=hash_type)
def aes_cmac_test_cases() -> TestCasesType:
def _test_case(key_size=32, tag_size=16):
return ('AesCmacKey(%d,%d)' % (key_size, tag_size),
mac.mac_key_templates.create_aes_cmac_key_template(
key_size, tag_size))
for key_size in [15, 16, 24, 32, 64, 96]:
yield _test_case(key_size=key_size)
for tag_size in [9, 10, 16, 20, 21, 24, 32, 33, 64, 65]:
yield _test_case(tag_size=tag_size)
def aes_cmac_prf_test_cases() -> TestCasesType:
for key_size in [15, 16, 24, 32, 64, 96]:
yield ('AesCmacPrfKey(%d)' % key_size,
prf.prf_key_templates._create_aes_cmac_key_template(key_size))
def hmac_prf_test_cases() -> TestCasesType:
def _test_case(key_size=32, hash_type=common_pb2.SHA256):
return ('HmacPrfKey(%d,%s)' % (key_size,
common_pb2.HashType.Name(hash_type)),
prf.prf_key_templates._create_hmac_key_template(
key_size, hash_type))
for key_size in [15, 16, 24, 32, 64, 96]:
yield _test_case(key_size=key_size)
for hash_type in HASH_TYPES:
yield _test_case(hash_type=hash_type)
def hkdf_prf_test_cases() -> TestCasesType:
def _test_case(key_size=32, hash_type=common_pb2.SHA256):
return ('HkdfPrfKey(%d,%s)' % (key_size,
common_pb2.HashType.Name(hash_type)),
prf.prf_key_templates._create_hkdf_key_template(
key_size, hash_type))
for key_size in [15, 16, 24, 32, 64, 96]:
yield _test_case(key_size=key_size)
for hash_type in HASH_TYPES:
yield _test_case(hash_type=hash_type)
def aes_siv_test_cases() -> TestCasesType:
for key_size in [15, 16, 24, 32, 64, 96]:
yield ('AesSivKey(%d)' % key_size,
daead.deterministic_aead_key_templates.create_aes_siv_key_template(
key_size))
def ecies_aead_hkdf_test_cases() -> TestCasesType:
for curve_type in CURVE_TYPES:
for hash_type in HASH_TYPES:
ec_point_format = common_pb2.UNCOMPRESSED
dem_key_template = aead.aead_key_templates.AES128_GCM
yield ('EciesAeadHkdfPrivateKey(%s,%s,%s,AesGcmKey(16))' %
(common_pb2.EllipticCurveType.Name(curve_type),
common_pb2.EcPointFormat.Name(ec_point_format),
common_pb2.HashType.Name(hash_type)),
hybrid.hybrid_key_templates.create_ecies_aead_hkdf_key_template(
curve_type, ec_point_format, hash_type, dem_key_template))
for ec_point_format in EC_POINT_FORMATS:
curve_type = common_pb2.NIST_P256
hash_type = common_pb2.SHA256
dem_key_template = aead.aead_key_templates.AES128_GCM
yield ('EciesAeadHkdfPrivateKey(%s,%s,%s,AesGcmKey(16))' %
(common_pb2.EllipticCurveType.Name(curve_type),
common_pb2.EcPointFormat.Name(ec_point_format),
common_pb2.HashType.Name(hash_type)),
hybrid.hybrid_key_templates.create_ecies_aead_hkdf_key_template(
curve_type, ec_point_format, hash_type, dem_key_template))
curve_type = common_pb2.NIST_P256
ec_point_format = common_pb2.UNCOMPRESSED
hash_type = common_pb2.SHA256
# Use invalid AEAD key template as DEM
# TODO(juerg): Once b/160130470 is fixed, increase test coverage to all
# aead templates.
dem_key_template = aead.aead_key_templates.create_aes_eax_key_template(15, 11)
yield ('EciesAeadHkdfPrivateKey(%s,%s,%s,AesEaxKey(15,11))' %
(common_pb2.EllipticCurveType.Name(curve_type),
common_pb2.EcPointFormat.Name(ec_point_format),
common_pb2.HashType.Name(hash_type)),
hybrid.hybrid_key_templates.create_ecies_aead_hkdf_key_template(
curve_type, ec_point_format, hash_type, dem_key_template))
def ecdsa_test_cases() -> TestCasesType:
for hash_type in HASH_TYPES:
for curve_type in CURVE_TYPES:
for signature_encoding in SIGNATURE_ENCODINGS:
yield ('EcdsaPrivateKey(%s,%s,%s)' %
(common_pb2.HashType.Name(hash_type),
common_pb2.EllipticCurveType.Name(curve_type),
ecdsa_pb2.EcdsaSignatureEncoding.Name(signature_encoding)),
signature.signature_key_templates.create_ecdsa_key_template(
hash_type, curve_type, signature_encoding))
def rsa_ssa_pkcs1_test_cases() -> TestCasesType:
gen = signature.signature_key_templates.create_rsa_ssa_pkcs1_key_template
for hash_type in HASH_TYPES:
modulus_size = 2048
public_exponent = 65537
yield ('RsaSsaPkcs1PrivateKey(%s,%d,%d)' %
(common_pb2.HashType.Name(hash_type), modulus_size,
public_exponent),
gen(hash_type, modulus_size, public_exponent))
for modulus_size in [0, 2000, 3072, 4096]:
hash_type = common_pb2.SHA256
public_exponent = 65537
yield ('RsaSsaPkcs1PrivateKey(%s,%d,%d)' %
(common_pb2.HashType.Name(hash_type), modulus_size,
public_exponent),
gen(hash_type, modulus_size, public_exponent))
for public_exponent in [0, 1, 2, 3, 65536, 65537, 65538]:
hash_type = common_pb2.SHA256
modulus_size = 2048
yield ('RsaSsaPkcs1PrivateKey(%s,%d,%d)' %
(common_pb2.HashType.Name(hash_type), modulus_size,
public_exponent),
gen(hash_type, modulus_size, public_exponent))
def rsa_ssa_pss_test_cases() -> TestCasesType:
gen = signature.signature_key_templates.create_rsa_ssa_pss_key_template
for hash_type in HASH_TYPES:
salt_length = 32
modulus_size = 2048
public_exponent = 65537
yield ('RsaSsaPssPrivateKey(%s,%s,%d,%d,%d)' %
(common_pb2.HashType.Name(hash_type),
common_pb2.HashType.Name(hash_type), salt_length, modulus_size,
public_exponent),
gen(hash_type, hash_type, salt_length, modulus_size,
public_exponent))
for salt_length in [-3, 0, 1, 16, 64]:
hash_type = common_pb2.SHA256
modulus_size = 2048
public_exponent = 65537
yield ('RsaSsaPssPrivateKey(%s,%s,%d,%d,%d)' %
(common_pb2.HashType.Name(hash_type),
common_pb2.HashType.Name(hash_type), salt_length, modulus_size,
public_exponent),
gen(hash_type, hash_type, salt_length, modulus_size,
public_exponent))
for modulus_size in [0, 2000, 3072, 4096]:
hash_type = common_pb2.SHA256
salt_length = 32
public_exponent = 65537
yield ('RsaSsaPssPrivateKey(%s,%s,%d,%d,%d)' %
(common_pb2.HashType.Name(hash_type),
common_pb2.HashType.Name(hash_type), salt_length, modulus_size,
public_exponent),
gen(hash_type, hash_type, salt_length, modulus_size,
public_exponent))
hash_type1 = common_pb2.SHA256
hash_type2 = common_pb2.SHA512
salt_length = 32
modulus_size = 2048
public_exponent = 65537
yield ('RsaSsaPssPrivateKey(%s,%s,%d,%d,%d)' %
(common_pb2.HashType.Name(hash_type1),
common_pb2.HashType.Name(hash_type2), salt_length, modulus_size,
public_exponent),
gen(hash_type1, hash_type2, salt_length, modulus_size,
public_exponent))
for public_exponent in [0, 1, 2, 3, 65536, 65537, 65538]:
hash_type = common_pb2.SHA256
salt_length = 32
modulus_size = 2048
yield ('RsaSsaPssPrivateKey(%s,%s,%d,%d,%d)' %
(common_pb2.HashType.Name(hash_type),
common_pb2.HashType.Name(hash_type), salt_length, modulus_size,
public_exponent),
gen(hash_type, hash_type, salt_length, modulus_size,
public_exponent))
def setUpModule():
aead.register()
daead.register()
mac.register()
hybrid.register()
signature.register()
testing_servers.start('key_generation_consistency')
def tearDownModule():
testing_servers.stop()
class KeyGenerationConsistencyTest(parameterized.TestCase):
@parameterized.parameters(
itertools.chain(aes_eax_test_cases(),
aes_gcm_test_cases(),
aes_gcm_siv_test_cases(),
aes_ctr_hmac_aead_test_cases(),
hmac_test_cases(),
aes_cmac_test_cases(),
aes_cmac_prf_test_cases(),
hmac_prf_test_cases(),
hkdf_prf_test_cases(),
aes_siv_test_cases(),
ecies_aead_hkdf_test_cases(),
ecdsa_test_cases(),
rsa_ssa_pkcs1_test_cases(),
rsa_ssa_pss_test_cases()))
def test_key_generation_consistency(self, name, template):
supported_langs = supported_key_types.SUPPORTED_LANGUAGES[
supported_key_types.KEY_TYPE_FROM_URL[template.type_url]]
failures = 0
results = {}
for lang in supported_langs:
try:
_ = testing_servers.new_keyset(lang, template)
if (name, lang) in SUCCEEDS_BUT_SHOULD_FAIL:
failures += 1
if (name, lang) in FAILS_BUT_SHOULD_SUCCEED:
self.fail('(%s, %s) succeeded, but is in FAILS_BUT_SHOULD_SUCCEED' %
(name, lang))
results[lang] = 'success'
except tink.TinkError as e:
if (name, lang) not in FAILS_BUT_SHOULD_SUCCEED:
failures += 1
if (name, lang) in SUCCEEDS_BUT_SHOULD_FAIL:
self.fail(
'(%s, %s) is in SUCCEEDS_BUT_SHOULD_FAIL, but failed with %s' %
(name, lang, e))
results[lang] = e
# Test that either all supported langs accept the template, or all reject.
if failures not in [0, len(supported_langs)]:
self.fail('key generation for template %s is inconsistent: %s' %
(name, results))
logging.info('Key generation status: %s',
'Success' if failures == 0 else 'Fail')
if __name__ == '__main__':
absltest.main()
| google/tink | testing/cross_language/key_generation_consistency_test.py | Python | apache-2.0 | 15,388 | 0.004744 |
from unittest import TestCase
class TestFoo( TestCase ):
def test_foo_1( self ):
self.assertTrue( True )
def test_foo_2( self ):
self.assertTrue( True )
def test_foo_3( self ):
self.assertTrue( True )
def test_foo_4( self ):
self.assertTrue( True )
def test_foo_5( self ):
self.assertTrue( True )
def test_foo_6( self ):
self.assertTrue( True )
def test_foo_7( self ):
self.assertTrue( True )
def test_foo_8( self ):
self.assertTrue( True )
def test_foo_9( self ):
self.assertTrue( True )
def test_foo_10( self ):
self.assertTrue( True )
def test_foo_11( self ):
self.assertTrue( True )
def test_foo_12( self ):
self.assertTrue( True )
def test_foo_13( self ):
self.assertTrue( True )
def test_foo_14( self ):
self.assertTrue( True )
def test_foo_15( self ):
self.assertTrue( True )
def test_foo_16( self ):
self.assertTrue( True )
def test_foo_17( self ):
self.assertTrue( True )
def test_foo_18( self ):
self.assertTrue( True )
def test_foo_19( self ):
self.assertTrue( True )
def test_foo_20( self ):
self.assertTrue( True )
def test_foo_21( self ):
self.assertTrue( True )
def test_foo_22( self ):
self.assertTrue( True )
def test_foo_23( self ):
self.assertTrue( True )
def test_foo_24( self ):
self.assertTrue( True )
def test_foo_25( self ):
self.assertTrue( True )
def test_foo_26( self ):
self.assertTrue( True )
def test_foo_27( self ):
self.assertTrue( True )
def test_foo_28( self ):
self.assertTrue( True )
def test_foo_29( self ):
self.assertTrue( True )
def test_foo_30( self ):
self.assertTrue( True )
def test_foo_31( self ):
self.assertTrue( True )
def test_foo_32( self ):
self.assertTrue( True )
| codepanda/pycicl | tests/fixtures/parallel/tests/testfoo.py | Python | mit | 2,035 | 0.064865 |
"""Compute ESA driving functions for various systems.
ESA is the abbreviation for the equivalent scattering approach.
ESA driving functions for an edge-shaped SSD are provided below.
Further ESA for different geometries might be added here.
Note that mode-matching approaches (such as NFC-HOA, SDM) are equivalent
to ESA in their specific geometries (spherical/circular, planar/linear).
"""
import numpy as _np
from scipy.special import jn as _jn, hankel2 as _hankel2
from . import secondary_source_line as _secondary_source_line
from . import secondary_source_point as _secondary_source_point
from .. import util as _util
def plane_2d_edge(omega, x0, n=[0, 1, 0], *, alpha=_np.pi*3/2, Nc=None,
c=None):
r"""Driving function for 2-dimensional plane wave with edge ESA.
Driving function for a virtual plane wave using the 2-dimensional ESA
for an edge-shaped secondary source distribution consisting of
monopole line sources.
Parameters
----------
omega : float
Angular frequency.
    x0 : (N, 3) array_like
Sequence of secondary source positions.
n : (3,) array_like, optional
Normal vector of synthesized plane wave.
alpha : float, optional
Outer angle of edge.
Nc : int, optional
Number of elements for series expansion of driving function. Estimated
if not given.
c : float, optional
Speed of sound
Returns
-------
d : (N,) numpy.ndarray
Complex weights of secondary sources.
selection : (N,) numpy.ndarray
Boolean array containing ``True`` or ``False`` depending on
whether the corresponding secondary source is "active" or not.
secondary_source_function : callable
A function that can be used to create the sound field of a
single secondary source. See `sfs.fd.synthesize()`.
Notes
-----
One leg of the secondary sources has to be located on the x-axis (y0=0),
the edge at the origin.
Derived from :cite:`Spors2016`
"""
x0 = _np.asarray(x0)
n = _util.normalize_vector(n)
k = _util.wavenumber(omega, c)
phi_s = _np.arctan2(n[1], n[0]) + _np.pi
L = x0.shape[0]
r = _np.linalg.norm(x0, axis=1)
phi = _np.arctan2(x0[:, 1], x0[:, 0])
phi = _np.where(phi < 0, phi + 2 * _np.pi, phi)
if Nc is None:
Nc = _np.ceil(2 * k * _np.max(r) * alpha / _np.pi)
epsilon = _np.ones(Nc) # weights for series expansion
epsilon[0] = 2
d = _np.zeros(L, dtype=complex)
for m in _np.arange(Nc):
nu = m * _np.pi / alpha
d = d + 1/epsilon[m] * _np.exp(1j*nu*_np.pi/2) * _np.sin(nu*phi_s) \
* _np.cos(nu*phi) * nu/r * _jn(nu, k*r)
d[phi > 0] = -d[phi > 0]
selection = _util.source_selection_all(len(x0))
return 4*_np.pi/alpha * d, selection, _secondary_source_line(omega, c)
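# Rough usage sketch (editor's addition; the secondary source layout below is
# an assumption -- any edge-shaped contour with one leg on the x-axis and the
# edge at the origin works, per the notes above):
#
#   import numpy as np
#   import sfs
#   omega = 2 * np.pi * 1000  # 1 kHz
#   x0 = np.concatenate((
#       [(x, 0, 0) for x in np.linspace(0.01, 1, 100)],    # leg on the x-axis
#       [(0, -y, 0) for y in np.linspace(0.01, 1, 100)]))  # leg at phi = alpha
#   d, selection, ssf = sfs.fd.esa.plane_2d_edge(omega, x0, n=[0, 1, 0])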
def plane_2d_edge_dipole_ssd(omega, x0, n=[0, 1, 0], *, alpha=_np.pi*3/2,
Nc=None, c=None):
r"""Driving function for 2-dimensional plane wave with edge dipole ESA.
Driving function for a virtual plane wave using the 2-dimensional ESA
for an edge-shaped secondary source distribution consisting of
dipole line sources.
Parameters
----------
omega : float
Angular frequency.
    x0 : (N, 3) array_like
Sequence of secondary source positions.
n : (3,) array_like, optional
Normal vector of synthesized plane wave.
alpha : float, optional
Outer angle of edge.
Nc : int, optional
Number of elements for series expansion of driving function. Estimated
if not given.
c : float, optional
Speed of sound
Returns
-------
d : (N,) numpy.ndarray
Complex weights of secondary sources.
selection : (N,) numpy.ndarray
Boolean array containing ``True`` or ``False`` depending on
whether the corresponding secondary source is "active" or not.
secondary_source_function : callable
A function that can be used to create the sound field of a
single secondary source. See `sfs.fd.synthesize()`.
Notes
-----
One leg of the secondary sources has to be located on the x-axis (y0=0),
the edge at the origin.
Derived from :cite:`Spors2016`
"""
x0 = _np.asarray(x0)
n = _util.normalize_vector(n)
k = _util.wavenumber(omega, c)
phi_s = _np.arctan2(n[1], n[0]) + _np.pi
L = x0.shape[0]
r = _np.linalg.norm(x0, axis=1)
phi = _np.arctan2(x0[:, 1], x0[:, 0])
phi = _np.where(phi < 0, phi + 2 * _np.pi, phi)
if Nc is None:
Nc = _np.ceil(2 * k * _np.max(r) * alpha / _np.pi)
epsilon = _np.ones(Nc) # weights for series expansion
epsilon[0] = 2
d = _np.zeros(L, dtype=complex)
for m in _np.arange(Nc):
nu = m * _np.pi / alpha
d = d + 1/epsilon[m] * _np.exp(1j*nu*_np.pi/2) * _np.cos(nu*phi_s) \
* _np.cos(nu*phi) * _jn(nu, k*r)
return 4*_np.pi/alpha * d
def line_2d_edge(omega, x0, xs, *, alpha=_np.pi*3/2, Nc=None, c=None):
r"""Driving function for 2-dimensional line source with edge ESA.
Driving function for a virtual line source using the 2-dimensional ESA
for an edge-shaped secondary source distribution consisting of line
sources.
Parameters
----------
omega : float
Angular frequency.
    x0 : (N, 3) array_like
Sequence of secondary source positions.
xs : (3,) array_like
Position of synthesized line source.
alpha : float, optional
Outer angle of edge.
Nc : int, optional
Number of elements for series expansion of driving function. Estimated
if not given.
c : float, optional
Speed of sound
Returns
-------
d : (N,) numpy.ndarray
Complex weights of secondary sources.
selection : (N,) numpy.ndarray
Boolean array containing ``True`` or ``False`` depending on
whether the corresponding secondary source is "active" or not.
secondary_source_function : callable
A function that can be used to create the sound field of a
single secondary source. See `sfs.fd.synthesize()`.
Notes
-----
One leg of the secondary sources has to be located on the x-axis (y0=0),
the edge at the origin.
Derived from :cite:`Spors2016`
"""
x0 = _np.asarray(x0)
k = _util.wavenumber(omega, c)
phi_s = _np.arctan2(xs[1], xs[0])
if phi_s < 0:
phi_s = phi_s + 2 * _np.pi
r_s = _np.linalg.norm(xs)
L = x0.shape[0]
r = _np.linalg.norm(x0, axis=1)
phi = _np.arctan2(x0[:, 1], x0[:, 0])
phi = _np.where(phi < 0, phi + 2 * _np.pi, phi)
if Nc is None:
Nc = _np.ceil(2 * k * _np.max(r) * alpha / _np.pi)
epsilon = _np.ones(Nc) # weights for series expansion
epsilon[0] = 2
d = _np.zeros(L, dtype=complex)
idx = (r <= r_s)
for m in _np.arange(Nc):
nu = m * _np.pi / alpha
f = 1/epsilon[m] * _np.sin(nu*phi_s) * _np.cos(nu*phi) * nu/r
d[idx] = d[idx] + f[idx] * _jn(nu, k*r[idx]) * _hankel2(nu, k*r_s)
d[~idx] = d[~idx] + f[~idx] * _jn(nu, k*r_s) * _hankel2(nu, k*r[~idx])
d[phi > 0] = -d[phi > 0]
selection = _util.source_selection_all(len(x0))
return -1j*_np.pi/alpha * d, selection, _secondary_source_line(omega, c)
def line_2d_edge_dipole_ssd(omega, x0, xs, *, alpha=_np.pi*3/2, Nc=None,
c=None):
r"""Driving function for 2-dimensional line source with edge dipole ESA.
Driving function for a virtual line source using the 2-dimensional ESA
for an edge-shaped secondary source distribution consisting of dipole line
sources.
Parameters
----------
omega : float
Angular frequency.
x0 : (N, 3) array_like
Sequence of secondary source positions.
xs : (3,) array_like
Position of synthesized line source.
alpha : float, optional
Outer angle of edge.
Nc : int, optional
Number of elements for series expansion of driving function. Estimated
if not given.
c : float, optional
Speed of sound
Returns
-------
d : (N,) numpy.ndarray
Complex weights of secondary sources.
selection : (N,) numpy.ndarray
Boolean array containing ``True`` or ``False`` depending on
whether the corresponding secondary source is "active" or not.
secondary_source_function : callable
A function that can be used to create the sound field of a
single secondary source. See `sfs.fd.synthesize()`.
Notes
-----
One leg of the secondary sources has to be located on the x-axis (y0=0),
the edge at the origin.
Derived from :cite:`Spors2016`
"""
x0 = _np.asarray(x0)
k = _util.wavenumber(omega, c)
phi_s = _np.arctan2(xs[1], xs[0])
if phi_s < 0:
phi_s = phi_s + 2 * _np.pi
r_s = _np.linalg.norm(xs)
L = x0.shape[0]
r = _np.linalg.norm(x0, axis=1)
phi = _np.arctan2(x0[:, 1], x0[:, 0])
phi = _np.where(phi < 0, phi + 2 * _np.pi, phi)
if Nc is None:
Nc = _np.ceil(2 * k * _np.max(r) * alpha / _np.pi)
epsilon = _np.ones(Nc) # weights for series expansion
epsilon[0] = 2
d = _np.zeros(L, dtype=complex)
idx = (r <= r_s)
for m in _np.arange(Nc):
nu = m * _np.pi / alpha
f = 1/epsilon[m] * _np.cos(nu*phi_s) * _np.cos(nu*phi)
d[idx] = d[idx] + f[idx] * _jn(nu, k*r[idx]) * _hankel2(nu, k*r_s)
d[~idx] = d[~idx] + f[~idx] * _jn(nu, k*r_s) * _hankel2(nu, k*r[~idx])
return -1j*_np.pi/alpha * d
def point_25d_edge(omega, x0, xs, *, xref=[2, -2, 0], alpha=_np.pi*3/2,
Nc=None, c=None):
r"""Driving function for 2.5-dimensional point source with edge ESA.
Driving function for a virtual point source using the 2.5-dimensional
ESA for an edge-shaped secondary source distribution consisting of point
sources.
Parameters
----------
omega : float
Angular frequency.
    x0 : (N, 3) array_like
Sequence of secondary source positions.
xs : (3,) array_like
Position of synthesized line source.
xref: (3,) array_like or float
Reference position or reference distance
alpha : float, optional
Outer angle of edge.
Nc : int, optional
Number of elements for series expansion of driving function. Estimated
if not given.
c : float, optional
Speed of sound
Returns
-------
d : (N,) numpy.ndarray
Complex weights of secondary sources.
selection : (N,) numpy.ndarray
Boolean array containing ``True`` or ``False`` depending on
whether the corresponding secondary source is "active" or not.
secondary_source_function : callable
A function that can be used to create the sound field of a
single secondary source. See `sfs.fd.synthesize()`.
Notes
-----
One leg of the secondary sources has to be located on the x-axis (y0=0),
the edge at the origin.
Derived from :cite:`Spors2016`
"""
x0 = _np.asarray(x0)
xs = _np.asarray(xs)
xref = _np.asarray(xref)
if _np.isscalar(xref):
a = _np.linalg.norm(xref) / _np.linalg.norm(xref - xs)
else:
a = _np.linalg.norm(xref - x0, axis=1) / _np.linalg.norm(xref - xs)
d, selection, _ = line_2d_edge(omega, x0, xs, alpha=alpha, Nc=Nc, c=c)
return 1j*_np.sqrt(a) * d, selection, _secondary_source_point(omega, c)
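# Editor's reading (not stated in the docstring above): the 2.5D variant reuses
# the 2D edge solution from line_2d_edge() and rescales it by 1j * sqrt(a),
# where a relates the reference distance to the virtual source distance, so
# that the point-source secondary sources are referenced correctly at xref.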
| sfstoolbox/sfs-python | sfs/fd/esa.py | Python | mit | 11,543 | 0 |
from django.conf.urls.defaults import patterns, include, url
from singlecontrol.views import index, socketio
urlpatterns = patterns('',
url(r'^$', view=index, name='index'),
url(r'^socket\.io', view=socketio, name='socketio'),
)
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns += staticfiles_urlpatterns()
| victorpoluceno/python_kinect_socketio | urls.py | Python | bsd-2-clause | 349 | 0.005731 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import l10n_be_coda
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| diogocs1/comps | web/addons/l10n_be_coda/__init__.py | Python | apache-2.0 | 1,105 | 0.00181 |
try:
import unittest2 as unittest
except ImportError:
import unittest
import rope.base.project
import rope.base.builtins
from rope.base import libutils
from ropetest import testutils
class ObjectInferTest(unittest.TestCase):
def setUp(self):
super(ObjectInferTest, self).setUp()
self.project = testutils.sample_project()
def tearDown(self):
testutils.remove_project(self.project)
super(ObjectInferTest, self).tearDown()
def test_simple_type_inferencing(self):
code = 'class Sample(object):\n pass\na_var = Sample()\n'
scope = libutils.get_string_scope(self.project, code)
sample_class = scope['Sample'].get_object()
a_var = scope['a_var'].get_object()
self.assertEquals(sample_class, a_var.get_type())
def test_simple_type_inferencing_classes_defined_in_holding_scope(self):
code = 'class Sample(object):\n pass\n' \
'def a_func():\n a_var = Sample()\n'
scope = libutils.get_string_scope(self.project, code)
sample_class = scope['Sample'].get_object()
a_var = scope['a_func'].get_object().\
get_scope()['a_var'].get_object()
self.assertEquals(sample_class, a_var.get_type())
def test_simple_type_inferencing_classes_in_class_methods(self):
code = 'class Sample(object):\n pass\n' \
'class Another(object):\n' \
' def a_method():\n a_var = Sample()\n'
scope = libutils.get_string_scope(self.project, code)
sample_class = scope['Sample'].get_object()
another_class = scope['Another'].get_object()
a_var = another_class['a_method'].\
get_object().get_scope()['a_var'].get_object()
self.assertEquals(sample_class, a_var.get_type())
def test_simple_type_inferencing_class_attributes(self):
code = 'class Sample(object):\n pass\n' \
'class Another(object):\n' \
' def __init__(self):\n self.a_var = Sample()\n'
scope = libutils.get_string_scope(self.project, code)
sample_class = scope['Sample'].get_object()
another_class = scope['Another'].get_object()
a_var = another_class['a_var'].get_object()
self.assertEquals(sample_class, a_var.get_type())
def test_simple_type_inferencing_for_in_class_assignments(self):
code = 'class Sample(object):\n pass\n' \
'class Another(object):\n an_attr = Sample()\n'
scope = libutils.get_string_scope(self.project, code)
sample_class = scope['Sample'].get_object()
another_class = scope['Another'].get_object()
an_attr = another_class['an_attr'].get_object()
self.assertEquals(sample_class, an_attr.get_type())
def test_simple_type_inferencing_for_chained_assignments(self):
mod = 'class Sample(object):\n pass\n' \
'copied_sample = Sample'
mod_scope = libutils.get_string_scope(self.project, mod)
sample_class = mod_scope['Sample']
copied_sample = mod_scope['copied_sample']
self.assertEquals(sample_class.get_object(),
copied_sample.get_object())
def test_following_chained_assignments_avoiding_circles(self):
mod = 'class Sample(object):\n pass\n' \
'sample_class = Sample\n' \
'sample_class = sample_class\n'
mod_scope = libutils.get_string_scope(self.project, mod)
sample_class = mod_scope['Sample']
sample_class_var = mod_scope['sample_class']
self.assertEquals(sample_class.get_object(),
sample_class_var.get_object())
def test_function_returned_object_static_type_inference1(self):
src = 'class Sample(object):\n pass\n' \
'def a_func():\n return Sample\n' \
'a_var = a_func()\n'
scope = libutils.get_string_scope(self.project, src)
sample_class = scope['Sample']
a_var = scope['a_var']
self.assertEquals(sample_class.get_object(), a_var.get_object())
def test_function_returned_object_static_type_inference2(self):
src = 'class Sample(object):\n pass\n' \
'def a_func():\n return Sample()\n' \
'a_var = a_func()\n'
scope = libutils.get_string_scope(self.project, src)
sample_class = scope['Sample'].get_object()
a_var = scope['a_var'].get_object()
self.assertEquals(sample_class, a_var.get_type())
def test_recursive_function_returned_object_static_type_inference(self):
src = 'class Sample(object):\n pass\n' \
'def a_func():\n' \
' if True:\n return Sample()\n' \
' else:\n return a_func()\n' \
'a_var = a_func()\n'
scope = libutils.get_string_scope(self.project, src)
sample_class = scope['Sample'].get_object()
a_var = scope['a_var'].get_object()
self.assertEquals(sample_class, a_var.get_type())
def test_func_returned_obj_using_call_spec_func_static_type_infer(self):
src = 'class Sample(object):\n' \
' def __call__(self):\n return Sample\n' \
'sample = Sample()\na_var = sample()'
scope = libutils.get_string_scope(self.project, src)
sample_class = scope['Sample']
a_var = scope['a_var']
self.assertEquals(sample_class.get_object(), a_var.get_object())
def test_list_type_inferencing(self):
src = 'class Sample(object):\n pass\na_var = [Sample()]\n'
scope = libutils.get_string_scope(self.project, src)
sample_class = scope['Sample'].get_object()
a_var = scope['a_var'].get_object()
self.assertNotEquals(sample_class, a_var.get_type())
def test_attributed_object_inference(self):
src = 'class Sample(object):\n' \
' def __init__(self):\n self.a_var = None\n' \
' def set(self):\n self.a_var = Sample()\n'
scope = libutils.get_string_scope(self.project, src)
sample_class = scope['Sample'].get_object()
a_var = sample_class['a_var'].get_object()
self.assertEquals(sample_class, a_var.get_type())
def test_getting_property_attributes(self):
src = 'class A(object):\n pass\n' \
'def f(*args):\n return A()\n' \
'class B(object):\n p = property(f)\n' \
'a_var = B().p\n'
pymod = libutils.get_string_module(self.project, src)
a_class = pymod['A'].get_object()
a_var = pymod['a_var'].get_object()
self.assertEquals(a_class, a_var.get_type())
def test_getting_property_attributes_with_method_getters(self):
src = 'class A(object):\n pass\n' \
'class B(object):\n def p_get(self):\n return A()\n' \
' p = property(p_get)\n' \
'a_var = B().p\n'
pymod = libutils.get_string_module(self.project, src)
a_class = pymod['A'].get_object()
a_var = pymod['a_var'].get_object()
self.assertEquals(a_class, a_var.get_type())
def test_lambda_functions(self):
code = 'class C(object):\n pass\n' \
'l = lambda: C()\na_var = l()'
mod = libutils.get_string_module(self.project, code)
c_class = mod['C'].get_object()
a_var = mod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_mixing_subscript_with_tuple_assigns(self):
code = 'class C(object):\n attr = 0\n' \
'd = {}\nd[0], b = (0, C())\n'
mod = libutils.get_string_module(self.project, code)
c_class = mod['C'].get_object()
a_var = mod['b'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_mixing_ass_attr_with_tuple_assignment(self):
code = 'class C(object):\n attr = 0\n' \
'c = C()\nc.attr, b = (0, C())\n'
mod = libutils.get_string_module(self.project, code)
c_class = mod['C'].get_object()
a_var = mod['b'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_mixing_slice_with_tuple_assigns(self):
mod = libutils.get_string_module(
self.project,
'class C(object):\n attr = 0\n'
'd = [None] * 3\nd[0:2], b = ((0,), C())\n')
c_class = mod['C'].get_object()
a_var = mod['b'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_nested_tuple_assignments(self):
mod = libutils.get_string_module(
self.project,
'class C1(object):\n pass\nclass C2(object):\n pass\n'
'a, (b, c) = (C1(), (C2(), C1()))\n')
c1_class = mod['C1'].get_object()
c2_class = mod['C2'].get_object()
a_var = mod['a'].get_object()
b_var = mod['b'].get_object()
c_var = mod['c'].get_object()
self.assertEquals(c1_class, a_var.get_type())
self.assertEquals(c2_class, b_var.get_type())
self.assertEquals(c1_class, c_var.get_type())
def test_empty_tuples(self):
mod = libutils.get_string_module(
self.project, 't = ()\na, b = t\n')
a = mod['a'].get_object() # noqa
def test_handling_generator_functions(self):
code = 'class C(object):\n pass\n' \
'def f():\n yield C()\n' \
'for c in f():\n a_var = c\n'
mod = libutils.get_string_module(self.project, code)
c_class = mod['C'].get_object()
a_var = mod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_handling_generator_functions_for_strs(self):
mod = testutils.create_module(self.project, 'mod')
mod.write('def f():\n yield ""\n'
'for s in f():\n a_var = s\n')
pymod = self.project.get_pymodule(mod)
a_var = pymod['a_var'].get_object()
self.assertTrue(isinstance(a_var.get_type(), rope.base.builtins.Str))
def test_considering_nones_to_be_unknowns(self):
code = 'class C(object):\n pass\n' \
'a_var = None\na_var = C()\na_var = None\n'
mod = libutils.get_string_module(self.project, code)
c_class = mod['C'].get_object()
a_var = mod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_basic_list_comprehensions(self):
code = 'class C(object):\n pass\n' \
'l = [C() for i in range(1)]\na_var = l[0]\n'
mod = libutils.get_string_module(self.project, code)
c_class = mod['C'].get_object()
a_var = mod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_basic_generator_expressions(self):
code = 'class C(object):\n pass\n' \
'l = (C() for i in range(1))\na_var = list(l)[0]\n'
mod = libutils.get_string_module(self.project, code)
c_class = mod['C'].get_object()
a_var = mod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_list_comprehensions_and_loop_var(self):
code = 'class C(object):\n pass\n' \
'c_objects = [C(), C()]\n' \
'l = [c for c in c_objects]\na_var = l[0]\n'
mod = libutils.get_string_module(self.project, code)
c_class = mod['C'].get_object()
a_var = mod['a_var'].get_object()
self.assertEquals(c_class, a_var.get_type())
def test_list_comprehensions_and_multiple_loop_var(self):
code = 'class C1(object):\n pass\n' \
'class C2(object):\n pass\n' \
'l = [(c1, c2) for c1 in [C1()] for c2 in [C2()]]\n' \
'a, b = l[0]\n'
mod = libutils.get_string_module(self.project, code)
c1_class = mod['C1'].get_object()
c2_class = mod['C2'].get_object()
a_var = mod['a'].get_object()
b_var = mod['b'].get_object()
self.assertEquals(c1_class, a_var.get_type())
self.assertEquals(c2_class, b_var.get_type())
def test_list_comprehensions_and_multiple_iters(self):
mod = libutils.get_string_module(
self.project,
'class C1(object):\n pass\nclass C2(object):\n pass\n'
'l = [(c1, c2) for c1, c2 in [(C1(), C2())]]\n'
'a, b = l[0]\n')
c1_class = mod['C1'].get_object()
c2_class = mod['C2'].get_object()
a_var = mod['a'].get_object()
b_var = mod['b'].get_object()
self.assertEquals(c1_class, a_var.get_type())
self.assertEquals(c2_class, b_var.get_type())
def test_we_know_the_type_of_catched_exceptions(self):
code = 'class MyError(Exception):\n pass\n' \
'try:\n raise MyError()\n' \
'except MyError as e:\n pass\n'
mod = libutils.get_string_module(self.project, code)
my_error = mod['MyError'].get_object()
e_var = mod['e'].get_object()
self.assertEquals(my_error, e_var.get_type())
def test_we_know_the_type_of_catched_multiple_excepts(self):
code = 'class MyError(Exception):\n pass\n' \
'try:\n raise MyError()\n' \
'except (MyError, Exception) as e:\n pass\n'
mod = libutils.get_string_module(self.project, code)
my_error = mod['MyError'].get_object()
e_var = mod['e'].get_object()
self.assertEquals(my_error, e_var.get_type())
def test_using_property_as_decorators(self):
code = 'class A(object):\n pass\n' \
'class B(object):\n' \
' @property\n def f(self):\n return A()\n' \
'b = B()\nvar = b.f\n'
mod = libutils.get_string_module(self.project, code)
var = mod['var'].get_object()
a = mod['A'].get_object()
self.assertEquals(a, var.get_type())
def test_using_property_as_decorators_and_passing_parameter(self):
code = 'class B(object):\n' \
' @property\n def f(self):\n return self\n' \
'b = B()\nvar = b.f\n'
mod = libutils.get_string_module(self.project, code)
var = mod['var'].get_object()
a = mod['B'].get_object()
self.assertEquals(a, var.get_type())
def suite():
result = unittest.TestSuite()
result.addTests(unittest.makeSuite(ObjectInferTest))
return result
if __name__ == '__main__':
unittest.main()
| emacsway/rope | ropetest/objectinfertest.py | Python | gpl-2.0 | 14,587 | 0 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import StringIO
from pytest import raises
from aspen.website import Website
from aspen.http.response import Response
from aspen.exceptions import BadLocation
simple_error_spt = """
[---]
[---] text/plain via stdlib_format
{response.body}
"""
# Tests
# =====
def test_basic():
website = Website()
expected = os.getcwd()
actual = website.www_root
assert actual == expected
def test_normal_response_is_returned(harness):
harness.fs.www.mk(('index.html', "Greetings, program!"))
expected = '\r\n'.join("""\
HTTP/1.1
Content-Type: text/html
Greetings, program!
""".splitlines())
actual = harness.client.GET()._to_http('1.1')
assert actual == expected
def test_fatal_error_response_is_returned(harness):
harness.fs.www.mk(('index.html.spt', "[---]\nraise heck\n[---]\n"))
expected = 500
actual = harness.client.GET(raise_immediately=False).code
assert actual == expected
def test_redirect_has_only_location(harness):
harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
website.redirect('http://elsewhere', code=304)
[---]"""))
actual = harness.client.GET(raise_immediately=False)
assert actual.code == 304
headers = actual.headers
assert headers.keys() == ['Location']
def test_nice_error_response_is_returned(harness):
harness.short_circuit = False
harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
raise Response(500)
[---]"""))
assert harness.client.GET(raise_immediately=False).code == 500
def test_nice_error_response_is_returned_for_404(harness):
harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
raise Response(404)
[---]"""))
assert harness.client.GET(raise_immediately=False).code == 404
def test_response_body_doesnt_expose_traceback_by_default(harness):
harness.fs.project.mk(('error.spt', simple_error_spt))
harness.fs.www.mk(('index.html.spt', """
[---]
raise Exception("Can I haz traceback ?")
[---]"""))
response = harness.client.GET(raise_immediately=False)
assert response.code == 500
assert "Can I haz traceback ?" not in response.body
def test_response_body_exposes_traceback_for_show_tracebacks(harness):
harness.client.website.show_tracebacks = True
harness.fs.project.mk(('error.spt', simple_error_spt))
harness.fs.www.mk(('index.html.spt', """
[---]
raise Exception("Can I haz traceback ?")
[---]"""))
response = harness.client.GET(raise_immediately=False)
assert response.code == 500
assert "Can I haz traceback ?" in response.body
def test_default_error_simplate_doesnt_expose_raised_body_by_default(harness):
harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
raise Response(404, "Um, yeah.")
[---]"""))
response = harness.client.GET(raise_immediately=False)
assert response.code == 404
assert "Um, yeah." not in response.body
def test_default_error_simplate_exposes_raised_body_for_show_tracebacks(harness):
harness.client.website.show_tracebacks = True
harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
raise Response(404, "Um, yeah.")
[---]"""))
response = harness.client.GET(raise_immediately=False)
assert response.code == 404
assert "Um, yeah." in response.body
def test_nice_error_response_can_come_from_user_error_spt(harness):
harness.fs.project.mk(('error.spt', '[---]\n[---] text/plain\nTold ya.'))
harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
raise Response(420)
[---]"""))
response = harness.client.GET(raise_immediately=False)
assert response.code == 420
assert response.body == 'Told ya.'
def test_nice_error_response_can_come_from_user_420_spt(harness):
harness.fs.project.mk(('420.spt', """
[---]
msg = "Enhance your calm." if response.code == 420 else "Ok."
[---] text/plain
%(msg)s"""))
harness.fs.www.mk(('index.html.spt', """
from aspen import Response
[---]
raise Response(420)
[---]"""))
response = harness.client.GET(raise_immediately=False)
assert response.code == 420
assert response.body == 'Enhance your calm.'
def test_delegate_error_to_simplate_respects_original_accept_header(harness):
harness.fs.project.mk(('error.spt', """[---]
[---] text/fake
Lorem ipsum
[---] text/html
<p>Error</p>
[---] text/plain
Error
"""))
harness.fs.www.mk(('foo.spt',"""
from aspen import Response
[---]
raise Response(404)
[---] text/plain
"""))
response = harness.client.GET('/foo', raise_immediately=False, HTTP_ACCEPT=b'text/fake')
assert response.code == 404
assert 'text/fake' in response.headers['Content-Type']
def test_default_error_spt_handles_text_html(harness):
harness.fs.www.mk(('foo.html.spt',"""
from aspen import Response
[---]
raise Response(404)
[---]
"""))
response = harness.client.GET('/foo.html', raise_immediately=False)
assert response.code == 404
assert 'text/html' in response.headers['Content-Type']
def test_default_error_spt_handles_application_json(harness):
harness.fs.www.mk(('foo.json.spt',"""
from aspen import Response
[---]
raise Response(404)
[---]
"""))
response = harness.client.GET('/foo.json', raise_immediately=False)
assert response.code == 404
assert response.headers['Content-Type'] == 'application/json'
assert response.body == '''\
{ "error_code": 404
, "error_message_short": "Not Found"
, "error_message_long": ""
}
'''
def test_default_error_spt_application_json_includes_msg_for_show_tracebacks(harness):
harness.client.website.show_tracebacks = True
harness.fs.www.mk(('foo.json.spt',"""
from aspen import Response
[---]
raise Response(404, "Right, sooo...")
[---]
"""))
response = harness.client.GET('/foo.json', raise_immediately=False)
assert response.code == 404
assert response.headers['Content-Type'] == 'application/json'
assert response.body == '''\
{ "error_code": 404
, "error_message_short": "Not Found"
, "error_message_long": "Right, sooo..."
}
'''
def test_default_error_spt_falls_through_to_text_plain(harness):
harness.fs.www.mk(('foo.xml.spt',"""
from aspen import Response
[---]
raise Response(404)
[---]
"""))
response = harness.client.GET('/foo.xml', raise_immediately=False)
assert response.code == 404
assert response.headers['Content-Type'] == 'text/plain; charset=UTF-8'
assert response.body == "Not found, program!\n\n"
def test_default_error_spt_fall_through_includes_msg_for_show_tracebacks(harness):
harness.client.website.show_tracebacks = True
harness.fs.www.mk(('foo.xml.spt',"""
from aspen import Response
[---]
raise Response(404, "Try again!")
[---]
"""))
response = harness.client.GET('/foo.xml', raise_immediately=False)
assert response.code == 404
assert response.headers['Content-Type'] == 'text/plain; charset=UTF-8'
assert response.body == "Not found, program!\nTry again!\n"
def test_custom_error_spt_without_text_plain_results_in_406(harness):
harness.fs.project.mk(('error.spt', """
[---]
[---] text/html
<h1>Oh no!</h1>
"""))
harness.fs.www.mk(('foo.xml.spt',"""
from aspen import Response
[---]
raise Response(404)
[---]
"""))
response = harness.client.GET('/foo.xml', raise_immediately=False)
assert response.code == 406
def test_custom_error_spt_with_text_plain_works(harness):
harness.fs.project.mk(('error.spt', """
[---]
[---] text/plain
Oh no!
"""))
harness.fs.www.mk(('foo.xml.spt',"""
from aspen import Response
[---]
raise Response(404)
[---]
"""))
response = harness.client.GET('/foo.xml', raise_immediately=False)
assert response.code == 404
assert response.headers['Content-Type'] == 'text/plain; charset=UTF-8'
assert response.body == "Oh no!\n"
def test_autoindex_response_is_404_by_default(harness):
harness.fs.www.mk(('README', "Greetings, program!"))
assert harness.client.GET(raise_immediately=False).code == 404
def test_autoindex_response_is_returned(harness):
harness.fs.www.mk(('README', "Greetings, program!"))
harness.client.website.list_directories = True
body = harness.client.GET(raise_immediately=False).body
assert 'README' in body
def test_resources_can_import_from_project_root(harness):
harness.fs.project.mk(('foo.py', 'bar = "baz"'))
harness.fs.www.mk(('index.html.spt', "from foo import bar\n[---]\n[---]\nGreetings, %(bar)s!"))
assert harness.client.GET(raise_immediately=False).body == "Greetings, baz!"
def test_non_500_response_exceptions_dont_get_folded_to_500(harness):
harness.fs.www.mk(('index.html.spt', '''
from aspen import Response
[---]
raise Response(400)
[---]
'''))
response = harness.client.GET(raise_immediately=False)
assert response.code == 400
def test_errors_show_tracebacks(harness):
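    # Constructing Response with those extra positional args blows up inside the
    # simplate, so the request is answered with a 500 whose traceback (exposed
    # because show_tracebacks is set) includes the offending source line.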
harness.fs.www.mk(('index.html.spt', '''
from aspen import Response
[---]
website.show_tracebacks = 1
raise Response(400,1,2,3,4,5,6,7,8,9)
[---]
'''))
response = harness.client.GET(raise_immediately=False)
assert response.code == 500
assert 'Response(400,1,2,3,4,5,6,7,8,9)' in response.body
class TestMiddleware(object):
"""Simple WSGI middleware for testing."""
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
if environ['PATH_INFO'] == '/middleware':
start_response('200 OK', [('Content-Type', 'text/plain')])
return ['TestMiddleware']
return self.app(environ, start_response)
def build_environ(path):
"""Build WSGI environ for testing."""
return {
'REQUEST_METHOD': b'GET',
'PATH_INFO': path,
'QUERY_STRING': b'',
'SERVER_SOFTWARE': b'build_environ/1.0',
'SERVER_PROTOCOL': b'HTTP/1.1',
'wsgi.input': StringIO.StringIO()
}
def test_call_wraps_wsgi_middleware(client):
client.website.algorithm.default_short_circuit = False
client.website.wsgi_app = TestMiddleware(client.website.wsgi_app)
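    # '/' is not intercepted by the middleware, so it falls through to the website
    # and 404s; '/middleware' is answered directly by TestMiddleware with a 200.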
respond = [False, False]
def start_response_should_404(status, headers):
assert status.lower().strip() == '404 not found'
respond[0] = True
client.website(build_environ('/'), start_response_should_404)
assert respond[0]
def start_response_should_200(status, headers):
assert status.lower().strip() == '200 ok'
respond[1] = True
client.website(build_environ('/middleware'), start_response_should_200)
assert respond[1]
# redirect
def test_redirect_redirects(website):
assert raises(Response, website.redirect, '/').value.code == 302
def test_redirect_code_is_settable(website):
assert raises(Response, website.redirect, '/', code=8675309).value.code == 8675309
def test_redirect_permanent_is_301(website):
assert raises(Response, website.redirect, '/', permanent=True).value.code == 301
def test_redirect_without_website_base_url_is_fine(website):
assert raises(Response, website.redirect, '/').value.headers['Location'] == '/'
def test_redirect_honors_website_base_url(website):
website.base_url = 'foo'
assert raises(Response, website.redirect, '/').value.headers['Location'] == 'foo/'
def test_redirect_can_override_base_url_per_call(website):
website.base_url = 'foo'
assert raises(Response, website.redirect, '/', base_url='b').value.headers['Location'] == 'b/'
def test_redirect_declines_to_construct_bad_urls(website):
raised = raises(BadLocation, website.redirect, '../foo', base_url='http://www.example.com')
assert raised.value.body == 'Bad redirect location: http://www.example.com../foo'
def test_redirect_declines_to_construct_more_bad_urls(website):
raised = raises(BadLocation, website.redirect, 'http://www.example.org/foo',
base_url='http://www.example.com')
assert raised.value.body == 'Bad redirect location: '\
'http://www.example.comhttp://www.example.org/foo'
def test_redirect_will_construct_a_good_absolute_url(website):
response = raises(Response, website.redirect, '/foo', base_url='http://www.example.com').value
assert response.headers['Location'] == 'http://www.example.com/foo'
def test_redirect_will_allow_a_relative_path(website):
response = raises(Response, website.redirect, '../foo', base_url='').value
assert response.headers['Location'] == '../foo'
def test_redirect_will_allow_an_absolute_url(website):
response = raises(Response, website.redirect, 'http://www.example.org/foo', base_url='').value
assert response.headers['Location'] == 'http://www.example.org/foo'
def test_redirect_can_use_given_response(website):
response = Response(65, 'Greetings, program!', {'Location': 'A Town'})
response = raises(Response, website.redirect, '/flah', response=response).value
assert response.code == 302 # gets clobbered
assert response.headers['Location'] == '/flah' # gets clobbered
assert response.body == 'Greetings, program!' # not clobbered
# canonicalize_base_url
def test_canonicalize_base_url_canonicalizes_base_url(harness):
harness.fs.www.mk(('index.html', 'Greetings, program!'))
harness.client.hydrate_website(base_url='http://example.com')
response = harness.client.GxT()
assert response.code == 302
assert response.headers['Location'] == 'http://example.com/'
def test_canonicalize_base_url_includes_path_and_qs_for_GET(harness):
harness.fs.www.mk(('index.html', 'Greetings, program!'))
harness.client.hydrate_website(base_url='http://example.com')
response = harness.client.GxT('/foo/bar?baz=buz')
assert response.code == 302
assert response.headers['Location'] == 'http://example.com/foo/bar?baz=buz'
def test_canonicalize_base_url_redirects_to_homepage_for_POST(harness):
harness.fs.www.mk(('index.html', 'Greetings, program!'))
harness.client.hydrate_website(base_url='http://example.com')
response = harness.client.PxST('/foo/bar?baz=buz')
assert response.code == 302
assert response.headers['Location'] == 'http://example.com/'
def test_canonicalize_base_url_allows_good_base_url(harness):
harness.fs.www.mk(('index.html', 'Greetings, program!'))
harness.client.hydrate_website(base_url='http://localhost')
response = harness.client.GET()
assert response.code == 200
assert response.body == 'Greetings, program!'
def test_canonicalize_base_url_is_noop_without_base_url(harness):
harness.fs.www.mk(('index.html', 'Greetings, program!'))
harness.client.hydrate_website()
response = harness.client.GET()
assert response.code == 200
assert response.body == 'Greetings, program!'
| jaraco/aspen | tests/test_website.py | Python | mit | 14,886 | 0.00477 |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
import os
import docker
from flexmock import flexmock
import requests
from atomic_reactor.constants import DOCKER_SOCKET_PATH
from atomic_reactor.util import ImageName
from tests.constants import COMMAND, IMPORTED_IMAGE_ID
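# Keep a reference to the real os.path.exists so the mocked version can fall back to it.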
old_ope = os.path.exists
mock_containers = \
[{'Created': 1430292310,
'Image': 'fedora',
'Names': ['/goofy_mayer'],
'Command': '/bin/bash',
'Id': 'f8ee920b2db5e802da2583a13a4edbf0523ca5fff6b6d6454c1fd6db5f38014d',
'Status': 'Up 2 seconds'},
{'Created': 1430293290,
'Image': 'busybox:latest',
'Names': ['/boring_mestorf'],
'Id': '105026325ff668ccf4dc2bcf4f009ea35f2c6a933a778993e6fad3c50173aaab',
'Command': COMMAND}]
mock_image = \
{'Created': 1414577076,
'Id': '3ab9a7ed8a169ab89b09fb3e12a14a390d3c662703b65b4541c0c7bde0ee97eb',
'ParentId': 'a79ad4dac406fcf85b9c7315fe08de5b620c1f7a12f45c8185c843f4b4a49c4e',
'RepoTags': ['buildroot-fedora:latest'],
'Size': 0,
'VirtualSize': 856564160}
mock_images = None
mock_logs = 'uid=0(root) gid=0(root) groups=10(wheel)'
mock_build_logs = \
[{"stream": "Step 0 : FROM fedora:latest"},
{"status": "Pulling from fedora", "id": "latest"},
{"status": "Digest: sha256:c63476a082b960f6264e59ef0ff93a9169eac8daf59e24805e0382afdcc9082f"}, # noqa
{"status": "Status: Image is up to date for fedora:latest"},
{"stream": "Step 1 : RUN uname -a && env"},
{"stream": " ---> Running in 3600c91d1c40"},
{"stream": "Removing intermediate container 3600c91d1c40"},
{"stream": "Successfully built 1793c2380436"}]
mock_build_logs_failed = mock_build_logs + \
[{"errorDetail": {"code": 2, "message":
"The command &{[/bin/sh -c ls -lha /a/b/c]} returned a non-zero code: 2"},
"error": "The command &{[/bin/sh -c ls -lha /a/b/c]} returned a non-zero code: 2"}] # noqa
mock_pull_logs = \
[{"stream": "Trying to pull repository localhost:5000/busybox ..."},
{"status": "Pulling image (latest) from localhost:5000/busybox", "progressDetail": {}, "id": "8c2e06607696"}, # noqa
{"status": "Download complete", "progressDetail": {}, "id": "8c2e06607696"},
{"status": "Status: Image is up to date for localhost:5000/busybox:latest"}]
mock_pull_logs_failed = \
[{"errorDetail": {"message": "Error: image ***:latest not found"}, "error": "Error: image ***:latest not found"}] # noqa
mock_push_logs = \
[{"status": "The push refers to a repository [localhost:5000/busybox] (len: 1)"},
{"status": "Image already exists", "progressDetail": {}, "id": "17583c7dd0da"},
{"status": "Image already exists", "progressDetail": {}, "id": "d1592a710ac3"},
{"status": "latest: digest: sha256:afe8a267153784d570bfea7d22699c612a61f984e2b9a93135660bb85a3113cf size: 2735"}] # noqa
mock_push_logs_failed = \
[{"status": "The push refers to a repository [localhost:5000/busybox] (len: 1)"},
{"status": "Sending image list"},
{"errorDetail": {"message": "Put http://localhost:5000/v1/repositories/busybox/: dial tcp [::1]:5000: getsockopt: connection refused"}, "error": "Put http://localhost:5000/v1/repositories/busybox/: dial tcp [::1]:5000: getsockopt: connection refused"}] # noqa
mock_info = {
'BridgeNfIp6tables': True,
'BridgeNfIptables': True,
'Containers': 18,
'CpuCfsPeriod': True,
'CpuCfsQuota': True,
'Debug': False,
'DockerRootDir': '/var/lib/docker',
'Driver': 'overlay',
'DriverStatus': [['Backing Filesystem', 'xfs']],
'ExecutionDriver': 'native-0.2',
'ExperimentalBuild': False,
'HttpProxy': '',
'HttpsProxy': '',
'ID': 'YC7N:MYIE:6SEL:JYLU:SRIG:PCVV:APZD:WTH4:4MGR:N4BG:CT53:ZW2O',
'IPv4Forwarding': True,
'Images': 162,
'IndexServerAddress': 'https://index.docker.io/v1/',
'InitPath': '/usr/libexec/docker/dockerinit',
'InitSha1': 'eb5677df79a87639f30ab5c2c01e5170abc96af2',
'KernelVersion': '4.1.4-200.fc22.x86_64',
'Labels': None,
'LoggingDriver': 'json-file',
'MemTotal': 12285665280,
'MemoryLimit': True,
'NCPU': 4,
'NEventsListener': 0,
'NFd': 15,
'NGoroutines': 31,
'Name': 'the-build-host',
'NoProxy': '',
'OomKillDisable': True,
'OperatingSystem': 'Fedora 24 (Rawhide) (containerized)',
'RegistryConfig': {'IndexConfigs': {'127.0.0.1:5000': {'Mirrors': [],
'Name': '127.0.0.1:5000',
'Official': False,
'Secure': False},
'172.17.0.1:5000': {'Mirrors': [],
'Name': '172.17.0.1:5000',
'Official': False,
'Secure': False},
'172.17.0.2:5000': {'Mirrors': [],
'Name': '172.17.0.2:5000',
'Official': False,
'Secure': False},
'172.17.0.3:5000': {'Mirrors': [],
'Name': '172.17.0.3:5000',
'Official': False,
'Secure': False},
'docker.io': {'Mirrors': None,
'Name': 'docker.io',
'Official': True,
'Secure': True}
},
'InsecureRegistryCIDRs': ['127.0.0.0/8'], 'Mirrors': None},
'SwapLimit': True,
'SystemTime': '2015-09-15T16:38:50.585211559+02:00'
}
mock_version = {
'ApiVersion': '1.21',
'Arch': 'amd64',
'BuildTime': 'Thu Sep 10 17:53:19 UTC 2015',
'GitCommit': 'af9b534-dirty',
'GoVersion': 'go1.5.1',
'KernelVersion': '4.1.4-200.fc22.x86_64',
'Os': 'linux',
'Version': '1.9.0-dev-fc24'
}
mock_import_image = '{"status": "%s"}' % IMPORTED_IMAGE_ID
mock_inspect_container = {
'Id': 'f8ee920b2db5e802da2583a13a4edbf0523ca5fff6b6d6454c1fd6db5f38014d',
'Mounts': [
{
"Source": "/mnt/tmp",
"Destination": "/tmp",
"Mode": "",
"RW": True,
"Propagation": "rprivate",
"Name": "test"
},
{
"Source": "/mnt/conflict_exception",
"Destination": "/exception",
"Mode": "",
"RW": True,
"Propagation": "rprivate",
"Name": "conflict_exception"
},
{
"Source": "/mnt/real_exception",
"Destination": "/exception",
"Mode": "",
"RW": True,
"Propagation": "rprivate",
"Name": "real_exception"
},
{
"Source": "",
"Destination": "/skip_me",
"Mode": "",
"RW": True,
"Propagation": "rprivate",
"Name": "skip_me"
}
]
}
def _find_image(img, ignore_registry=False):
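    """Return the mocked image dict whose repo tag matches img (optionally
    ignoring the registry part), or None if it is not known."""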
global mock_images
tagged_img = ImageName.parse(img).to_str(explicit_tag=True)
for im in mock_images:
im_name = im['RepoTags'][0]
if im_name == tagged_img:
return im
if ignore_registry:
im_name_wo_reg = ImageName.parse(im_name).to_str(registry=False)
if im_name_wo_reg == tagged_img:
return im
return None
def _docker_exception(code=404, content='not found', exc_class=docker.errors.APIError):
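    """Build an exception of exc_class (docker.errors.APIError by default)
    carrying a mocked HTTP response with the given status code and content."""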
response = flexmock(content=content, status_code=code)
return exc_class(code, response)
def _mock_pull(repo, tag='latest', **kwargs):
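    """Simulate docker pull: images named 'library-only' outside the 'library'
    namespace fail; anything else is recorded in mock_images and the canned
    success logs are returned."""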
im = ImageName.parse(repo)
if im.repo == 'library-only' and im.namespace != 'library':
return iter(mock_pull_logs_failed)
repotag = '%s:%s' % (repo, tag)
if _find_image(repotag) is None:
new_image = mock_image.copy()
new_image['RepoTags'] = [repotag]
mock_images.append(new_image)
return iter(mock_pull_logs)
def _mock_remove_image(img, **kwargs):
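    """Simulate docker rmi: drop the image from mock_images, or raise if it is unknown."""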
i = _find_image(img)
if i is not None:
mock_images.remove(i)
return None
raise _docker_exception()
def _mock_inspect(img, **kwargs):
    # real 'docker inspect busybox' returns info even if there's only localhost:5000/busybox
i = _find_image(img, ignore_registry=True)
if i is not None:
return i
raise _docker_exception()
def _mock_tag(src_img, dest_repo, dest_tag='latest', **kwargs):
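    """Simulate docker tag: require the source image to exist and record the
    destination tag in mock_images if it is not already there."""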
i = _find_image(src_img)
if i is None:
raise _docker_exception()
dst_img = "%s:%s" % (dest_repo, dest_tag)
i = _find_image(dst_img)
if i is None:
new_image = mock_image.copy()
new_image['RepoTags'] = [dst_img]
mock_images.append(new_image)
return True
def _mock_generator_raises():
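    """A build-log generator that raises immediately, used to simulate a
    streamed build whose log generator fails."""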
raise RuntimeError("build generator failure")
yield {}
def mock_docker(build_should_fail=False,
inspect_should_fail=False,
wait_should_fail=False,
provided_image_repotags=None,
should_raise_error={},
remember_images=False,
push_should_fail=False,
build_should_fail_generator=False):
"""
    Mock all used docker.APIClient methods.
    :param build_should_fail: True == build() log will contain an error
    :param build_should_fail_generator: True == the build() log generator will raise while being iterated
    :param inspect_should_fail: True == inspect_image() will raise docker.errors.NotFound
    :param wait_should_fail: True == wait() will return 1 instead of 0
    :param provided_image_repotags: images() will contain the provided image
    :param should_raise_error: methods (with args) to raise docker.errors.APIError
    :param remember_images: keep track of available image tags
    :param push_should_fail: True == push() log will contain an error
"""
if provided_image_repotags:
mock_image['RepoTags'] = provided_image_repotags
push_result = mock_push_logs if not push_should_fail else mock_push_logs_failed
if build_should_fail:
if build_should_fail_generator:
build_result = _mock_generator_raises()
else:
build_result = iter(mock_build_logs_failed)
else:
build_result = iter(mock_build_logs)
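    # Older docker-py releases expose the client as docker.Client; alias it to
    # APIClient so the flexmock calls below work either way.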
if not hasattr(docker, 'APIClient'):
setattr(docker, 'APIClient', docker.Client)
flexmock(docker.APIClient, build=lambda **kwargs: build_result)
flexmock(docker.APIClient, commit=lambda cid, **kwargs: mock_containers[0])
flexmock(docker.APIClient, containers=lambda **kwargs: mock_containers)
flexmock(docker.APIClient, create_container=lambda img, **kwargs: mock_containers[0])
flexmock(docker.APIClient, images=lambda **kwargs: [mock_image])
def mock_inspect_image(image_id):
if inspect_should_fail:
raise _docker_exception(exc_class=docker.errors.NotFound)
else:
return mock_image
flexmock(docker.APIClient, inspect_image=mock_inspect_image)
flexmock(docker.APIClient, inspect_container=lambda im_id: mock_inspect_container)
flexmock(docker.APIClient, logs=lambda cid, **kwargs: iter([mock_logs]) if kwargs.get('stream')
else mock_logs)
flexmock(docker.APIClient, pull=lambda img, **kwargs: iter(mock_pull_logs))
flexmock(docker.APIClient, push=lambda iid, **kwargs: iter(push_result))
flexmock(docker.APIClient, remove_container=lambda cid, **kwargs: None)
flexmock(docker.APIClient, remove_image=lambda iid, **kwargs: None)
flexmock(docker.APIClient, start=lambda cid, **kwargs: None)
flexmock(docker.APIClient, tag=lambda img, rep, **kwargs: True)
flexmock(docker.APIClient, wait=lambda cid: 1 if wait_should_fail else 0)
flexmock(docker.APIClient, version=lambda **kwargs: mock_version)
flexmock(docker.APIClient, info=lambda **kwargs: mock_info)
flexmock(docker.APIClient, import_image_from_data=lambda url: mock_import_image)
flexmock(docker.APIClient, import_image_from_stream=lambda url: mock_import_image)
class GetImageResult(object):
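        """A minimal file-like result for get_image(): wraps this test file and
        delegates attribute access to the underlying file object."""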
data = ''
def __init__(self):
self.fp = open(__file__, 'rb')
def __getattr__(self, attr):
            return getattr(self.fp, attr)
def __enter__(self):
return self.fp
def __exit__(self, tp, val, tb):
self.fp.close()
flexmock(docker.APIClient, get_image=lambda img, **kwargs: GetImageResult())
flexmock(os.path, exists=lambda p: True if p == DOCKER_SOCKET_PATH else old_ope(p))
def remove_volume(volume_name):
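        """Simulate volume removal: names containing 'exception' raise APIError
        with a CONFLICT or NOT_FOUND response; anything else succeeds."""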
if 'exception' in volume_name:
if volume_name == 'conflict_exception':
response = flexmock(content="abc", status_code=requests.codes.CONFLICT)
else:
response = flexmock(content="abc", status_code=requests.codes.NOT_FOUND)
raise docker.errors.APIError("failed to remove volume %s" % volume_name, response)
return None
flexmock(docker.APIClient, remove_volume=lambda iid, **kwargs: remove_volume(iid))
for method, args in should_raise_error.items():
response = flexmock(content="abc", status_code=123)
if args:
(flexmock(docker.APIClient)
.should_receive(method)
.with_args(*args).and_raise(docker.errors.APIError, "xyz",
response))
else:
(flexmock(docker.APIClient)
.should_receive(method)
.and_raise(docker.errors.APIError, "xyz", response))
if remember_images:
global mock_images
mock_images = [mock_image]
flexmock(docker.APIClient, inspect_image=_mock_inspect)
flexmock(docker.APIClient, pull=_mock_pull)
flexmock(docker.APIClient, remove_image=_mock_remove_image)
flexmock(docker.APIClient, tag=_mock_tag)
flexmock(docker.APIClient, _retrieve_server_version=lambda: '1.20')
| jarodwilson/atomic-reactor | tests/docker_mock.py | Python | bsd-3-clause | 14,501 | 0.002 |
from django.db import models
from .common_info import CommonInfo
from django.utils import timezone
from django.urls import reverse
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.core.validators import URLValidator
def validate_nonzero(value):
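    """Reject a value of exactly zero, e.g. for DataSource.estimated_records."""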
if value == 0:
raise ValidationError(
_("Quantity {} is not allowed".format(value)), params={"value": value}
)
class DataSource(CommonInfo):
"""A parent container for DataGroup objects"""
STATE_CHOICES = (
("AT", "Awaiting Triage"),
("IP", "In Progress"),
("CO", "Complete"),
("ST", "Stale"),
)
PRIORITY_CHOICES = (("HI", "High"), ("MD", "Medium"), ("LO", "Low"))
title = models.CharField(max_length=50)
url = models.CharField(max_length=150, blank=True, validators=[URLValidator()])
estimated_records = models.PositiveIntegerField(
default=47,
validators=[validate_nonzero],
help_text="Estimated number of documents that the data source will eventually contain.",
)
state = models.CharField(max_length=2, choices=STATE_CHOICES, default="AT")
description = models.TextField(blank=True)
priority = models.CharField(max_length=2, choices=PRIORITY_CHOICES, default="HI")
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("data_source_edit", kwargs={"pk": self.pk})
| HumanExposure/factotum | dashboard/models/data_source.py | Python | gpl-3.0 | 1,472 | 0.002717 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2020 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Valerio Cosentino <valcos@bitergia.com>
#
from perceval.backend import (Backend,
BackendCommand)
class BackendB(Backend):
"""Mocked backend class used for testing"""
def __init__(self, origin, tag=None, archive=None):
super().__init__(origin, tag=tag, archive=archive)
class BackendCommandB(BackendCommand):
"""Mocked backend command class used for testing"""
BACKEND = BackendB
def __init__(self, *args):
super().__init__(*args)
| grimoirelab/perceval | tests/mocked_package/nested_package/nested_backend_b.py | Python | gpl-3.0 | 1,241 | 0 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from six.moves import StringIO
import glanceclient.exc
import mock
from oslo_config import cfg
from oslo_utils import netutils
import six
import testtools
from nova import context
from nova import exception
from nova.image import glance
from nova import test
CONF = cfg.CONF
NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000"
class tzinfo(datetime.tzinfo):
@staticmethod
def utcoffset(*args, **kwargs):
return datetime.timedelta()
NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo())
class TestConversions(test.NoDBTestCase):
def test_convert_timestamps_to_datetimes(self):
fixture = {'name': None,
'properties': {},
'status': None,
'is_public': None,
'created_at': NOW_GLANCE_FORMAT,
'updated_at': NOW_GLANCE_FORMAT,
'deleted_at': NOW_GLANCE_FORMAT}
result = glance._convert_timestamps_to_datetimes(fixture)
self.assertEqual(result['created_at'], NOW_DATETIME)
self.assertEqual(result['updated_at'], NOW_DATETIME)
self.assertEqual(result['deleted_at'], NOW_DATETIME)
def _test_extracting_missing_attributes(self, include_locations):
# Verify behavior from glance objects that are missing attributes
# TODO(jaypipes): Find a better way of testing this crappy
# glanceclient magic object stuff.
class MyFakeGlanceImage(object):
def __init__(self, metadata):
IMAGE_ATTRIBUTES = ['size', 'owner', 'id', 'created_at',
'updated_at', 'status', 'min_disk',
'min_ram', 'is_public']
raw = dict.fromkeys(IMAGE_ATTRIBUTES)
raw.update(metadata)
self.__dict__['raw'] = raw
def __getattr__(self, key):
try:
return self.__dict__['raw'][key]
except KeyError:
raise AttributeError(key)
def __setattr__(self, key, value):
try:
self.__dict__['raw'][key] = value
except KeyError:
raise AttributeError(key)
metadata = {
'id': 1,
'created_at': NOW_DATETIME,
'updated_at': NOW_DATETIME,
}
image = MyFakeGlanceImage(metadata)
observed = glance._extract_attributes(
image, include_locations=include_locations)
expected = {
'id': 1,
'name': None,
'is_public': None,
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': NOW_DATETIME,
'updated_at': NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'status': None,
'properties': {},
'owner': None
}
if include_locations:
expected['locations'] = None
expected['direct_url'] = None
self.assertEqual(expected, observed)
def test_extracting_missing_attributes_include_locations(self):
self._test_extracting_missing_attributes(include_locations=True)
def test_extracting_missing_attributes_exclude_locations(self):
self._test_extracting_missing_attributes(include_locations=False)
class TestExceptionTranslations(test.NoDBTestCase):
def test_client_forbidden_to_imagenotauthed(self):
in_exc = glanceclient.exc.Forbidden('123')
out_exc = glance._translate_image_exception('123', in_exc)
self.assertIsInstance(out_exc, exception.ImageNotAuthorized)
def test_client_httpforbidden_converts_to_imagenotauthed(self):
in_exc = glanceclient.exc.HTTPForbidden('123')
out_exc = glance._translate_image_exception('123', in_exc)
self.assertIsInstance(out_exc, exception.ImageNotAuthorized)
def test_client_notfound_converts_to_imagenotfound(self):
in_exc = glanceclient.exc.NotFound('123')
out_exc = glance._translate_image_exception('123', in_exc)
self.assertIsInstance(out_exc, exception.ImageNotFound)
def test_client_httpnotfound_converts_to_imagenotfound(self):
in_exc = glanceclient.exc.HTTPNotFound('123')
out_exc = glance._translate_image_exception('123', in_exc)
self.assertIsInstance(out_exc, exception.ImageNotFound)
class TestGlanceSerializer(test.NoDBTestCase):
def test_serialize(self):
metadata = {'name': 'image1',
'is_public': True,
'foo': 'bar',
'properties': {
'prop1': 'propvalue1',
'mappings': [
{'virtual': 'aaa',
'device': 'bbb'},
{'virtual': 'xxx',
'device': 'yyy'}],
'block_device_mapping': [
{'virtual_device': 'fake',
'device_name': '/dev/fake'},
{'virtual_device': 'ephemeral0',
'device_name': '/dev/fake0'}]}}
        # NOTE(tdurakov): Asserting on the serialized string won't work
        # when a random PYTHONHASHSEED is in use. Comparing the
        # serialized/deserialized object with the initial one is enough.
converted = glance._convert_to_string(metadata)
self.assertEqual(glance._convert_from_string(converted), metadata)
class TestGetImageService(test.NoDBTestCase):
@mock.patch.object(glance.GlanceClientWrapper, '__init__',
return_value=None)
def test_get_remote_service_from_id(self, gcwi_mocked):
id_or_uri = '123'
_ignored, image_id = glance.get_remote_image_service(
mock.sentinel.ctx, id_or_uri)
self.assertEqual(id_or_uri, image_id)
gcwi_mocked.assert_called_once_with()
@mock.patch.object(glance.GlanceClientWrapper, '__init__',
return_value=None)
def test_get_remote_service_from_href(self, gcwi_mocked):
id_or_uri = 'http://127.0.0.1/123'
_ignored, image_id = glance.get_remote_image_service(
mock.sentinel.ctx, id_or_uri)
self.assertEqual('123', image_id)
gcwi_mocked.assert_called_once_with(context=mock.sentinel.ctx,
host='127.0.0.1',
port=80,
use_ssl=False)
class TestCreateGlanceClient(test.NoDBTestCase):
@mock.patch('oslo_utils.netutils.is_valid_ipv6')
@mock.patch('glanceclient.Client')
def test_headers_passed_glanceclient(self, init_mock, ipv6_mock):
self.flags(auth_strategy='keystone')
ipv6_mock.return_value = False
auth_token = 'token'
ctx = context.RequestContext('fake', 'fake', auth_token=auth_token)
host = 'host4'
port = 9295
use_ssl = False
expected_endpoint = 'http://host4:9295'
expected_params = {
'identity_headers': {
'X-Auth-Token': 'token',
'X-User-Id': 'fake',
'X-Roles': '',
'X-Tenant-Id': 'fake',
'X-Identity-Status': 'Confirmed'
},
'token': 'token'
}
glance._create_glance_client(ctx, host, port, use_ssl)
init_mock.assert_called_once_with('1', expected_endpoint,
**expected_params)
# Test the version is properly passed to glanceclient.
ipv6_mock.reset_mock()
init_mock.reset_mock()
expected_endpoint = 'http://host4:9295'
expected_params = {
'identity_headers': {
'X-Auth-Token': 'token',
'X-User-Id': 'fake',
'X-Roles': '',
'X-Tenant-Id': 'fake',
'X-Identity-Status': 'Confirmed'
},
'token': 'token'
}
glance._create_glance_client(ctx, host, port, use_ssl, version=2)
init_mock.assert_called_once_with('2', expected_endpoint,
**expected_params)
# Test that non-keystone auth strategy doesn't bother to pass
# glanceclient all the Keystone-related headers.
ipv6_mock.reset_mock()
init_mock.reset_mock()
self.flags(auth_strategy='non-keystone')
expected_endpoint = 'http://host4:9295'
expected_params = {
}
glance._create_glance_client(ctx, host, port, use_ssl)
init_mock.assert_called_once_with('1', expected_endpoint,
**expected_params)
# Test that the IPv6 bracketization adapts the endpoint properly.
ipv6_mock.reset_mock()
init_mock.reset_mock()
ipv6_mock.return_value = True
expected_endpoint = 'http://[host4]:9295'
expected_params = {
}
glance._create_glance_client(ctx, host, port, use_ssl)
init_mock.assert_called_once_with('1', expected_endpoint,
**expected_params)
class TestGlanceClientWrapper(test.NoDBTestCase):
@mock.patch('time.sleep')
@mock.patch('nova.image.glance._create_glance_client')
def test_static_client_without_retries(self, create_client_mock,
sleep_mock):
client_mock = mock.MagicMock()
images_mock = mock.MagicMock()
images_mock.get.side_effect = glanceclient.exc.ServiceUnavailable
type(client_mock).images = mock.PropertyMock(return_value=images_mock)
create_client_mock.return_value = client_mock
self.flags(num_retries=0, group='glance')
ctx = context.RequestContext('fake', 'fake')
host = 'host4'
port = 9295
use_ssl = False
client = glance.GlanceClientWrapper(context=ctx, host=host, port=port,
use_ssl=use_ssl)
create_client_mock.assert_called_once_with(ctx, host, port, use_ssl, 1)
self.assertRaises(exception.GlanceConnectionFailed,
client.call, ctx, 1, 'get', 'meow')
self.assertFalse(sleep_mock.called)
@mock.patch('nova.image.glance.LOG')
@mock.patch('time.sleep')
@mock.patch('nova.image.glance._create_glance_client')
def test_static_client_with_retries_negative(self, create_client_mock,
sleep_mock, mock_log):
client_mock = mock.Mock(spec=glanceclient.Client)
images_mock = mock.Mock()
images_mock.get.side_effect = glanceclient.exc.ServiceUnavailable
client_mock.images = images_mock
create_client_mock.return_value = client_mock
self.flags(num_retries=-1, group='glance')
ctx = context.RequestContext('fake', 'fake')
host = 'host4'
port = 9295
use_ssl = False
client = glance.GlanceClientWrapper(context=ctx, host=host, port=port,
use_ssl=use_ssl)
create_client_mock.assert_called_once_with(ctx, host, port, use_ssl, 1)
self.assertRaises(exception.GlanceConnectionFailed,
client.call, ctx, 1, 'get', 'meow')
self.assertTrue(mock_log.warning.called)
msg = mock_log.warning.call_args_list[0]
self.assertIn('Treating negative config value', msg[0][0])
self.assertFalse(sleep_mock.called)
@mock.patch('time.sleep')
@mock.patch('nova.image.glance._create_glance_client')
def test_static_client_with_retries(self, create_client_mock,
sleep_mock):
self.flags(num_retries=1, group='glance')
client_mock = mock.MagicMock()
images_mock = mock.MagicMock()
images_mock.get.side_effect = [
glanceclient.exc.ServiceUnavailable,
None
]
type(client_mock).images = mock.PropertyMock(return_value=images_mock)
create_client_mock.return_value = client_mock
ctx = context.RequestContext('fake', 'fake')
host = 'host4'
port = 9295
use_ssl = False
client = glance.GlanceClientWrapper(context=ctx,
host=host, port=port, use_ssl=use_ssl)
client.call(ctx, 1, 'get', 'meow')
sleep_mock.assert_called_once_with(1)
@mock.patch('random.shuffle')
@mock.patch('time.sleep')
@mock.patch('nova.image.glance._create_glance_client')
def test_default_client_without_retries(self, create_client_mock,
sleep_mock, shuffle_mock):
api_servers = [
'host1:9292',
'https://host2:9293',
'http://host3:9294'
]
client_mock = mock.MagicMock()
images_mock = mock.MagicMock()
images_mock.get.side_effect = glanceclient.exc.ServiceUnavailable
type(client_mock).images = mock.PropertyMock(return_value=images_mock)
create_client_mock.return_value = client_mock
shuffle_mock.return_value = api_servers
self.flags(num_retries=0, group='glance')
self.flags(api_servers=api_servers, group='glance')
# Here we are testing the behaviour that calling client.call() twice
# when there are no retries will cycle through the api_servers and not
# sleep (which would be an indication of a retry)
ctx = context.RequestContext('fake', 'fake')
client = glance.GlanceClientWrapper()
self.assertRaises(exception.GlanceConnectionFailed,
client.call, ctx, 1, 'get', 'meow')
self.assertFalse(sleep_mock.called)
self.assertRaises(exception.GlanceConnectionFailed,
client.call, ctx, 1, 'get', 'meow')
self.assertFalse(sleep_mock.called)
create_client_mock.assert_has_calls(
[
mock.call(ctx, 'host1', 9292, False, 1),
mock.call(ctx, 'host2', 9293, True, 1),
]
)
@mock.patch('random.shuffle')
@mock.patch('time.sleep')
@mock.patch('nova.image.glance._create_glance_client')
def test_default_client_with_retries(self, create_client_mock,
sleep_mock, shuffle_mock):
api_servers = [
'host1:9292',
'https://host2:9293',
'http://host3:9294'
]
client_mock = mock.MagicMock()
images_mock = mock.MagicMock()
images_mock.get.side_effect = [
glanceclient.exc.ServiceUnavailable,
None
]
type(client_mock).images = mock.PropertyMock(return_value=images_mock)
create_client_mock.return_value = client_mock
self.flags(num_retries=1, group='glance')
self.flags(api_servers=api_servers, group='glance')
ctx = context.RequestContext('fake', 'fake')
# And here we're testing that if num_retries is not 0, then we attempt
# to retry the same connection action against the next client.
client = glance.GlanceClientWrapper()
client.call(ctx, 1, 'get', 'meow')
create_client_mock.assert_has_calls(
[
mock.call(ctx, 'host1', 9292, False, 1),
mock.call(ctx, 'host2', 9293, True, 1),
]
)
sleep_mock.assert_called_once_with(1)
@mock.patch('oslo_service.sslutils.is_enabled')
@mock.patch('glanceclient.Client')
def test_create_glance_client_with_ssl(self, client_mock,
ssl_enable_mock):
self.flags(ca_file='foo.cert', cert_file='bar.cert',
key_file='wut.key', group='ssl')
ctxt = mock.sentinel.ctx
glance._create_glance_client(ctxt, 'host4', 9295, use_ssl=True)
client_mock.assert_called_once_with(
'1', 'https://host4:9295', insecure=False, ssl_compression=False,
cert_file='bar.cert', key_file='wut.key', cacert='foo.cert')
@mock.patch.object(glanceclient.common.http.HTTPClient, 'get')
def test_determine_curr_major_version(self, http_client_mock):
result = ("http://host1:9292/v2/", {'versions': [
{'status': 'CURRENT', 'id': 'v2.3'},
{'status': 'SUPPORTED', 'id': 'v1.0'}]})
http_client_mock.return_value = result
maj_ver = glance._determine_curr_major_version('http://host1:9292')
self.assertEqual(2, maj_ver)
@mock.patch.object(glanceclient.common.http.HTTPClient, 'get')
def test_determine_curr_major_version_invalid(self, http_client_mock):
result = ("http://host1:9292/v2/", "Invalid String")
http_client_mock.return_value = result
curr_major_version = glance._determine_curr_major_version('abc')
self.assertIsNone(curr_major_version)
@mock.patch.object(glanceclient.common.http.HTTPClient, 'get')
def test_determine_curr_major_version_unsupported(self, http_client_mock):
result = ("http://host1:9292/v2/", {'versions': [
{'status': 'CURRENT', 'id': 'v666.0'},
{'status': 'SUPPORTED', 'id': 'v1.0'}]})
http_client_mock.return_value = result
maj_ver = glance._determine_curr_major_version('http://host1:9292')
self.assertIsNone(maj_ver)
class TestDownloadNoDirectUri(test.NoDBTestCase):
"""Tests the download method of the GlanceImageService when the
default of not allowing direct URI transfers is set.
"""
@mock.patch.object(six.moves.builtins, 'open')
@mock.patch('nova.image.glance.GlanceImageService.show')
def test_download_no_data_no_dest_path(self, show_mock, open_mock):
client = mock.MagicMock()
client.call.return_value = mock.sentinel.image_chunks
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
res = service.download(ctx, mock.sentinel.image_id)
self.assertFalse(show_mock.called)
self.assertFalse(open_mock.called)
client.call.assert_called_once_with(ctx, 1, 'data',
mock.sentinel.image_id)
self.assertEqual(mock.sentinel.image_chunks, res)
@mock.patch.object(six.moves.builtins, 'open')
@mock.patch('nova.image.glance.GlanceImageService.show')
def test_download_data_no_dest_path(self, show_mock, open_mock):
client = mock.MagicMock()
client.call.return_value = [1, 2, 3]
ctx = mock.sentinel.ctx
data = mock.MagicMock()
service = glance.GlanceImageService(client)
res = service.download(ctx, mock.sentinel.image_id, data=data)
self.assertFalse(show_mock.called)
self.assertFalse(open_mock.called)
client.call.assert_called_once_with(ctx, 1, 'data',
mock.sentinel.image_id)
self.assertIsNone(res)
data.write.assert_has_calls(
[
mock.call(1),
mock.call(2),
mock.call(3)
]
)
self.assertFalse(data.close.called)
@mock.patch.object(six.moves.builtins, 'open')
@mock.patch('nova.image.glance.GlanceImageService.show')
def test_download_no_data_dest_path(self, show_mock, open_mock):
client = mock.MagicMock()
client.call.return_value = [1, 2, 3]
ctx = mock.sentinel.ctx
writer = mock.MagicMock()
open_mock.return_value = writer
service = glance.GlanceImageService(client)
res = service.download(ctx, mock.sentinel.image_id,
dst_path=mock.sentinel.dst_path)
self.assertFalse(show_mock.called)
client.call.assert_called_once_with(ctx, 1, 'data',
mock.sentinel.image_id)
open_mock.assert_called_once_with(mock.sentinel.dst_path, 'wb')
self.assertIsNone(res)
writer.write.assert_has_calls(
[
mock.call(1),
mock.call(2),
mock.call(3)
]
)
writer.close.assert_called_once_with()
@mock.patch.object(six.moves.builtins, 'open')
@mock.patch('nova.image.glance.GlanceImageService.show')
def test_download_data_dest_path(self, show_mock, open_mock):
# NOTE(jaypipes): This really shouldn't be allowed, but because of the
# horrible design of the download() method in GlanceImageService, no
# error is raised, and the dst_path is ignored...
        # TODO(jaypipes): Fix the aforementioned horrible design of
# the download() method.
client = mock.MagicMock()
client.call.return_value = [1, 2, 3]
ctx = mock.sentinel.ctx
data = mock.MagicMock()
service = glance.GlanceImageService(client)
res = service.download(ctx, mock.sentinel.image_id, data=data)
self.assertFalse(show_mock.called)
self.assertFalse(open_mock.called)
client.call.assert_called_once_with(ctx, 1, 'data',
mock.sentinel.image_id)
self.assertIsNone(res)
data.write.assert_has_calls(
[
mock.call(1),
mock.call(2),
mock.call(3)
]
)
self.assertFalse(data.close.called)
@mock.patch.object(six.moves.builtins, 'open')
@mock.patch('nova.image.glance.GlanceImageService.show')
def test_download_data_dest_path_write_fails(self, show_mock, open_mock):
client = mock.MagicMock()
client.call.return_value = [1, 2, 3]
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
# NOTE(mikal): data is a file like object, which in our case always
# raises an exception when we attempt to write to the file.
class FakeDiskException(Exception):
pass
class Exceptionator(StringIO):
def write(self, _):
raise FakeDiskException('Disk full!')
self.assertRaises(FakeDiskException, service.download, ctx,
mock.sentinel.image_id, data=Exceptionator())
@mock.patch('nova.image.glance.GlanceImageService._get_transfer_module')
@mock.patch('nova.image.glance.GlanceImageService.show')
def test_download_direct_file_uri(self, show_mock, get_tran_mock):
self.flags(allowed_direct_url_schemes=['file'], group='glance')
show_mock.return_value = {
'locations': [
{
'url': 'file:///files/image',
'metadata': mock.sentinel.loc_meta
}
]
}
tran_mod = mock.MagicMock()
get_tran_mock.return_value = tran_mod
client = mock.MagicMock()
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
res = service.download(ctx, mock.sentinel.image_id,
dst_path=mock.sentinel.dst_path)
self.assertIsNone(res)
self.assertFalse(client.call.called)
show_mock.assert_called_once_with(ctx,
mock.sentinel.image_id,
include_locations=True)
get_tran_mock.assert_called_once_with('file')
tran_mod.download.assert_called_once_with(ctx, mock.ANY,
mock.sentinel.dst_path,
mock.sentinel.loc_meta)
@mock.patch.object(six.moves.builtins, 'open')
@mock.patch('nova.image.glance.GlanceImageService._get_transfer_module')
@mock.patch('nova.image.glance.GlanceImageService.show')
def test_download_direct_exception_fallback(self, show_mock,
get_tran_mock,
open_mock):
# Test that we fall back to downloading to the dst_path
# if the download method of the transfer module raised
# an exception.
self.flags(allowed_direct_url_schemes=['file'], group='glance')
show_mock.return_value = {
'locations': [
{
'url': 'file:///files/image',
'metadata': mock.sentinel.loc_meta
}
]
}
tran_mod = mock.MagicMock()
tran_mod.download.side_effect = Exception
get_tran_mock.return_value = tran_mod
client = mock.MagicMock()
client.call.return_value = [1, 2, 3]
ctx = mock.sentinel.ctx
writer = mock.MagicMock()
open_mock.return_value = writer
service = glance.GlanceImageService(client)
res = service.download(ctx, mock.sentinel.image_id,
dst_path=mock.sentinel.dst_path)
self.assertIsNone(res)
show_mock.assert_called_once_with(ctx,
mock.sentinel.image_id,
include_locations=True)
get_tran_mock.assert_called_once_with('file')
tran_mod.download.assert_called_once_with(ctx, mock.ANY,
mock.sentinel.dst_path,
mock.sentinel.loc_meta)
client.call.assert_called_once_with(ctx, 1, 'data',
mock.sentinel.image_id)
# NOTE(jaypipes): log messages call open() in part of the
# download path, so here, we just check that the last open()
# call was done for the dst_path file descriptor.
open_mock.assert_called_with(mock.sentinel.dst_path, 'wb')
self.assertIsNone(res)
writer.write.assert_has_calls(
[
mock.call(1),
mock.call(2),
mock.call(3)
]
)
@mock.patch.object(six.moves.builtins, 'open')
@mock.patch('nova.image.glance.GlanceImageService._get_transfer_module')
@mock.patch('nova.image.glance.GlanceImageService.show')
def test_download_direct_no_mod_fallback(self, show_mock,
get_tran_mock,
open_mock):
        # Test that we fall back to downloading to the dst_path
        # if no appropriate transfer module is found.
self.flags(allowed_direct_url_schemes=['funky'], group='glance')
show_mock.return_value = {
'locations': [
{
'url': 'file:///files/image',
'metadata': mock.sentinel.loc_meta
}
]
}
get_tran_mock.return_value = None
client = mock.MagicMock()
client.call.return_value = [1, 2, 3]
ctx = mock.sentinel.ctx
writer = mock.MagicMock()
open_mock.return_value = writer
service = glance.GlanceImageService(client)
res = service.download(ctx, mock.sentinel.image_id,
dst_path=mock.sentinel.dst_path)
self.assertIsNone(res)
show_mock.assert_called_once_with(ctx,
mock.sentinel.image_id,
include_locations=True)
get_tran_mock.assert_called_once_with('file')
client.call.assert_called_once_with(ctx, 1, 'data',
mock.sentinel.image_id)
# NOTE(jaypipes): log messages call open() in part of the
# download path, so here, we just check that the last open()
# call was done for the dst_path file descriptor.
open_mock.assert_called_with(mock.sentinel.dst_path, 'wb')
self.assertIsNone(res)
writer.write.assert_has_calls(
[
mock.call(1),
mock.call(2),
mock.call(3)
]
)
writer.close.assert_called_once_with()
class TestIsImageAvailable(test.NoDBTestCase):
"""Tests the internal _is_image_available function."""
class ImageSpecV2(object):
visibility = None
properties = None
class ImageSpecV1(object):
is_public = None
properties = None
def test_auth_token_override(self):
ctx = mock.MagicMock(auth_token=True)
img = mock.MagicMock()
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
self.assertFalse(img.called)
def test_admin_override(self):
ctx = mock.MagicMock(auth_token=False, is_admin=True)
img = mock.MagicMock()
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
self.assertFalse(img.called)
def test_v2_visibility(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False)
# We emulate warlock validation that throws an AttributeError
# if you try to call is_public on an image model returned by
# a call to V2 image.get(). Here, the ImageSpecV2 does not have
# an is_public attribute and MagicMock will throw an AttributeError.
img = mock.MagicMock(visibility='PUBLIC',
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
def test_v1_is_public(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False)
img = mock.MagicMock(is_public=True,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
def test_project_is_owner(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
project_id='123')
props = {
'owner_id': '123'
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
ctx.reset_mock()
img = mock.MagicMock(is_public=False, properties=props,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
def test_project_context_matches_project_prop(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
project_id='123')
props = {
'project_id': '123'
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
ctx.reset_mock()
img = mock.MagicMock(is_public=False, properties=props,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
def test_no_user_in_props(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
project_id='123')
props = {
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertFalse(res)
ctx.reset_mock()
img = mock.MagicMock(is_public=False, properties=props,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertFalse(res)
def test_user_matches_context(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
user_id='123')
props = {
'user_id': '123'
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
ctx.reset_mock()
img = mock.MagicMock(is_public=False, properties=props,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
class TestShow(test.NoDBTestCase):
"""Tests the show method of the GlanceImageService."""
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_show_success(self, is_avail_mock, trans_from_mock):
is_avail_mock.return_value = True
trans_from_mock.return_value = {'mock': mock.sentinel.trans_from}
client = mock.MagicMock()
client.call.return_value = {}
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
info = service.show(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(ctx, 1, 'get',
mock.sentinel.image_id)
is_avail_mock.assert_called_once_with(ctx, {})
trans_from_mock.assert_called_once_with({}, include_locations=False)
self.assertIn('mock', info)
self.assertEqual(mock.sentinel.trans_from, info['mock'])
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_show_not_available(self, is_avail_mock, trans_from_mock):
is_avail_mock.return_value = False
client = mock.MagicMock()
client.call.return_value = mock.sentinel.images_0
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
with testtools.ExpectedException(exception.ImageNotFound):
service.show(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(ctx, 1, 'get',
mock.sentinel.image_id)
is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
self.assertFalse(trans_from_mock.called)
@mock.patch('nova.image.glance._reraise_translated_image_exception')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_show_client_failure(self, is_avail_mock, trans_from_mock,
reraise_mock):
raised = exception.ImageNotAuthorized(image_id=123)
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.Forbidden
ctx = mock.sentinel.ctx
reraise_mock.side_effect = raised
service = glance.GlanceImageService(client)
with testtools.ExpectedException(exception.ImageNotAuthorized):
service.show(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(ctx, 1, 'get',
mock.sentinel.image_id)
self.assertFalse(is_avail_mock.called)
self.assertFalse(trans_from_mock.called)
reraise_mock.assert_called_once_with(mock.sentinel.image_id)
@mock.patch('nova.image.glance._is_image_available')
def test_show_queued_image_without_some_attrs(self, is_avail_mock):
is_avail_mock.return_value = True
client = mock.MagicMock()
# fake image cls without disk_format, container_format, name attributes
class fake_image_cls(dict):
id = 'b31aa5dd-f07a-4748-8f15-398346887584'
deleted = False
protected = False
min_disk = 0
created_at = '2014-05-20T08:16:48'
size = 0
status = 'queued'
is_public = False
min_ram = 0
owner = '980ec4870033453ead65c0470a78b8a8'
updated_at = '2014-05-20T08:16:48'
glance_image = fake_image_cls()
client.call.return_value = glance_image
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
image_info = service.show(ctx, glance_image.id)
client.call.assert_called_once_with(ctx, 1, 'get',
glance_image.id)
NOVA_IMAGE_ATTRIBUTES = set(['size', 'disk_format', 'owner',
'container_format', 'status', 'id',
'name', 'created_at', 'updated_at',
'deleted', 'deleted_at', 'checksum',
'min_disk', 'min_ram', 'is_public',
'properties'])
self.assertEqual(NOVA_IMAGE_ATTRIBUTES, set(image_info.keys()))
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_include_locations_success(self, avail_mock, trans_from_mock):
locations = [mock.sentinel.loc1]
avail_mock.return_value = True
trans_from_mock.return_value = {'locations': locations}
client = mock.Mock()
client.call.return_value = mock.sentinel.image
service = glance.GlanceImageService(client)
ctx = mock.sentinel.ctx
image_id = mock.sentinel.image_id
info = service.show(ctx, image_id, include_locations=True)
client.call.assert_called_once_with(ctx, 2, 'get', image_id)
avail_mock.assert_called_once_with(ctx, mock.sentinel.image)
trans_from_mock.assert_called_once_with(mock.sentinel.image,
include_locations=True)
self.assertIn('locations', info)
self.assertEqual(locations, info['locations'])
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_include_direct_uri_success(self, avail_mock, trans_from_mock):
locations = [mock.sentinel.loc1]
avail_mock.return_value = True
trans_from_mock.return_value = {'locations': locations,
'direct_uri': mock.sentinel.duri}
client = mock.Mock()
client.call.return_value = mock.sentinel.image
service = glance.GlanceImageService(client)
ctx = mock.sentinel.ctx
image_id = mock.sentinel.image_id
info = service.show(ctx, image_id, include_locations=True)
client.call.assert_called_once_with(ctx, 2, 'get', image_id)
expected = locations
expected.append({'url': mock.sentinel.duri, 'metadata': {}})
self.assertIn('locations', info)
self.assertEqual(expected, info['locations'])
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_do_not_show_deleted_images(self, is_avail_mock, trans_from_mock):
class fake_image_cls(dict):
id = 'b31aa5dd-f07a-4748-8f15-398346887584'
deleted = True
glance_image = fake_image_cls()
client = mock.MagicMock()
client.call.return_value = glance_image
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
with testtools.ExpectedException(exception.ImageNotFound):
service.show(ctx, glance_image.id, show_deleted=False)
client.call.assert_called_once_with(ctx, 1, 'get',
glance_image.id)
self.assertFalse(is_avail_mock.called)
self.assertFalse(trans_from_mock.called)
class TestDetail(test.NoDBTestCase):
"""Tests the detail method of the GlanceImageService."""
@mock.patch('nova.image.glance._extract_query_params')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_success_available(self, is_avail_mock, trans_from_mock,
ext_query_mock):
params = {}
is_avail_mock.return_value = True
ext_query_mock.return_value = params
trans_from_mock.return_value = mock.sentinel.trans_from
client = mock.MagicMock()
client.call.return_value = [mock.sentinel.images_0]
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
images = service.detail(ctx, **params)
client.call.assert_called_once_with(ctx, 1, 'list')
is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
trans_from_mock.assert_called_once_with(mock.sentinel.images_0)
self.assertEqual([mock.sentinel.trans_from], images)
@mock.patch('nova.image.glance._extract_query_params')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_success_unavailable(self, is_avail_mock, trans_from_mock,
ext_query_mock):
params = {}
is_avail_mock.return_value = False
ext_query_mock.return_value = params
trans_from_mock.return_value = mock.sentinel.trans_from
client = mock.MagicMock()
client.call.return_value = [mock.sentinel.images_0]
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
images = service.detail(ctx, **params)
client.call.assert_called_once_with(ctx, 1, 'list')
is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
self.assertFalse(trans_from_mock.called)
self.assertEqual([], images)
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_params_passed(self, is_avail_mock, _trans_from_mock):
client = mock.MagicMock()
client.call.return_value = [mock.sentinel.images_0]
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
service.detail(ctx, page_size=5, limit=10)
expected_filters = {
'is_public': 'none'
}
client.call.assert_called_once_with(ctx, 1, 'list',
filters=expected_filters,
page_size=5,
limit=10)
@mock.patch('nova.image.glance._reraise_translated_exception')
@mock.patch('nova.image.glance._extract_query_params')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_client_failure(self, is_avail_mock, trans_from_mock,
ext_query_mock, reraise_mock):
params = {}
ext_query_mock.return_value = params
raised = exception.Forbidden()
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.Forbidden
ctx = mock.sentinel.ctx
reraise_mock.side_effect = raised
service = glance.GlanceImageService(client)
with testtools.ExpectedException(exception.Forbidden):
service.detail(ctx, **params)
client.call.assert_called_once_with(ctx, 1, 'list')
self.assertFalse(is_avail_mock.called)
self.assertFalse(trans_from_mock.called)
reraise_mock.assert_called_once_with()
class TestCreate(test.NoDBTestCase):
"""Tests the create method of the GlanceImageService."""
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_create_success(self, trans_to_mock, trans_from_mock):
translated = {
'image_id': mock.sentinel.image_id
}
trans_to_mock.return_value = translated
trans_from_mock.return_value = mock.sentinel.trans_from
image_mock = mock.MagicMock(spec=dict)
client = mock.MagicMock()
client.call.return_value = mock.sentinel.image_meta
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
image_meta = service.create(ctx, image_mock)
trans_to_mock.assert_called_once_with(image_mock)
client.call.assert_called_once_with(ctx, 1, 'create',
image_id=mock.sentinel.image_id)
trans_from_mock.assert_called_once_with(mock.sentinel.image_meta)
self.assertEqual(mock.sentinel.trans_from, image_meta)
        # Now verify that if we supply image data to the call, the client is
        # also called with the data kwarg
client.reset_mock()
service.create(ctx, image_mock, data=mock.sentinel.data)
client.call.assert_called_once_with(ctx, 1, 'create',
image_id=mock.sentinel.image_id,
data=mock.sentinel.data)
@mock.patch('nova.image.glance._reraise_translated_exception')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_create_client_failure(self, trans_to_mock, trans_from_mock,
reraise_mock):
translated = {}
trans_to_mock.return_value = translated
image_mock = mock.MagicMock(spec=dict)
raised = exception.Invalid()
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.BadRequest
ctx = mock.sentinel.ctx
reraise_mock.side_effect = raised
service = glance.GlanceImageService(client)
self.assertRaises(exception.Invalid, service.create, ctx, image_mock)
trans_to_mock.assert_called_once_with(image_mock)
self.assertFalse(trans_from_mock.called)
class TestUpdate(test.NoDBTestCase):
"""Tests the update method of the GlanceImageService."""
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_update_success(self, trans_to_mock, trans_from_mock):
translated = {
'id': mock.sentinel.image_id,
'name': mock.sentinel.name
}
trans_to_mock.return_value = translated
trans_from_mock.return_value = mock.sentinel.trans_from
image_mock = mock.MagicMock(spec=dict)
client = mock.MagicMock()
client.call.return_value = mock.sentinel.image_meta
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
image_meta = service.update(ctx, mock.sentinel.image_id, image_mock)
trans_to_mock.assert_called_once_with(image_mock)
# Verify that the 'id' element has been removed as a kwarg to
# the call to glanceclient's update (since the image ID is
# supplied as a positional arg), and that the
# purge_props default is True.
client.call.assert_called_once_with(ctx, 1, 'update',
mock.sentinel.image_id,
name=mock.sentinel.name,
purge_props=True)
trans_from_mock.assert_called_once_with(mock.sentinel.image_meta)
self.assertEqual(mock.sentinel.trans_from, image_meta)
        # Now verify that if we supply image data to the call, the client is
        # also called with the data kwarg
client.reset_mock()
service.update(ctx, mock.sentinel.image_id,
image_mock, data=mock.sentinel.data)
client.call.assert_called_once_with(ctx, 1, 'update',
mock.sentinel.image_id,
name=mock.sentinel.name,
purge_props=True,
data=mock.sentinel.data)
@mock.patch('nova.image.glance._reraise_translated_image_exception')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_update_client_failure(self, trans_to_mock, trans_from_mock,
reraise_mock):
translated = {
'name': mock.sentinel.name
}
trans_to_mock.return_value = translated
trans_from_mock.return_value = mock.sentinel.trans_from
image_mock = mock.MagicMock(spec=dict)
raised = exception.ImageNotAuthorized(image_id=123)
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.Forbidden
ctx = mock.sentinel.ctx
reraise_mock.side_effect = raised
service = glance.GlanceImageService(client)
self.assertRaises(exception.ImageNotAuthorized,
service.update, ctx, mock.sentinel.image_id,
image_mock)
client.call.assert_called_once_with(ctx, 1, 'update',
mock.sentinel.image_id,
purge_props=True,
name=mock.sentinel.name)
self.assertFalse(trans_from_mock.called)
reraise_mock.assert_called_once_with(mock.sentinel.image_id)
class TestDelete(test.NoDBTestCase):
"""Tests the delete method of the GlanceImageService."""
def test_delete_success(self):
client = mock.MagicMock()
client.call.return_value = True
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
service.delete(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(ctx, 1, 'delete',
mock.sentinel.image_id)
def test_delete_client_failure(self):
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.NotFound
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
self.assertRaises(exception.ImageNotFound, service.delete, ctx,
mock.sentinel.image_id)
class TestGlanceUrl(test.NoDBTestCase):
def test_generate_glance_http_url(self):
generated_url = glance.generate_glance_url()
glance_host = CONF.glance.host
        # IPv6 addresses need to be wrapped in '[]'
if netutils.is_valid_ipv6(glance_host):
glance_host = '[%s]' % glance_host
http_url = "http://%s:%d" % (glance_host, CONF.glance.port)
self.assertEqual(generated_url, http_url)
def test_generate_glance_https_url(self):
self.flags(protocol="https", group='glance')
generated_url = glance.generate_glance_url()
glance_host = CONF.glance.host
        # IPv6 addresses need to be wrapped in '[]'
if netutils.is_valid_ipv6(glance_host):
glance_host = '[%s]' % glance_host
https_url = "https://%s:%d" % (glance_host, CONF.glance.port)
self.assertEqual(generated_url, https_url)
class TestGlanceApiServers(test.NoDBTestCase):
def test_get_ipv4_api_servers(self):
self.flags(api_servers=['10.0.1.1:9292',
'https://10.0.0.1:9293',
'http://10.0.2.2:9294'], group='glance')
glance_host = ['10.0.1.1', '10.0.0.1',
'10.0.2.2']
api_servers = glance.get_api_servers()
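        # get_api_servers() appears to cycle endlessly over the configured
        # servers, so only the first few entries are checked before breaking.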
i = 0
for server in api_servers:
i += 1
self.assertIn(server[0], glance_host)
if i > 2:
break
def test_get_ipv6_api_servers(self):
self.flags(api_servers=['[2001:2012:1:f101::1]:9292',
'https://[2010:2013:1:f122::1]:9293',
'http://[2001:2011:1:f111::1]:9294'],
group='glance')
glance_host = ['2001:2012:1:f101::1', '2010:2013:1:f122::1',
'2001:2011:1:f111::1']
api_servers = glance.get_api_servers()
i = 0
for server in api_servers:
i += 1
self.assertIn(server[0], glance_host)
if i > 2:
break
class TestUpdateGlanceImage(test.NoDBTestCase):
@mock.patch('nova.image.glance.GlanceImageService')
def test_start(self, mock_glance_image_service):
consumer = glance.UpdateGlanceImage(
'context', 'id', 'metadata', 'stream')
with mock.patch.object(glance, 'get_remote_image_service') as a_mock:
a_mock.return_value = (mock_glance_image_service, 'image_id')
consumer.start()
mock_glance_image_service.update.assert_called_with(
'context', 'image_id', 'metadata', 'stream', purge_props=False)
| apporc/nova | nova/tests/unit/image/test_glance.py | Python | apache-2.0 | 53,589 | 0.000187 |
# import os
import numpy as np
import matplotlib.pyplot as plt
import random
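# Visualization helper for cluster-dp output: loads a 2-D point set and the
# cluster labels computed for it, then scatter-plots every point in a colour
# derived from its cluster id. The commented-out lines suggest the clustering
# binary (cluster_dp_GPU) was originally launched from this script as well.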
# cluster_dp_GPU = "./cluster_dp_GPU"
# os.system(cluster_dp_GPU)
input_file = raw_input("enter the input file name:")
result_file = raw_input("enter the result file name:")
location = []
# input_lable = []
for line in open("dataset/"+input_file, "r"):
# line = line.replace('-','')
items = line.strip("\n").split(",")
# input_lable.append(int(items.pop()))
tmp = []
for item in items:
tmp.append(float(item))
location.append(tmp)
location = np.array(location)
# input_lable = np.array(input_lable)
length = len(location)
print "data input complete"
result_lable = []
for line in open(result_file, "r"):
items = line.strip("\n").split(",")
result_lable.append(int(items.pop()))
print "result read complete"
R = range(256)
random.shuffle(R)
random.shuffle(R)
R = np.array(R) / 255.0
G = range(256)
random.shuffle(G)
random.shuffle(G)
G = np.array(G) / 255.0
B = range(256)
random.shuffle(B)
random.shuffle(B)
B = np.array(B) / 255.0
colors = []
for i in range(256):
colors.append((R[i], G[i], B[i]))
# plt.figure()
# for i in range(length):
# index = input_lable[i]
# plt.plot(location[i][0], location[i][1], color=(R[index*5%255],G[index*15%255],B[index*20%255]), marker='.')
# plt.xlabel('x'), plt.ylabel('y')
# plt.show()
# plt.close()
plt.figure()
for i in range(length):
index = result_lable[i]
plt.plot(location[i][0], location[i][1], color=(R[index*5%255],G[index*15%255],B[index*20%255]), marker='.')
plt.xlabel('x'), plt.ylabel('y')
plt.show()
| GeKeShi/cluster-dp | cluster_image.py | Python | gpl-3.0 | 1,589 | 0.004405 |
#!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import random
import cmath
from gnuradio import gr, gr_unittest, digital, blocks
class test_clock_recovery_mm(gr_unittest.TestCase):
def setUp(self):
random.seed(0)
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test01(self):
# Test complex/complex version
omega = 2
gain_omega = 0.001
mu = 0.5
gain_mu = 0.01
omega_rel_lim = 0.001
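        # M&M clock recovery parameters: omega is the nominal samples per
        # symbol, mu the initial fractional sample offset, gain_omega/gain_mu
        # the loop gains, and omega_rel_lim bounds how far omega may drift.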
self.test = digital.clock_recovery_mm_cc(omega, gain_omega,
mu, gain_mu,
omega_rel_lim)
data = 100*[complex(1, 1),]
self.src = blocks.vector_source_c(data, False)
self.snk = blocks.vector_sink_c()
self.tb.connect(self.src, self.test, self.snk)
self.tb.run()
expected_result = 100*[complex(0.99972, 0.99972)] # doesn't quite get to 1.0
dst_data = self.snk.data()
# Only compare last Ncmp samples
Ncmp = 30
len_e = len(expected_result)
len_d = len(dst_data)
expected_result = expected_result[len_e - Ncmp:]
dst_data = dst_data[len_d - Ncmp:]
#print expected_result
#print dst_data
self.assertComplexTuplesAlmostEqual(expected_result, dst_data, 5)
def test02(self):
# Test float/float version
omega = 2
gain_omega = 0.01
mu = 0.5
gain_mu = 0.01
omega_rel_lim = 0.001
self.test = digital.clock_recovery_mm_ff(omega, gain_omega,
mu, gain_mu,
omega_rel_lim)
data = 100*[1,]
self.src = blocks.vector_source_f(data, False)
self.snk = blocks.vector_sink_f()
self.tb.connect(self.src, self.test, self.snk)
self.tb.run()
expected_result = 100*[0.9997, ] # doesn't quite get to 1.0
dst_data = self.snk.data()
# Only compare last Ncmp samples
Ncmp = 30
len_e = len(expected_result)
len_d = len(dst_data)
expected_result = expected_result[len_e - Ncmp:]
dst_data = dst_data[len_d - Ncmp:]
#print expected_result
#print dst_data
self.assertFloatTuplesAlmostEqual(expected_result, dst_data, 4)
def test03(self):
# Test complex/complex version with varying input
omega = 2
gain_omega = 0.01
mu = 0.25
gain_mu = 0.01
omega_rel_lim = 0.0001
self.test = digital.clock_recovery_mm_cc(omega, gain_omega,
mu, gain_mu,
omega_rel_lim)
data = 1000*[complex(1, 1), complex(1, 1), complex(-1, -1), complex(-1, -1)]
self.src = blocks.vector_source_c(data, False)
self.snk = blocks.vector_sink_c()
self.tb.connect(self.src, self.test, self.snk)
self.tb.run()
expected_result = 1000*[complex(-1.2, -1.2), complex(1.2, 1.2)]
dst_data = self.snk.data()
# Only compare last Ncmp samples
Ncmp = 100
len_e = len(expected_result)
len_d = len(dst_data)
expected_result = expected_result[len_e - Ncmp:]
dst_data = dst_data[len_d - Ncmp:]
#print expected_result
#print dst_data
self.assertComplexTuplesAlmostEqual(expected_result, dst_data, 1)
def test04(self):
# Test float/float version
omega = 2
gain_omega = 0.01
mu = 0.25
gain_mu = 0.1
omega_rel_lim = 0.001
self.test = digital.clock_recovery_mm_ff(omega, gain_omega,
mu, gain_mu,
omega_rel_lim)
data = 1000*[1, 1, -1, -1]
self.src = blocks.vector_source_f(data, False)
self.snk = blocks.vector_sink_f()
self.tb.connect(self.src, self.test, self.snk)
self.tb.run()
expected_result = 1000*[-1.2, 1.2]
dst_data = self.snk.data()
# Only compare last Ncmp samples
Ncmp = 100
len_e = len(expected_result)
len_d = len(dst_data)
expected_result = expected_result[len_e - Ncmp:]
dst_data = dst_data[len_d - Ncmp:]
#print expected_result
#print dst_data
self.assertFloatTuplesAlmostEqual(expected_result, dst_data, 1)
if __name__ == '__main__':
gr_unittest.run(test_clock_recovery_mm, "test_clock_recovery_mm.xml")
| skoslowski/gnuradio | gr-digital/python/digital/qa_clock_recovery_mm.py | Python | gpl-3.0 | 4,755 | 0.003785 |
#----------------------------------------------------------------------
# Copyright (c) 2008 Board of Trustees, Princeton University
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
#
# SFA API faults
#
#
### $Id$
### $URL$
import xmlrpclib
class SfaFault(xmlrpclib.Fault):
def __init__(self, faultCode, faultString, extra = None):
if extra:
faultString += ": " + str(extra)
xmlrpclib.Fault.__init__(self, faultCode, faultString)
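# The subclasses below map specific error conditions onto XML-RPC fault codes.
# Assumed usage sketch (hypothetical call): server-side code raises e.g.
#     raise SfaInvalidArgument(name='slice_hrn')
# which callers receive as xmlrpclib.Fault(102, "Invalid slice_hrn value").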
class SfaInvalidAPIMethod(SfaFault):
def __init__(self, method, interface = None, extra = None):
faultString = "Invalid method " + method
if interface:
faultString += " for interface " + interface
SfaFault.__init__(self, 100, faultString, extra)
class SfaInvalidArgumentCount(SfaFault):
    def __init__(self, got, min, max = None, extra = None):
        if max is None:
            max = min
        if min != max:
expected = "%d-%d" % (min, max)
else:
expected = "%d" % min
faultString = "Expected %s arguments, got %d" % \
(expected, got)
SfaFault.__init__(self, 101, faultString, extra)
class SfaInvalidArgument(SfaFault):
def __init__(self, extra = None, name = None):
if name is not None:
faultString = "Invalid %s value" % name
else:
faultString = "Invalid argument"
SfaFault.__init__(self, 102, faultString, extra)
class SfaAuthenticationFailure(SfaFault):
def __init__(self, extra = None):
faultString = "Failed to authenticate call"
SfaFault.__init__(self, 103, faultString, extra)
class SfaDBError(SfaFault):
def __init__(self, extra = None):
faultString = "Database error"
SfaFault.__init__(self, 106, faultString, extra)
class SfaPermissionDenied(SfaFault):
def __init__(self, extra = None):
faultString = "Permission denied"
SfaFault.__init__(self, 108, faultString, extra)
class SfaNotImplemented(SfaFault):
def __init__(self, interface=None, extra = None):
faultString = "Not implemented"
if interface:
faultString += " at interface " + interface
SfaFault.__init__(self, 109, faultString, extra)
class SfaAPIError(SfaFault):
def __init__(self, extra = None):
faultString = "Internal API error"
SfaFault.__init__(self, 111, faultString, extra)
class MalformedHrnException(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Malformed HRN: %(value)s" % locals()
SfaFault.__init__(self, 102, faultString, extra)
def __str__(self):
return repr(self.value)
class TreeException(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Tree Exception: %(value)s, " % locals()
SfaFault.__init__(self, 111, faultString, extra)
def __str__(self):
return repr(self.value)
class NonExistingRecord(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Non exsiting record %(value)s, " % locals()
SfaFault.__init__(self, 111, faultString, extra)
def __str__(self):
return repr(self.value)
class ExistingRecord(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Existing record: %(value)s, " % locals()
SfaFault.__init__(self, 111, faultString, extra)
def __str__(self):
return repr(self.value)
class NonexistingCredType(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Non existing record: %(value)s, " % locals()
SfaFault.__init__(self, 111, faultString, extra)
def __str__(self):
return repr(self.value)
class NonexistingFile(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Non existing file: %(value)s, " % locals()
SfaFault.__init__(self, 111, faultString, extra)
def __str__(self):
return repr(self.value)
class InvalidRPCParams(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Invalid RPC Params: %(value)s, " % locals()
SfaFault.__init__(self, 102, faultString, extra)
def __str__(self):
return repr(self.value)
# SMBAKER exceptions follow
class ConnectionKeyGIDMismatch(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Connection Key GID mismatch: %(value)s" % locals()
SfaFault.__init__(self, 102, faultString, extra)
def __str__(self):
return repr(self.value)
class MissingCallerGID(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Missing Caller GID: %(value)s" % locals()
SfaFault.__init__(self, 102, faultString, extra)
def __str__(self):
return repr(self.value)
class RecordNotFound(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Record not found: %(value)s" % locals()
SfaFault.__init__(self, 102, faultString, extra)
def __str__(self):
return repr(self.value)
class UnknownSfaType(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Unknown SFA Type: %(value)s" % locals()
SfaFault.__init__(self, 102, faultString, extra)
def __str__(self):
return repr(self.value)
class MissingAuthority(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Missing authority: %(value)s" % locals()
SfaFault.__init__(self, 102, faultString, extra)
def __str__(self):
return repr(self.value)
class PlanetLabRecordDoesNotExist(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "PlanetLab record does not exist : %(value)s" % locals()
SfaFault.__init__(self, 102, faultString, extra)
def __str__(self):
return repr(self.value)
class PermissionError(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Permission error: %(value)s" % locals()
SfaFault.__init__(self, 108, faultString, extra)
def __str__(self):
return repr(self.value)
class InsufficientRights(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Insufficient rights: %(value)s" % locals()
SfaFault.__init__(self, 108, faultString, extra)
def __str__(self):
return repr(self.value)
class MissingDelegateBit(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Missing delegate bit: %(value)s" % locals()
SfaFault.__init__(self, 108, faultString, extra)
def __str__(self):
return repr(self.value)
class ChildRightsNotSubsetOfParent(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Child rights not subset of parent: %(value)s" % locals()
SfaFault.__init__(self, 103, faultString, extra)
def __str__(self):
return repr(self.value)
class CertMissingParent(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Cert missing parent: %(value)s" % locals()
SfaFault.__init__(self, 103, faultString, extra)
def __str__(self):
return repr(self.value)
class CertNotSignedByParent(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Cert not signed by parent: %(value)s" % locals()
SfaFault.__init__(self, 103, faultString, extra)
def __str__(self):
return repr(self.value)
class GidParentHrn(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Cert URN is not an extension of its parent: %(value)s" % locals()
SfaFault.__init__(self, 103, faultString, extra)
def __str__(self):
return repr(self.value)
class GidInvalidParentHrn(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "GID invalid parent hrn: %(value)s" % locals()
SfaFault.__init__(self, 102, faultString, extra)
def __str__(self):
return repr(self.value)
class SliverDoesNotExist(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Sliver does not exist : %(value)s" % locals()
SfaFault.__init__(self, 102, faultString, extra)
def __str__(self):
return repr(self.value)
class BadRequestHash(xmlrpclib.Fault):
def __init__(self, hash = None, extra = None):
faultString = "bad request hash: " + str(hash)
xmlrpclib.Fault.__init__(self, 902, faultString)
class MissingTrustedRoots(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Trusted root directory does not exist: %(value)s" % locals()
SfaFault.__init__(self, 102, faultString, extra)
def __str__(self):
return repr(self.value)
class MissingSfaInfo(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Missing information: %(value)s" % locals()
SfaFault.__init__(self, 102, faultString, extra)
def __str__(self):
return repr(self.value)
class InvalidRSpec(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Invalid RSpec: %(value)s" % locals()
SfaFault.__init__(self, 108, faultString, extra)
def __str__(self):
return repr(self.value)
class AccountNotEnabled(SfaFault):
def __init__(self, extra = None):
faultString = "Account Disabled"
SfaFault.__init__(self, 108, faultString, extra)
    def __str__(self):
        return repr(self.faultString)
class CredentialNotVerifiable(SfaFault):
def __init__(self, value, extra = None):
self.value = value
faultString = "Unable to verify credential: %(value)s, " %locals()
SfaFault.__init__(self, 115, faultString, extra)
def __str__(self):
return repr(self.value)
class CertExpired(SfaFault):
def __init__(self, value, extra=None):
self.value = value
faultString = "%s cert is expired" % value
SfaFault.__init__(self, 102, faultString, extra)
| dana-i2cat/felix | ofam/src/src/ext/sfa/util/faults.py | Python | apache-2.0 | 11,639 | 0.01452 |
import cv2
import numpy
from PIL import Image
import numpy as np
import os
from matplotlib import pyplot as plt
bin_n = 16 # Number of bins
def hog(img):
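    # Plain HOG descriptor: Sobel gradients are converted to magnitude and
    # orientation, orientations are quantized into bin_n (16) bins, and
    # magnitude-weighted histograms are taken over four cells split at row 10
    # and column 10 (apparently copied from the OpenCV digits example), giving
    # a 64-element vector.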
gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
mag, ang = cv2.cartToPolar(gx, gy)
bins = np.int32(bin_n*ang/(2*np.pi)) # quantizing binvalues in (0...16)
bin_cells = bins[:10,:10], bins[10:,:10], bins[:10,10:], bins[10:,10:]
mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
    hist = np.hstack(hists)     # hist is a 64-element vector
return hist
print "OpenCV version : {0}".format(cv2.__version__)
svm_params = dict( kernel_type = cv2.SVM_LINEAR,
svm_type = cv2.SVM_C_SVC,
C=2.67, gamma=5.383 )
def predict_class(path):
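    # Combined prediction: a HOG-feature SVM (hog_svm_data1.dat) labels the
    # image as Vehicle/Animal/Building, then a colour SVM (color_svm_data.dat)
    # scored on a 100x100 crop of the resized image labels its dominant shade;
    # both results are joined into the returned description string.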
pre_out=''
print type(pre_out)
training_set = []
test_set=[]
color_test_set=[]
training_labels=[]
###### SVM training ########################
svm = cv2.SVM()
svm.load('hog_svm_data1.dat')
###### Now testing HOG ########################
img = cv2.imread(path)
res=cv2.resize(img,(400,300))
h=hog(res)
test_set.append(h)
testData = np.float32(test_set)
result = svm.predict(testData)
if result==1:
pre_out+= 'Vehicle'
elif result==2:
pre_out+= 'Animal'
elif result==3:
pre_out+= 'Building'
###### Now testing Color ########################
svm1 = cv2.SVM()
svm1.load('color_svm_data.dat')
img = cv2.imread(path)
res=cv2.resize(img,(400,300))
crop_img = res[50:150, 100:200]
cv2.imwrite("d:/Emmanu/project-data/color-test.jpg", crop_img)
img = Image.open('d:/Emmanu/project-data/color-test.jpg')
img200=img.convert('RGBA')
arr= np.array(img200)
flat_arr= arr.ravel()
color_test_set.append(flat_arr)
testData = np.float32(color_test_set)
result = svm1.predict(testData)
if result==1:
pre_out+=' and '+ 'It has Red Shade'
elif result==2:
pre_out+=' and '+ 'It has Green Shade'
elif result==3:
pre_out+=' and '+ 'It has Blue Shade'
elif result==4:
pre_out+=' and '+ 'It has Black Shade'
elif result==5:
pre_out+=' and '+ 'It has Brown Shade'
elif result==6:
pre_out+=' and '+ 'It has Yellow Shade'
elif result==7:
pre_out+=' and '+ 'It has white Shade'
return pre_out
def predict_shape(path,val):
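    # Shape prediction with the primary HOG SVM; when val == 3 and the primary
    # model answers class 2, the sample is re-scored with a secondary SVM
    # (hog_svm_data2.dat) to refine the result.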
training_set = []
test_set=[]
test_set1=[]
color_test_set=[]
training_labels=[]
result_list=[]
###### SVM training ########################
svm = cv2.SVM()
svm.load('hog_svm_data1.dat')
svm1 = cv2.SVM()
svm1.load('hog_svm_data2.dat')
###### Now testing HOG ########################
img = cv2.imread(path)
res=cv2.resize(img,(400,300))
h=hog(res)
test_set.append(h)
testData = np.float32(test_set)
pre_shape = svm.predict(testData)
if val==3:
if pre_shape==2:
img = cv2.imread(path)
res=cv2.resize(img,(400,300))
h=hog(res)
test_set1.append(h)
testData = np.float32(test_set1)
pre_shape = svm1.predict(testData)
print 'inside'
return pre_shape
return pre_shape
def predict_color(path):
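    # Colour prediction only: crop a 100x100 patch from the resized image,
    # flatten its RGBA pixels into a feature vector and score it with the
    # colour SVM; returns the numeric colour class.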
training_set = []
test_set=[]
color_test_set=[]
training_labels=[]
result_list=[]
###### Now testing Color ########################
svm1 = cv2.SVM()
svm1.load('color_svm_data.dat')
img = cv2.imread(path)
res=cv2.resize(img,(400,300))
crop_img = res[50:150, 100:200]
cv2.imwrite("d:/Emmanu/project-data/color-test.jpg", crop_img)
img = Image.open('d:/Emmanu/project-data/color-test.jpg')
img200=img.convert('RGBA')
arr= np.array(img200)
flat_arr= arr.ravel()
color_test_set.append(flat_arr)
testData = np.float32(color_test_set)
pre_color = svm1.predict(testData)
return pre_color
def main():
    # predict_shape() requires a val argument; 0 leaves the secondary check off
    print predict_shape('d:/Emmanu/project-data/tes.jpg', 0)
if __name__ == '__main__':main()
| Emmanu12/Image-Classification-using-SVM | predict.py | Python | apache-2.0 | 4,390 | 0.023235 |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os, sys
import pkg_resources
import norikraclient
long_description = open(os.path.join("README.rst")).read()
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Topic :: Software Development :: Testing",
"Topic :: System :: Monitoring",
"Topic :: System :: Systems Administration",
]
requires = ['msgpack-python', 'requests']
deplinks = []
setup(
name='norikra-client-python',
version=norikraclient.__version__,
description='norikra-client-python library',
long_description=long_description,
classifiers=classifiers,
    keywords=['norikra', 'streaming', 'processing'],
author='WAKAYAMA Shirou',
author_email='shirou.faw at gmail.com',
url='http://github.com/shirou/norikra-client-python',
download_url='http://pypi.python.org/pypi/norikra-client-python',
license='MIT License',
packages=find_packages(),
include_package_data=True,
install_requires=requires,
dependency_links=deplinks,
entry_points={
'console_scripts': [
'norikra-client-py = norikraclient.command:main',
],
}
)
| norikra/norikra-client-python | setup.py | Python | mit | 1,304 | 0.001534 |