| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6-947k | stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 |
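The table above is the dataset's schema (one row per source file). As a purely illustrative sketch, assuming the same rows were exported as JSON Lines with these column names, they could be iterated like this:

```python
import json

# Hypothetical loading sketch: the file name and the JSON Lines layout are assumptions,
# not part of the dump itself; only the column names come from the schema above.
with open("python_corpus.jsonl", encoding="utf-8") as handle:
    for line in handle:
        row = json.loads(line)
        # 'text' holds the file contents; the remaining columns are metadata
        print(row["repo_name"], row["path"], row["license"], row["size"], row["score"])
```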
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import os
from collections import OrderedDict
from odoo import api, fields, models
from odoo.addons.base.models.ir_model import MODULE_UNINSTALL_FLAG
from odoo.exceptions import MissingError
from odoo.http import request
_logger = logging.getLogger(__name__)
class IrModuleModule(models.Model):
_name = "ir.module.module"
_description = 'Module'
_inherit = _name
    # The order is important because of dependencies (a page needs a view, a menu needs a page)
_theme_model_names = OrderedDict([
('ir.ui.view', 'theme.ir.ui.view'),
('ir.asset', 'theme.ir.asset'),
('website.page', 'theme.website.page'),
('website.menu', 'theme.website.menu'),
('ir.attachment', 'theme.ir.attachment'),
])
_theme_translated_fields = {
'theme.ir.ui.view': [('theme.ir.ui.view,arch', 'ir.ui.view,arch_db')],
'theme.website.menu': [('theme.website.menu,name', 'website.menu,name')],
}
image_ids = fields.One2many('ir.attachment', 'res_id',
domain=[('res_model', '=', _name), ('mimetype', '=like', 'image/%')],
string='Screenshots', readonly=True)
# for kanban view
is_installed_on_current_website = fields.Boolean(compute='_compute_is_installed_on_current_website')
def _compute_is_installed_on_current_website(self):
"""
        Compute for every theme in ``self`` whether the current website is using it.
This method does not take dependencies into account, because if it did, it would show
the current website as having multiple different themes installed at the same time,
which would be confusing for the user.
"""
for module in self:
module.is_installed_on_current_website = module == self.env['website'].get_current_website().theme_id
def write(self, vals):
"""
Override to correctly upgrade themes after upgrade/installation of modules.
# Install
If this theme wasn't installed before, then load it for every website
for which it is in the stream.
eg. The very first installation of a theme on a website will trigger this.
eg. If a website uses theme_A and we install sale, then theme_A_sale will be
autoinstalled, and in this case we need to load theme_A_sale for the website.
# Upgrade
There are 2 cases to handle when upgrading a theme:
* When clicking on the theme upgrade button on the interface,
in which case there will be an http request made.
-> We want to upgrade the current website only, not any other.
* When upgrading with -u, in which case no request should be set.
-> We want to upgrade every website using this theme.
"""
for module in self:
if module.name.startswith('theme_') and vals.get('state') == 'installed':
_logger.info('Module %s has been loaded as theme template (%s)' % (module.name, module.state))
if module.state in ['to install', 'to upgrade']:
websites_to_update = module._theme_get_stream_website_ids()
if module.state == 'to upgrade' and request:
Website = self.env['website']
current_website = Website.get_current_website()
websites_to_update = current_website if current_website in websites_to_update else Website
for website in websites_to_update:
module._theme_load(website)
return super(IrModuleModule, self).write(vals)
def _get_module_data(self, model_name):
"""
Return every theme template model of type ``model_name`` for every theme in ``self``.
:param model_name: string with the technical name of the model for which to get data.
(the name must be one of the keys present in ``_theme_model_names``)
:return: recordset of theme template models (of type defined by ``model_name``)
"""
theme_model_name = self._theme_model_names[model_name]
IrModelData = self.env['ir.model.data']
records = self.env[theme_model_name]
for module in self:
imd_ids = IrModelData.search([('module', '=', module.name), ('model', '=', theme_model_name)]).mapped('res_id')
records |= self.env[theme_model_name].with_context(active_test=False).browse(imd_ids)
return records
def _update_records(self, model_name, website):
"""
This method:
        - Find and update existing records.
            For each model, overwrite the fields that are defined in the template (except a few
            cases such as ``active``) but keep inherited models so as not to lose customizations.
        - Create new records from templates for those that didn't exist.
        - Remove the models that existed before but are not in the template anymore.
            See ``_theme_cleanup`` for more information.
        There is a special 'while' loop around the 'for' to be able to queue models back at the end
        of the iteration when they have unmet dependencies. Hopefully the dependency will be
        found after all models have been processed; if it is not, an error is raised.
:param model_name: string with the technical name of the model to handle
(the name must be one of the keys present in ``_theme_model_names``)
:param website: ``website`` model for which the records have to be updated
:raise MissingError: if there is a missing dependency.
"""
self.ensure_one()
remaining = self._get_module_data(model_name)
last_len = -1
while (len(remaining) != last_len):
last_len = len(remaining)
for rec in remaining:
rec_data = rec._convert_to_base_model(website)
if not rec_data:
_logger.info('Record queued: %s' % rec.display_name)
continue
find = rec.with_context(active_test=False).mapped('copy_ids').filtered(lambda m: m.website_id == website)
                # special case for attachments:
                # if module B overrides an attachment from dependency A, we update it
if not find and model_name == 'ir.attachment':
find = rec.copy_ids.search([('key', '=', rec.key), ('website_id', '=', website.id)])
if find:
imd = self.env['ir.model.data'].search([('model', '=', find._name), ('res_id', '=', find.id)])
if imd and imd.noupdate:
_logger.info('Noupdate set for %s (%s)' % (find, imd))
else:
# at update, ignore active field
if 'active' in rec_data:
rec_data.pop('active')
if model_name == 'ir.ui.view' and (find.arch_updated or find.arch == rec_data['arch']):
rec_data.pop('arch')
find.update(rec_data)
self._post_copy(rec, find)
else:
new_rec = self.env[model_name].create(rec_data)
self._post_copy(rec, new_rec)
remaining -= rec
if len(remaining):
error = 'Error - Remaining: %s' % remaining.mapped('display_name')
_logger.error(error)
raise MissingError(error)
self._theme_cleanup(model_name, website)
def _post_copy(self, old_rec, new_rec):
self.ensure_one()
translated_fields = self._theme_translated_fields.get(old_rec._name, [])
for (src_field, dst_field) in translated_fields:
self._cr.execute("""INSERT INTO ir_translation (lang, src, name, res_id, state, value, type, module)
SELECT t.lang, t.src, %s, %s, t.state, t.value, t.type, t.module
FROM ir_translation t
WHERE name = %s
AND res_id = %s
ON CONFLICT DO NOTHING""",
(dst_field, new_rec.id, src_field, old_rec.id))
def _theme_load(self, website):
"""
For every type of model in ``self._theme_model_names``, and for every theme in ``self``:
create/update real models for the website ``website`` based on the theme template models.
:param website: ``website`` model on which to load the themes
"""
for module in self:
_logger.info('Load theme %s for website %s from template.' % (module.mapped('name'), website.id))
for model_name in self._theme_model_names:
module._update_records(model_name, website)
self.env['theme.utils'].with_context(website_id=website.id)._post_copy(module)
def _theme_unload(self, website):
"""
For every type of model in ``self._theme_model_names``, and for every theme in ``self``:
remove real models that were generated based on the theme template models
for the website ``website``.
:param website: ``website`` model on which to unload the themes
"""
for module in self:
_logger.info('Unload theme %s for website %s from template.' % (self.mapped('name'), website.id))
for model_name in self._theme_model_names:
template = self._get_module_data(model_name)
models = template.with_context(**{'active_test': False, MODULE_UNINSTALL_FLAG: True}).mapped('copy_ids').filtered(lambda m: m.website_id == website)
models.unlink()
self._theme_cleanup(model_name, website)
def _theme_cleanup(self, model_name, website):
"""
Remove orphan models of type ``model_name`` from the current theme and
for the website ``website``.
        We need to compute it this way because if the upgrade (or deletion) of a theme module
        removes a model template, then in the model itself the variable
        ``theme_template_id`` will be set to NULL and the reference to the theme being removed
        will be lost. However, we do want the orphan to be deleted from the website when
        we upgrade or delete the theme from the website.
        ``website.page`` and ``website.menu`` don't have a ``key`` field, so we don't clean them.
TODO in master: add a field ``theme_id`` on the models to more cleanly compute orphans.
:param model_name: string with the technical name of the model to cleanup
(the name must be one of the keys present in ``_theme_model_names``)
:param website: ``website`` model for which the models have to be cleaned
"""
self.ensure_one()
model = self.env[model_name]
if model_name in ('website.page', 'website.menu'):
return model
# use active_test to also unlink archived models
# and use MODULE_UNINSTALL_FLAG to also unlink inherited models
orphans = model.with_context(**{'active_test': False, MODULE_UNINSTALL_FLAG: True}).search([
('key', '=like', self.name + '.%'),
('website_id', '=', website.id),
('theme_template_id', '=', False),
])
orphans.unlink()
def _theme_get_upstream(self):
"""
Return installed upstream themes.
:return: recordset of themes ``ir.module.module``
"""
self.ensure_one()
return self.upstream_dependencies(exclude_states=('',)).filtered(lambda x: x.name.startswith('theme_'))
def _theme_get_downstream(self):
"""
        Return installed downstream themes whose name starts with this theme's name.
eg. For theme_A, this will return theme_A_sale, but not theme_B even if theme B
depends on theme_A.
:return: recordset of themes ``ir.module.module``
"""
self.ensure_one()
return self.downstream_dependencies().filtered(lambda x: x.name.startswith(self.name))
def _theme_get_stream_themes(self):
"""
        Return all the themes in the stream of the current theme.
        First find all of its downstream themes, then all of the upstream themes of both,
        sorted by their level in the hierarchy, upstream first.
:return: recordset of themes ``ir.module.module``
"""
self.ensure_one()
all_mods = self + self._theme_get_downstream()
for down_mod in self._theme_get_downstream() + self:
for up_mod in down_mod._theme_get_upstream():
all_mods = up_mod | all_mods
return all_mods
def _theme_get_stream_website_ids(self):
"""
Websites for which this theme (self) is in the stream (up or down) of their theme.
:return: recordset of websites ``website``
"""
self.ensure_one()
websites = self.env['website']
for website in websites.search([('theme_id', '!=', False)]):
if self in website.theme_id._theme_get_stream_themes():
websites |= website
return websites
def _theme_upgrade_upstream(self):
""" Upgrade the upstream dependencies of a theme, and install it if necessary. """
def install_or_upgrade(theme):
if theme.state != 'installed':
theme.button_install()
themes = theme + theme._theme_get_upstream()
themes.filtered(lambda m: m.state == 'installed').button_upgrade()
self._button_immediate_function(install_or_upgrade)
@api.model
def _theme_remove(self, website):
"""
Remove from ``website`` its current theme, including all the themes in the stream.
        The order of removal is the reverse of installation, to handle dependencies correctly.
:param website: ``website`` model for which the themes have to be removed
"""
# _theme_remove is the entry point of any change of theme for a website
# (either removal or installation of a theme and its dependencies). In
# either case, we need to reset some default configuration before.
self.env['theme.utils'].with_context(website_id=website.id)._reset_default_config()
if not website.theme_id:
return
for theme in reversed(website.theme_id._theme_get_stream_themes()):
theme._theme_unload(website)
website.theme_id = False
def button_choose_theme(self):
"""
Remove any existing theme on the current website and install the theme ``self`` instead.
The actual loading of the theme on the current website will be done
automatically on ``write`` thanks to the upgrade and/or install.
        When installing a new theme, upgrade the upstream chain first to make sure
we have the latest version of the dependencies to prevent inconsistencies.
:return: dict with the next action to execute
"""
self.ensure_one()
website = self.env['website'].get_current_website()
self._theme_remove(website)
# website.theme_id must be set before upgrade/install to trigger the load in ``write``
website.theme_id = self
# this will install 'self' if it is not installed yet
self._theme_upgrade_upstream()
active_todo = self.env['ir.actions.todo'].search([('state', '=', 'open')], limit=1)
result = None
if active_todo:
result = active_todo.action_launch()
else:
result = website.button_go_website(mode_edit=True)
if result.get('url') and 'enable_editor' in result['url']:
result['url'] = result['url'].replace('enable_editor', 'with_loader=1&enable_editor')
return result
def button_remove_theme(self):
"""Remove the current theme of the current website."""
website = self.env['website'].get_current_website()
self._theme_remove(website)
def button_refresh_theme(self):
"""
Refresh the current theme of the current website.
To refresh it, we only need to upgrade the modules.
Indeed the (re)loading of the theme will be done automatically on ``write``.
"""
website = self.env['website'].get_current_website()
website.theme_id._theme_upgrade_upstream()
@api.model
def update_list(self):
res = super(IrModuleModule, self).update_list()
self.update_theme_images()
return res
@api.model
def update_theme_images(self):
IrAttachment = self.env['ir.attachment']
existing_urls = IrAttachment.search_read([['res_model', '=', self._name], ['type', '=', 'url']], ['url'])
existing_urls = {url_wrapped['url'] for url_wrapped in existing_urls}
themes = self.env['ir.module.module'].with_context(active_test=False).search([
('category_id', 'child_of', self.env.ref('base.module_category_theme').id),
], order='name')
for theme in themes:
terp = self.get_module_info(theme.name)
images = terp.get('images', [])
for image in images:
image_path = '/' + os.path.join(theme.name, image)
if image_path not in existing_urls:
image_name = os.path.basename(image_path)
IrAttachment.create({
'type': 'url',
'name': image_name,
'url': image_path,
'res_model': self._name,
'res_id': theme.id,
})
def get_themes_domain(self):
"""Returns the 'ir.module.module' search domain matching all available themes."""
def get_id(model_id):
return self.env['ir.model.data']._xmlid_to_res_id(model_id)
return [
('category_id', 'not in', [
get_id('base.module_category_hidden'),
get_id('base.module_category_theme_hidden'),
]),
'|',
('category_id', '=', get_id('base.module_category_theme')),
('category_id.parent_id', '=', get_id('base.module_category_theme'))
]
def _check(self):
super()._check()
View = self.env['ir.ui.view']
website_views_to_adapt = getattr(self.pool, 'website_views_to_adapt', [])
if website_views_to_adapt:
for view_replay in website_views_to_adapt:
cow_view = View.browse(view_replay[0])
View._load_records_write_on_cow(cow_view, view_replay[1], view_replay[2])
self.pool.website_views_to_adapt.clear()
| jeremiahyan/odoo | addons/website/models/ir_module_module.py | Python | gpl-3.0 | 19,278 | 0.004305 |
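The `_update_records` method in the module above relies on a retry loop: records whose dependencies are not yet available are queued back until a full pass makes no progress, after which the leftovers raise an error. A minimal standalone sketch of that pattern (illustrative only, not part of the Odoo module):

```python
# Standalone sketch of the retry loop used by _update_records: keep re-processing
# items until a pass makes no progress, then treat whatever is left as an error.
def process_with_retries(items, convert):
    remaining = list(items)
    last_len = -1
    while len(remaining) != last_len:
        last_len = len(remaining)
        for item in list(remaining):
            if convert(item) is None:   # unmet dependency: leave it queued for the next pass
                continue
            remaining.remove(item)      # converted successfully
    if remaining:
        raise RuntimeError("unresolved dependencies: %r" % (remaining,))

# e.g. convert() would return None for a page whose view has not been created yet,
# so the page is simply retried on a later pass once the view exists.
```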
#!/usr/bin/env python2.7
#(c) 2015-2016 by Authors
#This file is a part of Nano-Align program.
#Released under the BSD license (see LICENSE file)
"""
Flips blockade signals according to the protein's AA order
"""
from __future__ import print_function
import sys
import os
nanoalign_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, nanoalign_root)
from nanoalign.identifier import Identifier
from nanoalign.blockade import read_mat, write_mat
import nanoalign.signal_proc as sp
from nanoalign.model_loader import load_model
def flip(blockades, model_file):
"""
Flips blockades
"""
blockade_model = load_model(model_file)
identifier = Identifier(blockade_model)
peptide = blockades[0].peptide
clusters = sp.preprocess_blockades(blockades, cluster_size=1,
min_dwell=0.0, max_dwell=1000)
print("Num\tFwd_dst\tRev_dst\t\tNeeds_flip", file=sys.stderr)
num_reversed = 0
new_blockades = []
for num, cluster in enumerate(clusters):
discr_signal = sp.discretize(cluster.consensus, len(peptide))
fwd_dist = identifier.signal_protein_distance(discr_signal, peptide)
rev_dist = identifier.signal_protein_distance(discr_signal,
peptide[::-1])
print("{0}\t{1:5.2f}\t{2:5.2f}\t\t{3}"
.format(num + 1, fwd_dist, rev_dist, fwd_dist > rev_dist),
file=sys.stderr)
new_blockades.append(cluster.blockades[0])
if fwd_dist > rev_dist:
new_blockades[-1].eventTrace = new_blockades[-1].eventTrace[::-1]
num_reversed += 1
print("Reversed:", num_reversed, "of", len(blockades), file=sys.stderr)
return new_blockades
def main():
if len(sys.argv) != 4:
print("usage: flip-blockades.py blockades_in model_file flipped_out\n\n"
"Orients blockade signals according to the AA order "
"in the protein of origin")
return 1
blockades_in = sys.argv[1]
blockades_out = sys.argv[3]
svr_file = sys.argv[2]
blockades = read_mat(blockades_in)
rev_blockades = flip(blockades, svr_file)
write_mat(rev_blockades, blockades_out)
return 0
if __name__ == "__main__":
sys.exit(main())
| fenderglass/Nano-Align | scripts/flip-blockades.py | Python | bsd-2-clause | 2,322 | 0.004307 |
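Besides the command-line entry point in `main()`, the `flip()` helper above can be called directly. A hedged sketch with placeholder file names:

```python
# Sketch only: the file names are placeholders, and this assumes the Nano-Align
# package (nanoalign.blockade) and the flip() function above are importable.
from nanoalign.blockade import read_mat, write_mat

blockades = read_mat("blockades_in.mat")          # raw blockade events
flipped = flip(blockades, "blockade_model.pkl")   # flip() as defined in the script above
write_mat(flipped, "blockades_flipped.mat")
```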
import gc
import pandas as pd
import numpy as np
import os
import arboretum
import json
import sklearn.metrics
from sklearn.metrics import f1_score, roc_auc_score
from sklearn.model_selection import train_test_split
from scipy.sparse import dok_matrix, coo_matrix
from sklearn.utils.multiclass import type_of_target
if __name__ == '__main__':
path = "data"
aisles = pd.read_csv(os.path.join(path, "aisles.csv"), dtype={'aisle_id': np.uint8, 'aisle': 'category'})
departments = pd.read_csv(os.path.join(path, "departments.csv"),
dtype={'department_id': np.uint8, 'department': 'category'})
order_prior = pd.read_csv(os.path.join(path, "order_products__prior.csv"), dtype={'order_id': np.uint32,
'product_id': np.uint16,
'add_to_cart_order': np.uint8,
'reordered': bool})
order_train = pd.read_csv(os.path.join(path, "order_products__train.csv"), dtype={'order_id': np.uint32,
'product_id': np.uint16,
'add_to_cart_order': np.uint8,
'reordered': bool})
orders = pd.read_csv(os.path.join(path, "orders.csv"), dtype={'order_id': np.uint32,
'user_id': np.uint32,
'eval_set': 'category',
'order_number': np.uint8,
'order_dow': np.uint8,
'order_hour_of_day': np.uint8
})
products = pd.read_csv(os.path.join(path, "products.csv"), dtype={'product_id': np.uint16,
'aisle_id': np.uint8,
'department_id': np.uint8})
product_embeddings = pd.read_pickle('data/product_embeddings.pkl')
embedings = list(range(32))
product_embeddings = product_embeddings[embedings + ['product_id']]
order_train = pd.read_pickle(os.path.join(path, 'chunk_0.pkl'))
order_test = order_train.loc[order_train.eval_set == "test", ['order_id', 'product_id']]
order_train = order_train.loc[order_train.eval_set == "train", ['order_id', 'product_id', 'reordered']]
product_periods = pd.read_pickle(os.path.join(path, 'product_periods_stat.pkl')).fillna(9999)
print(order_train.columns)
###########################
prob = pd.merge(order_prior, orders, on='order_id')
print(prob.columns)
prob = prob.groupby(['product_id', 'user_id'])\
.agg({'reordered':'sum', 'user_id': 'size'})
print(prob.columns)
prob.rename(columns={'sum': 'reordered',
'user_id': 'total'}, inplace=True)
prob.reordered = (prob.reordered > 0).astype(np.float32)
prob.total = (prob.total > 0).astype(np.float32)
prob['reorder_prob'] = prob.reordered / prob.total
prob = prob.groupby('product_id').agg({'reorder_prob': 'mean'}).rename(columns={'mean': 'reorder_prob'})\
.reset_index()
prod_stat = order_prior.groupby('product_id').agg({'reordered': ['sum', 'size'],
'add_to_cart_order':'mean'})
prod_stat.columns = prod_stat.columns.levels[1]
prod_stat.rename(columns={'sum':'prod_reorders',
'size':'prod_orders',
'mean': 'prod_add_to_card_mean'}, inplace=True)
prod_stat.reset_index(inplace=True)
prod_stat['reorder_ration'] = prod_stat['prod_reorders'] / prod_stat['prod_orders']
prod_stat = pd.merge(prod_stat, prob, on='product_id')
# prod_stat.drop(['prod_reorders'], axis=1, inplace=True)
user_stat = orders.loc[orders.eval_set == 'prior', :].groupby('user_id').agg({'order_number': 'max',
'days_since_prior_order': ['sum',
'mean',
'median']})
user_stat.columns = user_stat.columns.droplevel(0)
user_stat.rename(columns={'max': 'user_orders',
'sum': 'user_order_starts_at',
'mean': 'user_mean_days_since_prior',
'median': 'user_median_days_since_prior'}, inplace=True)
user_stat.reset_index(inplace=True)
orders_products = pd.merge(orders, order_prior, on="order_id")
user_order_stat = orders_products.groupby('user_id').agg({'user_id': 'size',
'reordered': 'sum',
"product_id": lambda x: x.nunique()})
user_order_stat.rename(columns={'user_id': 'user_total_products',
'product_id': 'user_distinct_products',
'reordered': 'user_reorder_ratio'}, inplace=True)
user_order_stat.reset_index(inplace=True)
user_order_stat.user_reorder_ratio = user_order_stat.user_reorder_ratio / user_order_stat.user_total_products
user_stat = pd.merge(user_stat, user_order_stat, on='user_id')
user_stat['user_average_basket'] = user_stat.user_total_products / user_stat.user_orders
########################### products
prod_usr = orders_products.groupby(['product_id']).agg({'user_id': lambda x: x.nunique()})
prod_usr.rename(columns={'user_id':'prod_users_unq'}, inplace=True)
prod_usr.reset_index(inplace=True)
prod_usr_reordered = orders_products.loc[orders_products.reordered, :].groupby(['product_id']).agg({'user_id': lambda x: x.nunique()})
prod_usr_reordered.rename(columns={'user_id': 'prod_users_unq_reordered'}, inplace=True)
prod_usr_reordered.reset_index(inplace=True)
order_stat = orders_products.groupby('order_id').agg({'order_id': 'size'}) \
.rename(columns={'order_id': 'order_size'}).reset_index()
orders_products = pd.merge(orders_products, order_stat, on='order_id')
orders_products['add_to_cart_order_inverted'] = orders_products.order_size - orders_products.add_to_cart_order
orders_products['add_to_cart_order_relative'] = orders_products.add_to_cart_order / orders_products.order_size
data = orders_products.groupby(['user_id', 'product_id']).agg({'user_id': 'size',
'order_number': ['min', 'max'],
'add_to_cart_order': ['mean', 'median'],
'days_since_prior_order': ['mean', 'median'],
'order_dow': ['mean', 'median'],
'order_hour_of_day': ['mean', 'median'],
'add_to_cart_order_inverted': ['mean', 'median'],
'add_to_cart_order_relative': ['mean', 'median'],
'reordered': ['sum']})
data.columns = data.columns.droplevel(0)
data.columns = ['up_orders', 'up_first_order', 'up_last_order', 'up_mean_cart_position', 'up_median_cart_position',
'days_since_prior_order_mean', 'days_since_prior_order_median', 'order_dow_mean',
'order_dow_median',
'order_hour_of_day_mean', 'order_hour_of_day_median',
'add_to_cart_order_inverted_mean', 'add_to_cart_order_inverted_median',
'add_to_cart_order_relative_mean', 'add_to_cart_order_relative_median',
'reordered_sum'
]
data['user_product_reordered_ratio'] = (data.reordered_sum + 1.0) / data.up_orders
# data['first_order'] = data['up_orders'] > 0
# data['second_order'] = data['up_orders'] > 1
#
# data.groupby('product_id')['']
data.reset_index(inplace=True)
data = pd.merge(data, prod_stat, on='product_id')
data = pd.merge(data, user_stat, on='user_id')
data['up_order_rate'] = data.up_orders / data.user_orders
data['up_orders_since_last_order'] = data.user_orders - data.up_last_order
data['up_order_rate_since_first_order'] = data.user_orders / (data.user_orders - data.up_first_order + 1)
############################
user_dep_stat = pd.read_pickle('data/user_department_products.pkl')
user_aisle_stat = pd.read_pickle('data/user_aisle_products.pkl')
############### train
print(order_train.shape)
order_train = pd.merge(order_train, products, on='product_id')
print(order_train.shape)
order_train = pd.merge(order_train, orders, on='order_id')
print(order_train.shape)
order_train = pd.merge(order_train, user_dep_stat, on=['user_id', 'department_id'])
print(order_train.shape)
order_train = pd.merge(order_train, user_aisle_stat, on=['user_id', 'aisle_id'])
print(order_train.shape)
order_train = pd.merge(order_train, prod_usr, on='product_id')
print(order_train.shape)
order_train = pd.merge(order_train, prod_usr_reordered, on='product_id', how='left')
order_train.prod_users_unq_reordered.fillna(0, inplace=True)
print(order_train.shape)
order_train = pd.merge(order_train, data, on=['product_id', 'user_id'])
print(order_train.shape)
order_train['aisle_reordered_ratio'] = order_train.aisle_reordered / order_train.user_orders
order_train['dep_reordered_ratio'] = order_train.dep_reordered / order_train.user_orders
order_train = pd.merge(order_train, product_periods, on=['user_id', 'product_id'])
##############
order_test = pd.merge(order_test, products, on='product_id')
order_test = pd.merge(order_test, orders, on='order_id')
order_test = pd.merge(order_test, user_dep_stat, on=['user_id', 'department_id'])
order_test = pd.merge(order_test, user_aisle_stat, on=['user_id', 'aisle_id'])
order_test = pd.merge(order_test, prod_usr, on='product_id')
order_test = pd.merge(order_test, prod_usr_reordered, on='product_id', how='left')
    order_test.prod_users_unq_reordered.fillna(0, inplace=True)
order_test = pd.merge(order_test, data, on=['product_id', 'user_id'])
order_test['aisle_reordered_ratio'] = order_test.aisle_reordered / order_test.user_orders
order_test['dep_reordered_ratio'] = order_test.dep_reordered / order_test.user_orders
order_test = pd.merge(order_test, product_periods, on=['user_id', 'product_id'])
order_train = pd.merge(order_train, product_embeddings, on=['product_id'])
order_test = pd.merge(order_test, product_embeddings, on=['product_id'])
print('data is joined')
features = [
# 'reordered_dow_ration', 'reordered_dow', 'reordered_dow_size',
# 'reordered_prev', 'add_to_cart_order_prev', 'order_dow_prev', 'order_hour_of_day_prev',
'user_product_reordered_ratio', 'reordered_sum',
'add_to_cart_order_inverted_mean', 'add_to_cart_order_relative_mean',
'reorder_prob',
'last', 'prev1', 'prev2', 'median', 'mean',
'dep_reordered_ratio', 'aisle_reordered_ratio',
'aisle_products',
'aisle_reordered',
'dep_products',
'dep_reordered',
'prod_users_unq', 'prod_users_unq_reordered',
'order_number', 'prod_add_to_card_mean',
'days_since_prior_order',
'order_dow', 'order_hour_of_day',
'reorder_ration',
'user_orders', 'user_order_starts_at', 'user_mean_days_since_prior',
# 'user_median_days_since_prior',
'user_average_basket', 'user_distinct_products', 'user_reorder_ratio', 'user_total_products',
'prod_orders', 'prod_reorders',
'up_order_rate', 'up_orders_since_last_order', 'up_order_rate_since_first_order',
'up_orders', 'up_first_order', 'up_last_order', 'up_mean_cart_position',
# 'up_median_cart_position',
'days_since_prior_order_mean',
# 'days_since_prior_order_median',
'order_dow_mean',
# 'order_dow_median',
'order_hour_of_day_mean',
# 'order_hour_of_day_median'
]
features.extend(embedings)
print('not included', set(order_train.columns.tolist()) - set(features))
data = order_train[features].fillna(-1.).values.astype(np.float32)
data_categoties = order_train[['product_id', 'aisle_id', 'department_id']].values.astype(np.uint32)
labels = order_train[['reordered']].values.astype(np.float32).flatten()
data_val = order_test[features].fillna(-1.).values.astype(np.float32)
data_categoties_val = order_test[['product_id', 'aisle_id', 'department_id']].values.astype(np.uint32)
print(data.shape, data_val.shape)
assert data.shape[0] == 8474661
config = json.dumps({'objective': 1,
'internals':
{
'compute_overlap': 3,
'double_precision': True
},
'verbose':
{
'gpu': True,
'booster': True,
'data': True
},
'tree':
{
'eta': 0.01,
'max_depth': 10,
'gamma': 0.0,
'min_child_weight': 20.0,
'min_leaf_size': 0,
'colsample_bytree': 0.6,
'colsample_bylevel': 0.6,
'lambda': 0.1,
'gamma_relative': 0.0001
}})
print(config)
data = arboretum.DMatrix(data, data_category=data_categoties, y=labels)
data_val = arboretum.DMatrix(data_val, data_category=data_categoties_val)
model = arboretum.Garden(config, data)
print('training...')
# grow trees
for i in range(7400):
print('tree', i)
model.grow_tree()
model.append_last_tree(data_val)
if i % 20 == 0:
pred = model.get_y(data)
print('train', sklearn.metrics.log_loss(labels, pred, eps=1e-6), roc_auc_score(labels, pred))
prediction = model.predict(data_val)
orders = order_test.order_id.values
products = order_test.product_id.values
result = pd.DataFrame({'product_id': products, 'order_id': orders, 'prediction': prediction})
result.to_pickle('data/prediction_arboretum.pkl')
| bataeves/kaggle | instacart/imba/arboretum_submition.py | Python | unlicense | 15,544 | 0.005597 |
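The training section above boils down to building an `arboretum.DMatrix`, constructing a `Garden` from a JSON config, and growing trees one at a time. A toy-scale sketch of that loop, using only the calls that appear in the script; the hyper-parameters are illustrative and a real run may need the fuller config shown above:

```python
# Toy-scale sketch of the arboretum API used above; assumes a minimal config is accepted.
import json
import numpy as np
import arboretum

X = np.random.rand(1000, 5).astype(np.float32)
y = (np.random.rand(1000) > 0.5).astype(np.float32)

config = json.dumps({'objective': 1, 'tree': {'eta': 0.1, 'max_depth': 4}})
data = arboretum.DMatrix(X, y=y)
model = arboretum.Garden(config, data)
for _ in range(50):
    model.grow_tree()
predictions = model.predict(arboretum.DMatrix(X))
```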
from django.contrib.auth.forms import PasswordResetForm
from django.shortcuts import redirect
from django.views.generic import CreateView, TemplateView, View, FormView
from django.contrib.auth import authenticate, login
from game.forms import *
from game.models import User, Game
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth import REDIRECT_FIELD_NAME, login as auth_login, logout as auth_logout
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.contrib import messages
import json
from django.contrib.auth import get_user
from django.shortcuts import get_object_or_404
class HomeView(TemplateView):
template_name = 'home.html'
def dispatch(self, request, *args, **kwargs):
# if logged in, send them to the lobby
if request.user.is_authenticated:
return redirect('/lobby/')
        return super(HomeView, self).dispatch(request, *args, **kwargs)
class CreateUserView(CreateView):
template_name = 'register.html'
form_class = UserCreationForm
success_url = '/lobby/'
def form_valid(self, form):
valid = super(CreateUserView, self).form_valid(form)
username, password = form.cleaned_data.get('username'), form.cleaned_data.get('password1')
new_user = authenticate(username=username, password=password)
login(self.request, new_user)
return valid
class LobbyView(TemplateView):
template_name = 'components/lobby/lobby.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(LobbyView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(LobbyView, self).get_context_data(**kwargs)
# get current open games to prepopulate the list
# we're creating a list of games that contains just the id (for the link) and the creator
available_games = [{'creator': game.creator.username, 'id': game.pk} for game in Game.get_available_games()]
# for the player's games, we're returning a list of games with the opponent and id
        player_games = Game.get_games_for_player(self.request.user)
        context['available_games'] = available_games
        context['player_games'] = player_games
        return context
class GameView(TemplateView):
template_name = 'components/game/game.html'
game = None
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
# get the game by the id
self.game = Game.get_by_id(kwargs['game_id'])
user = get_user(request)
# check to see if the game is open and available for this user
# if this player is the creator, just return
if self.game.creator == user or self.game.opponent == user:
return super(GameView, self).dispatch(request, *args, **kwargs)
# if there is no opponent and the game is not yet completed,
# set the opponent as this user
if not self.game.opponent and not self.game.completed:
self.game.opponent = user
self.game.save()
return super(GameView, self).dispatch(request, *args, **kwargs)
else:
messages.add_message(request, messages.ERROR, 'Sorry, the selected game is not available.')
return redirect('/lobby/')
def get_context_data(self, **kwargs):
context = super(GameView, self).get_context_data(**kwargs)
context['game'] = self.game
return context | codyparker/channels-obstruction | game/views/views.py | Python | mit | 3,489 | 0.002006 |
# UrbanFootprint v1.5
# Copyright (C) 2017 Calthorpe Analytics
#
# This file is part of UrbanFootprint version 1.5
#
# UrbanFootprint is distributed under the terms of the GNU General
# Public License version 3, as published by the Free Software Foundation. This
# code is distributed WITHOUT ANY WARRANTY, without implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License v3 for more details; see <http://www.gnu.org/licenses/>.
from django.contrib.gis.db import models
from footprint.main.models.geospatial.feature import Feature
__author__ = 'calthorpe_analytics'
class JurisdictionBoundary(Feature):
city_uid = models.IntegerField(null=True)
city = models.CharField(max_length=50, null=True)
county = models.CharField(max_length=50, null=True)
county_id = models.IntegerField(null=True)
pop12 = models.DecimalField(max_digits=14, decimal_places=2)
pop20 = models.DecimalField(max_digits=14, decimal_places=2)
pop35 = models.DecimalField(max_digits=14, decimal_places=2)
pop40 = models.DecimalField(max_digits=14, decimal_places=2)
hh12 = models.DecimalField(max_digits=14, decimal_places=2)
hh20 = models.DecimalField(max_digits=14, decimal_places=2)
hh35 = models.DecimalField(max_digits=14, decimal_places=2)
hh40 = models.DecimalField(max_digits=14, decimal_places=2)
emp12 = models.DecimalField(max_digits=14, decimal_places=2)
emp20 = models.DecimalField(max_digits=14, decimal_places=2)
emp35 = models.DecimalField(max_digits=14, decimal_places=2)
emp40 = models.DecimalField(max_digits=14, decimal_places=2)
class Meta(object):
abstract = True
app_label = 'main'
| CalthorpeAnalytics/urbanfootprint | footprint/client/configuration/scag_dm/base/jurisdiction_boundary.py | Python | gpl-3.0 | 1,720 | 0 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
__author__ = "Steven Hiscocks"
__copyright__ = "Copyright (c) 2013 Steven Hiscocks"
__license__ = "GPL"
import datetime
import time
from distutils.version import LooseVersion
from systemd import journal
if LooseVersion(getattr(journal, '__version__', "0")) < '204':
raise ImportError("Fail2Ban requires systemd >= 204")
from .failmanager import FailManagerEmpty
from .filter import JournalFilter, Filter
from .mytime import MyTime
from ..helpers import getLogger, logging, splitwords
# Gets the instance of the logger.
logSys = getLogger(__name__)
##
# Journal reader class.
#
# This class reads from systemd journal and detects login failures or anything
# else that matches a given regular expression. This class is instantiated by
# a Jail object.
class FilterSystemd(JournalFilter): # pragma: systemd no cover
##
# Constructor.
#
# Initialize the filter object with default values.
# @param jail the jail object
def __init__(self, jail, **kwargs):
jrnlargs = FilterSystemd._getJournalArgs(kwargs)
JournalFilter.__init__(self, jail, **kwargs)
self.__modified = 0
# Initialise systemd-journal connection
self.__journal = journal.Reader(**jrnlargs)
self.__matches = []
self.setDatePattern(None)
self.ticks = 0
logSys.debug("Created FilterSystemd")
@staticmethod
def _getJournalArgs(kwargs):
args = {'converters':{'__CURSOR': lambda x: x}}
try:
args['path'] = kwargs.pop('journalpath')
except KeyError:
pass
try:
args['files'] = kwargs.pop('journalfiles')
except KeyError:
pass
else:
import glob
p = args['files']
if not isinstance(p, (list, set, tuple)):
p = splitwords(p)
files = []
for p in p:
files.extend(glob.glob(p))
args['files'] = list(set(files))
try:
args['flags'] = kwargs.pop('journalflags')
except KeyError:
pass
return args
##
	# Add journal match filters from a list structure
#
# @param matches list structure with journal matches
def _addJournalMatches(self, matches):
if self.__matches:
self.__journal.add_disjunction() # Add OR
newMatches = []
for match in matches:
newMatches.append([])
for match_element in match:
self.__journal.add_match(match_element)
newMatches[-1].append(match_element)
self.__journal.add_disjunction()
self.__matches.extend(newMatches)
##
# Add a journal match filter
#
# @param match journalctl syntax matches in list structure
def addJournalMatch(self, match):
newMatches = [[]]
for match_element in match:
if match_element == "+":
newMatches.append([])
else:
newMatches[-1].append(match_element)
try:
self._addJournalMatches(newMatches)
except ValueError:
logSys.error(
"Error adding journal match for: %r", " ".join(match))
self.resetJournalMatches()
raise
else:
logSys.info("Added journal match for: %r", " ".join(match))
##
# Reset a journal match filter called on removal or failure
#
# @return None
def resetJournalMatches(self):
self.__journal.flush_matches()
logSys.debug("Flushed all journal matches")
match_copy = self.__matches[:]
self.__matches = []
try:
self._addJournalMatches(match_copy)
except ValueError:
logSys.error("Error restoring journal matches")
raise
else:
logSys.debug("Journal matches restored")
##
# Delete a journal match filter
#
# @param match journalctl syntax matches
def delJournalMatch(self, match):
if match in self.__matches:
del self.__matches[self.__matches.index(match)]
self.resetJournalMatches()
else:
raise ValueError("Match not found")
logSys.info("Removed journal match for: %r" % " ".join(match))
##
# Get current journal match filter
#
# @return journalctl syntax matches
def getJournalMatch(self):
return self.__matches
def uni_decode(self, x):
v = Filter.uni_decode(x, self.getLogEncoding())
return v
##
# Get journal reader
#
# @return journal reader
def getJournalReader(self):
return self.__journal
##
# Format journal log entry into syslog style
#
# @param entry systemd journal entry dict
# @return format log line
def formatJournalEntry(self, logentry):
		# Be sure all arguments of the line tuple have the same type:
uni_decode = self.uni_decode
logelements = []
v = logentry.get('_HOSTNAME')
if v:
logelements.append(uni_decode(v))
v = logentry.get('SYSLOG_IDENTIFIER')
if not v:
v = logentry.get('_COMM')
if v:
logelements.append(uni_decode(v))
v = logentry.get('SYSLOG_PID')
if not v:
v = logentry.get('_PID')
if v:
logelements[-1] += ("[%i]" % v)
logelements[-1] += ":"
if logelements[-1] == "kernel:":
if '_SOURCE_MONOTONIC_TIMESTAMP' in logentry:
monotonic = logentry.get('_SOURCE_MONOTONIC_TIMESTAMP')
else:
monotonic = logentry.get('__MONOTONIC_TIMESTAMP')[0]
logelements.append("[%12.6f]" % monotonic.total_seconds())
msg = logentry.get('MESSAGE','')
if isinstance(msg, list):
logelements.append(" ".join(uni_decode(v) for v in msg))
else:
logelements.append(uni_decode(msg))
logline = " ".join(logelements)
date = logentry.get('_SOURCE_REALTIME_TIMESTAMP',
logentry.get('__REALTIME_TIMESTAMP'))
logSys.debug("Read systemd journal entry: %r" %
"".join([date.isoformat(), logline]))
## use the same type for 1st argument:
return ((logline[:0], date.isoformat(), logline),
time.mktime(date.timetuple()) + date.microsecond/1.0E6)
def seekToTime(self, date):
if not isinstance(date, datetime.datetime):
date = datetime.datetime.fromtimestamp(date)
self.__journal.seek_realtime(date)
##
# Main loop.
#
	# Periodically check for new journal entries matching the filter and
	# hand them over to the FailManager
def run(self):
if not self.getJournalMatch():
logSys.notice(
"Jail started without 'journalmatch' set. "
"Jail regexs will be checked against all journal entries, "
"which is not advised for performance reasons.")
# Seek to now - findtime in journal
start_time = datetime.datetime.now() - \
datetime.timedelta(seconds=int(self.getFindTime()))
self.seekToTime(start_time)
		# Move back one entry to ensure we do not end up in dead space
# if start time beyond end of journal
try:
self.__journal.get_previous()
except OSError:
pass # Reading failure, so safe to ignore
while self.active:
# wait for records (or for timeout in sleeptime seconds):
self.__journal.wait(self.sleeptime)
if self.idle:
				# because journal.wait returns immediately if we have records in the journal,
				# just wait a little bit here when idle, to prevent high load:
time.sleep(self.sleeptime)
continue
self.__modified = 0
while self.active:
logentry = None
try:
logentry = self.__journal.get_next()
except OSError as e:
logSys.error("Error reading line from systemd journal: %s",
e, exc_info=logSys.getEffectiveLevel() <= logging.DEBUG)
self.ticks += 1
if logentry:
self.processLineAndAdd(
*self.formatJournalEntry(logentry))
self.__modified += 1
if self.__modified >= 100: # todo: should be configurable
break
else:
break
if self.__modified:
try:
while True:
ticket = self.failManager.toBan()
self.jail.putFailTicket(ticket)
except FailManagerEmpty:
self.failManager.cleanup(MyTime.time())
# close journal:
try:
if self.__journal:
self.__journal.close()
except Exception as e: # pragma: no cover
logSys.error("Close journal failed: %r", e,
exc_info=logSys.getEffectiveLevel()<=logging.DEBUG)
logSys.debug((self.jail is not None and self.jail.name
or "jailless") +" filter terminated")
return True
def status(self, flavor="basic"):
ret = super(FilterSystemd, self).status(flavor=flavor)
ret.append(("Journal matches",
[" + ".join(" ".join(match) for match in self.__matches)]))
return ret
| yarikoptic/fail2ban | fail2ban/server/filtersystemd.py | Python | gpl-2.0 | 8,738 | 0.029068 |
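The filter above is built on python-systemd's `journal.Reader`. A minimal reading loop using only the calls the filter itself uses; the match expression is just an example:

```python
# Minimal sketch of the python-systemd journal API used by FilterSystemd above.
import datetime
from systemd import journal

reader = journal.Reader()
reader.add_match("SYSLOG_IDENTIFIER=sshd")   # journalctl-style match, example only
reader.seek_realtime(datetime.datetime.now() - datetime.timedelta(minutes=10))
while True:
    entry = reader.get_next()
    if not entry:
        break
    print(entry.get("MESSAGE", ""))
```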
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 20 10:52:38 2017
@author: Andrew Ruba
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import math
import random
import csv
import os
from numpy.random import choice
import numpy as np
from scipy.optimize import curve_fit
import time
import matplotlib.pyplot as plt
from scipy import stats
## below arrays are for saving simulation data for statistical analysis
global gausslist
gausslist = []
global bimodallist
bimodallist = []
global bimodalmean
bimodalmean = []
global bimodalsd
bimodalsd = []
global bimodalheight
bimodalheight = []
global bimodalauc
bimodalauc = []
def sim(gui, PTNUM, RADIUS, PREC, ITER, BINSIZE, PERCERROR, A, B, ROTATION):
def simulation(num_points, radius, dr, ss, mm, aa, bb, rotation):
def area_fn(X):
X = float(X)
A = -(dr**2)*np.pi
B = dr*2*np.pi
return X*B+A
def gauss_fn(x, s, m):
a = area_fn(m)
x = float(x)
s = float(s)
m = float(m)
return a*np.e**(-(x-m)**2.0/(2.0*s**2.0))
def combine(x):
s = ss
m = mm
return (area_fn(x) * gauss_fn(x, s, m))
##starting with perfect x,y and adding error
xydata = []
mm = mm + 0.00001
while len(xydata) < num_points:
theta = np.random.random()*360.0
## precision distribution sampling
# ss = choice([3,5,7,9], p=[0.1475,0.2775,0.3075,0.2675])
# ss = choice([4.5,5.5,6.5,7.5,8.5,9.5], p=[0.02,0.05,0.07,0.11,0.2,0.55])
y_prec = np.random.normal(0.0, ss)
z_prec = np.random.normal(0.0, ss)
yy = aa*np.cos(theta)
zz = bb*np.sin(theta)
yyy = yy*np.cos(np.radians(rotation))+zz*np.sin(np.radians(rotation))
zzz = -yy*np.sin(np.radians(rotation))+zz*np.cos(np.radians(rotation))
xydata.append((yyy+y_prec, zzz+z_prec))
def gen_matrix(r, d_r):
##'be' is short for bin edges
if r%d_r > 0:
be = range(0, r+r%d_r, d_r)
else:
be = range(0, r+d_r, d_r)
matrix = []
for i in range(len(be)-1):
matrix.append([])
x = 0
for i in range(len(matrix)):
for j in range(x):
matrix[i].append(0)
x += 1
##generate areas of sections closest to x axis
for i in range(len(matrix)):
theta = np.arccos(float(be[len(be)-2-i])/float(be[len(be)-1-i]))
arc_area = (theta/(2*np.pi)) * np.pi * float(be[len(be)-1-i])**2
tri_area = 0.5 * float(be[len(be)-2-i]) * (np.sin(theta) * float(be[len(be)-1-i]))
matrix[i].append(4 * (arc_area - tri_area))
##skipping factor
x = 2
##generate areas of layers going further out from x axis
while len(matrix[0]) < len(matrix):
for i in range(len(matrix) - len(matrix[0])):
num = 0
for j in range(len(matrix)):
for k in range(len(matrix[i]) + 1):
if j == i and k < len(matrix[i]):
num += matrix[j][k]
elif j > i:
num += matrix[j][k]
theta = np.arccos(float(be[len(be)-1-x-i])/float(be[len(be)-1-i]))
arc_area = (theta/(2*np.pi)) * np.pi * float(be[len(be)-1-i])**2
tri_area = 0.5 * float(be[len(be)-1-x-i]) * (np.sin(theta) * float(be[len(be)-1-i]))
matrix[i].append(4 * (arc_area - tri_area) - num)
x += 1
return matrix
def smoothdata(data, r, d_r):
"""smoothds data with 3 moving window and takes abs value average"""
smooth_data = []
r += 1
##comment out for smoothing
smooth_data = []
for i in range(len(data)):
smooth_data.append(data[i])
##adds + and - bins
final_smooth_data = []
for i in range(int(r/d_r)):
final_smooth_data.append(smooth_data[i] + smooth_data[len(smooth_data)-1-i])
return list(reversed(final_smooth_data))
def deconvolution(hv, be, r, d_r):
"""hv = hist_values, be = bin_edges"""
density = []
matrix = gen_matrix(r, d_r)
while len(hv) > len(matrix):
hv.pop()
while len(matrix) > len(hv):
matrix.pop()
rev_hv = list(reversed(hv))
x = 0
for i in range(len(rev_hv)):
##calculate how much to subtract from bin
density_sub = 0
y = 0
for j in range(x):
density_sub += density[y] * matrix[j][i]
y += 1
##calculate final bin value
density.append((rev_hv[i] - density_sub) / matrix[i][i])
x += 1
unrev_hv = list(reversed(density))
smooth_data = []
for i in range(len(unrev_hv)):
if i == 0 or i == (len(unrev_hv) - 1):
smooth_data.append(unrev_hv[i])
else:
smooth_data.append(np.average([unrev_hv[i-1], unrev_hv[i], unrev_hv[i+1]]))
return unrev_hv, smooth_data, hv
def make_hist(data, r, d_r):
hist_values, bin_edges = np.histogram(data, bins = 2 * int(r/d_r), range = (-r, r))
new_bin_edges = []
for i in bin_edges:
if i >= 0:
new_bin_edges.append(i)
new_hist_values = smoothdata(hist_values, r, d_r)
return new_hist_values, new_bin_edges
def csv_read(path):
with open(path, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter = ',')
holdlist = []
for row in reader:
holdlist.append(float(row[1]))
return holdlist
jkl = []
for y,z in xydata:
jkl.append(y)
radius = int(np.floor(radius/dr))*dr
if num_points == PTNUM + 1:
## decide the proper bin size
minbinsize = 2
binsizes = []
binsizesdata = [[] for variable in range(1, int(PREC)+1)]
gui.message.set('0% done calculating ideal bin size...')
gui.update()
for binoptimization in range(10):
for binsize in range(1, int(PREC)+1):
if binsize >= minbinsize:
error = 0
# print ('binsize ' + str(binsize))
jkl = []
mm = mm + 0.00001
while len(jkl) < num_points-1:
theta = np.random.random()*360.0
## precision distribution sampling
# ss = choice([3,5,7,9], p=[0.1475,0.2775,0.3075,0.2675])
# ss = choice([4.5,5.5,6.5,7.5,8.5,9.5], p=[0.02,0.05,0.07,0.11,0.2,0.55])
y_prec = np.random.normal(0.0, ss)
jkl.append(mm*np.cos(theta)+y_prec)
a,b = make_hist(jkl, radius, binsize)
final_unsmooth, final_smooth, final_2d = deconvolution(a, b, radius, binsize)
holdlist = []
addZero = False
for val in list(reversed(final_unsmooth)):
if not addZero:
if val >= 0.0:
holdlist.append(val)
else:
addZero = True
holdlist.append(0.0)
else:
holdlist.append(0.0)
final_unsmooth = list(reversed(holdlist))
##rescale ideal data
matrix = gen_matrix(radius, binsize)
newmatrix = []
for i in matrix:
newmatrix.append(list(reversed(i)))
matrix = list(reversed(newmatrix))
# print (a)
# print (final_unsmooth)
while len(a) > len(matrix):
a.pop()
while len(matrix) > len(a):
matrix.pop()
for ncol in range(len(matrix[0])):
binsub = 0.0
for mcol in range(len(matrix)):
binsub += float(matrix[mcol][ncol]*final_unsmooth[mcol])
try:
if a[ncol] != 0.0:
# print (binsub)
error += np.square(a[ncol] - binsub) / a[ncol]
except:
pass
popped = a.pop()
while popped == 0:
popped = a.pop()
binsizesdata[binsize-1].append((error, len(a)+1,1-stats.chi2.cdf(error, len(a)+1),binsize))
else:
binsizesdata[binsize-1].append((1000000.0,1,0.0,binsize))
gui.message.set(str((binoptimization*10) + 10) + ' % done calculating ideal bin size...')
gui.update()
finalbinsizes = []
for bintrial in range(len(binsizesdata)):
errhold = []
dfhold = []
pvalhold = []
binhold = []
for trial in range(len(binsizesdata[bintrial])):
chisq, df, pval, binsize = binsizesdata[bintrial][trial]
errhold.append(chisq)
dfhold.append(df)
pvalhold.append(pval)
binhold.append(binsize)
chisq = np.average(errhold)
df = np.round(np.average(dfhold))
pval = 1-stats.chi2.cdf(chisq,df)
binsize = binhold[0]
finalbinsizes.append((chisq,df,pval,binsize))
# print (finalbinsizes)
for binsizedata in finalbinsizes:
chisq, df, pval, binsize = binsizedata
if pval >= 0.95:
dr = binsize
break
else:
dr = int(PREC)
a,b = make_hist(jkl, radius, dr)
final = deconvolution(a,b,radius,dr)
if num_points != PTNUM + 1:
def gauss_fn(x, a, s, m):
return a*np.e**(-(x-m)**2.0/(2.0*s**2.0))
def bimodal(x,mu1,sigma1,A1,mu2,sigma2,A2):
return gauss_fn(x, A1, sigma1, mu1)+gauss_fn(x, A2, sigma2, mu2)
try:
guess = [np.max(final[0]), ss, mm]
tempbins = list(range(int(dr/2), radius+int(dr/2), dr))
tempdensity = final[0]
holdlist = []
addZero = False
for val in list(reversed(tempdensity)):
if not addZero:
if val >= 0.0:
holdlist.append(val)
else:
addZero = True
holdlist.append(0.0)
else:
holdlist.append(0.0)
tempdensity = list(reversed(holdlist))
while len(tempdensity) > len(tempbins):
tempdensity.pop()
while len(tempbins) > len(tempdensity):
tempbins.pop()
revtempbins = list(np.negative(list(reversed(tempbins))))
revtempdensity = list(reversed(tempdensity))
bins = revtempbins + tempbins
density = revtempdensity + tempdensity
params, var = curve_fit(gauss_fn, bins, density, p0 = guess)
params_gauss = np.abs(params)
## computes 1 SD errors
var_gauss = np.sqrt(np.diag(var))
def frange(beg, end, step):
f_range = []
while beg < end - (step/2.0):
f_range.append(beg)
beg += step
return f_range
guess = [-mm, ss, np.max(final[0]), mm, ss, np.max(final[0])]
tempbins = frange(dr/2.0, radius, dr)
tempdensity = final[0]
holdlist = []
addZero = False
for val in list(reversed(tempdensity)):
if not addZero:
if val >= 0.0:
holdlist.append(val)
else:
addZero = True
holdlist.append(0.0)
else:
holdlist.append(0.0)
tempdensity = list(reversed(holdlist))
while len(tempdensity) > len(tempbins):
tempdensity.pop()
while len(tempbins) > len(tempdensity):
tempbins.pop()
revtempbins = list(np.negative(list(reversed(tempbins))))
revtempdensity = list(reversed(tempdensity))
bins = revtempbins + tempbins
density = revtempdensity + tempdensity
params, var = curve_fit(bimodal, bins, density, p0 = guess)
params = np.abs(params)
## computes 1 SD errors
var = np.sqrt(np.diag(var))
## average paramters
stdev = np.average((params[1], params[4]))
mean = np.average((params[0], params[3]))
height = np.average((params[2], params[5]))
stdev_e = np.average((var[1], var[4]))
mean_e = np.average((var[0], var[3]))
height_e = np.average((var[2], var[5]))
params_bimodal = [height, stdev, mean]
var_bimodal = [height_e, stdev_e, mean_e]
## uncomment following for comparing central vs. peripheral peak fitting errors
# bimodalmean.append(params_gauss[0])
bimodalmean.append(mean)
# bimodalmean.append(tempdensity)
bimodalsd.append(stdev)
bimodalheight.append(height)
auc = 0.0
step = mean - 5.0*stdev
while step < mean + 5.0*stdev:
auc+=0.01*gauss_fn(step,height,stdev,mean)
step += 0.01
bimodalauc.append(auc)
# bimodallist.append(var_bimodal[1])
gausslist.append(var_gauss[1])
# if np.sum(var_bimodal) < np.sum(var_gauss):
params = params_bimodal
var = var_bimodal
# else:
# params = params_gauss
# var = var_gauss
except RuntimeError:
params = []
var = []
hist_mids = []
for i in range(len(b)-1):
hist_mids.append(np.average((b[i],b[i+1])))
norm_values = []
for i in final[1]:
norm_values.append(i/np.max(final[1]))
return params, var, norm_values, hist_mids, dr
else:
return dr
pt_min = PTNUM
pt_max = PTNUM
rt_min = RADIUS
rt_max = RADIUS
prec_min = PREC
prec_max = PREC
iterations = ITER
PREC = float(PREC)
one_diff = []
perc_err = PERCERROR*0.01
def roundup(x):
val = int(math.ceil(x / 10.0)) * 10
if val >= 30:
return val
else:
return 30
ptlist = range(pt_min, pt_max+100, 100)
for pt in ptlist:
for rt in range(rt_min, rt_max+1, 1):
for prec in range(prec_min, prec_max+1, 1):
prec = prec+0.000001
xrng = roundup(rt + prec*5.0)
# DR = simulation(pt+1, xrng, BINSIZE, float(prec), float(rt))
## uncomment below to manually set bin size
DR = PTNUM
# print ('ideal bin size: '+ str(DR))
p_ab, v, d, h_m, DR = simulation(1000000, xrng, DR, float(prec), float(rt), A, B, ROTATION)
p_normal, v, d, h_m, DR = simulation(1000000, xrng, DR, float(prec), float(rt), RADIUS, RADIUS, 0)
# print (p)
a, s, m_ab = p_ab
a, s, m_normal = p_normal
return '%.3f'%(m_ab-m_normal) | andrewruba/YangLab | JPC simulations 2019/Figure 7 - symmetry compression/simulation.py | Python | gpl-3.0 | 13,921 | 0.037354 |
"""Validators to determine the current webserver configuration"""
import logging
import socket
import requests
import zope.interface
from acme import crypto_util
from acme import errors as acme_errors
from letsencrypt import interfaces
logger = logging.getLogger(__name__)
class Validator(object):
# pylint: disable=no-self-use
"""Collection of functions to test a live webserver's configuration"""
zope.interface.implements(interfaces.IValidator)
def certificate(self, cert, name, alt_host=None, port=443):
"""Verifies the certificate presented at name is cert"""
host = alt_host if alt_host else socket.gethostbyname(name)
try:
presented_cert = crypto_util.probe_sni(name, host, port)
except acme_errors.Error as error:
logger.exception(error)
return False
return presented_cert.digest("sha256") == cert.digest("sha256")
def redirect(self, name, port=80, headers=None):
"""Test whether webserver redirects to secure connection."""
url = "http://{0}:{1}".format(name, port)
if headers:
response = requests.get(url, headers=headers, allow_redirects=False)
else:
response = requests.get(url, allow_redirects=False)
if response.status_code not in (301, 303):
return False
redirect_location = response.headers.get("location", "")
if not redirect_location.startswith("https://"):
return False
if response.status_code != 301:
logger.error("Server did not redirect with permanent code")
return False
return True
def hsts(self, name):
"""Test for HTTP Strict Transport Security header"""
headers = requests.get("https://" + name).headers
hsts_header = headers.get("strict-transport-security")
if not hsts_header:
return False
# Split directives following RFC6797, section 6.1
directives = [d.split("=") for d in hsts_header.split(";")]
max_age = [d for d in directives if d[0] == "max-age"]
if not max_age:
logger.error("Server responded with invalid HSTS header field")
return False
try:
_, max_age_value = max_age[0]
max_age_value = int(max_age_value)
except ValueError:
logger.error("Server responded with invalid HSTS header field")
return False
# Test whether HSTS does not expire for at least two weeks.
if max_age_value <= (2 * 7 * 24 * 3600):
logger.error("HSTS should not expire in less than two weeks")
return False
return True
def ocsp_stapling(self, name):
"""Verify ocsp stapling for domain."""
raise NotImplementedError()
| rutsky/letsencrypt | letsencrypt/validator.py | Python | apache-2.0 | 2,825 | 0.000354 |
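A hedged usage sketch for the `Validator` above; the domain is a placeholder and both checks perform live HTTP(S) requests:

```python
# Sketch only: example.com is a placeholder domain.
from letsencrypt.validator import Validator

validator = Validator()
if validator.redirect("example.com"):
    print("HTTP redirects permanently to HTTPS")
if validator.hsts("example.com"):
    print("HSTS header present with max-age beyond two weeks")
```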
class Graph():
pointSet = []
    # it should be a dict of {point: [list of points it connects to]}
pathDic = dict()
pointTime = dict() # points' discover time and finish time
def __init__(self, points, paths):
if set(paths.keys()) - set(points):
print(
"Warning: Some points in path dict not exist in points set,\
initialize fail!")
return
self.pointSet = points
self.pointSet.sort() # sort points to alphabetical
self.pathDic = paths
# init each point's discovery and finishing time list
self.pointTime = {key: [] for key in self.pointSet}
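# after DFS each entry becomes [discover_time, finish_time]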
for i in self.pointSet:
try:
self.pathDic[i].sort() # sort paths list to alphabetical
except KeyError: # if some point has no path, give it a empty list
self.pathDic[i] = []
def DFS(g):
time = 0
greyMark = set() # grey set for storing all points in recursive
blackMark = set() # black set for storing all points have done
def DFS_inner(g, i, time): # recursive function
time += 1
greyMark.add(i) # add to grey set
g.pointTime[i].append(time) # store discover time
for c in g.pathDic[i]:
if c in blackMark or c in greyMark:
pass
else:
time = DFS_inner(g, c, time)
time += 1
# store finish time, so finish time's index is 1
g.pointTime[i].append(time)
blackMark.add(i) # finish
greyMark.remove(i) # delete grey set
return time
for i in g.pointSet:
if i in blackMark or i in greyMark:
pass
else:
time = DFS_inner(g, i, time)
# format print
for k in g.pointSet:
print("{0} -> discover time is {1} -> finish time is {2}"
.format(k, g.pointTime[k][0], g.pointTime[k][1]))
return "done"
def topologicalSort(g):
DFS(g) # in case graph has not DFSed before
# create a list of tuples: [(point, finish_time)]
finishTimeList = []
for k, v in g.pointTime.items():
finishTimeList.append((k, v[1])) # v[1] is finish time
# sort elements increasing by finish time
finishTimeList.sort(key=lambda pair: pair[1])
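# listing vertices in decreasing finish-time order gives a topological ordering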
# insert on the front of result list
result = []
for i in finishTimeList:
result.insert(0, i[0])
# format print
reStr = result[0]
for i in result[1:]:
reStr += " -> " + i
print(reStr)
return "done"
if __name__ == "__main__":
# test
test1set = ["u", "v", "w", "x", "y", "z", ]
test1path = {"u": ["x", "v"],
"v": ["y"],
"w": ["y", "z"],
"x": ["v"],
"y": ["x"],
"z": ["z"],
}
# q1 data
q1set = ["s", "v", "w", "q", "t", "x", "z", "y", "r", "u", ]
q1path = {"s": ["v"],
"v": ["w"],
"w": ["s"],
"q": ["s", "w", "t"],
"t": ["x", "y"],
"x": ["z"],
"z": ["x"],
"y": ["q"],
"r": ["u", "y"],
"u": ["y"],
}
# q2 data
q2set = ["m", "n", "q", "o", "p", "r", "s",
"t", "u", "v", "w", "x", "y", "z", ]
q2path = {"m": ["x", "q", "r", ],
"n": ["o", "q", "u", ],
"q": ["t", ],
"o": ["r", "s", "v", ],
"p": ["o", "s", "z", ],
"r": ["u", "y", ],
"s": ["r", ],
"t": [],
"u": ["t", ],
"v": ["x", "w", ],
"w": ["z", ],
"x": [],
"y": ["v", ],
"z": [],
}
# test1 = Graph(test1set, test1path)
q1 = Graph(q1set, q1path)
q2 = Graph(q2set, q2path)
DFS(q1)
print("\n")
topologicalSort(q2)
print("\n")
| ccqpein/Arithmetic-Exercises | Depth-First-Search/DFS.py | Python | apache-2.0 | 3,928 | 0.000255 |
#!/bin/python
import datetime
class message():
def __init__(self, msg_id=0, channel_id=0, source_id=0, source_chat_id='',\
msg=''):
curr_datetime = datetime.datetime.now()
self.date = curr_datetime.strftime("%Y%m%d")
self.time = curr_datetime.strftime("%H:%M:%S.%f %z")
self.msg_id = msg_id
self.channel_id = channel_id
self.source_id = source_id
self.source_chat_id = source_chat_id
self.msg = msg
def str(self):
return "'%s','%s',%d,%d,%d,'%s','%s'" \
% (self.date, \
self.time, \
self.msg_id, \
self.channel_id, \
self.source_id, \
self.source_chat_id, \
self.msg)
@staticmethod
def from_message_record(record, set_curr_time = True):
"""
Convert a db record to a message record
:param record: Database record
:param set_curr_time: If True, keep the current date and time; otherwise copy them from the record
"""
if not record:
ret = message()
else:
ret = message(msg_id=record[message.msg_id_index()],
channel_id=record[message.channel_id_index()],
source_id=record[message.source_id_index()],
source_chat_id=record[message.source_chat_id_index()],
msg=record[message.msg_index()])
if not set_curr_time:
ret.date = record[message.date_index()]
ret.time = record[message.time_index()]
return ret
@staticmethod
def field_str():
return "date text, time text, msgid int, channelid int, " + \
"sourceid int, sourcechatid text, msg text"
@staticmethod
def key_str():
return "msgid"
@staticmethod
def date_index():
return 0
@staticmethod
def time_index():
return 1
@staticmethod
def msg_id_index():
return 2
@staticmethod
def channel_id_index():
return 3
@staticmethod
def source_id_index():
return 4
@staticmethod
def source_chat_id_index():
return 5
@staticmethod
def msg_index():
return 6
| gavincyi/Telex | src/message.py | Python | apache-2.0 | 2,393 | 0.008776 |
# -*- coding: utf-8 -*-
import random
import mock
from datetime import datetime
from nose.tools import * # flake8: noqa
from rest_framework import exceptions
from api.base.exceptions import Conflict
from api.base.settings.defaults import API_BASE
from api.nodes.serializers import NodeContributorsCreateSerializer
from framework.auth.core import Auth
from tests.base import ApiTestCase, capture_signals, fake
from osf_tests.factories import (
ProjectFactory,
AuthUserFactory,
UserFactory
)
from tests.utils import assert_logs
from website.models import NodeLog
from website.project.signals import contributor_added, unreg_contributor_added, contributor_removed
from website.util import permissions, disconnected_from_listeners
class NodeCRUDTestCase(ApiTestCase):
def setUp(self):
super(NodeCRUDTestCase, self).setUp()
self.user = AuthUserFactory()
self.user_two = AuthUserFactory()
self.title = 'Cool Project'
self.new_title = 'Super Cool Project'
self.description = 'A Properly Cool Project'
self.new_description = 'An even cooler project'
self.category = 'data'
self.new_category = 'project'
self.public_project = ProjectFactory(title=self.title,
description=self.description,
category=self.category,
is_public=True,
creator=self.user)
self.public_url = '/{}nodes/{}/'.format(API_BASE, self.public_project._id)
self.private_project = ProjectFactory(title=self.title,
description=self.description,
category=self.category,
is_public=False,
creator=self.user)
self.private_url = '/{}nodes/{}/'.format(API_BASE, self.private_project._id)
self.fake_url = '/{}nodes/{}/'.format(API_BASE, '12345')
def make_node_payload(node, attributes):
return {
'data': {
'id': node._id,
'type': 'nodes',
'attributes': attributes,
}
}
def make_contrib_id(node_id, user_id):
return '{}-{}'.format(node_id, user_id)
class TestNodeContributorList(NodeCRUDTestCase):
def setUp(self):
super(TestNodeContributorList, self).setUp()
self.private_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.private_project._id)
self.public_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.public_project._id)
def test_concatenated_id(self):
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(res.json['data'][0]['id'].split('-')[0], self.public_project._id)
assert_equal(res.json['data'][0]['id'].split('-')[1], self.user._id)
def test_permissions_work_with_many_users(self):
users = {
'admin': [self.user._id],
'write': [],
'read': []
}
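# add 25 contributors with randomly chosen permission levels, recording which level each user was given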
for i in range(0, 25):
perm = random.choice(users.keys())
perms = []
if perm == 'admin':
perms = ['read', 'write', 'admin', ]
elif perm == 'write':
perms = ['read', 'write', ]
elif perm == 'read':
perms = ['read', ]
user = AuthUserFactory()
self.private_project.add_contributor(user, permissions=perms)
users[perm].append(user._id)
res = self.app.get(self.private_url, auth=self.user.auth)
data = res.json['data']
for user in data:
api_perm = user['attributes']['permission']
user_id = user['id'].split('-')[1]
assert user_id in users[api_perm], 'Permissions incorrect for {}. Should not have {} permission.'.format(user_id, api_perm)
def test_return_public_contributor_list_logged_out(self):
self.public_project.add_contributor(self.user_two, save=True)
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(len(res.json['data']), 2)
assert_equal(res.json['data'][0]['id'], make_contrib_id(self.public_project._id, self.user._id))
assert_equal(res.json['data'][1]['id'], make_contrib_id(self.public_project._id, self.user_two._id))
def test_return_public_contributor_list_logged_in(self):
res = self.app.get(self.public_url, auth=self.user_two.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(len(res.json['data']), 1)
assert_equal(res.json['data'][0]['id'], make_contrib_id(self.public_project._id, self.user._id))
def test_return_private_contributor_list_logged_out(self):
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert 'detail' in res.json['errors'][0]
def test_return_private_contributor_list_logged_in_contributor(self):
self.private_project.add_contributor(self.user_two)
self.private_project.save()
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(len(res.json['data']), 2)
assert_equal(res.json['data'][0]['id'], make_contrib_id(self.private_project._id, self.user._id))
assert_equal(res.json['data'][1]['id'], make_contrib_id(self.private_project._id, self.user_two._id))
def test_return_private_contributor_list_logged_in_non_contributor(self):
res = self.app.get(self.private_url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert 'detail' in res.json['errors'][0]
def test_filtering_on_obsolete_fields(self):
# regression test for changes in filter fields
url_fullname = '{}?filter[fullname]=foo'.format(self.public_url)
res = self.app.get(url_fullname, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
errors = res.json['errors']
assert_equal(len(errors), 1)
assert_equal(errors[0]['detail'], u"'fullname' is not a valid field for this endpoint.")
# middle_name is now middle_names
url_middle_name = '{}?filter[middle_name]=foo'.format(self.public_url)
res = self.app.get(url_middle_name, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
errors = res.json['errors']
assert_equal(len(errors), 1)
assert_equal(errors[0]['detail'], "'middle_name' is not a valid field for this endpoint.")
def test_disabled_contributors_contain_names_under_meta(self):
self.public_project.add_contributor(self.user_two, save=True)
self.user_two.is_disabled = True
self.user_two.save()
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(len(res.json['data']), 2)
assert_equal(res.json['data'][0]['id'], make_contrib_id(self.public_project._id, self.user._id))
assert_equal(res.json['data'][1]['id'], make_contrib_id(self.public_project._id, self.user_two._id))
assert_equal(res.json['data'][1]['embeds']['users']['errors'][0]['meta']['full_name'], self.user_two.fullname)
assert_equal(res.json['data'][1]['embeds']['users']['errors'][0]['detail'], 'The requested user is no longer available.')
def test_total_bibliographic_contributor_count_returned_in_metadata(self):
non_bibliographic_user = UserFactory()
self.public_project.add_contributor(non_bibliographic_user, visible=False, auth=Auth(self.public_project.creator))
self.public_project.save()
res = self.app.get(self.public_url, auth=self.user_two.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['links']['meta']['total_bibliographic'], len(self.public_project.visible_contributor_ids))
def test_unregistered_contributor_field_is_null_if_account_claimed(self):
project = ProjectFactory(creator=self.user, is_public=True)
url = '/{}nodes/{}/contributors/'.format(API_BASE, project._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1)
assert_equal(res.json['data'][0]['attributes'].get('unregistered_contributor'), None)
def test_unregistered_contributors_show_up_as_name_associated_with_project(self):
project = ProjectFactory(creator=self.user, is_public=True)
project.add_unregistered_contributor('Robert Jackson', 'robert@gmail.com', auth=Auth(self.user), save=True)
url = '/{}nodes/{}/contributors/'.format(API_BASE, project._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 2)
assert_equal(res.json['data'][1]['embeds']['users']['data']['attributes']['full_name'], 'Robert Jackson')
assert_equal(res.json['data'][1]['attributes'].get('unregistered_contributor'), 'Robert Jackson')
project_two = ProjectFactory(creator=self.user, is_public=True)
project_two.add_unregistered_contributor('Bob Jackson', 'robert@gmail.com', auth=Auth(self.user), save=True)
url = '/{}nodes/{}/contributors/'.format(API_BASE, project_two._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 2)
assert_equal(res.json['data'][1]['embeds']['users']['data']['attributes']['full_name'], 'Robert Jackson')
assert_equal(res.json['data'][1]['attributes'].get('unregistered_contributor'), 'Bob Jackson')
def test_contributors_order_is_the_same_over_multiple_requests(self):
self.public_project.add_unregistered_contributor(
'Robert Jackson',
'robert@gmail.com',
auth=Auth(self.user),
save=True
)
for i in range(0,10):
new_user = AuthUserFactory()
if i%2 == 0:
visible = True
else:
visible = False
self.public_project.add_contributor(
new_user,
visible=visible,
auth=Auth(self.public_project.creator),
save=True
)
req_one = self.app.get("{}?page=2".format(self.public_url), auth=Auth(self.public_project.creator))
req_two = self.app.get("{}?page=2".format(self.public_url), auth=Auth(self.public_project.creator))
id_one = [item['id'] for item in req_one.json['data']]
id_two = [item['id'] for item in req_two.json['data']]
for a, b in zip(id_one, id_two):
assert_equal(a, b)
class TestNodeContributorFiltering(ApiTestCase):
def setUp(self):
super(TestNodeContributorFiltering, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
def test_filtering_full_name_field(self):
url = '/{}nodes/{}/contributors/?filter[full_name]=Freddie'.format(API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
errors = res.json['errors']
assert_equal(len(errors), 1)
assert_equal(errors[0]['detail'], "'full_name' is not a valid field for this endpoint.")
def test_filtering_permission_field(self):
url = '/{}nodes/{}/contributors/?filter[permission]=admin'.format(API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1)
assert_equal(res.json['data'][0]['attributes'].get('permission'), 'admin')
def test_filtering_node_with_only_bibliographic_contributors(self):
base_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.project._id)
# no filter
res = self.app.get(base_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1)
# filter for bibliographic contributors
url = base_url + '?filter[bibliographic]=True'
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 1)
assert_true(res.json['data'][0]['attributes'].get('bibliographic', None))
# filter for non-bibliographic contributors
url = base_url + '?filter[bibliographic]=False'
res = self.app.get(url, auth=self.user.auth)
assert_equal(len(res.json['data']), 0)
def test_filtering_node_with_non_bibliographic_contributor(self):
non_bibliographic_contrib = UserFactory()
self.project.add_contributor(non_bibliographic_contrib, visible=False)
self.project.save()
base_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.project._id)
# no filter
res = self.app.get(base_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json['data']), 2)
# filter for bibliographic contributors
url = base_url + '?filter[bibliographic]=True'
res = self.app.get(url, auth=self.user.auth)
assert_equal(len(res.json['data']), 1)
assert_true(res.json['data'][0]['attributes'].get('bibliographic', None))
# filter for non-bibliographic contributors
url = base_url + '?filter[bibliographic]=False'
res = self.app.get(url, auth=self.user.auth)
assert_equal(len(res.json['data']), 1)
assert_false(res.json['data'][0]['attributes'].get('bibliographic', None))
def test_filtering_on_invalid_field(self):
url = '/{}nodes/{}/contributors/?filter[invalid]=foo'.format(API_BASE, self.project._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
errors = res.json['errors']
assert_equal(len(errors), 1)
assert_equal(errors[0]['detail'], "'invalid' is not a valid field for this endpoint.")
class TestNodeContributorAdd(NodeCRUDTestCase):
def setUp(self):
super(TestNodeContributorAdd, self).setUp()
self.private_url = '/{}nodes/{}/contributors/?send_email=false'.format(API_BASE, self.private_project._id)
self.public_url = '/{}nodes/{}/contributors/?send_email=false'.format(API_BASE, self.public_project._id)
self.user_three = AuthUserFactory()
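# JSON-API create payloads: contributor attributes plus a "users" relationship pointing at the user to add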
self.data_user_two = {
'data': {
'type': 'contributors',
'attributes': {
'bibliographic': True,
},
'relationships': {
'users': {
'data': {
'type': 'users',
'id': self.user_two._id,
}
}
}
}
}
self.data_user_three = {
'data': {
'type': 'contributors',
'attributes': {
'bibliographic': True,
},
'relationships': {
'users': {
'data': {
'type': 'users',
'id': self.user_three._id,
}
}
}
}
}
def test_add_node_contributors_relationships_is_a_list(self):
data = {
'data': {
'type': 'contributors',
'attributes': {
'bibliographic': True
},
'relationships': [{'contributor_id': self.user_three._id}]
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "Malformed request.")
def test_contributor_create_invalid_data(self):
res = self.app.post_json_api(self.public_url, "Incorrect data", auth=self.user_three.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "Malformed request.")
res = self.app.post_json_api(self.public_url, ["Incorrect data"], auth=self.user_three.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "Malformed request.")
def test_add_contributor_no_relationships(self):
data = {
'data': {
'type': 'contributors',
'attributes': {
'bibliographic': True
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'A user ID or full name must be provided to add a contributor.')
def test_add_contributor_empty_relationships(self):
data = {
'data': {
'type': 'contributors',
'attributes': {
'bibliographic': True
},
'relationships': {}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'A user ID or full name must be provided to add a contributor.')
def test_add_contributor_no_user_key_in_relationships(self):
data = {
'data': {
'type': 'contributors',
'attributes': {
'bibliographic': True
},
'relationships': {
'id': self.user_two._id,
'type': 'users'
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Malformed request.')
def test_add_contributor_no_data_in_relationships(self):
data = {
'data': {
'type': 'contributors',
'attributes': {
'bibliographic': True
},
'relationships': {
'users': {
'id': self.user_two._id
}
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Request must include /data.')
def test_add_contributor_no_target_type_in_relationships(self):
data = {
'data': {
'type': 'contributors',
'attributes': {
'bibliographic': True
},
'relationships': {
'users': {
'data': {
'id': self.user_two._id
}
}
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Request must include /type.')
def test_add_contributor_no_target_id_in_relationships(self):
data = {
'data': {
'type': 'contributors',
'attributes': {
'bibliographic': True
},
'relationships': {
'users': {
'data': {
'type': 'users'
}
}
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'A user ID or full name must be provided to add a contributor.')
def test_add_contributor_incorrect_target_id_in_relationships(self):
data = {
'data': {
'type': 'contributors',
'attributes': {
'bibliographic': True
},
'relationships': {
'users': {
'data': {
'type': 'users',
'id': '12345'
}
}
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
def test_add_contributor_no_type(self):
data = {
'data': {
'attributes': {
'bibliographic': True
},
'relationships': {
'users': {
'data': {
'id': self.user_two._id,
'type': 'users'
}
}
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['source']['pointer'], "/data/type")
def test_add_contributor_incorrect_type(self):
data = {
'data': {
'type': 'Incorrect type',
'attributes': {
'bibliographic': True
},
'relationships': {
'users': {
'data': {
'id': self.user_two._id,
'type': 'users'
}
}
}
}
}
res = self.app.post_json_api(self.public_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
@assert_logs(NodeLog.CONTRIB_ADDED, 'public_project')
def test_add_contributor_is_visible_by_default(self):
del self.data_user_two['data']['attributes']['bibliographic']
res = self.app.post_json_api(self.public_url, self.data_user_two, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], '{}-{}'.format(self.public_project._id, self.user_two._id))
self.public_project.reload()
assert_in(self.user_two, self.public_project.contributors)
assert_true(self.public_project.get_visible(self.user_two))
@assert_logs(NodeLog.CONTRIB_ADDED, 'public_project')
def test_adds_bibliographic_contributor_public_project_admin(self):
res = self.app.post_json_api(self.public_url, self.data_user_two, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], '{}-{}'.format(self.public_project._id, self.user_two._id))
self.public_project.reload()
assert_in(self.user_two, self.public_project.contributors)
@assert_logs(NodeLog.CONTRIB_ADDED, 'private_project')
def test_adds_non_bibliographic_contributor_private_project_admin(self):
data = {
'data': {
'type': 'contributors',
'attributes': {
'bibliographic': False
},
'relationships': {
'users': {
'data': {
'id': self.user_two._id,
'type': 'users'
}
}
}
}
}
res = self.app.post_json_api(self.private_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], '{}-{}'.format(self.private_project._id, self.user_two._id))
assert_equal(res.json['data']['attributes']['bibliographic'], False)
self.private_project.reload()
assert_in(self.user_two, self.private_project.contributors)
assert_false(self.private_project.get_visible(self.user_two))
def test_adds_contributor_public_project_non_admin(self):
self.public_project.add_contributor(self.user_two, permissions=[permissions.READ, permissions.WRITE], auth=Auth(self.user), save=True)
res = self.app.post_json_api(self.public_url, self.data_user_three,
auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
self.public_project.reload()
assert_not_in(self.user_three, self.public_project.contributors.all())
def test_adds_contributor_public_project_non_contributor(self):
res = self.app.post_json_api(self.public_url, self.data_user_three,
auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_not_in(self.user_three, self.public_project.contributors.all())
def test_adds_contributor_public_project_not_logged_in(self):
res = self.app.post_json_api(self.public_url, self.data_user_two, expect_errors=True)
assert_equal(res.status_code, 401)
assert_not_in(self.user_two, self.public_project.contributors.all())
@assert_logs(NodeLog.CONTRIB_ADDED, 'private_project')
def test_adds_contributor_private_project_admin(self):
res = self.app.post_json_api(self.private_url, self.data_user_two, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], '{}-{}'.format(self.private_project._id, self.user_two._id))
self.private_project.reload()
assert_in(self.user_two, self.private_project.contributors)
@assert_logs(NodeLog.CONTRIB_ADDED, 'private_project')
def test_adds_contributor_without_bibliographic_private_project_admin(self):
data = {
'data': {
'type': 'contributors',
'attributes': {
},
'relationships': {
'users': {
'data': {
'id': self.user_two._id,
'type': 'users'
}
}
}
}
}
res = self.app.post_json_api(self.private_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 201)
self.private_project.reload()
assert_in(self.user_two, self.private_project.contributors)
@assert_logs(NodeLog.CONTRIB_ADDED, 'private_project')
def test_adds_admin_contributor_private_project_admin(self):
data = {
'data': {
'type': 'contributors',
'attributes': {
'bibliographic': True,
'permission': permissions.ADMIN
},
'relationships': {
'users': {
'data': {
'id': self.user_two._id,
'type': 'users'
}
}
}
}
}
res = self.app.post_json_api(self.private_url, data, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], '{}-{}'.format(self.private_project._id, self.user_two._id))
self.private_project.reload()
assert_in(self.user_two, self.private_project.contributors)
assert_equal(self.private_project.get_permissions(self.user_two), [permissions.READ, permissions.WRITE, permissions.ADMIN])
@assert_logs(NodeLog.CONTRIB_ADDED, 'private_project')
def test_adds_write_contributor_private_project_admin(self):
data = {
'data': {
'type': 'contributors',
'attributes': {
'bibliographic': True,
'permission': permissions.WRITE
},
'relationships': {
'users': {
'data': {
'id': self.user_two._id,
'type': 'users'
}
}
}
}
}
res = self.app.post_json_api(self.private_url, data, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], '{}-{}'.format(self.private_project._id, self.user_two._id))
self.private_project.reload()
assert_in(self.user_two, self.private_project.contributors)
assert_equal(self.private_project.get_permissions(self.user_two), [permissions.READ, permissions.WRITE])
@assert_logs(NodeLog.CONTRIB_ADDED, 'private_project')
def test_adds_read_contributor_private_project_admin(self):
data = {
'data': {
'type': 'contributors',
'attributes': {
'bibliographic': True,
'permission': permissions.READ
},
'relationships': {
'users': {
'data': {
'id': self.user_two._id,
'type': 'users'
}
}
}
}
}
res = self.app.post_json_api(self.private_url, data, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['id'], '{}-{}'.format(self.private_project._id, self.user_two._id))
self.private_project.reload()
assert_in(self.user_two, self.private_project.contributors)
assert_equal(self.private_project.get_permissions(self.user_two), [permissions.READ])
def test_adds_invalid_permission_contributor_private_project_admin(self):
data = {
'data': {
'type': 'contributors',
'attributes': {
'bibliographic': True,
'permission': 'invalid',
},
'relationships': {
'users': {
'data': {
'id': self.user_two._id,
'type': 'users'
}
}
}
}
}
res = self.app.post_json_api(self.private_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
self.private_project.reload()
assert_not_in(self.user_two, self.private_project.contributors.all())
@assert_logs(NodeLog.CONTRIB_ADDED, 'private_project')
def test_adds_none_permission_contributor_private_project_admin_uses_default_permissions(self):
data = {
'data': {
'type': 'contributors',
'attributes': {
'bibliographic': True,
'permission': None
},
'relationships': {
'users': {
'data': {
'id': self.user_two._id,
'type': 'users'
}
}
}
}
}
res = self.app.post_json_api(self.private_url, data, auth=self.user.auth)
assert_equal(res.status_code, 201)
self.private_project.reload()
assert_in(self.user_two, self.private_project.contributors)
for permission in permissions.DEFAULT_CONTRIBUTOR_PERMISSIONS:
assert_true(self.private_project.has_permission(self.user_two, permission))
def test_adds_already_existing_contributor_private_project_admin(self):
self.private_project.add_contributor(self.user_two, auth=Auth(self.user), save=True)
self.private_project.reload()
res = self.app.post_json_api(self.private_url, self.data_user_two,
auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
def test_adds_non_existing_user_private_project_admin(self):
data = {
'data': {
'type': 'contributors',
'attributes': {
'bibliographic': True
},
'relationships': {
'users': {
'data': {
'id': 'FAKE',
'type': 'users'
}
}
}
}
}
res = self.app.post_json_api(self.private_url, data, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
self.private_project.reload()
assert_equal(len(self.private_project.contributors), 1)
def test_adds_contributor_private_project_non_admin(self):
self.private_project.add_contributor(self.user_two, permissions=[permissions.READ, permissions.WRITE], auth=Auth(self.user))
res = self.app.post_json_api(self.private_url, self.data_user_three,
auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
self.private_project.reload()
assert_not_in(self.user_three, self.private_project.contributors.all())
def test_adds_contributor_private_project_non_contributor(self):
res = self.app.post_json_api(self.private_url, self.data_user_three,
auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
self.private_project.reload()
assert_not_in(self.user_three, self.private_project.contributors.all())
def test_adds_contributor_private_project_not_logged_in(self):
res = self.app.post_json_api(self.private_url, self.data_user_two, expect_errors=True)
assert_equal(res.status_code, 401)
self.private_project.reload()
assert_not_in(self.user_two, self.private_project.contributors.all())
@assert_logs(NodeLog.CONTRIB_ADDED, 'public_project')
def test_add_unregistered_contributor_with_fullname(self):
payload = {
'data': {
'type': 'contributors',
'attributes': {
'full_name': 'John Doe',
}
}
}
res = self.app.post_json_api(self.public_url, payload, auth=self.user.auth)
self.public_project.reload()
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['attributes']['unregistered_contributor'], 'John Doe')
assert_equal(res.json['data']['attributes']['email'], None)
assert_in(res.json['data']['embeds']['users']['data']['id'],
self.public_project.contributors.values_list('guids___id', flat=True))
@assert_logs(NodeLog.CONTRIB_ADDED, 'public_project')
def test_add_contributor_with_fullname_and_email_unregistered_user(self):
payload = {
'data': {
'type': 'contributors',
'attributes': {
'full_name': 'John Doe',
'email': 'john@doe.com'
}
}
}
res = self.app.post_json_api(self.public_url, payload, auth=self.user.auth)
self.public_project.reload()
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['attributes']['unregistered_contributor'], 'John Doe')
assert_equal(res.json['data']['attributes']['email'], 'john@doe.com')
assert_equal(res.json['data']['attributes']['bibliographic'], True)
assert_equal(res.json['data']['attributes']['permission'], permissions.WRITE)
assert_in(
res.json['data']['embeds']['users']['data']['id'],
self.public_project.contributors.values_list('guids___id', flat=True)
)
@assert_logs(NodeLog.CONTRIB_ADDED, 'public_project')
def test_add_contributor_with_fullname_and_email_unregistered_user_set_attributes(self):
payload = {
'data': {
'type': 'contributors',
'attributes': {
'full_name': 'John Doe',
'email': 'john@doe.com',
'bibliographic': False,
'permission': permissions.READ
}
}
}
res = self.app.post_json_api(self.public_url, payload, auth=self.user.auth)
self.public_project.reload()
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['attributes']['unregistered_contributor'], 'John Doe')
assert_equal(res.json['data']['attributes']['email'], 'john@doe.com')
assert_equal(res.json['data']['attributes']['bibliographic'], False)
assert_equal(res.json['data']['attributes']['permission'], permissions.READ)
assert_in(res.json['data']['embeds']['users']['data']['id'],
self.public_project.contributors.values_list('guids___id', flat=True))
@assert_logs(NodeLog.CONTRIB_ADDED, 'public_project')
def test_add_contributor_with_fullname_and_email_registered_user(self):
user = UserFactory()
payload = {
'data': {
'type': 'contributors',
'attributes': {
'full_name': user.fullname,
'email': user.username
}
}
}
res = self.app.post_json_api(self.public_url, payload, auth=self.user.auth)
self.public_project.reload()
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['attributes']['unregistered_contributor'], None)
assert_equal(res.json['data']['attributes']['email'], user.username)
assert_in(res.json['data']['embeds']['users']['data']['id'],
self.public_project.contributors.values_list('guids___id', flat=True))
def test_add_unregistered_contributor_already_contributor(self):
name, email = fake.name(), fake.email()
self.public_project.add_unregistered_contributor(auth=Auth(self.user), fullname=name, email=email)
payload = {
'data': {
'type': 'contributors',
'attributes': {
'full_name': 'Doesn\'t Matter',
'email': email
}
}
}
res = self.app.post_json_api(self.public_url, payload, auth=self.user.auth, expect_errors=True)
self.public_project.reload()
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], '{} is already a contributor.'.format(name))
def test_add_contributor_user_is_deactivated(self):
user = UserFactory()
user.date_disabled = datetime.utcnow()
user.save()
payload = {
'data': {
'type': 'contributors',
'attributes': {},
'relationships': {
'users': {
'data': {
'type': 'users',
'id': user._id
}
}
}
}
}
res = self.app.post_json_api(self.public_url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Deactivated users cannot be added as contributors.')
def test_add_contributor_index_returned(self):
res = self.app.post_json_api(self.public_url, self.data_user_two, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['attributes']['index'], 1)
res = self.app.post_json_api(self.public_url, self.data_user_three, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['attributes']['index'], 2)
def test_add_contributor_set_index_out_of_range(self):
user_one = UserFactory()
self.public_project.add_contributor(user_one, save=True)
user_two = UserFactory()
self.public_project.add_contributor(user_two, save=True)
payload = {
'data': {
'type': 'contributors',
'attributes': {
'index': 4
},
'relationships': {
'users': {
'data': {
'type': 'users',
'id': self.user_two._id
}
}
}
}
}
res = self.app.post_json_api(self.public_url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'],
'4 is not a valid contributor index for node with id {}'.format(self.public_project._id))
def test_add_contributor_set_index_first(self):
user_one = UserFactory()
self.public_project.add_contributor(user_one, save=True)
user_two = UserFactory()
self.public_project.add_contributor(user_two, save=True)
payload = {
'data': {
'type': 'contributors',
'attributes': {
'index': 0
},
'relationships': {
'users': {
'data': {
'type': 'users',
'id': self.user_two._id
}
}
}
}
}
res = self.app.post_json_api(self.public_url, payload, auth=self.user.auth)
self.public_project.reload()
assert_equal(res.status_code, 201)
contributor_obj = self.public_project.contributor_set.get(user=self.user_two)
index = list(self.public_project.get_contributor_order()).index(contributor_obj.pk)
assert_equal(index, 0)
def test_add_contributor_set_index_last(self):
user_one = UserFactory()
self.public_project.add_contributor(user_one, save=True)
user_two = UserFactory()
self.public_project.add_contributor(user_two, save=True)
payload = {
'data': {
'type': 'contributors',
'attributes': {
'index': 3
},
'relationships': {
'users': {
'data': {
'type': 'users',
'id': self.user_two._id
}
}
}
}
}
res = self.app.post_json_api(self.public_url, payload, auth=self.user.auth)
self.public_project.reload()
assert_equal(res.status_code, 201)
contributor_obj = self.public_project.contributor_set.get(user=self.user_two)
index = list(self.public_project.get_contributor_order()).index(contributor_obj.pk)
assert_equal(index, 3)
def test_add_inactive_merged_user_as_contributor(self):
primary_user = UserFactory()
merged_user = UserFactory(merged_by=primary_user)
payload = {
'data': {
'type': 'contributors',
'attributes': {},
'relationships': {
'users': {
'data': {
'type': 'users',
'id': merged_user._id
}
}
}
}
}
res = self.app.post_json_api(self.public_url, payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
contributor_added = res.json['data']['embeds']['users']['data']['id']
assert_equal(contributor_added, primary_user._id)
class TestNodeContributorCreateValidation(NodeCRUDTestCase):
def setUp(self):
super(TestNodeContributorCreateValidation, self).setUp()
self.validate_data = NodeContributorsCreateSerializer.validate_data
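# exercise the serializer's validate_data directly with the different user_id / full_name / email combinations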
def test_add_contributor_validation_user_id(self):
self.validate_data(NodeContributorsCreateSerializer(), self.public_project, user_id='abcde')
def test_add_contributor_validation_user_id_fullname(self):
with assert_raises(Conflict):
self.validate_data(NodeContributorsCreateSerializer(), 'fake', user_id='abcde', full_name='Kanye')
def test_add_contributor_validation_user_id_email(self):
with assert_raises(Conflict):
self.validate_data(NodeContributorsCreateSerializer(), 'fake', user_id='abcde', email='kanye@west.com')
def test_add_contributor_validation_user_id_fullname_email(self):
with assert_raises(Conflict):
self.validate_data(NodeContributorsCreateSerializer(), 'fake', user_id='abcde', full_name='Kanye', email='kanye@west.com')
def test_add_contributor_validation_fullname(self):
self.validate_data(NodeContributorsCreateSerializer(), self.public_project, full_name='Kanye')
def test_add_contributor_validation_email(self):
with assert_raises(exceptions.ValidationError):
self.validate_data(NodeContributorsCreateSerializer(), 'fake', email='kanye@west.com')
def test_add_contributor_validation_fullname_email(self):
self.validate_data(NodeContributorsCreateSerializer(), self.public_project, full_name='Kanye', email='kanye@west.com')
class TestNodeContributorCreateEmail(NodeCRUDTestCase):
def setUp(self):
super(TestNodeContributorCreateEmail, self).setUp()
self.url = '/{}nodes/{}/contributors/'.format(API_BASE, self.public_project._id)
@mock.patch('framework.auth.views.mails.send_mail')
def test_add_contributor_no_email_if_false(self, mock_mail):
url = '{}?send_email=false'.format(self.url)
payload = {
'data': {
'type': 'contributors',
'attributes': {
'full_name': 'Kanye West',
'email': 'kanye@west.com'
}
}
}
res = self.app.post_json_api(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(mock_mail.call_count, 0)
@mock.patch('framework.auth.views.mails.send_mail')
def test_add_contributor_sends_email(self, mock_mail):
url = '{}?send_email=default'.format(self.url)
payload = {
'data': {
'type': 'contributors',
'attributes': {
},
'relationships': {
'users': {
'data': {
'type': 'users',
'id': self.user_two._id
}
}
}
}
}
res = self.app.post_json_api(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(mock_mail.call_count, 1)
@mock.patch('website.project.signals.contributor_added.send')
def test_add_contributor_signal_if_default(self, mock_send):
url = '{}?send_email=default'.format(self.url)
payload = {
'data': {
'type': 'contributors',
'attributes': {
},
'relationships': {
'users': {
'data': {
'type': 'users',
'id': self.user_two._id
}
}
}
}
}
res = self.app.post_json_api(url, payload, auth=self.user.auth)
args, kwargs = mock_send.call_args
assert_equal(res.status_code, 201)
assert_equal('default', kwargs['email_template'])
@mock.patch('website.project.signals.contributor_added.send')
def test_add_contributor_signal_if_preprint(self, mock_send):
url = '{}?send_email=preprint'.format(self.url)
payload = {
'data': {
'type': 'contributors',
'attributes': {
},
'relationships': {
'users': {
'data': {
'type': 'users',
'id': self.user_two._id
}
}
}
}
}
res = self.app.post_json_api(url, payload, auth=self.user.auth)
args, kwargs = mock_send.call_args
assert_equal(res.status_code, 201)
assert_equal('preprint', kwargs['email_template'])
@mock.patch('framework.auth.views.mails.send_mail')
def test_add_unregistered_contributor_sends_email(self, mock_mail):
url = '{}?send_email=default'.format(self.url)
payload = {
'data': {
'type': 'contributors',
'attributes': {
'full_name': 'Kanye West',
'email': 'kanye@west.com'
}
}
}
res = self.app.post_json_api(url, payload, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(mock_mail.call_count, 1)
@mock.patch('website.project.signals.unreg_contributor_added.send')
def test_add_unregistered_contributor_signal_if_default(self, mock_send):
url = '{}?send_email=default'.format(self.url)
payload = {
'data': {
'type': 'contributors',
'attributes': {
'full_name': 'Kanye West',
'email': 'kanye@west.com'
}
}
}
res = self.app.post_json_api(url, payload, auth=self.user.auth)
args, kwargs = mock_send.call_args
assert_equal(res.status_code, 201)
assert_equal('default', kwargs['email_template'])
@mock.patch('website.project.signals.unreg_contributor_added.send')
def test_add_unregistered_contributor_signal_if_preprint(self, mock_send):
url = '{}?send_email=preprint'.format(self.url)
payload = {
'data': {
'type': 'contributors',
'attributes': {
'full_name': 'Kanye West',
'email': 'kanye@west.com'
}
}
}
res = self.app.post_json_api(url, payload, auth=self.user.auth)
args, kwargs = mock_send.call_args
assert_equal(res.status_code, 201)
assert_equal('preprint', kwargs['email_template'])
@mock.patch('framework.auth.views.mails.send_mail')
def test_add_contributor_invalid_send_email_param(self, mock_mail):
url = '{}?send_email=true'.format(self.url)
payload = {
'data': {
'type': 'contributors',
'attributes': {
'full_name': 'Kanye West',
'email': 'kanye@west.com'
}
}
}
res = self.app.post_json_api(url, payload, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'true is not a valid email preference.')
assert_equal(mock_mail.call_count, 0)
@mock.patch('framework.auth.views.mails.send_mail')
def test_add_unregistered_contributor_without_email_no_email(self, mock_mail):
url = '{}?send_email=default'.format(self.url)
payload = {
'data': {
'type': 'contributors',
'attributes': {
'full_name': 'Kanye West',
}
}
}
with capture_signals() as mock_signal:
res = self.app.post_json_api(url, payload, auth=self.user.auth)
assert_in(contributor_added, mock_signal.signals_sent())
assert_equal(res.status_code, 201)
assert_equal(mock_mail.call_count, 0)
class TestNodeContributorBulkCreate(NodeCRUDTestCase):
def setUp(self):
super(TestNodeContributorBulkCreate, self).setUp()
self.user_three = AuthUserFactory()
self.private_url = '/{}nodes/{}/contributors/?send_email=false'.format(API_BASE, self.private_project._id)
self.public_url = '/{}nodes/{}/contributors/?send_email=false'.format(API_BASE, self.public_project._id)
self.payload_one = {
'type': 'contributors',
'attributes': {
'bibliographic': True,
'permission': "admin"
},
'relationships': {
'users': {
'data': {
'id': self.user_two._id,
'type': 'users'
}
}
}
}
self.payload_two = {
'type': 'contributors',
'attributes': {
'bibliographic': False,
'permission': "read"
},
'relationships': {
'users': {
'data': {
'id': self.user_three._id,
'type': 'users'
}
}
}
}
def test_bulk_create_contributors_blank_request(self):
res = self.app.post_json_api(self.public_url, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
def test_node_contributor_bulk_create_contributor_exists(self):
self.public_project.add_contributor(self.user_two, permissions=[permissions.READ], visible=True, save=True)
res = self.app.post_json_api(self.public_url, {'data': [self.payload_two, self.payload_one]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert 'is already a contributor' in res.json['errors'][0]['detail']
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 2)
def test_node_contributor_bulk_create_logged_out_public_project(self):
res = self.app.post_json_api(self.public_url, {'data': [self.payload_one, self.payload_two]},
expect_errors=True, bulk=True)
assert_equal(res.status_code, 401)
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 1)
def test_node_contributor_bulk_create_logged_in_public_project_project(self):
res = self.app.post_json_api(self.public_url, {'data': [self.payload_one, self.payload_two]},
auth=self.user.auth, bulk=True)
assert_equal(res.status_code, 201)
assert_items_equal([res.json['data'][0]['attributes']['bibliographic'], res.json['data'][1]['attributes']['bibliographic']],
[True, False])
assert_items_equal([res.json['data'][0]['attributes']['permission'], res.json['data'][1]['attributes']['permission']],
['admin', 'read'])
assert_equal(res.content_type, 'application/vnd.api+json')
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 3)
def test_node_contributor_bulk_create_logged_out_private_project(self):
res = self.app.post_json_api(self.private_url, {'data': [self.payload_one, self.payload_two]},
expect_errors=True, bulk=True)
assert_equal(res.status_code, 401)
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 1)
def test_node_contributor_bulk_create_logged_in_contrib_private_project(self):
res = self.app.post_json_api(self.private_url, {'data': [self.payload_one, self.payload_two]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 201)
assert_equal(len(res.json['data']), 2)
assert_items_equal([res.json['data'][0]['attributes']['bibliographic'], res.json['data'][1]['attributes']['bibliographic']],
[True, False])
assert_items_equal([res.json['data'][0]['attributes']['permission'], res.json['data'][1]['attributes']['permission']],
['admin', 'read'])
assert_equal(res.content_type, 'application/vnd.api+json')
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 3)
def test_node_contributor_bulk_create_logged_in_non_contrib_private_project(self):
res = self.app.post_json_api(self.private_url, {'data': [self.payload_one, self.payload_two]},
auth=self.user_two.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 1)
def test_node_contributor_bulk_create_logged_in_read_only_contrib_private_project(self):
self.private_project.add_contributor(self.user_two, permissions=[permissions.READ], save=True)
res = self.app.post_json_api(self.private_url, {'data': [self.payload_two]},
auth=self.user_two.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 1)
def test_node_contributor_bulk_create_all_or_nothing(self):
invalid_id_payload = {
'type': 'contributors',
'attributes': {
'bibliographic': True
},
'relationships': {
'users': {
'data': {
'type': 'users',
'id': '12345'
}
}
}
}
res = self.app.post_json_api(self.public_url, {'data': [self.payload_one, invalid_id_payload]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 404)
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 1)
def test_node_contributor_bulk_create_limits(self):
node_contrib_create_list = {'data': [self.payload_one] * 101}
res = self.app.post_json_api(self.public_url, node_contrib_create_list,
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.json['errors'][0]['detail'], 'Bulk operation limit is 100, got 101.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data')
def test_node_contributor_ugly_payload(self):
payload = 'sdf;jlasfd'
res = self.app.post_json_api(self.public_url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Malformed request.')
def test_node_contributor_bulk_create_invalid_permissions_all_or_nothing(self):
payload = {
'type': 'contributors',
'attributes': {
'permission': 'super-user',
'bibliographic': True
},
'relationships': {
'users': {
'data': {
'type': 'users',
'id': self.user_two._id
}
}
}
}
payload = {'data': [self.payload_two, payload]}
res = self.app.post_json_api(self.public_url, payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 1)
class TestNodeContributorBulkUpdate(NodeCRUDTestCase):
def setUp(self):
super(TestNodeContributorBulkUpdate, self).setUp()
self.user_three = AuthUserFactory()
self.user_four = AuthUserFactory()
self.public_project.add_contributor(self.user_two, permissions=[permissions.READ], visible=True, save=True)
self.public_project.add_contributor(self.user_three, permissions=[permissions.READ], visible=True, save=True)
self.private_project.add_contributor(self.user_two, permissions=[permissions.READ], visible=True, save=True)
self.private_project.add_contributor(self.user_three, permissions=[permissions.READ], visible=True, save=True)
self.private_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.private_project._id)
self.public_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.public_project._id)
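# bulk-update payloads identify each contributor by the "<node_id>-<user_id>" id built by make_contrib_id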
self.payload_one_public = {
'id': make_contrib_id(self.public_project._id, self.user_two._id),
'type': 'contributors',
'attributes': {
'bibliographic': True,
'permission': "admin"
}
}
self.payload_one_private = {
'id': make_contrib_id(self.private_project._id, self.user_two._id),
'type': 'contributors',
'attributes': {
'bibliographic': True,
'permission': "admin"
}
}
self.payload_two_public = {
'id': make_contrib_id(self.public_project._id, self.user_three._id),
'type': 'contributors',
'attributes': {
'bibliographic': False,
'permission': "write"
}
}
self.payload_two_private = {
'id': make_contrib_id(self.private_project._id, self.user_three._id),
'type': 'contributors',
'attributes': {
'bibliographic': False,
'permission': "write"
}
}
def test_bulk_update_contributors_blank_request(self):
res = self.app.patch_json_api(self.public_url, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
def test_bulk_update_contributors_dict_instead_of_list(self):
res = self.app.put_json_api(self.public_url, {'data': self.payload_one_public},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
def test_bulk_update_contributors_public_project_one_not_found(self):
invalid_id = {
'id': '12345-abcde',
'type': 'contributors',
'attributes': {}
}
empty_payload = {'data': [invalid_id, self.payload_one_public]}
res = self.app.put_json_api(self.public_url, empty_payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Could not find all objects to update.')
res = self.app.get(self.public_url)
data = res.json['data']
assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
['admin', 'read', 'read'] )
def test_bulk_update_contributors_public_projects_logged_out(self):
res = self.app.put_json_api(self.public_url, {'data': [self.payload_one_public, self.payload_two_public]},
expect_errors=True, bulk=True)
assert_equal(res.status_code, 401)
res = self.app.get(self.public_url, auth=self.user.auth)
data = res.json['data']
assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
['admin', 'read', 'read'])
def test_bulk_update_contributors_public_projects_logged_in(self):
res = self.app.put_json_api(self.public_url, {'data': [self.payload_one_public, self.payload_two_public]},
auth=self.user.auth, bulk=True)
assert_equal(res.status_code, 200)
data = res.json['data']
assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission']],
['admin', 'write'])
def test_bulk_update_contributors_private_projects_logged_out(self):
res = self.app.put_json_api(self.private_url, {'data': [self.payload_one_private, self.payload_two_private]},
expect_errors=True, bulk=True)
assert_equal(res.status_code, 401)
res = self.app.get(self.private_url, auth=self.user.auth)
data = res.json['data']
assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
['admin', 'read', 'read'])
def test_bulk_update_contributors_private_projects_logged_in_contrib(self):
res = self.app.put_json_api(self.private_url, {'data': [self.payload_one_private, self.payload_two_private]},
auth=self.user.auth, bulk=True)
assert_equal(res.status_code, 200)
data = res.json['data']
assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission']],
['admin', 'write'])
def test_bulk_update_contributors_private_projects_logged_in_non_contrib(self):
res = self.app.put_json_api(self.private_url, {'data': [self.payload_one_private, self.payload_two_private]},
auth=self.user_four.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
res = self.app.get(self.private_url, auth=self.user.auth)
data = res.json['data']
assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
['admin', 'read', 'read'])
def test_bulk_update_contributors_private_projects_logged_in_read_only_contrib(self):
res = self.app.put_json_api(self.private_url, {'data': [self.payload_one_private, self.payload_two_private]},
auth=self.user_two.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
res = self.app.get(self.private_url, auth=self.user.auth)
data = res.json['data']
assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
['admin', 'read', 'read'])
def test_bulk_update_contributors_projects_send_dictionary_not_list(self):
res = self.app.put_json_api(self.public_url, {'data': self.payload_one_public},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Expected a list of items but got type "dict".')
def test_bulk_update_contributors_id_not_supplied(self):
res = self.app.put_json_api(self.public_url, {'data': [{'type': 'contributors', 'attributes': {}}]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], "Contributor identifier not provided.")
def test_bulk_update_contributors_type_not_supplied(self):
res = self.app.put_json_api(self.public_url, {'data': [{'id': make_contrib_id(self.public_project._id, self.user_two._id), 'attributes': {}}]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/0/type')
assert_equal(res.json['errors'][0]['detail'], "This field may not be null.")
def test_bulk_update_contributors_wrong_type(self):
invalid_type = {
'id': make_contrib_id(self.public_project._id, self.user_two._id),
'type': 'Wrong type.',
'attributes': {}
}
res = self.app.put_json_api(self.public_url, {'data': [invalid_type]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 409)
def test_bulk_update_contributors_invalid_id_format(self):
invalid_id = {
'id': '12345',
'type': 'contributors',
'attributes': {}
}
res = self.app.put_json_api(self.public_url, {'data': [invalid_id]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Contributor identifier incorrectly formatted.')
def test_bulk_update_contributors_wrong_id(self):
invalid_id = {
'id': '12345-abcde',
'type': 'contributors',
'attributes': {}
}
res = self.app.put_json_api(self.public_url, {'data': [invalid_id]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Could not find all objects to update.')
def test_bulk_update_contributors_limits(self):
contrib_update_list = {'data': [self.payload_one_public] * 101}
res = self.app.put_json_api(self.public_url, contrib_update_list, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.json['errors'][0]['detail'], 'Bulk operation limit is 100, got 101.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data')
def test_bulk_update_contributors_invalid_permissions(self):
res = self.app.put_json_api(self.public_url, {'data': [self.payload_two_public, {'id': make_contrib_id(self.public_project._id, self.user_two._id), 'type': 'contributors', 'attributes': {'permission': 'super-user'}}]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], '"super-user" is not a valid choice.')
res = self.app.get(self.public_url, auth=self.user.auth)
data = res.json['data']
assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
['admin', 'read', 'read'])
def test_bulk_update_contributors_invalid_bibliographic(self):
res = self.app.put_json_api(self.public_url, {'data': [self.payload_two_public, {'id': make_contrib_id(self.public_project._id, self.user_two._id), 'type': 'contributors', 'attributes': {'bibliographic': 'true and false'}}]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], '"true and false" is not a valid boolean.')
res = self.app.get(self.public_url, auth=self.user.auth)
data = res.json['data']
assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
['admin', 'read', 'read'])
def test_bulk_update_contributors_must_have_at_least_one_bibliographic_contributor(self):
res = self.app.put_json_api(self.public_url, {'data': [self.payload_two_public,
{'id': make_contrib_id(self.public_project._id, self.user._id), 'type': 'contributors',
'attributes': {'permission': 'admin', 'bibliographic': False}},
{'id': make_contrib_id(self.public_project._id, self.user_two._id), 'type': 'contributors',
'attributes': {'bibliographic': False}}
]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Must have at least one visible contributor')
def test_bulk_update_contributors_must_have_at_least_one_admin(self):
res = self.app.put_json_api(self.public_url, {'data': [self.payload_two_public,
{'id': make_contrib_id(self.public_project._id, self.user._id), 'type': 'contributors',
'attributes': {'permission': 'read'}}]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], '{} is the only admin.'.format(self.user.fullname))
class TestNodeContributorBulkPartialUpdate(NodeCRUDTestCase):
def setUp(self):
super(TestNodeContributorBulkPartialUpdate, self).setUp()
self.user_three = AuthUserFactory()
self.user_four = AuthUserFactory()
self.public_project.add_contributor(self.user_two, permissions=[permissions.READ], visible=True, save=True)
self.public_project.add_contributor(self.user_three, permissions=[permissions.READ], visible=True, save=True)
self.private_project.add_contributor(self.user_two, permissions=[permissions.READ], visible=True, save=True)
self.private_project.add_contributor(self.user_three, permissions=[permissions.READ], visible=True, save=True)
self.private_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.private_project._id)
self.public_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.public_project._id)
self.public_payload_one = {
'id': make_contrib_id(self.public_project._id, self.user_two._id),
'type': 'contributors',
'attributes': {
'bibliographic': True,
'permission': "admin"
}
}
self.private_payload_one = {
'id': make_contrib_id(self.private_project._id, self.user_two._id),
'type': 'contributors',
'attributes': {
'bibliographic': True,
'permission': "admin"
}
}
self.public_payload_two = {
'id': make_contrib_id(self.public_project._id, self.user_three._id),
'type': 'contributors',
'attributes': {
'bibliographic': False,
'permission': "write"
}
}
self.private_payload_two = {
'id': make_contrib_id(self.private_project._id, self.user_three._id),
'type': 'contributors',
'attributes': {
'bibliographic': False,
'permission': "write"
}
}
def test_bulk_partial_update_contributors_blank_request(self):
res = self.app.patch_json_api(self.public_url, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
def test_bulk_partial_update_contributors_public_project_one_not_found(self):
invalid_id = {
'id': '12345-abcde',
'type': 'contributors',
'attributes': {}
}
empty_payload = {'data': [invalid_id, self.public_payload_one]}
res = self.app.patch_json_api(self.public_url, empty_payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Could not find all objects to update.')
res = self.app.get(self.public_url)
data = res.json['data']
assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
['admin', 'read', 'read'] )
def test_bulk_partial_update_contributors_public_projects_logged_out(self):
res = self.app.patch_json_api(self.public_url,
{'data': [self.public_payload_one, self.public_payload_two]}, bulk=True, expect_errors=True)
assert_equal(res.status_code, 401)
res = self.app.get(self.public_url, auth=self.user.auth)
data = res.json['data']
assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
['admin', 'read', 'read'])
def test_bulk_partial_update_contributors_public_projects_logged_in(self):
res = self.app.patch_json_api(self.public_url, {'data': [self.public_payload_one, self.public_payload_two]},
auth=self.user.auth, bulk=True)
assert_equal(res.status_code, 200)
data = res.json['data']
assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission']],
['admin', 'write'])
def test_bulk_partial_update_contributors_private_projects_logged_out(self):
res = self.app.patch_json_api(self.private_url, {'data': [self.private_payload_one, self.private_payload_two]},
expect_errors=True, bulk=True)
assert_equal(res.status_code, 401)
res = self.app.get(self.private_url, auth=self.user.auth)
data = res.json['data']
assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
['admin', 'read', 'read'])
def test_bulk_partial_update_contributors_private_projects_logged_in_contrib(self):
res = self.app.patch_json_api(self.private_url, {'data': [self.private_payload_one, self.private_payload_two]},
auth=self.user.auth, bulk=True)
assert_equal(res.status_code, 200)
data = res.json['data']
assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission']],
['admin', 'write'])
def test_bulk_partial_update_contributors_private_projects_logged_in_non_contrib(self):
res = self.app.patch_json_api(self.private_url, {'data': [self.private_payload_one, self.private_payload_two]},
auth=self.user_four.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
res = self.app.get(self.private_url, auth=self.user.auth)
data = res.json['data']
assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
['admin', 'read', 'read'])
def test_bulk_partial_update_contributors_private_projects_logged_in_read_only_contrib(self):
res = self.app.patch_json_api(self.private_url, {'data': [self.private_payload_one, self.private_payload_two]},
auth=self.user_two.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
res = self.app.get(self.private_url, auth=self.user.auth)
data = res.json['data']
assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
['admin', 'read', 'read'])
def test_bulk_partial_update_contributors_projects_send_dictionary_not_list(self):
res = self.app.patch_json_api(self.public_url, {'data': self.public_payload_one},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Expected a list of items but got type "dict".')
def test_bulk_partial_update_contributors_id_not_supplied(self):
res = self.app.patch_json_api(self.public_url, {'data': [{'type': 'contributors', 'attributes': {}}]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['detail'], "Contributor identifier not provided.")
def test_bulk_partial_update_contributors_type_not_supplied(self):
res = self.app.patch_json_api(self.public_url, {'data': [{'id': make_contrib_id(self.public_project._id, self.user_two._id), 'attributes': {}}]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(len(res.json['errors']), 1)
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/0/type')
assert_equal(res.json['errors'][0]['detail'], "This field may not be null.")
def test_bulk_partial_update_contributors_wrong_type(self):
invalid_type = {
'id': make_contrib_id(self.public_project._id, self.user_two._id),
'type': 'Wrong type.',
'attributes': {}
}
res = self.app.patch_json_api(self.public_url, {'data': [invalid_type]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 409)
def test_bulk_partial_update_contributors_wrong_id(self):
invalid_id = {
'id': '12345-abcde',
'type': 'contributors',
'attributes': {}
}
res = self.app.patch_json_api(self.public_url, {'data': [invalid_id]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Could not find all objects to update.')
def test_bulk_partial_update_contributors_limits(self):
contrib_update_list = {'data': [self.public_payload_one] * 101}
res = self.app.patch_json_api(self.public_url, contrib_update_list, auth=self.user.auth,
expect_errors=True, bulk=True)
assert_equal(res.json['errors'][0]['detail'], 'Bulk operation limit is 100, got 101.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data')
def test_bulk_partial_update_invalid_permissions(self):
res = self.app.patch_json_api(self.public_url, {'data': [self.public_payload_two, {'id': make_contrib_id(self.public_project._id, self.user_two._id), 'type': 'contributors', 'attributes': {'permission': 'super-user'}}]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], '"super-user" is not a valid choice.')
res = self.app.get(self.public_url, auth=self.user.auth)
data = res.json['data']
assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
['admin', 'read', 'read'])
def test_bulk_partial_update_invalid_bibliographic(self):
res = self.app.patch_json_api(self.public_url, {'data': [self.public_payload_two, {'id': make_contrib_id(self.public_project._id, self.user_two._id), 'type': 'contributors', 'attributes': {'bibliographic': 'true and false'}}]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], '"true and false" is not a valid boolean.')
res = self.app.get(self.public_url, auth=self.user.auth)
data = res.json['data']
assert_items_equal([data[0]['attributes']['permission'], data[1]['attributes']['permission'], data[2]['attributes']['permission']],
['admin', 'read', 'read'])
class TestNodeContributorBulkDelete(NodeCRUDTestCase):
def setUp(self):
super(TestNodeContributorBulkDelete, self).setUp()
self.user_three = AuthUserFactory()
self.user_four = AuthUserFactory()
self.public_project.add_contributor(self.user_two, permissions=[permissions.READ], visible=True, save=True)
self.public_project.add_contributor(self.user_three, permissions=[permissions.READ], visible=True, save=True)
self.private_project.add_contributor(self.user_two, permissions=[permissions.READ], visible=True, save=True)
self.private_project.add_contributor(self.user_three, permissions=[permissions.READ], visible=True, save=True)
self.private_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.private_project._id)
self.public_url = '/{}nodes/{}/contributors/'.format(API_BASE, self.public_project._id)
self.public_payload_one = {
'id': make_contrib_id(self.public_project._id, self.user_two._id),
'type': 'contributors'
}
self.private_payload_one = {
'id': make_contrib_id(self.private_project._id, self.user_two._id),
'type': 'contributors',
}
self.public_payload_two = {
'id': make_contrib_id(self.public_project._id, self.user_three._id),
'type': 'contributors'
}
self.private_payload_two = {
'id': make_contrib_id(self.private_project._id, self.user_three._id),
'type': 'contributors',
}
def test_bulk_delete_contributors_blank_request(self):
res = self.app.delete_json_api(self.public_url, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
def test_bulk_delete_invalid_id_format(self):
res = self.app.delete_json_api(self.public_url, {'data': [{'id': '12345', 'type':'contributors'}]}, auth=self.user.auth,
expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Contributor identifier incorrectly formatted.')
def test_bulk_delete_invalid_id(self):
res = self.app.delete_json_api(self.public_url, {'data': [{'id': '12345-abcde', 'type':'contributors'}]}, auth=self.user.auth,
expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Could not find all objects to delete.')
def test_bulk_delete_non_contributor(self):
res = self.app.delete_json_api(self.public_url, {'data': [{'id': make_contrib_id(self.public_project._id, self.user_four._id), 'type':'contributors'}]}, auth=self.user.auth,
expect_errors=True, bulk=True)
assert_equal(res.status_code, 404)
def test_bulk_delete_all_contributors(self):
res = self.app.delete_json_api(self.public_url, {'data': [self.public_payload_one, self.public_payload_two,
{'id': make_contrib_id(self.public_project._id, self.user._id), 'type': 'contributors'}]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_in(res.json['errors'][0]['detail'], ['Must have at least one registered admin contributor',
'Must have at least one visible contributor'])
self.public_project.reload()
assert_equal(len(self.public_project.contributors), 3)
def test_bulk_delete_contributors_no_id(self):
res = self.app.delete_json_api(self.public_url, {'data': [{'type': 'contributors'}]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Request must include /data/id.')
def test_bulk_delete_contributors_no_type(self):
res = self.app.delete_json_api(self.public_url, {'data': [{'id': make_contrib_id(self.public_project._id, self.user_two._id)}]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Request must include /type.')
def test_bulk_delete_contributors_invalid_type(self):
res = self.app.delete_json_api(self.public_url, {'data': [{'type': 'Wrong type', 'id': make_contrib_id(self.public_project._id, self.user_two._id)}]},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 409)
def test_bulk_delete_dict_inside_data(self):
res = self.app.delete_json_api(self.public_url, {'data': {'id': make_contrib_id(self.public_project._id, self.user_two._id), 'type': 'contributors'}},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Expected a list of items but got type "dict".')
def test_bulk_delete_contributors_public_project_logged_in(self):
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 3)
# Disconnect contributor_removed so that we don't check in files
# We can remove this when StoredFileNode is implemented in osf-models
with disconnected_from_listeners(contributor_removed):
res = self.app.delete_json_api(self.public_url, {'data': [self.public_payload_one, self.public_payload_two]},
auth=self.user.auth, bulk=True)
assert_equal(res.status_code, 204)
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 1)
def test_bulk_delete_contributors_public_projects_logged_out(self):
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 3)
res = self.app.delete_json_api(self.public_url, {'data': [self.public_payload_one, self.public_payload_two]},
expect_errors=True, bulk=True)
assert_equal(res.status_code, 401)
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 3)
def test_bulk_delete_contributors_private_projects_logged_in_contributor(self):
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 3)
# Disconnect contributor_removed so that we don't check in files
# We can remove this when StoredFileNode is implemented in osf-models
with disconnected_from_listeners(contributor_removed):
res = self.app.delete_json_api(self.private_url, {'data': [self.private_payload_one, self.private_payload_two]},
auth=self.user.auth, bulk=True)
assert_equal(res.status_code, 204)
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 1)
def test_bulk_delete_contributors_private_projects_logged_out(self):
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 3)
res = self.app.delete_json_api(self.private_url, {'data': [self.private_payload_one, self.private_payload_two]},
expect_errors=True, bulk=True)
assert_equal(res.status_code, 401)
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 3)
def test_bulk_delete_contributors_private_projects_logged_in_non_contributor(self):
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 3)
res = self.app.delete_json_api(self.private_url, {'data': [self.private_payload_one, self.private_payload_two]},
auth=self.user_four.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 3)
def test_bulk_delete_contributors_private_projects_logged_in_read_only_contributor(self):
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 3)
res = self.app.delete_json_api(self.private_url, {'data': [self.private_payload_one, self.private_payload_two]},
auth=self.user_two.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 3)
def test_bulk_delete_contributors_all_or_nothing(self):
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 3)
invalid_id = {
'id': '12345-abcde',
'type': 'contributors',
}
new_payload = {'data': [self.public_payload_one, invalid_id]}
res = self.app.delete_json_api(self.public_url, new_payload, auth=self.user.auth,
expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Could not find all objects to delete.')
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 3)
def test_bulk_delete_contributors_limits(self):
new_payload = {'data': [self.public_payload_one] * 101 }
res = self.app.delete_json_api(self.public_url, new_payload, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Bulk operation limit is 100, got 101.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data')
def test_bulk_delete_contributors_no_payload(self):
res = self.app.delete_json_api(self.public_url, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
| Nesiehr/osf.io | api_tests/nodes/views/test_node_contributors_list.py | Python | apache-2.0 | 95,975 | 0.003813 |
"""Support for XS1 switches."""
import logging
from homeassistant.helpers.entity import ToggleEntity
from . import ACTUATORS, DOMAIN as COMPONENT_DOMAIN, XS1DeviceEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the XS1 switch platform."""
from xs1_api_client.api_constants import ActuatorType
actuators = hass.data[COMPONENT_DOMAIN][ACTUATORS]
switch_entities = []
for actuator in actuators:
if (actuator.type() == ActuatorType.SWITCH) or \
(actuator.type() == ActuatorType.DIMMER):
switch_entities.append(XS1SwitchEntity(actuator))
async_add_entities(switch_entities)
class XS1SwitchEntity(XS1DeviceEntity, ToggleEntity):
    """Representation of an XS1 switch actuator."""
@property
def name(self):
"""Return the name of the device if any."""
return self.device.name()
@property
def is_on(self):
"""Return true if switch is on."""
return self.device.value() == 100
def turn_on(self, **kwargs):
"""Turn the device on."""
self.device.turn_on()
def turn_off(self, **kwargs):
"""Turn the device off."""
self.device.turn_off()
| jnewland/home-assistant | homeassistant/components/xs1/switch.py | Python | apache-2.0 | 1,288 | 0 |
import unittest
from application.caches.local_cache import LocalCache
class LocalCacheTest(unittest.TestCase):
def setUp(self):
self.cache = LocalCache()
def tearDown(self):
pass
def test_add_element(self):
self.cache.add("test.key", "test.value")
self.assertEqual(self.cache.get("test.key"), "test.value")
if __name__ == "__main__":
unittest.main() | andrew749/andrew749.github.io | application/caches/test/test_local_cache.py | Python | apache-2.0 | 367 | 0.029973 |
#!/usr/bin/env python3
'''Khronos OpenVG parent image binding for EGL.
http://www.khronos.org/registry/egl/extensions/KHR/EGL_KHR_vg_parent_image.txt
'''
# Copyright © 2012-13 Tim Pederick.
#
# This file is part of Pegl.
#
# Pegl is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pegl is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pegl. If not, see <http://www.gnu.org/licenses/>.
# Local imports.
from .khr_image import Image
# Extension image target type.
Image.extend('EGL_KHR_vg_parent_image', {'VG_PARENT_IMAGE': 0x30BA})
| perey/pegl | src/pegl/ext/khr_vgimage.py | Python | gpl-3.0 | 983 | 0 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from sphinx_celery import conf
globals().update(conf.build_config(
'kombu', __file__,
project='Kombu',
version_dev='4.3',
version_stable='4.2',
canonical_url='http://docs.kombu.me',
webdomain='kombu.me',
github_project='celery/kombu',
author='Ask Solem & contributors',
author_name='Ask Solem',
copyright='2009-2016',
publisher='Celery Project',
html_logo='images/kombusmall.jpg',
html_favicon='images/favicon.ico',
html_prepend_sidebars=['sidebardonations.html'],
extra_extensions=['sphinx.ext.napoleon'],
apicheck_ignore_modules=[
'kombu.entity',
'kombu.messaging',
'kombu.asynchronous.aws.ext',
'kombu.asynchronous.aws.sqs.ext',
'kombu.transport.qpid_patches',
'kombu.utils',
'kombu.transport.virtual.base',
],
))
| pexip/os-kombu | docs/conf.py | Python | bsd-3-clause | 924 | 0 |
from graph.graph_server import GraphServer
__all__ = ['GraphServer']
| AndreasMadsen/bachelor-code | visualizer/__init__.py | Python | mit | 71 | 0 |
# Time-stamp: <2019-09-25 10:04:48 taoliu>
"""Description: Fine-tuning script to call broad peaks from a single
bedGraph track for scores.
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file LICENSE included with
the distribution).
"""
# ------------------------------------
# python modules
# ------------------------------------
import sys
import os
import logging
from MACS2.IO import BedGraphIO
# ------------------------------------
# constants
# ------------------------------------
logging.basicConfig(level=20,
format='%(levelname)-5s @ %(asctime)s: %(message)s ',
datefmt='%a, %d %b %Y %H:%M:%S',
stream=sys.stderr,
filemode="w"
)
# ------------------------------------
# Misc functions
# ------------------------------------
error = logging.critical # function alias
warn = logging.warning
debug = logging.debug
info = logging.info
# ------------------------------------
# Classes
# ------------------------------------
# ------------------------------------
# Main function
# ------------------------------------
def run( options ):
info("Read and build bedGraph...")
bio = BedGraphIO.bedGraphIO(options.ifile)
btrack = bio.build_bdgtrack(baseline_value=0)
info("Call peaks from bedGraph...")
bpeaks = btrack.call_broadpeaks (lvl1_cutoff=options.cutoffpeak, lvl2_cutoff=options.cutofflink, min_length=options.minlen, lvl1_max_gap=options.lvl1maxgap, lvl2_max_gap=options.lvl2maxgap)
info("Write peaks...")
if options.ofile:
bf = open( os.path.join( options.outdir, options.ofile ), "w" )
options.oprefix = options.ofile
else:
bf = open ( os.path.join( options.outdir, "%s_c%.1f_C%.2f_l%d_g%d_G%d_broad.bed12" % (options.oprefix,options.cutoffpeak,options.cutofflink,options.minlen,options.lvl1maxgap,options.lvl2maxgap)), "w" )
bpeaks.write_to_gappedPeak(bf, name_prefix=(options.oprefix+"_broadRegion").encode(), score_column="score", trackline=options.trackline)
info("Done")
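if __name__ == '__main__':
    # Example only -- not part of the original module. MACS2 normally builds
    # 'options' with its own argument parser; this sketch fakes the attributes
    # that run() reads, using a placeholder input file and default-like cutoffs.
    from argparse import Namespace
    demo_options = Namespace(
        ifile='scores.bdg',            # placeholder bedGraph of scores
        cutoffpeak=2.0, cutofflink=1.0,
        minlen=200, lvl1maxgap=30, lvl2maxgap=800,
        ofile=None, outdir='.', oprefix='demo', trackline=False)
    run(demo_options)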
| taoliu/MACS | MACS2/bdgbroadcall_cmd.py | Python | bsd-3-clause | 2,141 | 0.01121 |
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import lib.images_rc
import sys
class QImportWiz(QDialog):
#
# private
#
    def _getCheckableList(self, sheets):
        """Return a QStandardItemModel listing the sheets in three
        columns (sheet name, number of rows, number of columns).
        """
model = QStandardItemModel(len(sheets), 3)
model.setHeaderData(0, Qt.Horizontal, self.tr('Sheet'))
model.setHeaderData(1, Qt.Horizontal, self.tr('Rows'))
model.setHeaderData(2, Qt.Horizontal, self.tr('Columns'))
for index, value in enumerate(sheets):
# get data
key = value[1]
numRows = value[2]
numColumns = value[3]
rowEnabled = numRows*numColumns>0
# key column
item = QStandardItem(key)
if len(sheets) == 1:
check = Qt.Checked
else:
check = Qt.Unchecked
item.setCheckState(check)
item.setCheckable(True)
item.setEditable (False)
item.setSelectable (False)
item.setEnabled(rowEnabled)
item.setData(key)
model.setItem (index, 0, item)
# num rows column
            item = QStandardItem(str(numRows))
item.setEditable (False)
item.setSelectable (False)
item.setEnabled(rowEnabled)
item.setData(key)
model.setItem (index, 1, item)
# num columns column
            item = QStandardItem(str(numColumns))
item.setEditable (False)
item.setSelectable (False)
item.setEnabled(rowEnabled)
item.setData(key)
model.setItem(index, 2, item)
return model
def _viewClicked(self):
sheets = self.sheets()
self.acceptButton.setEnabled(bool(sheets))
#
# public
#
    def sheets(self):
        """Return the list of keys of the sheets checked in the dialog."""
selects = []
for index in range(self.model.rowCount()):
item = self.model.item(index)
if item.checkState() == Qt.Checked:
key = item.data().toString()
selects.append(str(key))
return selects
#
# init
#
def __init__(self, sheets, *args):
QDialog.__init__ (self, *args)
self.acceptButton = QPushButton(self.tr('Accept'), self)
self.acceptButton.setIcon(QIcon(':images/accept.png'))
self.cancelButton = QPushButton(self.tr('Cancel'), self)
self.cancelButton.setIcon(QIcon(':images/cancel.png'))
buttonBox = QDialogButtonBox()
buttonBox.addButton(self.acceptButton, QDialogButtonBox.AcceptRole)
buttonBox.addButton(self.cancelButton, QDialogButtonBox.RejectRole)
buttonBox.accepted.connect(lambda: self.accept())
buttonBox.rejected.connect(lambda: self.reject())
self.model = self._getCheckableList(sheets)
view = QTreeView()
view.setRootIsDecorated(False)
view.setModel(self.model)
view.resizeColumnToContents(0)
view.resizeColumnToContents(1)
view.resizeColumnToContents(2)
view.clicked.connect(self._viewClicked)
self._viewClicked()
vbox = QVBoxLayout()
vbox.addWidget(view)
vbox.addWidget(buttonBox)
self.setLayout(vbox)
self.setWindowTitle(self.tr('Import Excel'))
self.setMinimumSize(300, 250)
self.resize(300, 250)
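if __name__ == '__main__':
    # Example only -- not part of the original module. A minimal sketch of how
    # the dialog could be driven; each sheet tuple follows the layout read by
    # _getCheckableList (index 1: key, 2: rows, 3: columns), and the element at
    # index 0 is just a placeholder here.
    app = QApplication(sys.argv)
    demo_sheets = [(None, 'Sheet1', 120, 8), (None, 'Sheet2', 0, 0)]
    dialog = QImportWiz(demo_sheets)
    if dialog.exec_() == QDialog.Accepted:
        print(dialog.sheets())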
| csvtools/csvtools | src/widgets/importwiz.py | Python | gpl-3.0 | 3,525 | 0.003972 |
import collections
import json
import re
from dcos import util
from dcos.errors import DCOSException
logger = util.get_logger(__name__)
def parse_json_item(json_item, schema):
"""Parse the json item based on a schema.
:param json_item: A JSON item in the form 'key=value'
:type json_item: str
:param schema: The JSON schema to use for parsing
:type schema: dict
:returns: A tuple for the parsed JSON item
:rtype: (str, any) where any is one of str, int, float, bool, list or dict
"""
terms = json_item.split('=', 1)
if len(terms) != 2:
raise DCOSException('{!r} is not a valid json-item'.format(json_item))
# Check that it is a valid key in our jsonschema
key = terms[0]
value = parse_json_value(key, terms[1], schema)
return (json.dumps(key), value)
def parse_json_value(key, value, schema):
"""Parse the json value based on a schema.
:param key: the key property
:type key: str
:param value: the value of property
:type value: str
:param schema: The JSON schema to use for parsing
:type schema: dict
:returns: parsed value
:rtype: str | int | float | bool | list | dict
"""
value_type = find_parser(key, schema)
return value_type(value)
def find_parser(key, schema):
"""
:param key: JSON field
:type key: str
:param schema: The JSON schema to use
:type schema: dict
:returns: A callable capable of parsing a string to its type
:rtype: ValueTypeParser
"""
key_schema = schema['properties'].get(key)
if key_schema is None:
keys = ', '.join(schema['properties'].keys())
raise DCOSException(
'Error: {!r} is not a valid property. '
'Possible properties are: {}'.format(key, keys))
else:
return ValueTypeParser(key_schema)
class ValueTypeParser(object):
"""Callable for parsing a string against a known JSON type.
:param schema: The JSON type as a schema
:type schema: dict
"""
def __init__(self, schema):
self.schema = schema
def __call__(self, value):
"""
:param value: String to try and parse
:type value: str
:returns: The parse value
:rtype: str | int | float | bool | list | dict
"""
value = clean_value(value)
if self.schema['type'] == 'string':
if self.schema.get('format') == 'uri':
return _parse_url(value)
else:
return _parse_string(value)
elif self.schema['type'] == 'object':
return _parse_object(value)
elif self.schema['type'] == 'number':
return _parse_number(value)
elif self.schema['type'] == 'integer':
return _parse_integer(value)
elif self.schema['type'] == 'boolean':
return _parse_boolean(value)
elif self.schema['type'] == 'array':
return _parse_array(value)
else:
            raise DCOSException('Unknown type {!r}'.format(self.schema['type']))
def clean_value(value):
"""
:param value: String to try and clean
:type value: str
:returns: The cleaned string
:rtype: str
"""
if len(value) > 1 and value.startswith('"') and value.endswith('"'):
return value[1:-1]
elif len(value) > 1 and value.startswith("'") and value.endswith("'"):
return value[1:-1]
else:
return value
def _parse_string(value):
"""
:param value: The string to parse
:type value: str
:returns: The parsed value
:rtype: str
"""
return None if value == 'null' else value
def _parse_object(value):
"""
:param value: The string to parse
:type value: str
:returns: The parsed value
:rtype: dict
"""
try:
json_object = json.loads(value)
if json_object is None or isinstance(json_object, collections.Mapping):
return json_object
else:
raise DCOSException(
'Unable to parse {!r} as a JSON object'.format(value))
except ValueError as error:
logger.exception('Error parsing value as a JSON object')
msg = 'Unable to parse {!r} as a JSON object: {}'.format(value, error)
raise DCOSException(msg)
def _parse_number(value):
"""
:param value: The string to parse
:type value: str
:returns: The parsed value
:rtype: float
"""
try:
return None if value == 'null' else float(value)
except ValueError as error:
logger.exception('Error parsing value as a JSON number')
msg = 'Unable to parse {!r} as a float: {}'.format(value, error)
raise DCOSException(msg)
def _parse_integer(value):
"""
:param value: The string to parse
:type value: str
:returns: The parsed value
:rtype: int
"""
try:
return None if value == 'null' else int(value)
except ValueError as error:
logger.exception('Error parsing value as a JSON integer')
msg = 'Unable to parse {!r} as an int: {}'.format(value, error)
raise DCOSException(msg)
def _parse_boolean(value):
"""
:param value: The string to parse
:type value: str
:returns: The parsed value
:rtype: bool
"""
try:
boolean = json.loads(value)
if boolean is None or isinstance(boolean, bool):
return boolean
else:
raise DCOSException(
'Unable to parse {!r} as a boolean'.format(value))
except ValueError as error:
logger.exception('Error parsing value as a JSON boolean')
msg = 'Unable to parse {!r} as a boolean: {}'.format(value, error)
raise DCOSException(msg)
def _parse_array(value):
"""
:param value: The string to parse
:type value: str
:returns: The parsed value
:rtype: list
"""
try:
array = json.loads(value)
if array is None or isinstance(array, collections.Sequence):
return array
else:
raise DCOSException(
'Unable to parse {!r} as an array'.format(value))
except ValueError as error:
logger.exception('Error parsing value as a JSON array')
msg = 'Unable to parse {!r} as an array: {}'.format(value, error)
raise DCOSException(msg)
def _parse_url(value):
"""
:param value: The url to parse
    :type value: str
:returns: The parsed value
:rtype: str
"""
scheme_pattern = r'^(?P<scheme>(?:(?:https?)://))'
domain_pattern = (
r'(?P<hostname>(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.?)+'
        r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)?|')  # domain,
value_regex = re.match(
scheme_pattern + # http:// or https://
r'(([^:])+(:[^:]+)?@){0,1}' + # auth credentials
domain_pattern +
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}))' # or ip
r'(?P<port>(?::\d+))?' # port
r'(?P<path>(?:/?|[/?]\S+))$', # resource path
value, re.IGNORECASE)
if value_regex is None:
scheme_match = re.match(scheme_pattern, value, re.IGNORECASE)
if scheme_match is None:
msg = 'Please check url {!r}. Missing http(s)://'.format(value)
raise DCOSException(msg)
else:
raise DCOSException(
'Unable to parse {!r} as a url'.format(value))
else:
return value
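if __name__ == '__main__':
    # Example only -- not part of the original module. A small sketch showing
    # how parse_json_item() resolves 'key=value' strings against a schema; the
    # schema below is made up for the demo.
    _demo_schema = {
        'properties': {
            'cpus': {'type': 'number'},
            'instances': {'type': 'integer'},
            'cmd': {'type': 'string'},
        }
    }
    print(parse_json_item('cpus=1.5', _demo_schema))         # ('"cpus"', 1.5)
    print(parse_json_item('instances=3', _demo_schema))      # ('"instances"', 3)
    print(parse_json_item('cmd="sleep 100"', _demo_schema))  # ('"cmd"', 'sleep 100')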
| genome21/dcos-cli | dcos/jsonitem.py | Python | apache-2.0 | 7,404 | 0.00027 |
#!/usr/bin/env python3
# Copyright 2017-2018 Clayton Smith
#
# This file is part of bbhn-utils
#
# bbhn-utils is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# bbhn-utils is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with bbhn-utils; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
from flask import Flask, render_template, Response
import json
from datetime import *
from node_db import NodeDB
DISPLAY_HOURS = 24
node_db = NodeDB()
app = Flask(__name__, static_url_path='/static')
@app.route('/')
def node_info():
last_seen = node_db.last_seen(DISPLAY_HOURS)
return render_template('index.html', last_seen=last_seen)
@app.route('/link/<ip>.html')
def link_info(ip):
name = node_db.name(ip)
neighbours = node_db.neighbours(ip, DISPLAY_HOURS)
for i, neighbour in enumerate(neighbours):
cost = node_db.cost_history(ip, neighbour[1], DISPLAY_HOURS)
cost = [(ts.timestamp() * 1000, lq) for ts, lq in cost]
neighbours[i] = neighbour + (json.dumps(cost),)
return render_template('link.html', ip=ip, name=name, neighbours=neighbours)
if __name__ == '__main__':
app.run(host='0.0.0.0')
node_db.close()
| argilo/bbhn-utils | nodeinfo.py | Python | gpl-3.0 | 1,671 | 0.000598 |
name1_1_1_0_0_1_0 = None
name1_1_1_0_0_1_1 = None
name1_1_1_0_0_1_2 = None
name1_1_1_0_0_1_3 = None
name1_1_1_0_0_1_4 = None | asedunov/intellij-community | python/testData/completion/heavyStarPropagation/lib/_pkg1/_pkg1_1/_pkg1_1_1/_pkg1_1_1_0/_pkg1_1_1_0_0/_mod1_1_1_0_0_1.py | Python | apache-2.0 | 128 | 0.007813 |
# -*- coding: utf-8 -*-
#
# Insekta documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 24 16:48:19 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Insekta'
copyright = u'2011, Insekta team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Insektadoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Insekta.tex', u'Insekta Documentation',
u'Insekta team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'insekta', u'Insekta Documentation',
[u'Insekta team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Insekta', u'Insekta Documentation',
u'Insekta team', 'Insekta', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| teythoon/Insekta | docs/conf.py | Python | mit | 7,761 | 0.007473 |
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time
import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
# Raspberry Pi pin configuration:
RST = 24
# Note the following are only used with SPI:
DC = 23
SPI_PORT = 0
SPI_DEVICE = 0
# Beaglebone Black pin configuration:
# RST = 'P9_12'
# Note the following are only used with SPI:
# DC = 'P9_15'
# SPI_PORT = 1
# SPI_DEVICE = 0
# 128x32 display with hardware I2C:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST)
# 128x64 display with hardware I2C:
# disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)
# Note you can change the I2C address by passing an i2c_address parameter like:
# disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, i2c_address=0x3C)
# Alternatively you can specify an explicit I2C bus number, for example
# with the 128x32 display you would use:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, i2c_bus=2)
# 128x32 display with hardware SPI:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))
# 128x64 display with hardware SPI:
disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))
# Alternatively you can specify a software SPI implementation by providing
# digital GPIO pin numbers for all the required display pins. For example
# on a Raspberry Pi with the 128x32 display you might use:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, sclk=18, din=25, cs=22)
# Initialize library.
disp.begin()
# Clear display.
disp.clear()
disp.display()
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = 2
shape_width = 20
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = padding
# Draw an ellipse.
draw.ellipse((x, top , x+shape_width, bottom), outline=255, fill=0)
x += shape_width+padding
# Draw a rectangle.
draw.rectangle((x, top, x+shape_width, bottom), outline=255, fill=0)
x += shape_width+padding
# Draw a triangle.
draw.polygon([(x, bottom), (x+shape_width/2, top), (x+shape_width, bottom)], outline=255, fill=0)
x += shape_width+padding
# Draw an X.
draw.line((x, bottom, x+shape_width, top), fill=255)
draw.line((x, top, x+shape_width, bottom), fill=255)
x += shape_width+padding
# Load default font.
font = ImageFont.load_default()
# Alternatively load a TTF font. Make sure the .ttf font file is in the same directory as the python script!
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
#font = ImageFont.truetype('Minecraftia.ttf', 8)
# Write two lines of text.
draw.text((x, top), 'Hello', font=font, fill=255)
draw.text((x, top+20), 'World!', font=font, fill=255)
# Display image.
disp.image(image)
disp.display()
| RoboPi-CSEDU/rupai | Adafruit_Python_SSD1306/examples/shapes.py | Python | mit | 4,281 | 0.002569 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
##Copyright (c) 2017 Benoit Valot and Panisa Treepong
##benoit.valot@univ-fcomte.fr
##UMR 6249 Chrono-Environnement, Besançon, France
##Licence GPL
from . import variables
class ClipRead():
"""Clip read object"""
def __init__(self, alignedsegment):
self.read_seq = alignedsegment.query_sequence
self.read_name = alignedsegment.query_name
self.read_start = alignedsegment.query_alignment_start #0 left
self.read_end = alignedsegment.query_alignment_end #exclusive
self.read_len = alignedsegment.query_alignment_length
self.ref_start = alignedsegment.reference_start #0 left
self.ref_end = alignedsegment.reference_end # exclusive
self.ref_len = alignedsegment.reference_length
self.cigar = alignedsegment.cigarstring
self.cigartuples = alignedsegment.cigartuples
self.isreverse = alignedsegment.is_reverse
    def isstartclip(self):
        """Return True if the read is clipped at its start, False if at its end."""
if self.cigartuples is None:
raise Exception("ClipRead must be aligned")
if self.cigartuples[0][0] in variables.cigarclip:
return True
elif self.cigartuples[-1][0] in variables.cigarclip:
return False
else:
raise Exception("ClipRead must contain clip part at start or end")
def getdr(self, drstart, drend):
"""Return the dr sequence if complete or return None"""
        s = self.read_start + (drstart - self.ref_start) ##if < 0, incomplete dr
if s < 0:
return None
e = self.read_end - (self.ref_end - drend)
if e > len(self.read_seq):
return None
return self.read_seq[s:e]
def getclippos(self):
"""Return the position of the clip"""
if self.isstartclip():
return self.ref_start
else:
return self.ref_end
def getclipseq(self):
"""return clip part of the read, except for hard clip return None"""
if len(self.read_seq) == self.read_len:
return None
if self.isstartclip():
return self.read_seq[:self.read_start]
else:
return self.read_seq[self.read_end:]
def __len__(self):
return len(self.read_seq)
def __repr__(self):
return self.read_seq
def __str__(self):
return str(self.ref_start) + ": " + str(self.read_start) + self.read_seq + \
str(self.read_end) + " :" + str(self.ref_end)
if __name__=='__main__':
import doctest
doctest.testmod()
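# Example usage (not part of the original module): a sketch that assumes pysam
# is installed, an indexed BAM file is at hand, and that the module is imported
# as 'lib.clipread' (a guess based on the file location within panISa):
#
#     import pysam
#     from lib.clipread import ClipRead
#     bam = pysam.AlignmentFile("sample.bam", "rb")
#     for segment in bam.fetch("chr1", 10000, 10500):
#         cig = segment.cigartuples
#         if cig is None or not (cig[0][0] in (4, 5) or cig[-1][0] in (4, 5)):
#             continue  # keep only reads soft/hard-clipped at one end
#         read = ClipRead(segment)
#         print(read.getclippos(), read.getclipseq())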
| bvalot/panISa | lib/clipread.py | Python | gpl-3.0 | 2,634 | 0.007976 |
"""
mode_calcs.py is a subroutine of EMUstack that contains methods to
calculate the modes of a given layer, either analytically
(class 'Anallo') or from the FEM routine (class 'Simmo').
Copyright (C) 2013 Bjorn Sturmberg, Kokou Dossou, Felix Lawrence
EMUstack is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import sys
from scipy import sqrt
sys.path.append("../backend/")
import materials
import objects
from Fortran import EMUstack
_interfaces_i_have_known = {}
pi = np.pi
class Modes(object):
""" Super-class from which Simmo and Anallo inherit common functionality"""
def k_pll_norm(self):
return self.light.k_pll * self.structure.period
def wl_norm(self):
wl = float(self.light.wl_nm) / self.structure.period
if self.light.wl_nm % self.structure.period == 0:
wl += 1e-15
return wl
def air_ref(self):
""" Return an :Anallo: for air for the same :Light: as this."""
return self.light._air_ref(self.structure.period)
def calc_grating_orders(self, max_order):
""" Return the grating order indices px and py, unsorted."""
# Create arrays of grating order indexes (-p, ..., p)
pxs = pys = np.arange(-max_order, max_order + 1)
# The inner loop in the fortran is over y, not x
# So we call meshgrid with y first
pys_mesh, pxs_mesh = np.meshgrid(pys, pxs)
# Which elements of pys_mesh and pxs_mesh correspond to
# orders low enough that we're interested in?
low_ord = (pxs_mesh**2 + pys_mesh**2 <= max_order**2)
return pxs_mesh[low_ord], pys_mesh[low_ord]
def prop_fwd(self, height_norm):
""" Return the matrix P corresponding to forward propagation/decay"""
return np.mat(np.diag(np.exp(1j * self.k_z * height_norm)))
def __del__(self):
# Clean up _interfaces_i_have_known to avoid memory leak
        if _interfaces_i_have_known is not None:
            for key in list(_interfaces_i_have_known.keys()):
                if id(self) in key:
                    _interfaces_i_have_known.pop(key)
class Anallo(Modes):
""" Like a :Simmo:, but for a thin film, and calculated analytically."""
def __init__(self, thin_film, light):
self.structure = thin_film
self.light = light
self.max_order_PWs = light.max_order_PWs
self.is_air_ref = False
def calc_modes(self):
#TODO: switch to just using calc_kz()?
kzs = self.calc_kz()
self.k_z = np.append(kzs, kzs) # add 2nd polarisation
self.structure.num_pw_per_pol = len(kzs)
def calc_kz(self):
""" Return a sorted 1D array of grating orders' kz."""
d = 1 #TODO: are lx, ly relevant here??
# Calculate vectors of pxs and pys of all orders
# with px^2 + py^2 <= self.max_order_PWs
pxs, pys = self.calc_grating_orders(self.max_order_PWs)
# Calculate k_x and k_y components of scattered PWs
# (using the grating equation)
alpha0, beta0 = self.k_pll_norm()
alphas = alpha0 + pxs * 2 * pi / d
betas = beta0 + pys * 2 * pi / d
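        # Dispersion relation: k_z**2 = k**2 - k_x**2 - k_y**2 for each order,
        # so orders with alphas**2 + betas**2 > k**2 get imaginary (evanescent) k_z.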
k_z_unsrt = sqrt(self.k()**2 - alphas**2 - betas**2)
if self.is_air_ref:
assert not hasattr(self, 'sort_order'), \
"Are you sure you want to reset the sort_order?"
# Sort the modes from propagating to fastest decaying
# k_z is real for propagating waves
# This must be done consistently
s = np.argsort(-1*k_z_unsrt.real + k_z_unsrt.imag)
self.sort_order = s
else:
s = self.air_ref().sort_order
assert s.shape == k_z_unsrt.shape, (s.shape,
k_z_unsrt.shape)
# Find element of k_z_unsrt corresponding to zeroth order
self.specular_order = np.nonzero((pxs[s] == 0) * (pys[s] == 0))[0][0]
# Calculate number of propagating plane waves in thin film
self.num_prop_pw_per_pol = (k_z_unsrt.imag == 0).sum()
return k_z_unsrt[s]
def n(self):
if self.structure.loss:
return self.structure.material.n(self.light.wl_nm)
else:
return self.structure.material.n(self.light.wl_nm).real
def k(self):
""" Return the normalised wavenumber in the background material"""
return 2 * pi * self.n() / self.wl_norm()
def Z(self):
""" Return the wave impedance as a 1D array."""
# Zcr is relative characteristic impedance Zc / Z0
# Zcr = 1/n assumes that relative permeability is 1
# Otherwise, use Zcr = \sqrt(epsilon_r / mu_r)
Zcr = 1./self.n()
# self.k_z repeats itself halfway through
# First half is for TE pol, second is for TM
        num_pw2 = len(self.k_z) // 2
k_z = self.k_z[:num_pw2]
assert (k_z == self.k_z[num_pw2:]).all()
# Calculate the (relative) wave impedances Z
# TE (E in interface plane): Z = Zcr * k/k_z
# TM (H in interface plane): Z = Zcr / (k/k_z)
k_on_kz = self.k() / k_z
# TE is always represented first
return np.concatenate((Zcr * k_on_kz, Zcr / k_on_kz))
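    # Added worked example: for the specular order at normal incidence
    # k_z == k, so k/k_z == 1 and both polarisations reduce to Z = Zcr = 1/n,
    # the plain relative impedance of the medium.  Away from normal incidence
    # the TE and TM entries split as Z_TE = Zcr*k/k_z and Z_TM = Zcr*k_z/k,
    # exactly as returned above.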
def specular_incidence(self, pol = 'TE'):
""" Return a vector of plane wave amplitudes corresponding
to specular incidence in the specified polarisation.
i.e. all elements are 0 except the zeroth order.
"""
# Element corresponding to 0th order, TE
spec_TE = self.specular_order
# Element corresponding to 0th order, TM
spec_TM = self.specular_order + self.structure.num_pw_per_pol
tot_num_pw = self.structure.num_pw_per_pol * 2
inc_amp = np.mat(np.zeros(tot_num_pw, dtype='complex128')).T
if 'TE' == pol:
inc_amp[spec_TE] = 1
elif 'TM' == pol:
inc_amp[spec_TM] = 1
elif 'R Circ' == pol:
inc_amp[spec_TE] = 1/sqrt(2.)
inc_amp[spec_TM] = +1j/sqrt(2.)
elif 'L Circ' == pol:
inc_amp[spec_TE] = 1/sqrt(2.)
inc_amp[spec_TM] = -1j/sqrt(2.)
else:
raise NotImplementedError, \
"Must select from the currently implemented polarisations; \
TE, TM, R Circ, L Circ."
return inc_amp
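    # Added usage sketch (variable names are illustrative, not from the
    # original file):
    #   an = Anallo(thin_film, light); an.calc_modes()
    #   a_inc = an.specular_incidence(pol='TE')
    # yields a column vector of length 2*num_pw_per_pol that is zero except
    # for a 1 in the zeroth-order TE slot; 'R Circ'/'L Circ' instead place the
    # usual (1, +/-1j)/sqrt(2) pair in the TE and TM slots.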
class Simmo(Modes):
"""docstring for Simmo"""
def __init__(self, structure, light):
self.structure = structure
self.light = light
self.max_order_PWs = light.max_order_PWs
self.prop_consts = None
self.mode_pol = None
def calc_modes(self, num_BM, delete_working = True):
""" Run the FEM in Fortran """
st = self.structure
wl = self.light.wl_nm
if self.structure.diameter2 == 0:
nb_typ_el = 2
else:
nb_typ_el = 3
n_effs = np.array([st.background.n(wl), st.inclusion_a.n(wl),
st.inclusion_b.n(wl)])
n_effs = n_effs[:nb_typ_el]
if self.structure.loss == False:
n_effs = n_effs.real
pxs, pys = self.calc_grating_orders(self.max_order_PWs)
num_pw_per_pol = pxs.size
self.num_BM = num_BM
        assert num_BM > num_pw_per_pol * 2, "You must include more BMs than PWs. \n" + \
            "Currently you have %(bm)i BMs <= %(np)i PWs." % {
                'bm': num_BM, 'np': num_pw_per_pol * 2}
d = self.structure.period
# Prepare for the mesh
with open("../backend/Data/"+self.structure.mesh_file) as f:
n_msh_pts, n_msh_el = [int(i) for i in f.readline().split()]
# Size of Fortran's complex superarray (scales with mesh)
# In theory could do some python-based preprocessing
# on the mesh file to work out RAM requirements
        cmplx_max = 2**27  # 30
# Parameters that control how FEM routine runs
        FEM_debug = 0 # Fortran routine will print info to screen and save additional info to file
E_H_field = 1 # Selected formulation (1=E-Field, 2=H-Field)
i_cond = 2 # Boundary conditions (0=Dirichlet,1=Neumann,2=Periodic)
itermax = 30 # Maximum number of iterations for convergence
resm = EMUstack.calc_modes(
self.wl_norm(), self.num_BM, self.max_order_PWs,
self.structure.period, FEM_debug,
self.structure.mesh_file, n_msh_pts, n_msh_el,
n_effs, self.k_pll_norm(),
E_H_field, i_cond, itermax,
self.structure.plot_modes, self.structure.plot_real,
self.structure.plot_imag, self.structure.plot_abs,
num_pw_per_pol, cmplx_max, nb_typ_el)
self.k_z, J, J_dag, self.sol1, self.sol2, self.mode_pol = resm
self.J, self.J_dag = np.mat(J), np.mat(J_dag)
if delete_working:
self.sol1 = None
self.sol2 = None
# self.mode_pol = None
def r_t_mat(lay1, lay2):
""" Return R12, T12, R21, T21 at an interface between lay1
and lay2.
"""
assert lay1.structure.period == lay2.structure.period
# We memorise to avoid extra calculations
global _interfaces_i_have_known
# Have we seen this interface before?
try:
return _interfaces_i_have_known[id(lay1), id(lay2)]
except KeyError: pass
# Or perhaps its reverse?
try:
R21, T21, R12, T12 = _interfaces_i_have_known[id(lay2), id(lay1)]
return R12, T12, R21, T21
except KeyError: pass
# No? Then we'll have to calculate its properties.
if isinstance(lay1, Anallo) and isinstance(lay2, Anallo):
ref_trans = r_t_mat_anallo(lay1, lay2)
elif isinstance(lay1, Anallo) and isinstance(lay2, Simmo):
ref_trans = r_t_mat_tf_ns(lay1, lay2)
elif isinstance(lay1, Simmo) and isinstance(lay2, Anallo):
R21, T21, R12, T12 = r_t_mat_tf_ns(lay2, lay1)
ref_trans = R12, T12, R21, T21
elif isinstance(lay1, Simmo) and isinstance(lay2, Simmo):
        raise NotImplementedError, \
            "Sorry! For now, you can put an extremely thin film between your \
        NanoStructs"
# Store its R and T matrices for later use
_interfaces_i_have_known[id(lay1), id(lay2)] = ref_trans
return ref_trans
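# Added note: ``_interfaces_i_have_known`` is the module-level cache used
# above, keyed on ``(id(lay1), id(lay2))``.  Repeated and reversed interface
# pairs in a stack are therefore computed only once, and ``Modes.__del__``
# evicts entries once either layer object is garbage collected.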
def r_t_mat_anallo(an1, an2):
""" Returns R12, T12, R21, T21 at an interface between thin films.
R12 is the reflection matrix from Anallo 1 off Anallo 2
The sign of elements in T12 and T21 is fixed to be positive,
in the eyes of `numpy.sign`
"""
if len(an1.k_z) != len(an2.k_z):
raise ValueError, "Need the same number of plane waves in \
Anallos %(an1)s and %(an2)s" % {'an1' : an1, 'an2' : an2}
Z1 = an1.Z()
Z2 = an2.Z()
R12 = np.mat(np.diag((Z2 - Z1)/(Z2 + Z1)))
# N.B. there is potentially a branch choice problem here, stemming
# from the normalisation to unit flux.
# We normalise each field amplitude by
# $chi^{\pm 1/2} = sqrt(k_z/k)^{\pm 1} = sqrt(Z/Zc)^{\pm 1}$
# The choice of branch in those square roots must be the same as the
# choice in the related square roots that we are about to take:
T12 = np.mat(np.diag(2.*sqrt(Z2)*sqrt(Z1)/(Z2+Z1)))
R21 = -R12
T21 = T12
return R12, T12, R21, T21
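# Added sanity check (illustrative): for a single propagating order at normal
# incidence Z = 1/n, so the diagonal entries above reduce to the familiar
# flux-normalised Fresnel results
#   r12 = (n1 - n2)/(n1 + n2),  t12 = 2*sqrt(n1*n2)/(n1 + n2),
# with r21 = -r12 and t21 = t12 as returned.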
def r_t_mat_tf_ns(an1, sim2):
""" Returns R12, T12, R21, T21 at an1-sim2 interface.
Based on:
Dossou et al., JOSA A, Vol. 29, Issue 5, pp. 817-831 (2012)
http://dx.doi.org/10.1364/JOSAA.29.000817
But we use Zw = 1/(Zcr X) instead of X, so that an1 does not
have to be free space.
"""
Z1_sqrt_inv = sqrt(1/an1.Z()).reshape((1,-1))
# In the paper, X is a diagonal matrix. Here it is a 1 x N array.
# Same difference.
A = np.mat(Z1_sqrt_inv.T * sim2.J.A)
B = np.mat(sim2.J_dag.A * Z1_sqrt_inv)
denominator = np.eye(len(B)) + B.dot(A)
# R12 = -I + 2 A (I + BA)^-1 B
# T12 = 2 (I + BA)^-1 B
den_inv_times_B = np.linalg.solve(denominator, B)
R12 = -np.eye(len(A)) + 2 * A * den_inv_times_B
T12 = 2 * den_inv_times_B
# R21 = (I - BA)(I + BA)^-1 = (I + BA)^-1 (I - BA)
# T21 = 2 A (I + BA)^-1 = T12^T
R21 = np.linalg.solve(denominator, (np.eye(len(B)) - B*A))
T21 = 2 * A * denominator.I
# T21 = T12.T
return np.mat(R12), np.mat(T12), np.mat(R21), np.mat(T21)
| frownless/EMUstack | backend/mode_calcs.py | Python | gpl-3.0 | 13,371 | 0.004712 |
from django.contrib.auth import authenticate, login
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from django.core import serializers
from django.core.paginator import Paginator, EmptyPage
from django.core.serializers.json import DjangoJSONEncoder
from django.http.response import HttpResponse, HttpResponseForbidden
from django.shortcuts import get_object_or_404, render_to_response, render
from django.template import RequestContext
from django.utils import simplejson
from django.http import Http404
from django.template import TemplateDoesNotExist
# from django.views.generic.simple import direct_to_template
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework import status
from final.apps.fondo.domain.serializers import FondoSerializer, TicketSerializer, QuerySetSerializer
from final.apps.fondo.models import PettyCash, Ticket
import logging
_logger = logging.getLogger(__name__)
def main(request):
u = request.user
fondos = []
# fondos = u.customuser.pettycash_set.all()
return render_to_response('fondo/main.html', {'fondos': fondos})
def get_fondo_status(request, fondo_id):
""" Gets the current status.
"""
fondo = get_object_or_404(PettyCash, pk=fondo_id)
submited = fondo.ticket_set.filter(status='SUBM')
aproved = fondo.ticket_set.filter(status='OPEN')
total_submited = 0.0
total_aproved = 0.0
if submited:
total_submited = sum(t.value for t in submited)
if aproved:
total_aproved = sum(t.value for t in aproved)
print ("Status: submited %s, aproved %s" % (total_submited, total_aproved))
data = {}
data['submited'] = total_submited
data['aproved'] = total_aproved
return HttpResponse(simplejson.dumps(data, cls=DjangoJSONEncoder), mimetype='application/json')
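# Added note: the view above replies with a small JSON document of the form
#   {"submited": <sum of SUBM ticket values>, "aproved": <sum of OPEN ticket values>}
# (the key spellings are kept as-is because clients already rely on them).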
def get_ticket_for_fondo(request, fondo_id):
page = 1
size = 5
if request.POST.get('page'):
page = request.POST['page']
if request.POST.get('size'):
size = request.POST['size']
_logger.debug("Page: %s, size: %s" % (page, size))
fondo = get_object_or_404(PettyCash, pk=fondo_id)
tickets = fondo.ticket_set.all()
p = Paginator(tickets, size)
try:
pag = p.page(page)
# tickets = QuerySetSerializer().serialize(pag)
tickets = []
for t in pag:
ticket = {}
ticket['id'] = str(t.id)
ticket['value'] = str(t.value)
ticket['description'] = str(t.description)
ticket['date'] = str(t.date)
tickets.append(ticket)
pagination = {}
pagination['has_previous'] = pag.has_previous()
pagination['has_next'] = pag.has_next()
pagination['page'] = page
pagination['size'] = size
data = {}
data['tickets'] = tickets
data['pagination'] = pagination
# data = simplejson.dumps(pagination)
# tickets = serializers.serialize('json', p.page(page))
except EmptyPage:
        return HttpResponse(simplejson.dumps({'error': 'Page out of range'}), status=status.HTTP_404_NOT_FOUND, mimetype='application/json')
return HttpResponse(simplejson.dumps(data), mimetype='application/json')
class FondoViewSet(viewsets.ModelViewSet):
queryset = PettyCash.objects.all()
serializer_class = FondoSerializer
def list(self, request):
if self.is_superuser(request):
q = PettyCash.objects.all()
else:
q = request.user.fondo_set.all()
return Response(FondoSerializer(q).data)
def retrieve(self, request, pk=None):
try:
f = PettyCash.objects.get(pk=pk)
except PettyCash.DoesNotExist:
return Response({'error': 'Not found'}, status=status.HTTP_404_NOT_FOUND)
serializer = FondoSerializer(f)
if self.is_superuser(request) or f.owner.id == request.user.id:
return Response(serializer.data)
return Response({'error': 'Object is not your own'}, status=status.HTTP_404_NOT_FOUND)
def is_superuser(self, request):
"""
Indicates if user is a superuser
"""
if hasattr(request, 'user') and request.user.is_superuser:
return True
return False
class TicketViewSet(viewsets.ModelViewSet):
queryset = Ticket.objects.all()
serializer_class = TicketSerializer
def list(self, request):
status = request.GET.get('status')
fondo_id = request.GET.get('fondo')
_logger.debug("Getting tickets for fondo %s and status %s" % (fondo_id, status))
user = request.user
fondo = get_object_or_404(PettyCash, pk=fondo_id)
if status is not None:
q = Ticket.objects.filter(fondo=fondo, status=status)
else:
q = Ticket.objects.filter(fondo=fondo)
return Response(TicketSerializer(q).data)
def template_pages(request, page):
try:
template_name = "template/%s.html" % page
return render_to_response(template_name, {}, context_instance=RequestContext(request))
except TemplateDoesNotExist:
raise Http404()
| moteloquito/final-project | final/apps/fondo/views.py | Python | gpl-3.0 | 5,222 | 0.003255 |
# -*- coding: utf8 -*-
from django.conf import settings
from django.contrib import auth
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.views.decorators.cache import never_cache
from django.views.decorators.http import require_POST
from bootcamp.decorators import ajax_required
from registration.users import UserModel
from django.contrib.auth.models import User
from bootcamp.feeds.models import Feed
from django.core.context_processors import csrf
from django.template.loader import render_to_string
from django.shortcuts import render, redirect, get_object_or_404
import random
import json
FEEDS_NUM_PAGES = 20
MAJOR_VERSION = 0
MID_VERSION = 1
MIN_VERSION = 3
NOTE = """
更新内容:
1. 删除评论、帖子,取消赞扣分以防刷经验;
2. 增加修改资料功能;
"""
URL = "http://nqzx.net/media/ads/nqzx.apk"
def check_version(version):
    # Compare version components from most to least significant so that a
    # client that is already newer (e.g. "0.2.0") is not asked to "update".
    ret = False
    ls = version.split('.')
    if MAJOR_VERSION != int(ls[0]):
        ret = MAJOR_VERSION > int(ls[0])
    elif MID_VERSION != int(ls[1]):
        ret = MID_VERSION > int(ls[1])
    else:
        ret = MIN_VERSION > int(ls[2])
    return ret
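# Added example: with the version constants above (0.1.3),
# check_version("0.1.2") returns True (the client should update), while
# check_version("0.2.0") returns False (the client is already newer).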
def get_level(reputation):
if not reputation:
        return 1
if reputation < 5:
return 1
elif reputation < 15:
return 2
elif reputation < 30:
return 3
elif reputation < 50:
return 4
elif reputation < 100:
return 5
elif reputation < 200:
return 6
elif reputation < 500:
return 7
elif reputation < 1000:
return 8
elif reputation < 2000:
return 9
elif reputation < 3000:
return 10
elif reputation < 6000:
return 11
elif reputation < 10000:
return 12
elif reputation < 18000:
return 13
elif reputation < 30000:
return 14
elif reputation < 60000:
return 15
elif reputation < 100000:
return 16
elif reputation < 300000:
return 17
else:
return 18
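# Added example: get_level(40) returns 4 (since 30 <= 40 < 50), get_level(250000)
# returns 17, and any reputation of 300000 or more maps to the cap, level 18.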
@require_POST
@ajax_required
def login(request):
username = request.POST.get('account')
password = request.POST.get('password')
result = {"status": False, "data":""}
if not username or not password:
result = {"status": False, "data":"未收到用户名或密码!"}
return HttpResponse(json.dumps(result), content_type="application/json")
if username=="" or username.isspace():
result = {"status": False, "data":"用户名不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
if password=="" or password.isspace():
result = {"status": False, "data":"密码不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
user = auth.authenticate(username=username, password=password)
if user is not None:
if user.is_active:
auth.login(request, user)
result = {"status": True, "data": {"id": user.id, "email": user.email, \
"location": user.profile.location, "mobile": user.profile.mobile, "reputation": \
user.profile.reputation,"signdate": user.profile.signdate}}
else:
result = {"status": False, "data":"["+username+"]已被暂时禁用"}
else:
result = {"status": False, "data":"用户名或密码不正确,请重试"}
return HttpResponse(json.dumps(result), content_type="application/json")
@require_POST
@ajax_required
def reg(request):
username = request.POST.get('account')
password = request.POST.get('password')
email = request.POST.get('email')
result = {"status": False, "data":""}
if not username or not password or not email:
result = {"status": False, "data":"未收到用户名、密码或者用户名!"}
return HttpResponse(json.dumps(result), content_type="application/json")
if username=="" or username.isspace():
result = {"status": False, "data":"用户名不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
if password=="" or password.isspace():
result = {"status": False, "data":"密码不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
if email=="" or email.isspace():
result = {"status": False, "data":"邮箱不能为空"}
return HttpResponse(json.dumps(result), content_type="application/json")
# clean data
existing = UserModel().objects.filter(username__iexact=username)
if existing.exists():
result = {"status": False, "data":"用户名已经存在"}
return HttpResponse(json.dumps(result), content_type="application/json")
if UserModel().objects.filter(email__iexact=email):
result = {"status": False, "data":"邮箱已经存在"}
return HttpResponse(json.dumps(result), content_type="application/json")
user = UserModel().objects.create_user(username, email, password)
user.is_active = True
user.save()
result = {"status": True, "data": {"id": user.id, "email": user.email, \
"location": user.profile.location, "mobile": user.profile.mobile, "reputation": \
user.profile.reputation,"signdate": user.profile.signdate}}
return HttpResponse(json.dumps(result), content_type="application/json")
@require_POST
@ajax_required
def get_state(request):
user = request.user
state = {"id": user.id, "username": user.username, "email": user.email, "location": user.profile.location, \
"mobile": user.profile.mobile, "reputation": user.profile.reputation,"first_name": user.first_name, \
"sex": user.profile.sex,"signdate": user.profile.signdate}
return HttpResponse(json.dumps(state), content_type="application/json")
@require_POST
@ajax_required
def set_state(request):
result = {"status": False, "data": {}}
userid = request.POST.get('userid')
user = User.objects.get(pk=userid)
if not user:
        return HttpResponse(json.dumps(result), content_type="application/json")
first_name = request.POST.get('first_name')
location = request.POST.get('location')
mobile = request.POST.get('mobile')
reputation = request.POST.get('reputation')
sex = request.POST.get('sex')
signdate = request.POST.get('signdate')
if first_name:
user.first_name = first_name;
if location:
user.profile.location = location
if mobile:
user.profile.mobile = mobile
if reputation:
user.profile.reputation = reputation
if sex:
user.profile.sex = sex
if signdate:
user.profile.signdate = signdate
user.save()
result = {"status": True, "data": {"first_name": first_name, "sex": sex, \
"location":location,"mobile":mobile,"reputation":reputation,"signdate":signdate}}
return HttpResponse(json.dumps(result), content_type="application/json")
def get_feeds(request):
page = 1
feed_id = request.POST["feed_id"]
csrf_token = unicode(csrf(request)['csrf_token'])
html = u''
if feed_id:
feed = Feed.objects.get(pk=feed_id)
html = u'{0}{1}'.format(html, render_to_string('app/partial_feed.html', {
'feed': feed,
'user': request.user,
'csrf_token': csrf_token,
'lvl': get_level(feed.user.profile.reputation),
})
)
else:
feeds = Feed.get_feeds()
paginator = Paginator(feeds, FEEDS_NUM_PAGES)
feeds = paginator.page(page)
for feed in feeds:
html = u'{0}{1}'.format(html, render_to_string('app/partial_feed.html', {
'feed': feed,
'user': request.user,
'csrf_token': csrf_token,
'lvl': get_level(feed.user.profile.reputation),
})
)
return HttpResponse(html)
@ajax_required
def checkupdate(request):
version = request.POST.get('version')
ret = {"status": check_version(version), "note": NOTE, "url": URL}
return HttpResponse(json.dumps(ret), content_type="application/json")
def _html_feeds(last_feed, user, csrf_token, feed_source='all'):
feeds = Feed.get_feeds_after(last_feed)
if feed_source != 'all':
feeds = feeds.filter(user__id=feed_source)
html = u''
for feed in feeds:
html = u'{0}{1}'.format(html, render_to_string('app/partial_feed.html', {
'feed': feed,
'user': user,
'csrf_token': csrf_token,
'lvl': get_level(feed.user.profile.reputation),
})
)
return html
def post(request):
last_feed = request.POST.get('last_feed')
user = request.user
rand_user = User.objects.get(pk=random.randint(318, 367))
csrf_token = unicode(csrf(request)['csrf_token'])
feed = Feed()
if user.id == 283:
feed.user = rand_user
else:
feed.user = user
post = request.POST['post']
post = post.strip()
if len(post) > 0:
feed.post = post[:255]
user.profile.reputation += 3
user.save()
feed.save()
html = _html_feeds(last_feed, user, csrf_token)
return HttpResponse(html)
def load(request):
from_feed = request.GET.get('from_feed')
page = request.GET.get('page')
active = request.GET.get('active')
feed_source = request.GET.get('feed_source')
if active and active != 'all':
all_feeds = Feed.get_feeds(from_feed, active)
else:
all_feeds = Feed.get_feeds(from_feed)
if feed_source != 'all':
all_feeds = all_feeds.filter(user__id=feed_source)
paginator = Paginator(all_feeds, FEEDS_NUM_PAGES)
try:
feeds = paginator.page(page)
except EmptyPage:
feeds = []
html = u''
csrf_token = unicode(csrf(request)['csrf_token'])
for feed in feeds:
html = u'{0}{1}'.format(html, render_to_string('app/partial_feed.html', {
'feed': feed,
'user': request.user,
'lvl': get_level(feed.user.profile.reputation),
'csrf_token': csrf_token
})
)
return HttpResponse(html)
def load_new(request):
last_feed = request.GET.get('last_feed')
user = request.user
csrf_token = unicode(csrf(request)['csrf_token'])
html = _html_feeds(last_feed, user, csrf_token)
return HttpResponse(html)
def comment(request):
if request.method == 'POST':
feed_id = request.POST['feed']
feed = Feed.objects.get(pk=feed_id)
post = request.POST['post']
post = post.strip()
if len(post) > 0:
post = post[:255]
user = request.user
feed.comment(user=user, post=post)
user.profile.notify_commented(feed)
user.profile.notify_also_commented(feed)
user.profile.reputation += 2
user.save()
return render(request, 'app/partial_feed_comments.html', {'feed': feed})
else:
feed_id = request.GET.get('feed')
feed = Feed.objects.get(pk=feed_id)
return render(request, 'app/partial_feed_comments.html', {'feed': feed})
def track_comments(request):
feed_id = request.GET.get('feed')
feed = Feed.objects.get(pk=feed_id)
return render(request, 'app/partial_feed_comments.html', {'feed': feed})
| Wang-Sen/nqzx-backend | bootcamp/app/views.py | Python | gpl-3.0 | 11,441 | 0.007145 |
#
# Copyright 2008-2019 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
from __future__ import division
import os
import logging
from vdsm.common import cpuarch
from vdsm.common.cache import memoized
from vdsm import dmidecodeUtil
P_VDSM_NODE_ID = '/etc/vdsm/vdsm.id'
@memoized
def uuid():
host_UUID = None
try:
if os.path.exists(P_VDSM_NODE_ID):
with open(P_VDSM_NODE_ID) as f:
host_UUID = f.readline().replace("\n", "")
else:
arch = cpuarch.real()
if cpuarch.is_x86(arch):
try:
hw_info = dmidecodeUtil.getHardwareInfoStructure()
host_UUID = hw_info['systemUUID'].lower()
except KeyError:
logging.warning('Could not find host UUID.')
elif cpuarch.is_ppc(arch):
# eg. output IBM,03061C14A
try:
with open('/proc/device-tree/system-id') as f:
systemId = f.readline()
host_UUID = systemId.rstrip('\0').replace(',', '')
except IOError:
logging.warning('Could not find host UUID.')
except:
logging.error("Error retrieving host UUID", exc_info=True)
return host_UUID
| oVirt/vdsm | lib/vdsm/host/__init__.py | Python | gpl-2.0 | 2,088 | 0.000479 |
import unittest
class TestExample(unittest.TestCase):
def test_example(self):
        self.assertEqual(0, 0)
| ryanpdwyer/newtex | newtex/tests/test_git.py | Python | mit | 116 | 0 |
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
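# Added example: a minimal bitcoin.conf that read_bitcoin_config() can parse
# (values are placeholders, not project defaults):
#   rpcuser=someuser
#   rpcpassword=somepassword
#   rpcport=3338
#   testnet=0
# FakeSecHead injects the missing "[all]" section header, and trailing
# '# comments' on a line are stripped before parsing.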
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 13338 if testnet else 3338
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
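# Added example: with unspent outputs worth 0.4, 0.3 and 0.5 and needed = 0.6,
# the greedy loop above takes the first two outputs and returns a change of
# 0.1.  It simply walks the inputs in listunspent order, so it can pick more
# coins than an optimal coin-selection would.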
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| lemoncoin/lemoncoin | contrib/spendfrom/spendfrom.py | Python | mit | 10,043 | 0.005775 |
from sympy import (
Abs, And, Derivative, Dummy, Eq, Float, Function, Gt, I, Integral,
LambertW, Lt, Matrix, Or, Piecewise, Poly, Q, Rational, S, Symbol,
Wild, acos, asin, atan, atanh, cos, cosh, diff, erf, erfinv, erfc,
erfcinv, erf2, erf2inv, exp, expand, im, log, pi, re, sec, sin,
sinh, solve, solve_linear, sqrt, sstr, symbols, sympify, tan, tanh,
root, simplify, atan2, arg, Mul, SparseMatrix, ask, Tuple, nsolve, oo)
from sympy.core.function import nfloat
from sympy.solvers import solve_linear_system, solve_linear_system_LU, \
solve_undetermined_coeffs
from sympy.solvers.solvers import _invert, unrad, checksol, posify, _ispow, \
det_quick, det_perm, det_minor
from sympy.physics.units import cm
from sympy.polys.rootoftools import RootOf
from sympy.utilities.pytest import slow, XFAIL, raises, skip, ON_TRAVIS
from sympy.utilities.randtest import verify_numerically as tn
from sympy.abc import a, b, c, d, k, h, p, x, y, z, t, q, m
def NS(e, n=15, **options):
return sstr(sympify(e).evalf(n, **options), full_prec=True)
def test_swap_back():
f, g = map(Function, 'fg')
fx, gx = f(x), g(x)
assert solve([fx + y - 2, fx - gx - 5], fx, y, gx) == \
{fx: gx + 5, y: -gx - 3}
assert solve(fx + gx*x - 2, [fx, gx]) == {fx: 2, gx: 0}
assert solve(fx + gx**2*x - y, [fx, gx]) == [{fx: y - gx**2*x}]
assert solve([f(1) - 2, x + 2]) == [{x: -2, f(1): 2}]
def guess_solve_strategy(eq, symbol):
try:
solve(eq, symbol)
return True
except (TypeError, NotImplementedError):
return False
def test_guess_poly():
# polynomial equations
assert guess_solve_strategy( S(4), x ) # == GS_POLY
assert guess_solve_strategy( x, x ) # == GS_POLY
assert guess_solve_strategy( x + a, x ) # == GS_POLY
assert guess_solve_strategy( 2*x, x ) # == GS_POLY
assert guess_solve_strategy( x + sqrt(2), x) # == GS_POLY
assert guess_solve_strategy( x + 2**Rational(1, 4), x) # == GS_POLY
assert guess_solve_strategy( x**2 + 1, x ) # == GS_POLY
assert guess_solve_strategy( x**2 - 1, x ) # == GS_POLY
assert guess_solve_strategy( x*y + y, x ) # == GS_POLY
assert guess_solve_strategy( x*exp(y) + y, x) # == GS_POLY
assert guess_solve_strategy(
(x - y**3)/(y**2*sqrt(1 - y**2)), x) # == GS_POLY
def test_guess_poly_cv():
# polynomial equations via a change of variable
assert guess_solve_strategy( sqrt(x) + 1, x ) # == GS_POLY_CV_1
assert guess_solve_strategy(
x**Rational(1, 3) + sqrt(x) + 1, x ) # == GS_POLY_CV_1
assert guess_solve_strategy( 4*x*(1 - sqrt(x)), x ) # == GS_POLY_CV_1
# polynomial equation multiplying both sides by x**n
assert guess_solve_strategy( x + 1/x + y, x ) # == GS_POLY_CV_2
def test_guess_rational_cv():
# rational functions
assert guess_solve_strategy( (x + 1)/(x**2 + 2), x) # == GS_RATIONAL
assert guess_solve_strategy(
(x - y**3)/(y**2*sqrt(1 - y**2)), y) # == GS_RATIONAL_CV_1
# rational functions via the change of variable y -> x**n
    assert guess_solve_strategy(
        (sqrt(x) + 1)/(x**Rational(1, 3) + sqrt(x) + 1), x )  # == GS_RATIONAL_CV_1
def test_guess_transcendental():
#transcendental functions
assert guess_solve_strategy( exp(x) + 1, x ) # == GS_TRANSCENDENTAL
assert guess_solve_strategy( 2*cos(x) - y, x ) # == GS_TRANSCENDENTAL
assert guess_solve_strategy(
exp(x) + exp(-x) - y, x ) # == GS_TRANSCENDENTAL
assert guess_solve_strategy(3**x - 10, x) # == GS_TRANSCENDENTAL
assert guess_solve_strategy(-3**x + 10, x) # == GS_TRANSCENDENTAL
assert guess_solve_strategy(a*x**b - y, x) # == GS_TRANSCENDENTAL
def test_solve_args():
# equation container, issue 5113
ans = {x: -3, y: 1}
eqs = (x + 5*y - 2, -3*x + 6*y - 15)
assert all(solve(container(eqs), x, y) == ans for container in
(tuple, list, set, frozenset))
assert solve(Tuple(*eqs), x, y) == ans
# implicit symbol to solve for
assert set(solve(x**2 - 4)) == set([S(2), -S(2)])
assert solve([x + y - 3, x - y - 5]) == {x: 4, y: -1}
assert solve(x - exp(x), x, implicit=True) == [exp(x)]
# no symbol to solve for
assert solve(42) == []
assert solve([1, 2]) == []
# duplicate symbols removed
assert solve((x - 3, y + 2), x, y, x) == {x: 3, y: -2}
# unordered symbols
# only 1
assert solve(y - 3, set([y])) == [3]
# more than 1
assert solve(y - 3, set([x, y])) == [{y: 3}]
# multiple symbols: take the first linear solution
assert solve(x + y - 3, [x, y]) == [{x: 3 - y}]
# unless it is an undetermined coefficients system
assert solve(a + b*x - 2, [a, b]) == {a: 2, b: 0}
assert solve(a*x**2 + b*x + c -
((x - h)**2 + 4*p*k)/4/p,
[h, p, k], exclude=[a, b, c], dict=True) == \
[{k: c - b**2/(4*a), h: -b/(2*a), p: 1/(4*a)}]
# failing undetermined system
assert solve(a*x + b**2/(x + 4) - 3*x - 4/x, a, b) == \
[{a: (-b**2*x + 3*x**3 + 12*x**2 + 4*x + 16)/(x**2*(x + 4))}]
# failed single equation
assert solve(1/(1/x - y + exp(y))) == []
raises(
NotImplementedError, lambda: solve(exp(x) + sin(x) + exp(y) + sin(y)))
# failed system
# -- when no symbols given, 1 fails
assert solve([y, exp(x) + x]) == [{x: -LambertW(1), y: 0}]
# both fail
assert solve(
(exp(x) - x, exp(y) - y)) == [{x: -LambertW(-1), y: -LambertW(-1)}]
# -- when symbols given
solve([y, exp(x) + x], x, y) == [(-LambertW(1), 0)]
# symbol is a number
assert solve(x**2 - pi, pi) == [x**2]
# no equations
assert solve([], [x]) == []
# overdetermined system
# - nonlinear
assert solve([(x + y)**2 - 4, x + y - 2]) == [{x: -y + 2}]
# - linear
assert solve((x + y - 2, 2*x + 2*y - 4)) == {x: -y + 2}
def test_solve_polynomial1():
assert solve(3*x - 2, x) == [Rational(2, 3)]
assert solve(Eq(3*x, 2), x) == [Rational(2, 3)]
assert set(solve(x**2 - 1, x)) == set([-S(1), S(1)])
assert set(solve(Eq(x**2, 1), x)) == set([-S(1), S(1)])
assert solve(x - y**3, x) == [y**3]
assert set(solve(x - y**3, y)) == set([
(-x**Rational(1, 3))/2 + I*sqrt(3)*x**Rational(1, 3)/2,
x**Rational(1, 3),
(-x**Rational(1, 3))/2 - I*sqrt(3)*x**Rational(1, 3)/2,
])
a11, a12, a21, a22, b1, b2 = symbols('a11,a12,a21,a22,b1,b2')
assert solve([a11*x + a12*y - b1, a21*x + a22*y - b2], x, y) == \
{
x: (a22*b1 - a12*b2)/(a11*a22 - a12*a21),
y: (a11*b2 - a21*b1)/(a11*a22 - a12*a21),
}
solution = {y: S.Zero, x: S.Zero}
assert solve((x - y, x + y), x, y ) == solution
assert solve((x - y, x + y), (x, y)) == solution
assert solve((x - y, x + y), [x, y]) == solution
assert set(solve(x**3 - 15*x - 4, x)) == set([
-2 + 3**Rational(1, 2),
S(4),
-2 - 3**Rational(1, 2)
])
assert set(solve((x**2 - 1)**2 - a, x)) == \
set([sqrt(1 + sqrt(a)), -sqrt(1 + sqrt(a)),
sqrt(1 - sqrt(a)), -sqrt(1 - sqrt(a))])
def test_solve_polynomial2():
assert solve(4, x) == []
def test_solve_polynomial_cv_1a():
"""
Test for solving on equations that can be converted to a polynomial equation
using the change of variable y -> x**Rational(p, q)
"""
assert solve( sqrt(x) - 1, x) == [1]
assert solve( sqrt(x) - 2, x) == [4]
assert solve( x**Rational(1, 4) - 2, x) == [16]
assert solve( x**Rational(1, 3) - 3, x) == [27]
assert solve(sqrt(x) + x**Rational(1, 3) + x**Rational(1, 4), x) == [0]
def test_solve_polynomial_cv_1b():
assert set(solve(4*x*(1 - a*sqrt(x)), x)) == set([S(0), 1/a**2])
assert set(solve(x * (x**(S(1)/3) - 3), x)) == set([S(0), S(27)])
def test_solve_polynomial_cv_2():
"""
Test for solving on equations that can be converted to a polynomial equation
multiplying both sides of the equation by x**m
"""
assert solve(x + 1/x - 1, x) in \
[[ Rational(1, 2) + I*sqrt(3)/2, Rational(1, 2) - I*sqrt(3)/2],
[ Rational(1, 2) - I*sqrt(3)/2, Rational(1, 2) + I*sqrt(3)/2]]
def test_quintics_1():
f = x**5 - 110*x**3 - 55*x**2 + 2310*x + 979
s = solve(f, check=False)
for root in s:
res = f.subs(x, root.n()).n()
assert tn(res, 0)
f = x**5 - 15*x**3 - 5*x**2 + 10*x + 20
s = solve(f)
for root in s:
assert root.func == RootOf
# if one uses solve to get the roots of a polynomial that has a RootOf
# solution, make sure that the use of nfloat during the solve process
# doesn't fail. Note: if you want numerical solutions to a polynomial
# it is *much* faster to use nroots to get them than to solve the
# equation only to get RootOf solutions which are then numerically
# evaluated. So for eq = x**5 + 3*x + 7 do Poly(eq).nroots() rather
# than [i.n() for i in solve(eq)] to get the numerical roots of eq.
assert nfloat(solve(x**5 + 3*x**3 + 7)[0], exponent=False) == \
RootOf(x**5 + 3*x**3 + 7, 0).n()
def test_highorder_poly():
# just testing that the uniq generator is unpacked
sol = solve(x**6 - 2*x + 2)
assert all(isinstance(i, RootOf) for i in sol) and len(sol) == 6
@XFAIL
@slow
def test_quintics_2():
f = x**5 + 15*x + 12
s = solve(f, check=False)
for root in s:
res = f.subs(x, root.n()).n()
assert tn(res, 0)
f = x**5 - 15*x**3 - 5*x**2 + 10*x + 20
s = solve(f)
for root in s:
assert root.func == RootOf
def test_solve_rational():
"""Test solve for rational functions"""
assert solve( ( x - y**3 )/( (y**2)*sqrt(1 - y**2) ), x) == [y**3]
def test_solve_nonlinear():
assert solve(x**2 - y**2, x, y) == [{x: -y}, {x: y}]
assert solve(x**2 - y**2/exp(x), x, y) == [{x: 2*LambertW(y/2)}]
assert solve(x**2 - y**2/exp(x), y, x) == [{y: -x*sqrt(exp(x))}, {y: x*sqrt(exp(x))}]
def test_issue_7228():
assert solve(4**(2*(x**2) + 2*x) - 8, x) == [-Rational(3, 2), S.Half]
def test_issue_7190():
assert solve(log(x-3) + log(x+3), x) == [sqrt(10)]
def test_linear_system():
x, y, z, t, n = symbols('x, y, z, t, n')
assert solve([x - 1, x - y, x - 2*y, y - 1], [x, y]) == []
assert solve([x - 1, x - y, x - 2*y, x - 1], [x, y]) == []
assert solve([x - 1, x - 1, x - y, x - 2*y], [x, y]) == []
assert solve([x + 5*y - 2, -3*x + 6*y - 15], x, y) == {x: -3, y: 1}
M = Matrix([[0, 0, n*(n + 1), (n + 1)**2, 0],
[n + 1, n + 1, -2*n - 1, -(n + 1), 0],
[-1, 0, 1, 0, 0]])
assert solve_linear_system(M, x, y, z, t) == \
{x: -t - t/n, z: -t - t/n, y: 0}
assert solve([x + y + z + t, -z - t], x, y, z, t) == {x: -y, z: -t}
def test_linear_system_function():
a = Function('a')
assert solve([a(0, 0) + a(0, 1) + a(1, 0) + a(1, 1), -a(1, 0) - a(1, 1)],
a(0, 0), a(0, 1), a(1, 0), a(1, 1)) == {a(1, 0): -a(1, 1), a(0, 0): -a(0, 1)}
def test_linear_systemLU():
n = Symbol('n')
M = Matrix([[1, 2, 0, 1], [1, 3, 2*n, 1], [4, -1, n**2, 1]])
assert solve_linear_system_LU(M, [x, y, z]) == {z: -3/(n**2 + 18*n),
x: 1 - 12*n/(n**2 + 18*n),
y: 6*n/(n**2 + 18*n)}
# Note: multiple solutions exist for some of these equations, so the tests
# should be expected to break if the implementation of the solver changes
# in such a way that a different branch is chosen
def test_solve_transcendental():
from sympy.abc import a, b
assert solve(exp(x) - 3, x) == [log(3)]
assert set(solve((a*x + b)*(exp(x) - 3), x)) == set([-b/a, log(3)])
assert solve(cos(x) - y, x) == [-acos(y) + 2*pi, acos(y)]
assert solve(2*cos(x) - y, x) == [-acos(y/2) + 2*pi, acos(y/2)]
assert solve(Eq(cos(x), sin(x)), x) == [-3*pi/4, pi/4]
assert set(solve(exp(x) + exp(-x) - y, x)) in [set([
log(y/2 - sqrt(y**2 - 4)/2),
log(y/2 + sqrt(y**2 - 4)/2),
]), set([
log(y - sqrt(y**2 - 4)) - log(2),
log(y + sqrt(y**2 - 4)) - log(2)]),
set([
log(y/2 - sqrt((y - 2)*(y + 2))/2),
log(y/2 + sqrt((y - 2)*(y + 2))/2)])]
assert solve(exp(x) - 3, x) == [log(3)]
assert solve(Eq(exp(x), 3), x) == [log(3)]
assert solve(log(x) - 3, x) == [exp(3)]
assert solve(sqrt(3*x) - 4, x) == [Rational(16, 3)]
assert solve(3**(x + 2), x) == []
assert solve(3**(2 - x), x) == []
assert solve(x + 2**x, x) == [-LambertW(log(2))/log(2)]
ans = solve(3*x + 5 + 2**(-5*x + 3), x)
assert len(ans) == 1 and ans[0].expand() == \
-Rational(5, 3) + LambertW(-10240*2**(S(1)/3)*log(2)/3)/(5*log(2))
assert solve(5*x - 1 + 3*exp(2 - 7*x), x) == \
[Rational(1, 5) + LambertW(-21*exp(Rational(3, 5))/5)/7]
assert solve(2*x + 5 + log(3*x - 2), x) == \
[Rational(2, 3) + LambertW(2*exp(-Rational(19, 3))/3)/2]
assert solve(3*x + log(4*x), x) == [LambertW(Rational(3, 4))/3]
assert set(solve((2*x + 8)*(8 + exp(x)), x)) == set([S(-4), log(8) + pi*I])
eq = 2*exp(3*x + 4) - 3
ans = solve(eq, x) # this generated a failure in flatten
assert len(ans) == 3 and all(eq.subs(x, a).n(chop=True) == 0 for a in ans)
assert solve(2*log(3*x + 4) - 3, x) == [(exp(Rational(3, 2)) - 4)/3]
assert solve(exp(x) + 1, x) == [pi*I]
eq = 2*(3*x + 4)**5 - 6*7**(3*x + 9)
result = solve(eq, x)
ans = [(log(2401) + 5*LambertW(-log(7**(7*3**Rational(1, 5)/5))))/(3*log(7))/-1]
assert result == ans
# it works if expanded, too
assert solve(eq.expand(), x) == result
assert solve(z*cos(x) - y, x) == [-acos(y/z) + 2*pi, acos(y/z)]
assert solve(z*cos(2*x) - y, x) == [-acos(y/z)/2 + pi, acos(y/z)/2]
assert solve(z*cos(sin(x)) - y, x) == [
asin(acos(y/z) - 2*pi) + pi, -asin(acos(y/z)) + pi,
-asin(acos(y/z) - 2*pi), asin(acos(y/z))]
assert solve(z*cos(x), x) == [pi/2, 3*pi/2]
# issue 4508
assert solve(y - b*x/(a + x), x) in [[-a*y/(y - b)], [a*y/(b - y)]]
assert solve(y - b*exp(a/x), x) == [a/log(y/b)]
# issue 4507
assert solve(y - b/(1 + a*x), x) in [[(b - y)/(a*y)], [-((y - b)/(a*y))]]
# issue 4506
assert solve(y - a*x**b, x) == [(y/a)**(1/b)]
# issue 4505
assert solve(z**x - y, x) == [log(y)/log(z)]
# issue 4504
assert solve(2**x - 10, x) == [log(10)/log(2)]
# issue 6744
assert solve(x*y) == [{x: 0}, {y: 0}]
assert solve([x*y]) == [{x: 0}, {y: 0}]
assert solve(x**y - 1) == [{x: 1}, {y: 0}]
assert solve([x**y - 1]) == [{x: 1}, {y: 0}]
assert solve(x*y*(x**2 - y**2)) == [{x: 0}, {x: -y}, {x: y}, {y: 0}]
assert solve([x*y*(x**2 - y**2)]) == [{x: 0}, {x: -y}, {x: y}, {y: 0}]
#issue 4739
assert solve(exp(log(5)*x) - 2**x, x) == [0]
# misc
# make sure that the right variables is picked up in tsolve
raises(NotImplementedError, lambda: solve((exp(x) + 1)**x))
# shouldn't generate a GeneratorsNeeded error in _tsolve when the NaN is generated
# for eq_down. Actual answers, as determined numerically are approx. +/- 0.83
assert solve(sinh(x)*sinh(sinh(x)) + cosh(x)*cosh(sinh(x)) - 3) is not None
# watch out for recursive loop in tsolve
raises(NotImplementedError, lambda: solve((x + 2)**y*x - 3, x))
# issue 7245
assert solve(sin(sqrt(x))) == [0, pi**2]
# issue 7602
a, b = symbols('a, b', real=True, negative=False)
assert str(solve(Eq(a, 0.5 - cos(pi*b)/2), b)) == \
'[-0.318309886183791*acos(-2.0*a + 1.0) + 2.0, 0.318309886183791*acos(-2.0*a + 1.0)]'
def test_solve_for_functions_derivatives():
t = Symbol('t')
x = Function('x')(t)
y = Function('y')(t)
a11, a12, a21, a22, b1, b2 = symbols('a11,a12,a21,a22,b1,b2')
soln = solve([a11*x + a12*y - b1, a21*x + a22*y - b2], x, y)
assert soln == {
x: (a22*b1 - a12*b2)/(a11*a22 - a12*a21),
y: (a11*b2 - a21*b1)/(a11*a22 - a12*a21),
}
assert solve(x - 1, x) == [1]
assert solve(3*x - 2, x) == [Rational(2, 3)]
soln = solve([a11*x.diff(t) + a12*y.diff(t) - b1, a21*x.diff(t) +
a22*y.diff(t) - b2], x.diff(t), y.diff(t))
assert soln == { y.diff(t): (a11*b2 - a21*b1)/(a11*a22 - a12*a21),
x.diff(t): (a22*b1 - a12*b2)/(a11*a22 - a12*a21) }
assert solve(x.diff(t) - 1, x.diff(t)) == [1]
assert solve(3*x.diff(t) - 2, x.diff(t)) == [Rational(2, 3)]
eqns = set((3*x - 1, 2*y - 4))
assert solve(eqns, set((x, y))) == { x: Rational(1, 3), y: 2 }
x = Symbol('x')
f = Function('f')
F = x**2 + f(x)**2 - 4*x - 1
assert solve(F.diff(x), diff(f(x), x)) == [(-x + 2)/f(x)]
# Mixed cased with a Symbol and a Function
x = Symbol('x')
y = Function('y')(t)
soln = solve([a11*x + a12*y.diff(t) - b1, a21*x +
a22*y.diff(t) - b2], x, y.diff(t))
assert soln == { y.diff(t): (a11*b2 - a21*b1)/(a11*a22 - a12*a21),
x: (a22*b1 - a12*b2)/(a11*a22 - a12*a21) }
def test_issue_3725():
f = Function('f')
F = x**2 + f(x)**2 - 4*x - 1
e = F.diff(x)
assert solve(e, f(x).diff(x)) in [[(2 - x)/f(x)], [-((x - 2)/f(x))]]
def test_issue_3870():
a, b, c, d = symbols('a b c d')
A = Matrix(2, 2, [a, b, c, d])
B = Matrix(2, 2, [0, 2, -3, 0])
C = Matrix(2, 2, [1, 2, 3, 4])
assert solve(A*B - C, [a, b, c, d]) == {a: 1, b: -S(1)/3, c: 2, d: -1}
assert solve([A*B - C], [a, b, c, d]) == {a: 1, b: -S(1)/3, c: 2, d: -1}
assert solve(Eq(A*B, C), [a, b, c, d]) == {a: 1, b: -S(1)/3, c: 2, d: -1}
assert solve([A*B - B*A], [a, b, c, d]) == {a: d, b: -S(2)/3*c}
assert solve([A*C - C*A], [a, b, c, d]) == {a: d - c, b: S(2)/3*c}
assert solve([A*B - B*A, A*C - C*A], [a, b, c, d]) == {a: d, b: 0, c: 0}
assert solve([Eq(A*B, B*A)], [a, b, c, d]) == {a: d, b: -S(2)/3*c}
assert solve([Eq(A*C, C*A)], [a, b, c, d]) == {a: d - c, b: S(2)/3*c}
assert solve([Eq(A*B, B*A), Eq(A*C, C*A)], [a, b, c, d]) == {a: d, b: 0, c: 0}
def test_solve_linear():
w = Wild('w')
assert solve_linear(x, x) == (0, 1)
assert solve_linear(x, y - 2*x) in [(x, y/3), (y, 3*x)]
assert solve_linear(x, y - 2*x, exclude=[x]) == (y, 3*x)
assert solve_linear(3*x - y, 0) in [(x, y/3), (y, 3*x)]
assert solve_linear(3*x - y, 0, [x]) == (x, y/3)
assert solve_linear(3*x - y, 0, [y]) == (y, 3*x)
assert solve_linear(x**2/y, 1) == (y, x**2)
assert solve_linear(w, x) in [(w, x), (x, w)]
assert solve_linear(cos(x)**2 + sin(x)**2 + 2 + y) == \
(y, -2 - cos(x)**2 - sin(x)**2)
assert solve_linear(cos(x)**2 + sin(x)**2 + 2 + y, symbols=[x]) == (0, 1)
assert solve_linear(Eq(x, 3)) == (x, 3)
assert solve_linear(1/(1/x - 2)) == (0, 0)
assert solve_linear((x + 1)*exp(-x), symbols=[x]) == (x + 1, exp(x))
assert solve_linear((x + 1)*exp(x), symbols=[x]) == ((x + 1)*exp(x), 1)
assert solve_linear(x*exp(-x**2), symbols=[x]) == (0, 0)
raises(ValueError, lambda: solve_linear(Eq(x, 3), 3))
def test_solve_undetermined_coeffs():
assert solve_undetermined_coeffs(a*x**2 + b*x**2 + b*x + 2*c*x + c + 1, [a, b, c], x) == \
{a: -2, b: 2, c: -1}
# Test that rational functions work
assert solve_undetermined_coeffs(a/x + b/(x + 1) - (2*x + 1)/(x**2 + x), [a, b], x) == \
{a: 1, b: 1}
# Test cancellation in rational functions
assert solve_undetermined_coeffs(((c + 1)*a*x**2 + (c + 1)*b*x**2 +
(c + 1)*b*x + (c + 1)*2*c*x + (c + 1)**2)/(c + 1), [a, b, c], x) == \
{a: -2, b: 2, c: -1}
def test_solve_inequalities():
x = Symbol('x')
system = [Lt(x**2 - 2, 0), Gt(x**2 - 1, 0)]
assert solve(system) == \
And(Or(And(Lt(-sqrt(2), re(x)), Lt(re(x), -1)),
And(Lt(1, re(x)), Lt(re(x), sqrt(2)))), Eq(im(x), 0))
x = Symbol('x', real=True)
system = [Lt(x**2 - 2, 0), Gt(x**2 - 1, 0)]
assert solve(system) == \
Or(And(Lt(-sqrt(2), x), Lt(x, -1)), And(Lt(1, x), Lt(x, sqrt(2))))
# issue 6627, 3448
assert solve((x - 3)/(x - 2) < 0, x) == And(Lt(2, x), Lt(x, 3))
assert solve(x/(x + 1) > 1, x) == And(Lt(-oo, x), Lt(x, -1))
def test_issue_4793():
assert solve(1/x) == []
assert solve(x*(1 - 5/x)) == [5]
assert solve(x + sqrt(x) - 2) == [1]
assert solve(-(1 + x)/(2 + x)**2 + 1/(2 + x)) == []
assert solve(-x**2 - 2*x + (x + 1)**2 - 1) == []
assert solve((x/(x + 1) + 3)**(-2)) == []
assert solve(x/sqrt(x**2 + 1), x) == [0]
assert solve(exp(x) - y, x) == [log(y)]
assert solve(exp(x)) == []
assert solve(x**2 + x + sin(y)**2 + cos(y)**2 - 1, x) in [[0, -1], [-1, 0]]
eq = 4*3**(5*x + 2) - 7
ans = solve(eq, x)
assert len(ans) == 5 and all(eq.subs(x, a).n(chop=True) == 0 for a in ans)
assert solve(log(x**2) - y**2/exp(x), x, y, set=True) == \
([y], set([
(-sqrt(exp(x)*log(x**2)),),
(sqrt(exp(x)*log(x**2)),)]))
assert solve(x**2*z**2 - z**2*y**2) == [{x: -y}, {x: y}, {z: 0}]
assert solve((x - 1)/(1 + 1/(x - 1))) == []
assert solve(x**(y*z) - x, x) == [1]
raises(NotImplementedError, lambda: solve(log(x) - exp(x), x))
raises(NotImplementedError, lambda: solve(2**x - exp(x) - 3))
def test_PR1964():
# issue 5171
assert solve(sqrt(x)) == solve(sqrt(x**3)) == [0]
assert solve(sqrt(x - 1)) == [1]
# issue 4462
a = Symbol('a')
assert solve(-3*a/sqrt(x), x) == []
# issue 4486
assert solve(2*x/(x + 2) - 1, x) == [2]
# issue 4496
assert set(solve((x**2/(7 - x)).diff(x))) == set([S(0), S(14)])
# issue 4695
f = Function('f')
assert solve((3 - 5*x/f(x))*f(x), f(x)) == [5*x/3]
# issue 4497
assert solve(1/(5 + x)**(S(1)/5) - 9, x) == [-295244/S(59049)]
assert solve(sqrt(x) + sqrt(sqrt(x)) - 4) == [-9*sqrt(17)/2 + 49*S.Half]
assert set(solve(Poly(sqrt(exp(x)) + sqrt(exp(-x)) - 4))) in \
[
set([2*log(-sqrt(3) + 2), 2*log(sqrt(3) + 2)]),
set([log(-4*sqrt(3) + 7), log(4*sqrt(3) + 7)]),
]
assert set(solve(Poly(exp(x) + exp(-x) - 4))) == \
set([log(-sqrt(3) + 2), log(sqrt(3) + 2)])
assert set(solve(x**y + x**(2*y) - 1, x)) == \
set([(-S.Half + sqrt(5)/2)**(1/y), (-S.Half - sqrt(5)/2)**(1/y)])
assert solve(exp(x/y)*exp(-z/y) - 2, y) == [(x - z)/log(2)]
assert solve(
x**z*y**z - 2, z) in [[log(2)/(log(x) + log(y))], [log(2)/(log(x*y))]]
# if you do inversion too soon then multiple roots as for the following will
# be missed, e.g. if exp(3*x) = exp(3) -> 3*x = 3
E = S.Exp1
assert set(solve(exp(3*x) - exp(3), x)) in [
set([S(1), log(-E/2 - sqrt(3)*E*I/2), log(-E/2 + sqrt(3)*E*I/2)]),
set([S(1), log(E*(-S(1)/2 - sqrt(3)*I/2)), log(E*(-S(1)/2 + sqrt(3)*I/2))]),
]
# coverage test
p = Symbol('p', positive=True)
assert solve((1/p + 1)**(p + 1)) == []
def test_issue_5197():
x = Symbol('x', real=True)
assert solve(x**2 + 1, x) == []
n = Symbol('n', integer=True, positive=True)
assert solve((n - 1)*(n + 2)*(2*n - 1), n) == [1]
x = Symbol('x', positive=True)
y = Symbol('y')
assert solve([x + 5*y - 2, -3*x + 6*y - 15], x, y) == []
# not {x: -3, y: 1} b/c x is positive
# The solution following should not contain (-sqrt(2), sqrt(2))
assert solve((x + y)*n - y**2 + 2, x, y) == [(sqrt(2), -sqrt(2))]
y = Symbol('y', positive=True)
# The solution following should not contain {y: -x*exp(x/2)}
assert solve(x**2 - y**2/exp(x), y, x) == [{y: x*exp(x/2)}]
assert solve(x**2 - y**2/exp(x), x, y) == [{x: 2*LambertW(y/2)}]
x, y, z = symbols('x y z', positive=True)
assert solve(z**2*x**2 - z**2*y**2/exp(x), y, x, z) == [{y: x*exp(x/2)}]
def test_checking():
assert set(
solve(x*(x - y/x), x, check=False)) == set([sqrt(y), S(0), -sqrt(y)])
assert set(solve(x*(x - y/x), x, check=True)) == set([sqrt(y), -sqrt(y)])
# {x: 0, y: 4} sets denominator to 0 in the following so system should return None
assert solve((1/(1/x + 2), 1/(y - 3) - 1)) == []
# 0 sets denominator of 1/x to zero so None is returned
assert solve(1/(1/x + 2)) == []
def test_issue_4671_4463_4467():
assert solve((sqrt(x**2 - 1) - 2)) in ([sqrt(5), -sqrt(5)],
[-sqrt(5), sqrt(5)])
assert set(solve((2**exp(y**2/x) + 2)/(x**2 + 15), y)) == set([
-sqrt(x)*sqrt(-log(log(2)) + log(log(2) + I*pi)),
sqrt(x)*sqrt(-log(log(2)) + log(log(2) + I*pi))])
C1, C2 = symbols('C1 C2')
f = Function('f')
assert solve(C1 + C2/x**2 - exp(-f(x)), f(x)) == [log(x**2/(C1*x**2 + C2))]
a = Symbol('a')
E = S.Exp1
assert solve(1 - log(a + 4*x**2), x) in (
[-sqrt(-a + E)/2, sqrt(-a + E)/2],
[sqrt(-a + E)/2, -sqrt(-a + E)/2]
)
assert solve(log(a**(-3) - x**2)/a, x) in (
[-sqrt(-1 + a**(-3)), sqrt(-1 + a**(-3))],
[sqrt(-1 + a**(-3)), -sqrt(-1 + a**(-3))],)
assert solve(1 - log(a + 4*x**2), x) in (
[-sqrt(-a + E)/2, sqrt(-a + E)/2],
[sqrt(-a + E)/2, -sqrt(-a + E)/2],)
assert set(solve((
a**2 + 1) * (sin(a*x) + cos(a*x)), x)) == set([-pi/(4*a), 3*pi/(4*a)])
assert solve(3 - (sinh(a*x) + cosh(a*x)), x) == [log(3)/a]
assert set(solve(3 - (sinh(a*x) + cosh(a*x)**2), x)) == \
set([log(-2 + sqrt(5))/a, log(-sqrt(2) + 1)/a,
log(-sqrt(5) - 2)/a, log(1 + sqrt(2))/a])
assert solve(atan(x) - 1) == [tan(1)]
def test_issue_5132():
r, t = symbols('r,t')
assert set(solve([r - x**2 - y**2, tan(t) - y/x], [x, y])) == \
set([(
-sqrt(r*sin(t)**2)/tan(t), -sqrt(r*sin(t)**2)),
(sqrt(r*sin(t)**2)/tan(t), sqrt(r*sin(t)**2))])
assert solve([exp(x) - sin(y), 1/y - 3], [x, y]) == \
[(log(sin(S(1)/3)), S(1)/3)]
assert solve([exp(x) - sin(y), 1/exp(y) - 3], [x, y]) == \
[(log(-sin(log(3))), -log(3))]
assert set(solve([exp(x) - sin(y), y**2 - 4], [x, y])) == \
set([(log(-sin(2)), -S(2)), (log(sin(2)), S(2))])
eqs = [exp(x)**2 - sin(y) + z**2, 1/exp(y) - 3]
assert solve(eqs, set=True) == \
([x, y], set([
(log(-sqrt(-z**2 - sin(log(3)))), -log(3)),
(log(sqrt(-z**2 - sin(log(3)))), -log(3))]))
assert solve(eqs, x, z, set=True) == \
([x], set([
(log(-sqrt(-z**2 + sin(y))),),
(log(sqrt(-z**2 + sin(y))),)]))
assert set(solve(eqs, x, y)) == \
set([
(log(-sqrt(-z**2 - sin(log(3)))), -log(3)),
(log(sqrt(-z**2 - sin(log(3)))), -log(3))])
assert set(solve(eqs, y, z)) == \
set([
(-log(3), -sqrt(-exp(2*x) - sin(log(3)))),
(-log(3), sqrt(-exp(2*x) - sin(log(3))))])
eqs = [exp(x)**2 - sin(y) + z, 1/exp(y) - 3]
assert solve(eqs, set=True) == ([x, y], set(
[
(log(-sqrt(-z - sin(log(3)))), -log(3)),
(log(sqrt(-z - sin(log(3)))), -log(3))]))
assert solve(eqs, x, z, set=True) == ([x], set(
[
(log(-sqrt(-z + sin(y))),),
(log(sqrt(-z + sin(y))),)]))
assert set(solve(eqs, x, y)) == set(
[
(log(-sqrt(-z - sin(log(3)))), -log(3)),
(log(sqrt(-z - sin(log(3)))), -log(3))])
assert solve(eqs, z, y) == \
[(-exp(2*x) - sin(log(3)), -log(3))]
assert solve((sqrt(x**2 + y**2) - sqrt(10), x + y - 4), set=True) == (
[x, y], set([(S(1), S(3)), (S(3), S(1))]))
assert set(solve((sqrt(x**2 + y**2) - sqrt(10), x + y - 4), x, y)) == \
set([(S(1), S(3)), (S(3), S(1))])
def test_issue_5335():
lam, a0, conc = symbols('lam a0 conc')
eqs = [lam + 2*y - a0*(1 - x/2)*x - 0.005*x/2*x,
a0*(1 - x/2)*x - 1*y - 0.743436700916726*y,
x + y - conc]
sym = [x, y, a0]
# there are 4 solutions but only two are valid
assert len(solve(eqs, sym, manual=True, minimal=True, simplify=False)) == 2
def test_issue_5335_float():
skip("This test hangs.")
lam, a0, conc = symbols('lam a0 conc')
eqs = [lam + 2*y - a0*(1 - x/2)*x - 0.005*x/2*x,
a0*(1 - x/2)*x - 1*y - 0.743436700916726*y,
x + y - conc]
sym = [x, y, a0]
assert len(
solve(eqs, sym, rational=False, check=False, simplify=False)) == 2
def test_issue_5767():
assert set(solve([x**2 + y + 4], [x])) == \
set([(-sqrt(-y - 4),), (sqrt(-y - 4),)])
def test_polysys():
assert set(solve([x**2 + 2/y - 2, x + y - 3], [x, y])) == \
set([(S(1), S(2)), (1 + sqrt(5), 2 - sqrt(5)),
(1 - sqrt(5), 2 + sqrt(5))])
assert solve([x**2 + y - 2, x**2 + y]) == []
# the ordering should be whatever the user requested
assert solve([x**2 + y - 3, x - y - 4], (x, y)) != solve([x**2 +
y - 3, x - y - 4], (y, x))
def test_unrad():
s = symbols('s', cls=Dummy)
# checkers to deal with possibility of answer coming
# back with a sign change (cf issue 5203)
def check(rv, ans):
rv, ans = list(rv), list(ans)
rv[0] = rv[0].expand()
ans[0] = ans[0].expand()
return rv[0] in [ans[0], -ans[0]] and rv[1:] == ans[1:]
def s_check(rv, ans):
# get the dummy
rv = list(rv)
d = rv[0].atoms(Dummy)
reps = list(zip(d, [s]*len(d)))
# replace s with this dummy
rv = (rv[0].subs(reps).expand(), [(p[0].subs(reps), p[1].subs(reps))
for p in rv[1]],
[a.subs(reps) for a in rv[2]])
ans = (ans[0].subs(reps).expand(), [(p[0].subs(reps), p[1].subs(reps))
for p in ans[1]],
[a.subs(reps) for a in ans[2]])
return str(rv[0]) in [str(ans[0]), str(-ans[0])] and \
str(rv[1:]) == str(ans[1:])
assert check(unrad(sqrt(x)),
(x, [], []))
assert check(unrad(sqrt(x) + 1),
(x - 1, [], []))
assert s_check(unrad(sqrt(x) + x**Rational(1, 3) + 2),
(2 + s**2 + s**3, [(s, x - s**6)], []))
assert check(unrad(sqrt(x)*x**Rational(1, 3) + 2),
(x**5 - 64, [], []))
assert check(unrad(sqrt(x) + (x + 1)**Rational(1, 3)),
(x**3 - (x + 1)**2, [], []))
assert check(unrad(sqrt(x) + sqrt(x + 1) + sqrt(2*x)),
(-2*sqrt(2)*x - 2*x + 1, [], []))
assert check(unrad(sqrt(x) + sqrt(x + 1) + 2),
(16*x - 9, [], []))
assert check(unrad(sqrt(x) + sqrt(x + 1) + sqrt(1 - x)),
(-4*x + 5*x**2, [], []))
assert check(unrad(a*sqrt(x) + b*sqrt(x) + c*sqrt(y) + d*sqrt(y)),
((a*sqrt(x) + b*sqrt(x))**2 - (c*sqrt(y) + d*sqrt(y))**2, [], []))
assert check(unrad(sqrt(x) + sqrt(1 - x)),
(2*x - 1, [], []))
assert check(unrad(sqrt(x) + sqrt(1 - x) - 3),
(9*x + (x - 5)**2 - 9, [], []))
assert check(unrad(sqrt(x) + sqrt(1 - x) + sqrt(2 + x)),
(-5*x**2 + 2*x - 1, [], []))
assert check(unrad(sqrt(x) + sqrt(1 - x) + sqrt(2 + x) - 3),
(-25*x**4 - 376*x**3 - 1256*x**2 + 2272*x - 784, [], []))
assert check(unrad(sqrt(x) + sqrt(1 - x) + sqrt(2 + x) - sqrt(1 - 2*x)),
(-41*x**4 - 40*x**3 - 232*x**2 + 160*x - 16, [], []))
assert check(unrad(sqrt(x) + sqrt(x + 1)), (S(1), [], []))
eq = sqrt(x) + sqrt(x + 1) + sqrt(1 - sqrt(x))
assert check(unrad(eq),
(16*x**3 - 9*x**2, [], []))
assert set(solve(eq, check=False)) == set([S(0), S(9)/16])
assert solve(eq) == []
# but this one really does have those solutions
assert set(solve(sqrt(x) - sqrt(x + 1) + sqrt(1 - sqrt(x)))) == \
set([S.Zero, S(9)/16])
'''NOTE
real_root changes the value of the result if the solution is
simplified; `a` in the text below is the root that is not 4/5:
>>> eq
sqrt(x) + sqrt(-x + 1) + sqrt(x + 1) - 6*sqrt(5)/5
>>> eq.subs(x, a).n()
-0.e-123 + 0.e-127*I
>>> real_root(eq.subs(x, a)).n()
-0.e-123 + 0.e-127*I
>>> (eq.subs(x,simplify(a))).n()
-0.e-126
>>> real_root(eq.subs(x, simplify(a))).n()
0.194825975605452 + 2.15093623885838*I
>>> sqrt(x).subs(x, real_root(a)).n()
0.809823827278194 - 0.e-25*I
>>> sqrt(x).subs(x, (a)).n()
0.809823827278194 - 0.e-25*I
>>> sqrt(x).subs(x, simplify(a)).n()
0.809823827278194 - 5.32999467690853e-25*I
>>> sqrt(x).subs(x, real_root(simplify(a))).n()
0.49864610868139 + 1.44572604257047*I
'''
eq = (sqrt(x) + sqrt(x + 1) + sqrt(1 - x) - 6*sqrt(5)/5)
ra = S('''-1484/375 - 4*(-1/2 + sqrt(3)*I/2)*(-12459439/52734375 +
114*sqrt(12657)/78125)**(1/3) - 172564/(140625*(-1/2 +
sqrt(3)*I/2)*(-12459439/52734375 + 114*sqrt(12657)/78125)**(1/3))''')
rb = S(4)/5
ans = solve(sqrt(x) + sqrt(x + 1) + sqrt(1 - x) - 6*sqrt(5)/5)
assert all(abs(eq.subs(x, i).n()) < 1e-10 for i in (ra, rb)) and \
len(ans) == 2 and \
set([i.n(chop=True) for i in ans]) == \
set([i.n(chop=True) for i in (ra, rb)])
raises(ValueError, lambda:
unrad(-root(x,3)**2 + 2**pi*root(x,3) - x + 2**pi))
raises(ValueError, lambda:
unrad(sqrt(x) + sqrt(x + 1) + sqrt(1 - sqrt(x)) + 3))
raises(ValueError, lambda:
unrad(sqrt(x) + (x + 1)**Rational(1, 3) + 2*sqrt(y)))
# same as last but consider only y
assert check(unrad(sqrt(x) + (x + 1)**Rational(1, 3) + 2*sqrt(y), y),
(4*y - (sqrt(x) + (x + 1)**(S(1)/3))**2, [], []))
assert check(unrad(sqrt(x/(1 - x)) + (x + 1)**Rational(1, 3)),
(x**3/(-x + 1)**3 - (x + 1)**2, [], [(-x + 1)**3]))
# same as last but consider only y; no y-containing denominators now
assert s_check(unrad(sqrt(x/(1 - x)) + 2*sqrt(y), y),
(x/(-x + 1) - 4*y, [], []))
assert check(unrad(sqrt(x)*sqrt(1 - x) + 2, x),
(x*(-x + 1) - 4, [], []))
# http://tutorial.math.lamar.edu/
# Classes/Alg/SolveRadicalEqns.aspx#Solve_Rad_Ex2_a
assert solve(Eq(x, sqrt(x + 6))) == [3]
assert solve(Eq(x + sqrt(x - 4), 4)) == [4]
assert solve(Eq(1, x + sqrt(2*x - 3))) == []
assert set(solve(Eq(sqrt(5*x + 6) - 2, x))) == set([-S(1), S(2)])
assert set(solve(Eq(sqrt(2*x - 1) - sqrt(x - 4), 2))) == set([S(5), S(13)])
assert solve(Eq(sqrt(x + 7) + 2, sqrt(3 - x))) == [-6]
# http://www.purplemath.com/modules/solverad.htm
assert solve((2*x - 5)**Rational(1, 3) - 3) == [16]
assert solve((x**3 - 3*x**2)**Rational(1, 3) + 1 - x) == []
assert set(solve(x + 1 - (x**4 + 4*x**3 - x)**Rational(1, 4))) == \
set([-S(1)/2, -S(1)/3])
assert set(solve(sqrt(2*x**2 - 7) - (3 - x))) == set([-S(8), S(2)])
assert solve(sqrt(2*x + 9) - sqrt(x + 1) - sqrt(x + 4)) == [0]
assert solve(sqrt(x + 4) + sqrt(2*x - 1) - 3*sqrt(x - 1)) == [5]
assert solve(sqrt(x)*sqrt(x - 7) - 12) == [16]
assert solve(sqrt(x - 3) + sqrt(x) - 3) == [4]
assert solve(sqrt(9*x**2 + 4) - (3*x + 2)) == [0]
assert solve(sqrt(x) - 2 - 5) == [49]
assert solve(sqrt(x - 3) - sqrt(x) - 3) == []
assert solve(sqrt(x - 1) - x + 7) == [10]
assert solve(sqrt(x - 2) - 5) == [27]
assert solve(sqrt(17*x - sqrt(x**2 - 5)) - 7) == [3]
assert solve(sqrt(x) - sqrt(x - 1) + sqrt(sqrt(x))) == []
# don't posify the expression in unrad and use _mexpand
z = sqrt(2*x + 1)/sqrt(x) - sqrt(2 + 1/x)
p = posify(z)[0]
assert solve(p) == []
assert solve(z) == []
assert solve(z + 6*I) == [-S(1)/11]
assert solve(p + 6*I) == []
eq = sqrt(2 + I) + 2*I
assert unrad(eq - x, x, all=True) == (x**4 + 4*x**2 + 8*x + 37, [], [])
ans = (81*x**8 - 2268*x**6 - 4536*x**5 + 22644*x**4 + 63216*x**3 -
31608*x**2 - 189648*x + 141358, [], [])
r = sqrt(sqrt(2)/3 + 7)
eq = sqrt(r) + r - x
assert unrad(eq, all=1)
r2 = sqrt(sqrt(2) + 21)/sqrt(3)
assert r != r2 and r.equals(r2)
assert unrad(eq - r + r2, all=True) == ans
@slow
def test_unrad_slow():
ans = solve(sqrt(x) + sqrt(x + 1) -
sqrt(1 - x) - sqrt(2 + x))
assert len(ans) == 1 and NS(ans[0])[:4] == '0.73'
# the fence optimization problem
# https://github.com/sympy/sympy/issues/4793#issuecomment-36994519
F = Symbol('F')
eq = F - (2*x + 2*y + sqrt(x**2 + y**2))
X = solve(eq, x, hint='minimal')[0]
Y = solve((x*y).subs(x, X).diff(y), y, simplify=False, minimal=True)
ans = 2*F/7 - sqrt(2)*F/14
assert any((a - ans).expand().is_zero for a in Y)
eq = S('''
-x + (1/2 - sqrt(3)*I/2)*(3*x**3/2 - x*(3*x**2 - 34)/2 + sqrt((-3*x**3
+ x*(3*x**2 - 34) + 90)**2/4 - 39304/27) - 45)**(1/3) + 34/(3*(1/2 -
sqrt(3)*I/2)*(3*x**3/2 - x*(3*x**2 - 34)/2 + sqrt((-3*x**3 + x*(3*x**2
- 34) + 90)**2/4 - 39304/27) - 45)**(1/3))''')
raises(NotImplementedError, lambda: solve(eq)) # not other code errors
def test__invert():
assert _invert(x - 2) == (2, x)
assert _invert(2) == (2, 0)
assert _invert(exp(1/x) - 3, x) == (1/log(3), x)
assert _invert(exp(1/x + a/x) - 3, x) == ((a + 1)/log(3), x)
assert _invert(a, x) == (a, 0)
def test_issue_4463():
assert solve(-a*x + 2*x*log(x), x) == [exp(a/2)]
assert solve(a/x + exp(x/2), x) == [2*LambertW(-a/2)]
assert solve(x**x) == []
assert solve(x**x - 2) == [exp(LambertW(log(2)))]
assert solve(((x - 3)*(x - 2))**((x - 3)*(x - 4))) == [2]
assert solve(
(a/x + exp(x/2)).diff(x), x) == [4*LambertW(sqrt(2)*sqrt(a)/4)]
def test_issue_5114():
a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r = symbols('a:r')
# there is no 'a' in the equation set but this is how the
# problem was originally posed
syms = a, b, c, f, h, k, n
eqs = [b + r/d - c/d,
c*(1/d + 1/e + 1/g) - f/g - r/d,
f*(1/g + 1/i + 1/j) - c/g - h/i,
h*(1/i + 1/l + 1/m) - f/i - k/m,
k*(1/m + 1/o + 1/p) - h/m - n/p,
n*(1/p + 1/q) - k/p]
assert len(solve(eqs, syms, manual=True, check=False, simplify=False)) == 1
def test_issue_5849():
I1, I2, I3, I4, I5, I6 = symbols('I1:7')
dI1, dI4, dQ2, dQ4, Q2, Q4 = symbols('dI1,dI4,dQ2,dQ4,Q2,Q4')
e = (
I1 - I2 - I3,
I3 - I4 - I5,
I4 + I5 - I6,
-I1 + I2 + I6,
-2*I1 - 2*I3 - 2*I5 - 3*I6 - dI1/2 + 12,
-I4 + dQ4,
-I2 + dQ2,
2*I3 + 2*I5 + 3*I6 - Q2,
I4 - 2*I5 + 2*Q4 + dI4
)
ans = [{
dQ4: I3 - I5,
dI1: -4*I2 - 8*I3 - 4*I5 - 6*I6 + 24,
I4: I3 - I5,
dQ2: I2,
Q2: 2*I3 + 2*I5 + 3*I6,
I1: I2 + I3,
Q4: -I3/2 + 3*I5/2 - dI4/2}]
assert solve(e, I1, I4, Q2, Q4, dI1, dI4, dQ2, dQ4, manual=True) == ans
# the matrix solver (tested below) doesn't like this because it produces
# a zero row in the matrix. Is this related to issue 4551?
assert [ei.subs(
ans[0]) for ei in e] == [0, 0, I3 - I6, -I3 + I6, 0, 0, 0, 0, 0]
def test_issue_5849_matrix():
    '''Same as test_issue_5849 but solved with the matrix solver.'''
I1, I2, I3, I4, I5, I6 = symbols('I1:7')
dI1, dI4, dQ2, dQ4, Q2, Q4 = symbols('dI1,dI4,dQ2,dQ4,Q2,Q4')
e = (
I1 - I2 - I3,
I3 - I4 - I5,
I4 + I5 - I6,
-I1 + I2 + I6,
-2*I1 - 2*I3 - 2*I5 - 3*I6 - dI1/2 + 12,
-I4 + dQ4,
-I2 + dQ2,
2*I3 + 2*I5 + 3*I6 - Q2,
I4 - 2*I5 + 2*Q4 + dI4
)
assert solve(e, I1, I4, Q2, Q4, dI1, dI4, dQ2, dQ4) == {
dI4: -I3 + 3*I5 - 2*Q4,
dI1: -4*I2 - 8*I3 - 4*I5 - 6*I6 + 24,
dQ2: I2,
I1: I2 + I3,
Q2: 2*I3 + 2*I5 + 3*I6,
dQ4: I3 - I5,
I4: I3 - I5}
def test_issue_5901():
f, g, h = map(Function, 'fgh')
a = Symbol('a')
D = Derivative(f(x), x)
G = Derivative(g(a), a)
assert solve(f(x) + f(x).diff(x), f(x)) == \
[-D]
assert solve(f(x) - 3, f(x)) == \
[3]
assert solve(f(x) - 3*f(x).diff(x), f(x)) == \
[3*D]
assert solve([f(x) - 3*f(x).diff(x)], f(x)) == \
{f(x): 3*D}
assert solve([f(x) - 3*f(x).diff(x), f(x)**2 - y + 4], f(x), y) == \
[{f(x): 3*D, y: 9*D**2 + 4}]
assert solve(-f(a)**2*g(a)**2 + f(a)**2*h(a)**2 + g(a).diff(a),
h(a), g(a), set=True) == \
([g(a)], set([
(-sqrt(h(a)**2 + G/f(a)**2),),
(sqrt(h(a)**2 + G/f(a)**2),)]))
args = [f(x).diff(x, 2)*(f(x) + g(x)) - g(x)**2 + 2, f(x), g(x)]
assert set(solve(*args)) == \
set([(-sqrt(2), sqrt(2)), (sqrt(2), -sqrt(2))])
eqs = [f(x)**2 + g(x) - 2*f(x).diff(x), g(x)**2 - 4]
assert solve(eqs, f(x), g(x), set=True) == \
([f(x), g(x)], set([
(-sqrt(2*D - 2), S(2)),
(sqrt(2*D - 2), S(2)),
(-sqrt(2*D + 2), -S(2)),
(sqrt(2*D + 2), -S(2))]))
# the underlying problem was in solve_linear that was not masking off
# anything but a Mul or Add; it now raises an error if it gets anything
# but a symbol and solve handles the substitutions necessary so solve_linear
# won't make this error
raises(
ValueError, lambda: solve_linear(f(x) + f(x).diff(x), symbols=[f(x)]))
assert solve_linear(f(x) + f(x).diff(x), symbols=[x]) == \
(f(x) + Derivative(f(x), x), 1)
assert solve_linear(f(x) + Integral(x, (x, y)), symbols=[x]) == \
(f(x) + Integral(x, (x, y)), 1)
assert solve_linear(f(x) + Integral(x, (x, y)) + x, symbols=[x]) == \
(x + f(x) + Integral(x, (x, y)), 1)
assert solve_linear(f(y) + Integral(x, (x, y)) + x, symbols=[x]) == \
(x, -f(y) - Integral(x, (x, y)))
assert solve_linear(x - f(x)/a + (f(x) - 1)/a, symbols=[x]) == \
(x, 1/a)
assert solve_linear(x + Derivative(2*x, x)) == \
(x, -2)
assert solve_linear(x + Integral(x, y), symbols=[x]) == \
(x, 0)
assert solve_linear(x + Integral(x, y) - 2, symbols=[x]) == \
(x, 2/(y + 1))
assert set(solve(x + exp(x)**2, exp(x))) == \
set([-sqrt(-x), sqrt(-x)])
assert solve(x + exp(x), x, implicit=True) == \
[-exp(x)]
assert solve(cos(x) - sin(x), x, implicit=True) == []
assert solve(x - sin(x), x, implicit=True) == \
[sin(x)]
assert solve(x**2 + x - 3, x, implicit=True) == \
[-x**2 + 3]
assert solve(x**2 + x - 3, x**2, implicit=True) == \
[-x + 3]
def test_issue_5912():
assert set(solve(x**2 - x - 0.1, rational=True)) == \
set([S(1)/2 + sqrt(35)/10, -sqrt(35)/10 + S(1)/2])
# [-0.0916079783099616, 1.09160797830996]
ans = solve(x**2 - x - 0.1, rational=False)
assert len(ans) == 2 and all(a.is_Number for a in ans)
ans = solve(x**2 - x - 0.1)
assert len(ans) == 2 and all(a.is_Number for a in ans)
def test_float_handling():
def test(e1, e2):
return len(e1.atoms(Float)) == len(e2.atoms(Float))
assert solve(x - 0.5, rational=True)[0].is_Rational
assert solve(x - 0.5, rational=False)[0].is_Float
assert solve(x - S.Half, rational=False)[0].is_Rational
assert solve(x - 0.5, rational=None)[0].is_Float
assert solve(x - S.Half, rational=None)[0].is_Rational
assert test(nfloat(1 + 2*x), 1.0 + 2.0*x)
for contain in [list, tuple, set]:
ans = nfloat(contain([1 + 2*x]))
assert type(ans) is contain and test(list(ans)[0], 1.0 + 2.0*x)
k, v = list(nfloat({2*x: [1 + 2*x]}).items())[0]
assert test(k, 2*x) and test(v[0], 1.0 + 2.0*x)
assert test(nfloat(cos(2*x)), cos(2.0*x))
assert test(nfloat(3*x**2), 3.0*x**2)
assert test(nfloat(3*x**2, exponent=True), 3.0*x**2.0)
assert test(nfloat(exp(2*x)), exp(2.0*x))
assert test(nfloat(x/3), x/3.0)
assert test(nfloat(x**4 + 2*x + cos(S(1)/3) + 1),
x**4 + 2.0*x + 1.94495694631474)
# don't call nfloat if there is no solution
tot = 100 + c + z + t
assert solve(((.7 + c)/tot - .6, (.2 + z)/tot - .3, t/tot - .1)) == []
def test_check_assumptions():
x = symbols('x', positive=True)
assert solve(x**2 - 1) == [1]
def test_issue_6056():
assert solve(tanh(x + 3)*tanh(x - 3) - 1) == []
assert set([simplify(w) for w in solve(tanh(x - 1)*tanh(x + 1) + 1)]) == set([
-log(2)/2 + log(1 - I),
-log(2)/2 + log(-1 - I),
-log(2)/2 + log(1 + I),
-log(2)/2 + log(-1 + I),])
assert set([simplify(w) for w in solve((tanh(x + 3)*tanh(x - 3) + 1)**2)]) == set([
-log(2)/2 + log(1 - I),
-log(2)/2 + log(-1 - I),
-log(2)/2 + log(1 + I),
-log(2)/2 + log(-1 + I),])
def test_issue_6060():
x = Symbol('x')
absxm3 = Piecewise(
(x - 3, S(0) <= x - 3),
(3 - x, S(0) > x - 3)
)
y = Symbol('y')
assert solve(absxm3 - y, x) == [
Piecewise((-y + 3, S(0) > -y), (S.NaN, True)),
Piecewise((y + 3, S(0) <= y), (S.NaN, True))
]
y = Symbol('y', positive=True)
assert solve(absxm3 - y, x) == [-y + 3, y + 3]
def test_issue_5673():
eq = -x + exp(exp(LambertW(log(x)))*LambertW(log(x)))
assert checksol(eq, x, 2) is True
assert checksol(eq, x, 2, numerical=False) is None
def test_exclude():
R, C, Ri, Vout, V1, Vminus, Vplus, s = \
symbols('R, C, Ri, Vout, V1, Vminus, Vplus, s')
Rf = symbols('Rf', positive=True) # to eliminate Rf = 0 soln
eqs = [C*V1*s + Vplus*(-2*C*s - 1/R),
Vminus*(-1/Ri - 1/Rf) + Vout/Rf,
C*Vplus*s + V1*(-C*s - 1/R) + Vout/R,
-Vminus + Vplus]
assert solve(eqs, exclude=s*C*R) == [
{
Rf: Ri*(C*R*s + 1)**2/(C*R*s),
Vminus: Vplus,
V1: 2*Vplus + Vplus/(C*R*s),
Vout: C*R*Vplus*s + 3*Vplus + Vplus/(C*R*s)},
{
Vplus: 0,
Vminus: 0,
V1: 0,
Vout: 0},
]
    # TODO: Investigate why currently solution [0] is preferred over [1].
assert solve(eqs, exclude=[Vplus, s, C]) in [[{
Vminus: Vplus,
V1: Vout/2 + Vplus/2 + sqrt((Vout - 5*Vplus)*(Vout - Vplus))/2,
R: (Vout - 3*Vplus - sqrt(Vout**2 - 6*Vout*Vplus + 5*Vplus**2))/(2*C*Vplus*s),
Rf: Ri*(Vout - Vplus)/Vplus,
}, {
Vminus: Vplus,
V1: Vout/2 + Vplus/2 - sqrt((Vout - 5*Vplus)*(Vout - Vplus))/2,
R: (Vout - 3*Vplus + sqrt(Vout**2 - 6*Vout*Vplus + 5*Vplus**2))/(2*C*Vplus*s),
Rf: Ri*(Vout - Vplus)/Vplus,
}], [{
Vminus: Vplus,
Vout: (V1**2 - V1*Vplus - Vplus**2)/(V1 - 2*Vplus),
Rf: Ri*(V1 - Vplus)**2/(Vplus*(V1 - 2*Vplus)),
R: Vplus/(C*s*(V1 - 2*Vplus)),
}]]
def test_high_order_roots():
s = x**5 + 4*x**3 + 3*x**2 + S(7)/4
assert set(solve(s)) == set(Poly(s*4, domain='ZZ').all_roots())
def test_minsolve_linear_system():
def count(dic):
return len([x for x in dic.values() if x == 0])
assert count(solve([x + y + z, y + z + a + t], particular=True, quick=True)) \
== 3
assert count(solve([x + y + z, y + z + a + t], particular=True, quick=False)) \
== 3
assert count(solve([x + y + z, y + z + a], particular=True, quick=True)) == 1
assert count(solve([x + y + z, y + z + a], particular=True, quick=False)) == 2
def test_real_roots():
# cf. issue 6650
x = Symbol('x', real=True)
assert len(solve(x**5 + x**3 + 1)) == 1
@slow
def test_issue_6528():
if ON_TRAVIS:
skip("Too slow for travis.")
eqs = [
327600995*x**2 - 37869137*x + 1809975124*y**2 - 9998905626,
895613949*x**2 - 273830224*x*y + 530506983*y**2 - 10000000000]
assert len(solve(eqs, y, x)) == len(solve(eqs, y, x, manual=True)) == 4
def test_overdetermined():
x = symbols('x', real=True)
eqs = [Abs(4*x - 7) - 5, Abs(3 - 8*x) - 1]
assert solve(eqs, x) == [(S.Half,)]
assert solve(eqs, x, manual=True) == [(S.Half,)]
assert solve(eqs, x, manual=True, check=False) == [(S.Half,), (S(3),)]
def test_issue_6605():
x = symbols('x')
assert solve(4**(x/2) - 2**(x/3)) == [0]
# while the first one passed, this one failed
x = symbols('x', real=True)
assert solve(5**(x/2) - 2**(x/3)) == [0]
b = sqrt(6)*sqrt(log(2))/sqrt(log(5))
assert solve(5**(x/2) - 2**(3/x)) == [-b, b]
def test__ispow():
assert _ispow(x**2)
assert not _ispow(x)
assert not _ispow(True)
def test_issue_6644():
eq = -sqrt((m - q)**2 + (-m/(2*q) + S(1)/2)**2) + sqrt((-m**2/2 - sqrt(
4*m**4 - 4*m**2 + 8*m + 1)/4 - S(1)/4)**2 + (m**2/2 - m - sqrt(
4*m**4 - 4*m**2 + 8*m + 1)/4 - S(1)/4)**2)
assert solve(eq, q) == [
m**2/2 - sqrt(4*m**4 - 4*m**2 + 8*m + 1)/4 - S(1)/4,
m**2/2 + sqrt(4*m**4 - 4*m**2 + 8*m + 1)/4 - S(1)/4]
def test_issue_6752():
assert solve([a**2 + a, a - b], [a, b]) == [(-1, -1), (0, 0)]
assert solve([a**2 + a*c, a - b], [a, b]) == [(0, 0), (-c, -c)]
def test_issue_6792():
assert solve(x*(x - 1)**2*(x + 1)*(x**6 - x + 1)) == [
-1, 0, 1, RootOf(x**6 - x + 1, 0), RootOf(x**6 - x + 1, 1),
RootOf(x**6 - x + 1, 2), RootOf(x**6 - x + 1, 3), RootOf(x**6 - x + 1, 4),
RootOf(x**6 - x + 1, 5)]
def test_issues_6819_6820_6821_6248():
# issue 6821
x, y = symbols('x y', real=True)
assert solve(abs(x + 3) - 2*abs(x - 3)) == [1, 9]
assert solve([abs(x) - 2, arg(x) - pi], x) == [(-2,), (2,)]
assert set(solve(abs(x - 7) - 8)) == set([-S(1), S(15)])
# issue 7145
assert solve(2*abs(x) - abs(x - 1)) == [-1, Rational(1, 3)]
x = symbols('x')
assert solve([re(x) - 1, im(x) - 2], x) == [
{re(x): 1, x: 1 + 2*I, im(x): 2}]
# check for 'dict' handling of solution
eq = sqrt(re(x)**2 + im(x)**2) - 3
assert solve(eq) == solve(eq, x)
i = symbols('i', imaginary=True)
assert solve(abs(i) - 3) == [-3*I, 3*I]
raises(NotImplementedError, lambda: solve(abs(x) - 3))
w = symbols('w', integer=True)
assert solve(2*x**w - 4*y**w, w) == solve((x/y)**w - 2, w)
x, y = symbols('x y', real=True)
assert solve(x + y*I + 3) == {y: 0, x: -3}
# issue 2642
assert solve(x*(1 + I)) == [0]
x, y = symbols('x y', imaginary=True)
assert solve(x + y*I + 3 + 2*I) == {x: -2*I, y: 3*I}
x = symbols('x', real=True)
assert solve(x + y + 3 + 2*I) == {x: -3, y: -2*I}
# issue 6248
f = Function('f')
assert solve(f(x + 1) - f(2*x - 1)) == [2]
assert solve(log(x + 1) - log(2*x - 1)) == [2]
x = symbols('x')
assert solve(2**x + 4**x) == [I*pi/log(2)]
def test_issue_6989():
f = Function('f')
assert solve(Eq(-f(x), Piecewise((1, x > 0), (0, True))), f(x)) == \
[Piecewise((-1, x > 0), (0, True))]
def test_lambert_multivariate():
from sympy.abc import a, x, y
from sympy.solvers.bivariate import _filtered_gens, _lambert, _solve_lambert
assert _filtered_gens(Poly(x + 1/x + exp(x) + y), x) == set([x, exp(x)])
assert _lambert(x, x) == []
assert solve((x**2 - 2*x + 1).subs(x, log(x) + 3*x)) == [LambertW(3*S.Exp1)/3]
assert solve((x**2 - 2*x + 1).subs(x, (log(x) + 3*x)**2 - 1)) == \
[LambertW(3*exp(-sqrt(2)))/3, LambertW(3*exp(sqrt(2)))/3]
assert solve((x**2 - 2*x - 2).subs(x, log(x) + 3*x)) == \
[LambertW(3*exp(1 + sqrt(3)))/3, LambertW(3*exp(-sqrt(3) + 1))/3]
assert solve(x*log(x) + 3*x + 1, x) == [exp(-3 + LambertW(-exp(3)))]
eq = (x*exp(x) - 3).subs(x, x*exp(x))
assert solve(eq) == [LambertW(3*exp(-LambertW(3)))]
# coverage test
raises(NotImplementedError, lambda: solve(x - sin(x)*log(y - x), x))
# if sign is unknown then only this one solution is obtained
assert solve(3*log(a**(3*x + 5)) + a**(3*x + 5), x) == [
-((log(a**5) + LambertW(S(1)/3))/(3*log(a)))] # tested numerically
p = symbols('p', positive=True)
assert solve(3*log(p**(3*x + 5)) + p**(3*x + 5), x) == [
log((-3**(S(1)/3) - 3**(S(5)/6)*I)*LambertW(S(1)/3)**(S(1)/3)/(2*p**(S(5)/3)))/log(p),
log((-3**(S(1)/3) + 3**(S(5)/6)*I)*LambertW(S(1)/3)**(S(1)/3)/(2*p**(S(5)/3)))/log(p),
log((3*LambertW(S(1)/3)/p**5)**(1/(3*log(p)))),] # checked numerically
# check collection
assert solve(3*log(a**(3*x + 5)) + b*log(a**(3*x + 5)) + a**(3*x + 5), x) == [
-((log(a**5) + LambertW(1/(b + 3)))/(3*log(a)))]
eq = 4*2**(2*p + 3) - 2*p - 3
assert _solve_lambert(eq, p, _filtered_gens(Poly(eq), p)) == [
-S(3)/2 - LambertW(-4*log(2))/(2*log(2))]
# issue 4271
assert solve((a/x + exp(x/2)).diff(x, 2), x) == [
6*LambertW((-1)**(S(1)/3)*a**(S(1)/3)/3)]
assert solve((log(x) + x).subs(x, x**2 + 1)) == [
-I*sqrt(-LambertW(1) + 1), sqrt(-1 + LambertW(1))]
# these only give one of the solutions (see XFAIL below)
assert solve(x**3 - 3**x, x) == [-3/log(3)*LambertW(-log(3)/3)]
# replacing 3 with 2 in the above solution gives 2
assert solve(x**2 - 2**x, x) == [2]
assert solve(-x**2 + 2**x, x) == [2]
assert solve(3**cos(x) - cos(x)**3) == [
acos(-3*LambertW(-log(3)/3)/log(3))]
@XFAIL
def test_other_lambert():
from sympy.abc import x
assert solve(3*sin(x) - x*sin(3), x) == [3]
assert set(solve(3*log(x) - x*log(3))) == set(
[3, -3*LambertW(-log(3)/3)/log(3)])
a = S(6)/5
assert set(solve(x**a - a**x)) == set(
[a, -a*LambertW(-log(a)/a)/log(a)])
assert set(solve(3**cos(x) - cos(x)**3)) == set(
[acos(3), acos(-3*LambertW(-log(3)/3)/log(3))])
assert set(solve(x**2 - 2**x)) == set(
[2, -2/log(2)*LambertW(log(2)/2)])
def test_rewrite_trig():
assert solve(sin(x) + tan(x)) == [0, 2*pi]
assert solve(sin(x) + sec(x)) == [
-2*atan(-S.Half + sqrt(2 - 2*sqrt(3)*I)/2 + sqrt(3)*I/2),
2*atan(S.Half - sqrt(3)*I/2 + sqrt(2 - 2*sqrt(3)*I)/2),
2*atan(S.Half - sqrt(2 + 2*sqrt(3)*I)/2 + sqrt(3)*I/2),
2*atan(S.Half + sqrt(2 + 2*sqrt(3)*I)/2 + sqrt(3)*I/2)]
assert solve(sinh(x) + tanh(x)) == [0, I*pi]
# issue 6157
assert solve(2*sin(x) - cos(x), x) == [-2*atan(2 + sqrt(5)),
-2*atan(-sqrt(5) + 2)]
@XFAIL
def test_rewrite_trigh():
# if this import passes then the test below should also pass
from sympy import sech
assert solve(sinh(x) + sech(x)) == [
2*atanh(-S.Half + sqrt(5)/2 - sqrt(-2*sqrt(5) + 2)/2),
2*atanh(-S.Half + sqrt(5)/2 + sqrt(-2*sqrt(5) + 2)/2),
2*atanh(-sqrt(5)/2 - S.Half + sqrt(2 + 2*sqrt(5))/2),
2*atanh(-sqrt(2 + 2*sqrt(5))/2 - sqrt(5)/2 - S.Half)]
def test_uselogcombine():
eq = z - log(x) + log(y/(x*(-1 + y**2/x**2)))
assert solve(eq, x, force=True) == [-sqrt(y*(y - exp(z))), sqrt(y*(y - exp(z)))]
assert solve(log(x + 3) + log(1 + 3/x) - 3) == [
-3 + sqrt(-12 + exp(3))*exp(S(3)/2)/2 + exp(3)/2,
-sqrt(-12 + exp(3))*exp(S(3)/2)/2 - 3 + exp(3)/2]
def test_atan2():
assert solve(atan2(x, 2) - pi/3, x) == [2*sqrt(3)]
def test_errorinverses():
assert solve(erf(x)-y,x)==[erfinv(y)]
assert solve(erfinv(x)-y,x)==[erf(y)]
assert solve(erfc(x)-y,x)==[erfcinv(y)]
assert solve(erfcinv(x)-y,x)==[erfc(y)]
def test_issue_2725():
R = Symbol('R')
eq = sqrt(2)*R*sqrt(1/(R + 1)) + (R + 1)*(sqrt(2)*sqrt(1/(R + 1)) - 1)
sol = solve(eq, R, set=True)[1]
assert sol == set([(S(5)/3 + 40/(3*(251 + 3*sqrt(111)*I)**(S(1)/3)) +
(251 + 3*sqrt(111)*I)**(S(1)/3)/3,), ((-160 + (1 +
sqrt(3)*I)*(10 - (1 + sqrt(3)*I)*(251 +
3*sqrt(111)*I)**(S(1)/3))*(251 +
3*sqrt(111)*I)**(S(1)/3))/Mul(6, (1 +
sqrt(3)*I), (251 + 3*sqrt(111)*I)**(S(1)/3),
evaluate=False),)])
def test_issue_5114_6611():
# See that it doesn't hang; this solves in about 2 seconds.
# Also check that the solution is relatively small.
# Note: the system in issue 6611 solves in about 5 seconds and has
# an op-count of 138336 (with simplify=False).
b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r = symbols('b:r')
eqs = Matrix([
[b - c/d + r/d], [c*(1/g + 1/e + 1/d) - f/g - r/d],
[-c/g + f*(1/j + 1/i + 1/g) - h/i], [-f/i + h*(1/m + 1/l + 1/i) - k/m],
[-h/m + k*(1/p + 1/o + 1/m) - n/p], [-k/p + n*(1/q + 1/p)]])
v = Matrix([f, h, k, n, b, c])
    ans = solve(list(eqs), list(v), simplify=False)
    # If time is taken to simplify then the 2617 below becomes
# 1168 and the time is about 50 seconds instead of 2.
assert sum([s.count_ops() for s in ans.values()]) <= 2617
def test_det_quick():
m = Matrix(3, 3, symbols('a:9'))
assert m.det() == det_quick(m) # calls det_perm
m[0, 0] = 1
assert m.det() == det_quick(m) # calls det_minor
m = Matrix(3, 3, list(range(9)))
assert m.det() == det_quick(m) # defaults to .det()
# make sure they work with Sparse
s = SparseMatrix(2, 2, (1, 2, 1, 4))
assert det_perm(s) == det_minor(s) == s.det()
def test_piecewise():
# if no symbol is given the piecewise detection must still work
assert solve(Piecewise((x - 2, Gt(x, 2)), (2 - x, True)) - 3) == [-1, 5]
def test_real_imag_splitting():
a, b = symbols('a b', real=True)
assert solve(sqrt(a**2 + b**2) - 3, a) == \
[-sqrt(-b**2 + 9), sqrt(-b**2 + 9)]
a, b = symbols('a b', imaginary=True)
assert solve(sqrt(a**2 + b**2) - 3, a) == []
def test_issue_7110():
y = -2*x**3 + 4*x**2 - 2*x + 5
assert any(ask(Q.real(i)) for i in solve(y))
def test_units():
assert solve(1/x - 1/(2*cm)) == [2*cm]
def test_issue_7547():
A, B, V = symbols('A,B,V')
eq1 = Eq(630.26*(V - 39.0)*V*(V + 39) - A + B, 0)
eq2 = Eq(B, 1.36*10**8*(V - 39))
eq3 = Eq(A, 5.75*10**5*V*(V + 39.0))
sol = Matrix(nsolve(Tuple(eq1, eq2, eq3), [A, B, V], (0, 0, 0)))
assert str(sol) == str(Matrix(
[['4442890172.68209'],
['4289299466.1432'],
['70.5389666628177']]))
def test_issue_7895():
r = symbols('r', real=True)
assert solve(sqrt(r) - 2) == [4]
def test_issue_2777():
# the equations represent two circles
x, y = symbols('x y', real=True)
e1, e2 = sqrt(x**2 + y**2) - 10, sqrt(y**2 + (-x + 10)**2) - 3
a, b = 191/S(20), 3*sqrt(391)/20
ans = [(a, -b), (a, b)]
assert solve((e1, e2), (x, y)) == ans
assert solve((e1, e2/(x - a)), (x, y)) == []
# make the 2nd circle's radius be -3
e2 += 6
assert solve((e1, e2), (x, y)) == []
assert solve((e1, e2), (x, y), check=False) == ans
def test_issue_7322():
number = 5.62527e-35
assert solve(x - number, x)[0] == number
| wolfram74/numerical_methods_iserles_notes | venv/lib/python2.7/site-packages/sympy/solvers/tests/test_solvers.py | Python | mit | 58,517 | 0.002956 |
from validate_app import validateApp
import os
from distutils import spawn
import sys
from parse_files import parseOutHTseq, bringTogether
from bashSub import bashSub
def checkPreprocessApplications():
applications = ["spades.py"]
source = ["http://bioinf.spbau.ru/spades"]
    i = 0
    for app in applications:
        if spawn.find_executable(app) is None:
            sys.stderr.write("It doesn't look like you have app - " + app + "\n")
            sys.stderr.write("Download it here - " + source[i] + "\n")
            exit(0)
        else:
            sys.stderr.write(app + " found\n")
        i += 1
def returnReads(dictSampleSeqFiles):
SE = ""
PE1 = ""
PE2 = ""
# data struct
# { (sampleKey, seqKey) : [[SE], [SE], [PE1, PE2], [PE1, PE2]] }
# diving into each of the sub lists in the dictionary value key
for e in dictSampleSeqFiles:
        # if sublist only has one element then it is an SE read
if len(e) == 1:
if SE == "":
SE = e[0]
else:
SE += "," + e[0]
else:
if PE1 == "":
PE1 = e[0]
PE2 = e[1]
else:
PE1 += "," + e[0]
PE2 += "," + e[1]
return [SE, PE1, PE2]
def check_dir(Dir):
if not os.path.exists(Dir):
os.mkdir(Dir)
class spadesCMD:
def __init__(self):
self.metaDataFolder = "MetaData"
def execute(self, args):
time = 0
checkPreprocessApplications();
logFiles = []
# checkPreprocessApplications()
validate = validateApp()
validate.setValidation(True)
dictSampleSeqFiles = validate.validateSampleSheet(args.readFolder, args.spadesFolder, args.samplesFile, args.force, True)
        # keys tuple: index 0 is the input folder, index 1 is the output folder
for keys in dictSampleSeqFiles.keys():
check_dir(args.spadesFolder)
check_dir(keys[1])
terminal = []
#countFile = os.path.join(keys[1], keys[0].split("/")[-1]) + ".counts"
print dictSampleSeqFiles[keys]
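            # Three read files means paired-end (-1/-2) plus single-end (-s); two means paired-end only.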
if (len(dictSampleSeqFiles[keys][1]) == 3):
terminal.append(bashSub("spades.py", dictSampleSeqFiles[keys][1], ['-1', '-2', '-s'], " --careful -t " + args.threads + " -o " + keys[1] + " -m " + args.memory, ''))
elif (len(dictSampleSeqFiles[keys][1]) == 2):
terminal.append(bashSub("spades.py", dictSampleSeqFiles[keys][1], ['-1', '-2'], "--careful -t " + args.threads + " -o " + keys[1] + " -m " + args.memory, ''))
print terminal[-1].getCommand()
terminal[-1].runCmd("")
sys.stderr.flush()
#time += runSortByName.returnTime() + runView.returnTime() + htseqCmd.returnTime()
#logFiles.append(parseOutHTseq(keys[1], keys[1].split("/")[-1]))
#bringTogether(logFiles, os.path.join(args.finalDir, "Counts_Summary.log"))
print "Total amount of seconds to run all samples"
print "Seconds: " + str(time)
| msettles/expHTS | expHTS/spadesCMD.py | Python | apache-2.0 | 3,121 | 0.006729 |
"""
Django settings for CL_Project project.
Generated by 'django-admin startproject' using Django 1.9.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p*zbr4!vavk7^si=#d_w7vl-_lvd=3g_fpus-nrv&e^%+57fel'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'songs',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'CL_Project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates', 'songs/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CL_Project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'assets'),
)
| pmakahmann/CL_Project | CL_Project/settings.py | Python | mit | 3,282 | 0.001219 |
# coding: utf-8
from __future__ import unicode_literals
import re
from hashlib import sha1
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
determine_ext,
float_or_none,
int_or_none,
unified_strdate,
)
class ProSiebenSat1BaseIE(InfoExtractor):
def _extract_video_info(self, url, clip_id):
client_location = url
video = self._download_json(
'http://vas.sim-technik.de/vas/live/v2/videos',
clip_id, 'Downloading videos JSON', query={
'access_token': self._TOKEN,
'client_location': client_location,
'client_name': self._CLIENT_NAME,
'ids': clip_id,
})[0]
if video.get('is_protected') is True:
raise ExtractorError('This video is DRM protected.', expected=True)
duration = float_or_none(video.get('duration'))
source_ids = [compat_str(source['id']) for source in video['sources']]
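        # Request signing: client_id is the first two salt characters plus a SHA1
        # over the clip id, salt, token, client location and client name.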
client_id = self._SALT[:2] + sha1(''.join([clip_id, self._SALT, self._TOKEN, client_location, self._SALT, self._CLIENT_NAME]).encode('utf-8')).hexdigest()
sources = self._download_json(
'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources' % clip_id,
clip_id, 'Downloading sources JSON', query={
'access_token': self._TOKEN,
'client_id': client_id,
'client_location': client_location,
'client_name': self._CLIENT_NAME,
})
server_id = sources['server_id']
def fix_bitrate(bitrate):
bitrate = int_or_none(bitrate)
if not bitrate:
return None
return (bitrate // 1000) if bitrate % 1000 == 0 else bitrate
formats = []
for source_id in source_ids:
client_id = self._SALT[:2] + sha1(''.join([self._SALT, clip_id, self._TOKEN, server_id, client_location, source_id, self._SALT, self._CLIENT_NAME]).encode('utf-8')).hexdigest()
urls = self._download_json(
'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources/url' % clip_id,
clip_id, 'Downloading urls JSON', fatal=False, query={
'access_token': self._TOKEN,
'client_id': client_id,
'client_location': client_location,
'client_name': self._CLIENT_NAME,
'server_id': server_id,
'source_ids': source_id,
})
if not urls:
continue
if urls.get('status_code') != 0:
raise ExtractorError('This video is unavailable', expected=True)
urls_sources = urls['sources']
if isinstance(urls_sources, dict):
urls_sources = urls_sources.values()
for source in urls_sources:
source_url = source.get('url')
if not source_url:
continue
protocol = source.get('protocol')
mimetype = source.get('mimetype')
if mimetype == 'application/f4m+xml' or 'f4mgenerator' in source_url or determine_ext(source_url) == 'f4m':
formats.extend(self._extract_f4m_formats(
source_url, clip_id, f4m_id='hds', fatal=False))
elif mimetype == 'application/x-mpegURL':
formats.extend(self._extract_m3u8_formats(
source_url, clip_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
else:
tbr = fix_bitrate(source['bitrate'])
if protocol in ('rtmp', 'rtmpe'):
mobj = re.search(r'^(?P<url>rtmpe?://[^/]+)/(?P<path>.+)$', source_url)
if not mobj:
continue
path = mobj.group('path')
mp4colon_index = path.rfind('mp4:')
app = path[:mp4colon_index]
play_path = path[mp4colon_index:]
formats.append({
'url': '%s/%s' % (mobj.group('url'), app),
'app': app,
'play_path': play_path,
'player_url': 'http://livepassdl.conviva.com/hf/ver/2.79.0.17083/LivePassModuleMain.swf',
'page_url': 'http://www.prosieben.de',
'tbr': tbr,
'ext': 'flv',
'format_id': 'rtmp%s' % ('-%d' % tbr if tbr else ''),
})
else:
formats.append({
'url': source_url,
'tbr': tbr,
'format_id': 'http%s' % ('-%d' % tbr if tbr else ''),
})
self._sort_formats(formats)
return {
'duration': duration,
'formats': formats,
}
class ProSiebenSat1IE(ProSiebenSat1BaseIE):
IE_NAME = 'prosiebensat1'
IE_DESC = 'ProSiebenSat.1 Digital'
_VALID_URL = r'''(?x)
https?://
(?:www\.)?
(?:
(?:
prosieben(?:maxx)?|sixx|sat1(?:gold)?|kabeleins(?:doku)?|the-voice-of-germany|7tv|advopedia
)\.(?:de|at|ch)|
ran\.de|fem\.com|advopedia\.de
)
/(?P<id>.+)
'''
_TESTS = [
{
# Tests changes introduced in https://github.com/rg3/youtube-dl/pull/6242
# in response to fixing https://github.com/rg3/youtube-dl/issues/6215:
# - malformed f4m manifest support
# - proper handling of URLs starting with `https?://` in 2.0 manifests
# - recursive child f4m manifests extraction
'url': 'http://www.prosieben.de/tv/circus-halligalli/videos/218-staffel-2-episode-18-jahresrueckblick-ganze-folge',
'info_dict': {
'id': '2104602',
'ext': 'flv',
'title': 'Episode 18 - Staffel 2',
'description': 'md5:8733c81b702ea472e069bc48bb658fc1',
'upload_date': '20131231',
'duration': 5845.04,
},
'params': {
# rtmp download
'skip_download': True,
},
},
{
'url': 'http://www.prosieben.de/videokatalog/Gesellschaft/Leben/Trends/video-Lady-Umstyling-f%C3%BCr-Audrina-Rebekka-Audrina-Fergen-billig-aussehen-Battal-Modica-700544.html',
'info_dict': {
'id': '2570327',
'ext': 'mp4',
'title': 'Lady-Umstyling für Audrina',
'description': 'md5:4c16d0c17a3461a0d43ea4084e96319d',
'upload_date': '20131014',
'duration': 606.76,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Seems to be broken',
},
{
'url': 'http://www.prosiebenmaxx.de/tv/experience/video/144-countdown-fuer-die-autowerkstatt-ganze-folge',
'info_dict': {
'id': '2429369',
'ext': 'mp4',
'title': 'Countdown für die Autowerkstatt',
'description': 'md5:809fc051a457b5d8666013bc40698817',
'upload_date': '20140223',
'duration': 2595.04,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'This video is unavailable',
},
{
'url': 'http://www.sixx.de/stars-style/video/sexy-laufen-in-ugg-boots-clip',
'info_dict': {
'id': '2904997',
'ext': 'mp4',
'title': 'Sexy laufen in Ugg Boots',
'description': 'md5:edf42b8bd5bc4e5da4db4222c5acb7d6',
'upload_date': '20140122',
'duration': 245.32,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'This video is unavailable',
},
{
'url': 'http://www.sat1.de/film/der-ruecktritt/video/im-interview-kai-wiesinger-clip',
'info_dict': {
'id': '2906572',
'ext': 'mp4',
'title': 'Im Interview: Kai Wiesinger',
'description': 'md5:e4e5370652ec63b95023e914190b4eb9',
'upload_date': '20140203',
'duration': 522.56,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'This video is unavailable',
},
{
'url': 'http://www.kabeleins.de/tv/rosins-restaurants/videos/jagd-auf-fertigkost-im-elsthal-teil-2-ganze-folge',
'info_dict': {
'id': '2992323',
'ext': 'mp4',
'title': 'Jagd auf Fertigkost im Elsthal - Teil 2',
'description': 'md5:2669cde3febe9bce13904f701e774eb6',
'upload_date': '20141014',
'duration': 2410.44,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'This video is unavailable',
},
{
'url': 'http://www.ran.de/fussball/bundesliga/video/schalke-toennies-moechte-raul-zurueck-ganze-folge',
'info_dict': {
'id': '3004256',
'ext': 'mp4',
'title': 'Schalke: Tönnies möchte Raul zurück',
'description': 'md5:4b5b271d9bcde223b54390754c8ece3f',
'upload_date': '20140226',
'duration': 228.96,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'This video is unavailable',
},
{
'url': 'http://www.the-voice-of-germany.de/video/31-andreas-kuemmert-rocket-man-clip',
'info_dict': {
'id': '2572814',
'ext': 'flv',
'title': 'Andreas Kümmert: Rocket Man',
'description': 'md5:6ddb02b0781c6adf778afea606652e38',
'upload_date': '20131017',
'duration': 469.88,
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.fem.com/wellness/videos/wellness-video-clip-kurztripps-zum-valentinstag.html',
'info_dict': {
'id': '2156342',
'ext': 'flv',
'title': 'Kurztrips zum Valentinstag',
'description': 'Romantischer Kurztrip zum Valentinstag? Nina Heinemann verrät, was sich hier wirklich lohnt.',
'duration': 307.24,
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.prosieben.de/tv/joko-gegen-klaas/videos/playlists/episode-8-ganze-folge-playlist',
'info_dict': {
'id': '439664',
'title': 'Episode 8 - Ganze Folge - Playlist',
'description': 'md5:63b8963e71f481782aeea877658dec84',
},
'playlist_count': 2,
},
{
'url': 'http://www.7tv.de/circus-halligalli/615-best-of-circus-halligalli-ganze-folge',
'info_dict': {
'id': '4187506',
'ext': 'flv',
'title': 'Best of Circus HalliGalli',
'description': 'md5:8849752efd90b9772c9db6fdf87fb9e9',
'upload_date': '20151229',
},
'params': {
'skip_download': True,
},
},
{
# geo restricted to Germany
'url': 'http://www.kabeleinsdoku.de/tv/mayday-alarm-im-cockpit/video/102-notlandung-im-hudson-river-ganze-folge',
'only_matching': True,
},
{
# geo restricted to Germany
'url': 'http://www.sat1gold.de/tv/edel-starck/video/11-staffel-1-episode-1-partner-wider-willen-ganze-folge',
'only_matching': True,
},
{
'url': 'http://www.sat1gold.de/tv/edel-starck/playlist/die-gesamte-1-staffel',
'only_matching': True,
},
{
'url': 'http://www.advopedia.de/videos/lenssen-klaert-auf/lenssen-klaert-auf-folge-8-staffel-3-feiertage-und-freie-tage',
'only_matching': True,
},
]
_TOKEN = 'prosieben'
_SALT = '01!8d8F_)r9]4s[qeuXfP%'
_CLIENT_NAME = 'kolibri-2.0.19-splec4'
_CLIPID_REGEXES = [
r'"clip_id"\s*:\s+"(\d+)"',
r'clipid: "(\d+)"',
r'clip[iI]d=(\d+)',
r'clip[iI]d\s*=\s*["\'](\d+)',
r"'itemImageUrl'\s*:\s*'/dynamic/thumbnails/full/\d+/(\d+)",
]
_TITLE_REGEXES = [
r'<h2 class="subtitle" itemprop="name">\s*(.+?)</h2>',
r'<header class="clearfix">\s*<h3>(.+?)</h3>',
r'<!-- start video -->\s*<h1>(.+?)</h1>',
r'<h1 class="att-name">\s*(.+?)</h1>',
r'<header class="module_header">\s*<h2>([^<]+)</h2>\s*</header>',
r'<h2 class="video-title" itemprop="name">\s*(.+?)</h2>',
r'<div[^>]+id="veeseoTitle"[^>]*>(.+?)</div>',
]
_DESCRIPTION_REGEXES = [
r'<p itemprop="description">\s*(.+?)</p>',
r'<div class="videoDecription">\s*<p><strong>Beschreibung</strong>: (.+?)</p>',
r'<div class="g-plusone" data-size="medium"></div>\s*</div>\s*</header>\s*(.+?)\s*<footer>',
r'<p class="att-description">\s*(.+?)\s*</p>',
r'<p class="video-description" itemprop="description">\s*(.+?)</p>',
r'<div[^>]+id="veeseoDescription"[^>]*>(.+?)</div>',
]
_UPLOAD_DATE_REGEXES = [
r'<meta property="og:published_time" content="(.+?)">',
r'<span>\s*(\d{2}\.\d{2}\.\d{4} \d{2}:\d{2}) \|\s*<span itemprop="duration"',
r'<footer>\s*(\d{2}\.\d{2}\.\d{4}) \d{2}:\d{2} Uhr',
r'<span style="padding-left: 4px;line-height:20px; color:#404040">(\d{2}\.\d{2}\.\d{4})</span>',
r'(\d{2}\.\d{2}\.\d{4}) \| \d{2}:\d{2} Min<br/>',
]
_PAGE_TYPE_REGEXES = [
r'<meta name="page_type" content="([^"]+)">',
r"'itemType'\s*:\s*'([^']*)'",
]
_PLAYLIST_ID_REGEXES = [
r'content[iI]d=(\d+)',
r"'itemId'\s*:\s*'([^']*)'",
]
_PLAYLIST_CLIP_REGEXES = [
r'(?s)data-qvt=.+?<a href="([^"]+)"',
]
def _extract_clip(self, url, webpage):
clip_id = self._html_search_regex(
self._CLIPID_REGEXES, webpage, 'clip id')
title = self._html_search_regex(self._TITLE_REGEXES, webpage, 'title')
info = self._extract_video_info(url, clip_id)
description = self._html_search_regex(
self._DESCRIPTION_REGEXES, webpage, 'description', fatal=False)
thumbnail = self._og_search_thumbnail(webpage)
upload_date = unified_strdate(self._html_search_regex(
self._UPLOAD_DATE_REGEXES, webpage, 'upload date', default=None))
info.update({
'id': clip_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
})
return info
def _extract_playlist(self, url, webpage):
playlist_id = self._html_search_regex(
self._PLAYLIST_ID_REGEXES, webpage, 'playlist id')
playlist = self._parse_json(
self._search_regex(
                r'var\s+contentResources\s*=\s*(\[.+?\]);\s*</script',
webpage, 'playlist'),
playlist_id)
entries = []
for item in playlist:
clip_id = item.get('id') or item.get('upc')
if not clip_id:
continue
info = self._extract_video_info(url, clip_id)
info.update({
'id': clip_id,
'title': item.get('title') or item.get('teaser', {}).get('headline'),
'description': item.get('teaser', {}).get('description'),
'thumbnail': item.get('poster'),
'duration': float_or_none(item.get('duration')),
'series': item.get('tvShowTitle'),
'uploader': item.get('broadcastPublisher'),
})
entries.append(info)
return self.playlist_result(entries, playlist_id)
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
page_type = self._search_regex(
self._PAGE_TYPE_REGEXES, webpage,
'page type', default='clip').lower()
if page_type == 'clip':
return self._extract_clip(url, webpage)
elif page_type == 'playlist':
return self._extract_playlist(url, webpage)
| jbuchbinder/youtube-dl | youtube_dl/extractor/prosiebensat1.py | Python | unlicense | 17,378 | 0.002303 |
# -*- coding: utf-8 -*-
"""
@title: El Coladero
@description: Web application for detecting and fixing vulnerabilities
@author: Enrique Martín Martín
@email: emartinm@ucm.es
"""
from bottle import run, template, get, post, request
import sqlite3
@get('/show_all_questions')
def show_all_questions():
conn = sqlite3.connect("database.db")
cur = conn.cursor()
query = """SELECT author,title,time,tags,id
FROM Questions
ORDER BY time DESC"""
cur.execute(query)
res = list(cur.fetchall())
conn.close()
return template('messages.html', questions=res)
@post('/insert_question')
def insert_question():
author = request.forms['author']
title = request.forms['title']
tags = request.forms['tags']
body = request.forms['body']
conn = sqlite3.connect("database.db")
cur = conn.cursor()
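    # Form fields are interpolated directly into the SQL string (no parameter binding).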
qbody = """INSERT INTO Questions(author, title, tags, body, time)
VALUES ('{0}','{1}','{2}','{3}',CURRENT_TIMESTAMP)"""
query = qbody.format(author, title, tags, body)
cur.executescript(query)
conn.commit()
conn.close()
return "Pregunta insertada con exito"
@get('/show_question')
def show_question():
ident = request.query['id']
conn = sqlite3.connect("database.db")
cur = conn.cursor()
qbody1 = """SELECT author,title,time,tags,body
FROM Questions
WHERE id={0}"""
qbody2 = """SELECT author,time,body
FROM Replies
WHERE question_id={0}"""
query1 = qbody1.format(ident)
query2 = qbody2.format(ident)
cur.execute(query1)
question = cur.fetchone()
cur.execute(query2)
replies = list(cur.fetchall())
conn.close()
return template("message_detail.html", q=question, replies=replies, ident=ident)
@post('/insert_reply')
def insert_reply():
author = request.forms['author']
body = request.forms['body']
question_id = request.forms['question_id']
conn = sqlite3.connect('database.db')
cur = conn.cursor()
qbody = """INSERT INTO Replies(author,body,time,question_id)
VALUES ('{0}', '{1}', CURRENT_TIMESTAMP, {2})"""
query = qbody.format(author, body, question_id)
cur.execute(query)
conn.commit()
conn.close()
return "Contestación insertada con éxito"
@get('/search_question')
def search_question():
tag = request.query['tag']
conn = sqlite3.connect('database.db')
cur = conn.cursor()
qbody = """SELECT author,title,time,tags
FROM Questions
WHERE tags LIKE '%{0}%'
ORDER BY time DESC"""
print tag
print qbody.format(tag)
query = qbody.format(tag)
cur.execute(query)
res = list(cur.fetchall())
conn.close()
return template('messages_search.html', questions=res, tag=tag)
if __name__ == "__main__":
run(host='localhost',port=8080,debug=True)
| ferreiro/Python-NoSQL-Security | assignment6/El_Coladero/coladero.py | Python | mit | 2,952 | 0.006108 |
'''
Created on Sep 27, 2013
@author: leal
Default JSON MESSAGES
'''
import ast
import logging
import config.config
logger = logging.getLogger(__name__)
class Messages(object):
    '''
    Builds standardized JSON-style response dictionaries (success/error) by
    filling a template string and parsing it with ast.literal_eval.
    '''
messageTemplate = """{
'success' : '%r',
'message' : '%s',
'details' : %r
}"""
@staticmethod
def success(message,details=''):
messageAsStr = Messages.messageTemplate%(True,message,details)
logger.debug(messageAsStr)
messageAsDic = ast.literal_eval(messageAsStr)
return messageAsDic
@staticmethod
def error(message,details=''):
messageAsStr = Messages.messageTemplate%(False,message,details)
logger.debug(messageAsStr)
messageAsDic = ast.literal_eval(messageAsStr)
return messageAsDic
@staticmethod
def errorDetailed(message,complementaryMessage,value):
details = """{
'message' : %r,
'value' : %r
}"""%(complementaryMessage,value)
messageAsStr = Messages.messageTemplate%(False,message,
ast.literal_eval(details))
logger.debug(messageAsStr)
messageAsDic = ast.literal_eval(messageAsStr)
return messageAsDic
if __name__ == '__main__':
Messages.success("OK")
Messages.success("OK", "File received")
Messages.error("Error")
Messages.error("Error",details='There was an error processing XPTO.')
Messages.error("Error adding X.",details={'error' : 'xpto', 'valid' : [1,2,3]})
Messages.errorDetailed("Error adding X.","Valid values are", [1,2,3,5])
| ricleal/reductionServer | src/data/messages.py | Python | gpl-3.0 | 1,661 | 0.022878 |
import numpy as np
from sklearn.base import BaseEstimator
import pandas as pd
import operator
from estimator_base import *
from features_base import *
BINARY = "Binary"
CATEGORICAL = "Categorical"
NUMERICAL = "Numerical"
class FeatureMapper:
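    """Selects the configured feature columns of a DataFrame as the feature matrix."""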
def __init__(self, features):
self.features = features
    def fit(self, X, y=None):
        # Plain column selection needs no fitting.
        return self
def transform(self, X):
return X[self.features].as_matrix()
def fit_transform(self, X, y=None):
return self.transform(X)
class SimpleTransform(BaseEstimator):
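    """Applies a unary function element-wise to one column and returns a 2-D column vector."""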
def __init__(self, transformer):
self.transformer = transformer
def fit(self, X, y=None):
return self
def fit_transform(self, X, y=None):
return self.transform(X)
def transform(self, X, y=None):
return np.array([self.transformer(x) for x in X], ndmin=2).T
class MultiColumnTransform(BaseEstimator):
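    """Applies a function row-wise across several DataFrame columns and returns a 2-D column vector."""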
def __init__(self, transformer):
self.transformer = transformer
def fit(self, X, y=None):
return self
def fit_transform(self, X, y=None):
return self.transform(X)
def transform(self, X, y=None):
return np.array([self.transformer(*x[1]) for x in X.iterrows()], ndmin=2).T
def get_all_features():
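    # Each entry is (output name, input column(s), transform); some entries carry an
    # optional fourth element naming features computed earlier in this list.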
all_features = [
('Max', 'A', SimpleTransform(max)),
('Max', 'B', SimpleTransform(max)),
('Min', 'A', SimpleTransform(min)),
('Min', 'B', SimpleTransform(min)),
('Numerical', 'A type', SimpleTransform(lambda x: int(numerical(x)))),
('Numerical', 'B type', SimpleTransform(lambda x: int(numerical(x)))),
('Sub', ['Numerical[A type]','Numerical[B type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Numerical[A type],Numerical[B type]]', SimpleTransform(abs)),
('Number of Samples', 'A', SimpleTransform(len)),
('Log', 'Number of Samples[A]', SimpleTransform(np.log)),
('Number of Unique Samples', 'A', SimpleTransform(count_unique)),
('Number of Unique Samples', 'B', SimpleTransform(count_unique)),
('Max', ['Number of Unique Samples[A]','Number of Unique Samples[B]'], MultiColumnTransform(max)),
('Min', ['Number of Unique Samples[A]','Number of Unique Samples[B]'], MultiColumnTransform(min)),
('Sub', ['Number of Unique Samples[A]','Number of Unique Samples[B]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Number of Unique Samples[A],Number of Unique Samples[B]]', SimpleTransform(abs)),
('Log', 'Number of Unique Samples[A]', SimpleTransform(np.log)),
('Log', 'Number of Unique Samples[B]', SimpleTransform(np.log)),
('Max', ['Log[Number of Unique Samples[A]]','Log[Number of Unique Samples[B]]'], MultiColumnTransform(max)),
('Min', ['Log[Number of Unique Samples[A]]','Log[Number of Unique Samples[B]]'], MultiColumnTransform(min)),
('Sub', ['Log[Number of Unique Samples[A]]','Log[Number of Unique Samples[B]]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Log[Number of Unique Samples[A]],Log[Number of Unique Samples[B]]]', SimpleTransform(abs)),
('Ratio of Unique Samples', 'A', SimpleTransform(count_unique_ratio)),
('Ratio of Unique Samples', 'B', SimpleTransform(count_unique_ratio)),
('Max', ['Ratio of Unique Samples[A]','Ratio of Unique Samples[B]'], MultiColumnTransform(max)),
('Min', ['Ratio of Unique Samples[A]','Ratio of Unique Samples[B]'], MultiColumnTransform(min)),
('Sub', ['Ratio of Unique Samples[A]','Ratio of Unique Samples[B]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Ratio of Unique Samples[A],Ratio of Unique Samples[B]]', SimpleTransform(abs)),
('Normalized Value', ['A','A type'], MultiColumnTransform(normalize)),
('Normalized Value', ['B','B type'], MultiColumnTransform(normalize)),
('Count Value', ['A','A type'], MultiColumnTransform(count_value), ['Normalized Value[A,A type]']),
('Count Value', ['B','B type'], MultiColumnTransform(count_value), ['Normalized Value[B,B type]']),
('DisSeq', ['A','A type'], MultiColumnTransform(discrete_seq)),
('DisSeq', ['B','B type'], MultiColumnTransform(discrete_seq)),
('DisProb', ['A','A type'], MultiColumnTransform(discrete_probability), ['DisSeq[A,A type]']),
('DisProb', ['B','B type'], MultiColumnTransform(discrete_probability), ['DisSeq[B,B type]']),
('Normalized Entropy Baseline', ['A','A type'], MultiColumnTransform(normalized_entropy_baseline), ['Normalized Value[A,A type]']),
('Normalized Entropy Baseline', ['B','B type'], MultiColumnTransform(normalized_entropy_baseline), ['Normalized Value[B,B type]']),
('Max', ['Normalized Entropy Baseline[A,A type]','Normalized Entropy Baseline[B,B type]'], MultiColumnTransform(max)),
('Min', ['Normalized Entropy Baseline[A,A type]','Normalized Entropy Baseline[B,B type]'], MultiColumnTransform(min)),
('Sub', ['Normalized Entropy Baseline[A,A type]','Normalized Entropy Baseline[B,B type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Normalized Entropy Baseline[A,A type],Normalized Entropy Baseline[B,B type]]', SimpleTransform(abs)),
('Normalized Entropy', ['A','A type'], MultiColumnTransform(normalized_entropy), ['Count Value[A,A type]']),
('Normalized Entropy', ['B','B type'], MultiColumnTransform(normalized_entropy), ['Count Value[B,B type]']),
('Max', ['Normalized Entropy[A,A type]','Normalized Entropy[B,B type]'], MultiColumnTransform(max)),
('Min', ['Normalized Entropy[A,A type]','Normalized Entropy[B,B type]'], MultiColumnTransform(min)),
('Sub', ['Normalized Entropy[A,A type]','Normalized Entropy[B,B type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Normalized Entropy[A,A type],Normalized Entropy[B,B type]]', SimpleTransform(abs)),
('IGCI', ['A','A type','B','B type'], MultiColumnTransform(igci), ['Normalized Value[A,A type]', 'Normalized Value[B,B type]']),
('IGCI', ['B','B type','A','A type'], MultiColumnTransform(igci), ['Normalized Value[B,B type]', 'Normalized Value[A,A type]']),
('Sub', ['IGCI[A,A type,B,B type]','IGCI[B,B type,A,A type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[IGCI[A,A type,B,B type],IGCI[B,B type,A,A type]]', SimpleTransform(abs)),
('Gaussian Divergence', ['A','A type'], MultiColumnTransform(gaussian_divergence), ['Count Value[A,A type]']),
('Gaussian Divergence', ['B','B type'], MultiColumnTransform(gaussian_divergence), ['Count Value[B,B type]']),
('Max', ['Gaussian Divergence[A,A type]','Gaussian Divergence[B,B type]'], MultiColumnTransform(max)),
('Min', ['Gaussian Divergence[A,A type]','Gaussian Divergence[B,B type]'], MultiColumnTransform(min)),
('Sub', ['Gaussian Divergence[A,A type]','Gaussian Divergence[B,B type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Gaussian Divergence[A,A type],Gaussian Divergence[B,B type]]', SimpleTransform(abs)),
('Uniform Divergence', ['A','A type'], MultiColumnTransform(uniform_divergence), ['Count Value[A,A type]']),
('Uniform Divergence', ['B','B type'], MultiColumnTransform(uniform_divergence), ['Count Value[B,B type]']),
('Max', ['Uniform Divergence[A,A type]','Uniform Divergence[B,B type]'], MultiColumnTransform(max)),
('Min', ['Uniform Divergence[A,A type]','Uniform Divergence[B,B type]'], MultiColumnTransform(min)),
('Sub', ['Uniform Divergence[A,A type]','Uniform Divergence[B,B type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Uniform Divergence[A,A type],Uniform Divergence[B,B type]]', SimpleTransform(abs)),
('Discrete Entropy', ['A','A type'], MultiColumnTransform(discrete_entropy), ['DisProb[A,A type]']),
('Discrete Entropy', ['B','B type'], MultiColumnTransform(discrete_entropy), ['DisProb[B,B type]']),
('Max', ['Discrete Entropy[A,A type]','Discrete Entropy[B,B type]'], MultiColumnTransform(max)),
('Min', ['Discrete Entropy[A,A type]','Discrete Entropy[B,B type]'], MultiColumnTransform(min)),
('Sub', ['Discrete Entropy[A,A type]','Discrete Entropy[B,B type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Discrete Entropy[A,A type],Discrete Entropy[B,B type]]', SimpleTransform(abs)),
('Normalized Discrete Entropy', ['A','A type'], MultiColumnTransform(normalized_discrete_entropy), ['Discrete Entropy[A,A type]', 'Number of Unique Samples[A]']),
('Normalized Discrete Entropy', ['B','B type'], MultiColumnTransform(normalized_discrete_entropy), ['Discrete Entropy[B,B type]', 'Number of Unique Samples[B]']),
('Max', ['Normalized Discrete Entropy[A,A type]','Normalized Discrete Entropy[B,B type]'], MultiColumnTransform(max)),
('Min', ['Normalized Discrete Entropy[A,A type]','Normalized Discrete Entropy[B,B type]'], MultiColumnTransform(min)),
('Sub', ['Normalized Discrete Entropy[A,A type]','Normalized Discrete Entropy[B,B type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Normalized Discrete Entropy[A,A type],Normalized Discrete Entropy[B,B type]]', SimpleTransform(abs)),
('Discrete Joint Entropy', ['A','A type','B','B type'], MultiColumnTransform(discrete_joint_entropy), ['DisSeq[A,A type]', 'DisSeq[B,B type]']),
('Normalized Discrete Joint Entropy', ['A','A type','B','B type'], MultiColumnTransform(normalized_discrete_joint_entropy), ['Discrete Joint Entropy[A,A type,B,B type]']),
('Discrete Conditional Entropy', ['A','A type','B','B type'], MultiColumnTransform(discrete_conditional_entropy), ['Discrete Joint Entropy[A,A type,B,B type]', 'Discrete Entropy[B,B type]']),
('Discrete Conditional Entropy', ['B','B type','A','A type'], MultiColumnTransform(discrete_conditional_entropy), ['Discrete Joint Entropy[A,A type,B,B type]', 'Discrete Entropy[A,A type]']),
('Discrete Mutual Information', ['A','A type','B','B type'], MultiColumnTransform(discrete_mutual_information), ['Discrete Joint Entropy[A,A type,B,B type]', 'Discrete Entropy[A,A type]', 'Discrete Entropy[B,B type]']),
('Normalized Discrete Mutual Information', ['Discrete Mutual Information[A,A type,B,B type]','Min[Discrete Entropy[A,A type],Discrete Entropy[B,B type]]'], MultiColumnTransform(operator.div)),
('Normalized Discrete Mutual Information', ['Discrete Mutual Information[A,A type,B,B type]','Discrete Joint Entropy[A,A type,B,B type]'], MultiColumnTransform(operator.div)),
('Adjusted Mutual Information', ['A','A type','B','B type'], MultiColumnTransform(adjusted_mutual_information), ['DisSeq[A,A type]', 'DisSeq[B,B type]']),
('Polyfit', ['A','A type','B','B type'], MultiColumnTransform(fit)),
('Polyfit', ['B','B type','A','A type'], MultiColumnTransform(fit)),
('Sub', ['Polyfit[A,A type,B,B type]','Polyfit[B,B type,A,A type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Polyfit[A,A type,B,B type],Polyfit[B,B type,A,A type]]', SimpleTransform(abs)),
('Polyfit Error', ['A','A type','B','B type'], MultiColumnTransform(fit_error)),
('Polyfit Error', ['B','B type','A','A type'], MultiColumnTransform(fit_error)),
('Sub', ['Polyfit Error[A,A type,B,B type]','Polyfit Error[B,B type,A,A type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Polyfit Error[A,A type,B,B type],Polyfit Error[B,B type,A,A type]]', SimpleTransform(abs)),
('Normalized Error Probability', ['A','A type','B','B type'], MultiColumnTransform(normalized_error_probability), ['DisSeq[A,A type]', 'DisSeq[B,B type]', 'DisProb[A,A type]', 'DisProb[B,B type]']),
('Normalized Error Probability', ['B','B type','A','A type'], MultiColumnTransform(normalized_error_probability), ['DisSeq[B,B type]', 'DisSeq[A,A type]', 'DisProb[B,B type]', 'DisProb[A,A type]']),
('Sub', ['Normalized Error Probability[A,A type,B,B type]','Normalized Error Probability[B,B type,A,A type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Normalized Error Probability[A,A type,B,B type],Normalized Error Probability[B,B type,A,A type]]', SimpleTransform(abs)),
('Conditional Distribution Entropy Variance', ['A','A type','B','B type'], MultiColumnTransform(fit_noise_entropy), ['DisSeq[A,A type]', 'DisSeq[B,B type]', 'DisProb[A,A type]']),
('Conditional Distribution Entropy Variance', ['B','B type','A','A type'], MultiColumnTransform(fit_noise_entropy), ['DisSeq[B,B type]', 'DisSeq[A,A type]', 'DisProb[B,B type]']),
('Sub', ['Conditional Distribution Entropy Variance[A,A type,B,B type]','Conditional Distribution Entropy Variance[B,B type,A,A type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Conditional Distribution Entropy Variance[A,A type,B,B type],Conditional Distribution Entropy Variance[B,B type,A,A type]]', SimpleTransform(abs)),
('Conditional Distribution Skewness Variance', ['A','A type','B','B type'], MultiColumnTransform(fit_noise_skewness), ['DisSeq[A,A type]', 'DisProb[A,A type]']),
('Conditional Distribution Skewness Variance', ['B','B type','A','A type'], MultiColumnTransform(fit_noise_skewness), ['DisSeq[B,B type]', 'DisProb[B,B type]']),
('Sub', ['Conditional Distribution Skewness Variance[A,A type,B,B type]','Conditional Distribution Skewness Variance[B,B type,A,A type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Conditional Distribution Skewness Variance[A,A type,B,B type],Conditional Distribution Skewness Variance[B,B type,A,A type]]', SimpleTransform(abs)),
('Conditional Distribution Kurtosis Variance', ['A','A type','B','B type'], MultiColumnTransform(fit_noise_kurtosis), ['DisSeq[A,A type]', 'DisProb[A,A type]']),
('Conditional Distribution Kurtosis Variance', ['B','B type','A','A type'], MultiColumnTransform(fit_noise_kurtosis), ['DisSeq[B,B type]', 'DisProb[B,B type]']),
('Sub', ['Conditional Distribution Kurtosis Variance[A,A type,B,B type]','Conditional Distribution Kurtosis Variance[B,B type,A,A type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Conditional Distribution Kurtosis Variance[A,A type,B,B type],Conditional Distribution Kurtosis Variance[B,B type,A,A type]]', SimpleTransform(abs)),
('DisSeq2', ['A','A type'], MultiColumnTransform(discrete_seq2)),
('DisSeq2', ['B','B type'], MultiColumnTransform(discrete_seq2)),
('DisProb2', ['A','A type'], MultiColumnTransform(discrete_probability2), ['DisSeq2[A,A type]']),
('DisProb2', ['B','B type'], MultiColumnTransform(discrete_probability2), ['DisSeq2[B,B type]']),
('Conditional Distribution Similarity', ['A','A type','B','B type'], MultiColumnTransform(conditional_distribution_similarity), ['DisSeq2[A,A type]','DisProb2[A,A type]','DisProb2[B,B type]']),
('Conditional Distribution Similarity', ['B','B type','A','A type'], MultiColumnTransform(conditional_distribution_similarity), ['DisSeq2[B,B type]','DisProb2[B,B type]','DisProb2[A,A type]']),
('Sub', ['Conditional Distribution Similarity[A,A type,B,B type]','Conditional Distribution Similarity[B,B type,A,A type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Conditional Distribution Similarity[A,A type,B,B type],Conditional Distribution Similarity[B,B type,A,A type]]', SimpleTransform(abs)),
('Moment21', ['A','A type','B','B type'], MultiColumnTransform(moment21), ['Normalized Value[A,A type]', 'Normalized Value[B,B type]']),
('Moment21', ['B','B type','A','A type'], MultiColumnTransform(moment21), ['Normalized Value[B,B type]', 'Normalized Value[A,A type]']),
('Sub', ['Moment21[A,A type,B,B type]','Moment21[B,B type,A,A type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Moment21[A,A type,B,B type],Moment21[B,B type,A,A type]]', SimpleTransform(abs)),
('Abs', 'Moment21[A,A type,B,B type]', SimpleTransform(abs)),
('Abs', 'Moment21[B,B type,A,A type]', SimpleTransform(abs)),
('Sub', ['Abs[Moment21[A,A type,B,B type]]','Abs[Moment21[B,B type,A,A type]]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Abs[Moment21[A,A type,B,B type]],Abs[Moment21[B,B type,A,A type]]]', SimpleTransform(abs)),
('Moment31', ['A','A type','B','B type'], MultiColumnTransform(moment31), ['Normalized Value[A,A type]', 'Normalized Value[B,B type]']),
('Moment31', ['B','B type','A','A type'], MultiColumnTransform(moment31), ['Normalized Value[B,B type]', 'Normalized Value[A,A type]']),
('Sub', ['Moment31[A,A type,B,B type]','Moment31[B,B type,A,A type]'], MultiColumnTransform(operator.sub)),
('Abs','Sub[Moment31[A,A type,B,B type],Moment31[B,B type,A,A type]]', SimpleTransform(abs)),
('Abs','Moment31[A,A type,B,B type]', SimpleTransform(abs)),
('Abs','Moment31[B,B type,A,A type]', SimpleTransform(abs)),
('Sub', ['Abs[Moment31[A,A type,B,B type]]','Abs[Moment31[B,B type,A,A type]]'], MultiColumnTransform(operator.sub)),
('Abs','Sub[Abs[Moment31[A,A type,B,B type]],Abs[Moment31[B,B type,A,A type]]]', SimpleTransform(abs)),
('Skewness', ['A','A type'], MultiColumnTransform(normalized_skewness), ['Normalized Value[A,A type]']),
('Skewness', ['B','B type'], MultiColumnTransform(normalized_skewness), ['Normalized Value[B,B type]']),
('Sub', ['Skewness[A,A type]','Skewness[B,B type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Skewness[A,A type],Skewness[B,B type]]', SimpleTransform(abs)),
('Abs', 'Skewness[A,A type]', SimpleTransform(abs)),
('Abs', 'Skewness[B,B type]', SimpleTransform(abs)),
('Max', ['Abs[Skewness[A,A type]]','Abs[Skewness[B,B type]]'], MultiColumnTransform(max)),
('Min', ['Abs[Skewness[A,A type]]','Abs[Skewness[B,B type]]'], MultiColumnTransform(min)),
('Sub', ['Abs[Skewness[A,A type]]','Abs[Skewness[B,B type]]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Abs[Skewness[A,A type]],Abs[Skewness[B,B type]]]', SimpleTransform(abs)),
('Kurtosis', ['A','A type'], MultiColumnTransform(normalized_kurtosis), ['Normalized Value[A,A type]']),
('Kurtosis', ['B','B type'], MultiColumnTransform(normalized_kurtosis), ['Normalized Value[B,B type]']),
('Max', ['Kurtosis[A,A type]','Kurtosis[B,B type]'], MultiColumnTransform(max)),
('Min', ['Kurtosis[A,A type]','Kurtosis[B,B type]'], MultiColumnTransform(min)),
('Sub', ['Kurtosis[A,A type]','Kurtosis[B,B type]'], MultiColumnTransform(operator.sub)),
('Abs', 'Sub[Kurtosis[A,A type],Kurtosis[B,B type]]', SimpleTransform(abs)),
('Pearson R', ['A','A type','B','B type'], MultiColumnTransform(correlation), ['Normalized Error Probability[A,A type,B,B type]','Normalized Error Probability[B,B type,A,A type]']),
('HSIC', ['A','A type','B','B type'], MultiColumnTransform(normalized_hsic), ['Pearson R[A,A type,B,B type]']),
('Abs', 'Pearson R[A,A type,B,B type]', SimpleTransform(abs))
]
all_features = [fea if len(fea) > 3 else (fea[0], fea[1], fea[2], []) for fea in all_features]
used_feature_names = set(selected_direction_categorical_features + selected_direction_cn_features + selected_direction_numerical_features
+selected_independence_categorical_features + selected_independence_cn_features + selected_independence_numerical_features
+selected_symmetric_categorical_features + selected_symmetric_cn_features + selected_symmetric_numerical_features
+selected_onestep_categorical_features + selected_onestep_cn_features + selected_onestep_numerical_features)
used_feature_names_add = used_feature_names
all_features_clean = []
for feature_prefix, column_names, extractor, aux_column_names in reversed(all_features):
if not type(column_names) is list:
column_names = [column_names]
feature_name = feature_prefix + '[' + ','.join(column_names) + ']'
if len([x for x in used_feature_names_add if feature_name in x]) > 0:
used_feature_names_add = set(list(used_feature_names_add) + column_names + aux_column_names)
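    # Note on the reversed pass above: features are walked backwards so that,
    # whenever a feature's name already appears inside an entry of the needed
    # set, the columns it reads and its auxiliary inputs are marked as needed
    # too; the forward pass below then keeps only those needed features.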
for feature_prefix, column_names, extractor, aux_column_names in all_features:
if type(column_names) is list:
feature_name = feature_prefix + '[' + ','.join(column_names) + ']'
else:
feature_name = feature_prefix + '[' + column_names + ']'
if len([x for x in used_feature_names_add if feature_name in x]) > 0:
all_features_clean.append((feature_prefix, column_names, extractor, aux_column_names))
return all_features_clean, used_feature_names
def extract_features(X, features=None, y=None):
if features is None:
features, _ = get_all_features()
X = X.copy()
X['A type'] = pd.Series.apply(X['A type'], lambda x: 0 if x == BINARY else 1 if x == CATEGORICAL else 2 if x == NUMERICAL else np.nan)
X['B type'] = pd.Series.apply(X['B type'], lambda x: 0 if x == BINARY else 1 if x == CATEGORICAL else 2 if x == NUMERICAL else np.nan)
for feature_name, column_names, extractor, aux_column_names in features:
if not type(column_names) is list:
column_names = [column_names]
feature_name = feature_name + '[' + ','.join(column_names) + ']'
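        # A leading '+' on the feature prefix forces recomputation even if the
        # column already exists; the '+' is stripped before the result is stored.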
if (feature_name[0] == '+') or (feature_name not in X.columns):
if feature_name[0] == '+':
feature_name = feature_name[1:]
column_names = column_names + aux_column_names
if len(column_names) > 1:
tmp = extractor.fit_transform(X[column_names], y)
else:
tmp = extractor.fit_transform(X[column_names[0]], y)
X[feature_name] = tmp
return X
def get_sym_col(col):
col = col.replace('A type', '<>').replace('B type', 'A type').replace('<>', 'B type')
col = col.replace('[A', '<>').replace('[B', '[A').replace('<>', '[B')
col = col.replace(',A,', '<>').replace(',B,', ',A,').replace('<>', ',B,')
return col
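# Illustrative behaviour of get_sym_col (examples added for clarity, not part
# of the original source): the A/B roles inside a derived feature name are
# swapped, with '<>' used as a temporary placeholder, e.g.
#   get_sym_col('Discrete Entropy[A,A type]')  -> 'Discrete Entropy[B,B type]'
#   get_sym_col('Polyfit[A,A type,B,B type]')  -> 'Polyfit[B,B type,A,A type]'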
def extract_features2(X, X_inv, features=None, y=None): #, used_feature_names=used_feature_names):
symmetric_feature_names = ['HSIC[A,A type,B,B type]', 'Pearson R[A,A type,B,B type]', 'Discrete Joint Entropy[A,A type,B,B type]', 'Adjusted Mutual Information[A,A type,B,B type]']
if features is None:
features, _ = get_all_features()
X = X.copy()
X['A type'] = pd.Series.apply(X['A type'], lambda x: 0 if x == BINARY else 1 if x == CATEGORICAL else 2 if x == NUMERICAL else np.nan)
X['B type'] = pd.Series.apply(X['B type'], lambda x: 0 if x == BINARY else 1 if x == CATEGORICAL else 2 if x == NUMERICAL else np.nan)
for feature_name, column_names, extractor, aux_column_names in features:
if not type(column_names) is list:
column_names = [column_names]
feature_name = feature_name + '[' + ','.join(column_names) + ']'
if feature_name in symmetric_feature_names:
X[feature_name] = X_inv[feature_name]
continue
if get_sym_col(feature_name) in X_inv.columns:
X[feature_name] = X_inv[get_sym_col(feature_name)]
continue
if (feature_name[0] == '+') or (feature_name not in X.columns):
if feature_name[0] == '+':
feature_name = feature_name[1:]
column_names = column_names + aux_column_names
if len(column_names) > 1:
tmp = extractor.fit_transform(X[column_names], y)
else:
tmp = extractor.fit_transform(X[column_names[0]], y)
X[feature_name] = tmp
return X
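# Hypothetical usage sketch (assumes `pairs_df` is a pandas DataFrame holding
# the raw columns 'A', 'A type', 'B', 'B type' read by the code above):
#   feats, used = get_all_features()
#   X_feat = extract_features(pairs_df.copy(), features=feats)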
| waynezhanghk/FastCausation | features.py | Python | apache-2.0 | 23,932 | 0.013497 |
#!/usr/bin/env python
# encoding: utf-8
"""
Author(s): Matthew Loper
See LICENCE.txt for licensing and contact information.
"""
import sys
import os
import unittest
import chumpy as ch
from chumpy import Ch
import numpy as np
from util_tests import get_earthmesh
class TestGeometry(unittest.TestCase):
def setUp(self):
np.random.seed(0)
def test_rodrigues(self):
from geometry import Rodrigues
rt = np.random.randn(3)
rt2 = rt + np.random.rand(3)*1e-5
foo1 = Rodrigues(rt = rt)
foo2 = Rodrigues(rt = rt2)
empirical = (foo2.r - foo1.r).flatten()
predicted = foo1.dr_wrt(foo1.rt).dot(rt2-rt)
self.assertTrue(np.max(np.abs(empirical - predicted)) < 1e-10)
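        # The assertion above is a finite-difference check: for the small
        # perturbation d = rt2 - rt, the analytic Jacobian J = foo1.dr_wrt(foo1.rt)
        # should satisfy foo2.r - foo1.r approximately equal to J.dot(d).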
def test_vert_normals(self):
from geometry import VertNormals
import numpy as np
mesh = get_earthmesh(np.zeros(3), np.zeros(3))
v, f = mesh.v*127., mesh.f
vn1 = VertNormals(f=f, v=v)
dr_predicted = vn1.dr_wrt(vn1.v).copy()
eps = .00001 * np.random.randn(v.size).reshape(v.shape)
v += eps
vn2 = VertNormals(v=v, f=f)
empirical_diff = (vn2.r - vn1.r).reshape((-1,3))
predicted_diff = dr_predicted.dot(eps.flatten()).reshape((-1,3))
if False:
print np.max(np.abs(empirical_diff-predicted_diff))
print empirical_diff[:6]
print predicted_diff[:6]
self.assertTrue(np.max(np.abs(empirical_diff-predicted_diff)) < 6e-13)
suite = unittest.TestLoader().loadTestsFromTestCase(TestGeometry)
if __name__ == '__main__':
unittest.main()
| dimatura/opendr | test_geometry.py | Python | mit | 1,656 | 0.008454 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Onset detection
===============
.. autosummary::
:toctree: generated/
onset_detect
onset_backtrack
onset_strength
onset_strength_multi
"""
import numpy as np
import scipy
from ._cache import cache
from . import core
from . import util
from .util.exceptions import ParameterError
from .feature.spectral import melspectrogram
__all__ = ["onset_detect", "onset_strength", "onset_strength_multi", "onset_backtrack"]
def onset_detect(
y=None,
sr=22050,
onset_envelope=None,
hop_length=512,
backtrack=False,
energy=None,
units="frames",
normalize=True,
**kwargs,
):
"""Locate note onset events by picking peaks in an onset strength envelope.
The `peak_pick` parameters were chosen by large-scale hyper-parameter
optimization over the dataset provided by [#]_.
.. [#] https://github.com/CPJKU/onset_db
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time series
sr : number > 0 [scalar]
sampling rate of ``y``
onset_envelope : np.ndarray [shape=(m,)]
(optional) pre-computed onset strength envelope
hop_length : int > 0 [scalar]
hop length (in samples)
units : {'frames', 'samples', 'time'}
The units to encode detected onset events in.
By default, 'frames' are used.
backtrack : bool
If ``True``, detected onset events are backtracked to the nearest
preceding minimum of ``energy``.
This is primarily useful when using onsets as slice points for segmentation.
energy : np.ndarray [shape=(m,)] (optional)
An energy function to use for backtracking detected onset events.
If none is provided, then ``onset_envelope`` is used.
normalize : bool
If ``True`` (default), normalize the onset envelope to have minimum of 0 and
maximum of 1 prior to detection. This is helpful for standardizing the
parameters of `librosa.util.peak_pick`.
Otherwise, the onset envelope is left unnormalized.
kwargs : additional keyword arguments
Additional parameters for peak picking.
See `librosa.util.peak_pick` for details.
Returns
-------
onsets : np.ndarray [shape=(n_onsets,)]
estimated positions of detected onsets, in whichever units
are specified. By default, frame indices.
.. note::
If no onset strength could be detected, onset_detect returns
an empty list.
Raises
------
ParameterError
if neither ``y`` nor ``onsets`` are provided
or if ``units`` is not one of 'frames', 'samples', or 'time'
See Also
--------
onset_strength : compute onset strength per-frame
onset_backtrack : backtracking onset events
librosa.util.peak_pick : pick peaks from a time series
Examples
--------
Get onset times from a signal
>>> y, sr = librosa.load(librosa.ex('trumpet'))
>>> librosa.onset.onset_detect(y=y, sr=sr, units='time')
array([0.07 , 0.232, 0.395, 0.604, 0.743, 0.929, 1.045, 1.115,
1.416, 1.672, 1.881, 2.043, 2.206, 2.368, 2.554, 3.019])
Or use a pre-computed onset envelope
>>> o_env = librosa.onset.onset_strength(y, sr=sr)
>>> times = librosa.times_like(o_env, sr=sr)
>>> onset_frames = librosa.onset.onset_detect(onset_envelope=o_env, sr=sr)
>>> import matplotlib.pyplot as plt
>>> D = np.abs(librosa.stft(y))
>>> fig, ax = plt.subplots(nrows=2, sharex=True)
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... x_axis='time', y_axis='log', ax=ax[0])
>>> ax[0].set(title='Power spectrogram')
>>> ax[0].label_outer()
>>> ax[1].plot(times, o_env, label='Onset strength')
>>> ax[1].vlines(times[onset_frames], 0, o_env.max(), color='r', alpha=0.9,
... linestyle='--', label='Onsets')
>>> ax[1].legend()
"""
    # First, get the frame-wise onset strength envelope if we don't already have one
if onset_envelope is None:
if y is None:
raise ParameterError("y or onset_envelope must be provided")
onset_envelope = onset_strength(y=y, sr=sr, hop_length=hop_length)
# Shift onset envelope up to be non-negative
# (a common normalization step to make the threshold more consistent)
if normalize:
# Normalize onset strength function to [0, 1] range
onset_envelope = onset_envelope - onset_envelope.min()
# Max-scale with safe division
onset_envelope /= np.max(onset_envelope) + util.tiny(onset_envelope)
# Do we have any onsets to grab?
if not onset_envelope.any() or not np.all(np.isfinite(onset_envelope)):
onsets = np.array([], dtype=np.int)
else:
# These parameter settings found by large-scale search
kwargs.setdefault("pre_max", 0.03 * sr // hop_length) # 30ms
kwargs.setdefault("post_max", 0.00 * sr // hop_length + 1) # 0ms
kwargs.setdefault("pre_avg", 0.10 * sr // hop_length) # 100ms
kwargs.setdefault("post_avg", 0.10 * sr // hop_length + 1) # 100ms
kwargs.setdefault("wait", 0.03 * sr // hop_length) # 30ms
kwargs.setdefault("delta", 0.07)
# Peak pick the onset envelope
onsets = util.peak_pick(onset_envelope, **kwargs)
# Optionally backtrack the events
if backtrack:
if energy is None:
energy = onset_envelope
onsets = onset_backtrack(onsets, energy)
if units == "frames":
pass
elif units == "samples":
onsets = core.frames_to_samples(onsets, hop_length=hop_length)
elif units == "time":
onsets = core.frames_to_time(onsets, hop_length=hop_length, sr=sr)
else:
raise ParameterError("Invalid unit type: {}".format(units))
return onsets
def onset_strength(
y=None,
sr=22050,
S=None,
lag=1,
max_size=1,
ref=None,
detrend=False,
center=True,
feature=None,
aggregate=None,
**kwargs,
):
"""Compute a spectral flux onset strength envelope.
Onset strength at time ``t`` is determined by::
mean_f max(0, S[f, t] - ref[f, t - lag])
where ``ref`` is ``S`` after local max filtering along the frequency
axis [#]_.
By default, if a time series ``y`` is provided, S will be the
log-power Mel spectrogram.
.. [#] Böck, Sebastian, and Gerhard Widmer.
"Maximum filter vibrato suppression for onset detection."
16th International Conference on Digital Audio Effects,
Maynooth, Ireland. 2013.
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time-series
sr : number > 0 [scalar]
sampling rate of ``y``
S : np.ndarray [shape=(d, m)]
pre-computed (log-power) spectrogram
lag : int > 0
time lag for computing differences
max_size : int > 0
size (in frequency bins) of the local max filter.
set to `1` to disable filtering.
ref : None or np.ndarray [shape=(d, m)]
An optional pre-computed reference spectrum, of the same shape as ``S``.
If not provided, it will be computed from ``S``.
If provided, it will override any local max filtering governed by ``max_size``.
detrend : bool [scalar]
Filter the onset strength to remove the DC component
center : bool [scalar]
Shift the onset function by ``n_fft // (2 * hop_length)`` frames.
This corresponds to using a centered frame analysis in the short-time Fourier
transform.
feature : function
Function for computing time-series features, eg, scaled spectrograms.
By default, uses `librosa.feature.melspectrogram` with ``fmax=11025.0``
aggregate : function
Aggregation function to use when combining onsets
at different frequency bins.
Default: `np.mean`
kwargs : additional keyword arguments
Additional parameters to ``feature()``, if ``S`` is not provided.
Returns
-------
onset_envelope : np.ndarray [shape=(m,)]
vector containing the onset strength envelope
Raises
------
ParameterError
if neither ``(y, sr)`` nor ``S`` are provided
or if ``lag`` or ``max_size`` are not positive integers
See Also
--------
onset_detect
onset_strength_multi
Examples
--------
First, load some audio and plot the spectrogram
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.ex('trumpet'), duration=3)
>>> D = np.abs(librosa.stft(y))
>>> times = librosa.times_like(D)
>>> fig, ax = plt.subplots(nrows=2, sharex=True)
>>> librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... y_axis='log', x_axis='time', ax=ax[0])
>>> ax[0].set(title='Power spectrogram')
>>> ax[0].label_outer()
Construct a standard onset function
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr)
>>> ax[1].plot(times, 2 + onset_env / onset_env.max(), alpha=0.8,
... label='Mean (mel)')
Median aggregation, and custom mel options
>>> onset_env = librosa.onset.onset_strength(y=y, sr=sr,
... aggregate=np.median,
... fmax=8000, n_mels=256)
>>> ax[1].plot(times, 1 + onset_env / onset_env.max(), alpha=0.8,
... label='Median (custom mel)')
Constant-Q spectrogram instead of Mel
>>> C = np.abs(librosa.cqt(y=y, sr=sr))
>>> onset_env = librosa.onset.onset_strength(sr=sr, S=librosa.amplitude_to_db(C, ref=np.max))
>>> ax[1].plot(times, onset_env / onset_env.max(), alpha=0.8,
... label='Mean (CQT)')
>>> ax[1].legend()
>>> ax[1].set(ylabel='Normalized strength', yticks=[])
"""
if aggregate is False:
        raise ParameterError(
            "aggregate={} cannot be False when computing full-spectrum onset strength.".format(aggregate)
        )
odf_all = onset_strength_multi(
y=y,
sr=sr,
S=S,
lag=lag,
max_size=max_size,
ref=ref,
detrend=detrend,
center=center,
feature=feature,
aggregate=aggregate,
channels=None,
**kwargs,
)
return odf_all[0]
def onset_backtrack(events, energy):
"""Backtrack detected onset events to the nearest preceding local
minimum of an energy function.
This function can be used to roll back the timing of detected onsets
from a detected peak amplitude to the preceding minimum.
This is most useful when using onsets to determine slice points for
segmentation, as described by [#]_.
.. [#] Jehan, Tristan.
"Creating music by listening"
Doctoral dissertation
Massachusetts Institute of Technology, 2005.
Parameters
----------
events : np.ndarray, dtype=int
List of onset event frame indices, as computed by `onset_detect`
energy : np.ndarray, shape=(m,)
An energy function
Returns
-------
events_backtracked : np.ndarray, shape=events.shape
The input events matched to nearest preceding minima of ``energy``.
Examples
--------
Backtrack the events using the onset envelope
>>> y, sr = librosa.load(librosa.ex('trumpet'), duration=3)
>>> oenv = librosa.onset.onset_strength(y=y, sr=sr)
>>> times = librosa.times_like(oenv)
>>> # Detect events without backtracking
>>> onset_raw = librosa.onset.onset_detect(onset_envelope=oenv,
... backtrack=False)
>>> onset_bt = librosa.onset.onset_backtrack(onset_raw, oenv)
Backtrack the events using the RMS values
>>> S = np.abs(librosa.stft(y=y))
>>> rms = librosa.feature.rms(S=S)
>>> onset_bt_rms = librosa.onset.onset_backtrack(onset_raw, rms[0])
Plot the results
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(nrows=3, sharex=True)
>>> librosa.display.specshow(librosa.amplitude_to_db(S, ref=np.max),
... y_axis='log', x_axis='time', ax=ax[0])
>>> ax[0].label_outer()
>>> ax[1].plot(times, oenv, label='Onset strength')
>>> ax[1].vlines(librosa.frames_to_time(onset_raw), 0, oenv.max(), label='Raw onsets')
>>> ax[1].vlines(librosa.frames_to_time(onset_bt), 0, oenv.max(), label='Backtracked', color='r')
>>> ax[1].legend()
>>> ax[1].label_outer()
>>> ax[2].plot(times, rms[0], label='RMS')
>>> ax[2].vlines(librosa.frames_to_time(onset_bt_rms), 0, rms.max(), label='Backtracked (RMS)', color='r')
>>> ax[2].legend()
"""
# Find points where energy is non-increasing
# all points: energy[i] <= energy[i-1]
# tail points: energy[i] < energy[i+1]
minima = np.flatnonzero((energy[1:-1] <= energy[:-2]) & (energy[1:-1] < energy[2:]))
# Pad on a 0, just in case we have onsets with no preceding minimum
# Shift by one to account for slicing in minima detection
minima = util.fix_frames(1 + minima, x_min=0)
# Only match going left from the detected events
return minima[util.match_events(events, minima, right=False)]
@cache(level=30)
def onset_strength_multi(
y=None,
sr=22050,
S=None,
n_fft=2048,
hop_length=512,
lag=1,
max_size=1,
ref=None,
detrend=False,
center=True,
feature=None,
aggregate=None,
channels=None,
**kwargs,
):
"""Compute a spectral flux onset strength envelope across multiple channels.
Onset strength for channel ``i`` at time ``t`` is determined by::
mean_{f in channels[i]} max(0, S[f, t+1] - S[f, t])
Parameters
----------
y : np.ndarray [shape=(n,)]
audio time-series
sr : number > 0 [scalar]
sampling rate of ``y``
S : np.ndarray [shape=(d, m)]
pre-computed (log-power) spectrogram
n_fft : int > 0 [scalar]
FFT window size for use in ``feature()`` if ``S`` is not provided.
hop_length : int > 0 [scalar]
hop length for use in ``feature()`` if ``S`` is not provided.
lag : int > 0
time lag for computing differences
max_size : int > 0
size (in frequency bins) of the local max filter.
set to `1` to disable filtering.
ref : None or np.ndarray [shape=(d, m)]
An optional pre-computed reference spectrum, of the same shape as ``S``.
If not provided, it will be computed from ``S``.
If provided, it will override any local max filtering governed by ``max_size``.
detrend : bool [scalar]
Filter the onset strength to remove the DC component
center : bool [scalar]
Shift the onset function by ``n_fft // (2 * hop_length)`` frames.
This corresponds to using a centered frame analysis in the short-time Fourier
transform.
feature : function
Function for computing time-series features, eg, scaled spectrograms.
By default, uses `librosa.feature.melspectrogram` with ``fmax=11025.0``
Must support arguments: ``y, sr, n_fft, hop_length``
aggregate : function or False
Aggregation function to use when combining onsets
at different frequency bins.
If ``False``, then no aggregation is performed.
Default: `np.mean`
channels : list or None
Array of channel boundaries or slice objects.
If `None`, then a single channel is generated to span all bands.
kwargs : additional keyword arguments
Additional parameters to ``feature()``, if ``S`` is not provided.
Returns
-------
onset_envelope : np.ndarray [shape=(n_channels, m)]
array containing the onset strength envelope for each specified channel
Raises
------
ParameterError
if neither ``(y, sr)`` nor ``S`` are provided
See Also
--------
onset_strength
Notes
-----
This function caches at level 30.
Examples
--------
First, load some audio and plot the spectrogram
>>> import matplotlib.pyplot as plt
>>> y, sr = librosa.load(librosa.ex('choice'), duration=5)
>>> D = np.abs(librosa.stft(y))
>>> fig, ax = plt.subplots(nrows=2, sharex=True)
>>> img1 = librosa.display.specshow(librosa.amplitude_to_db(D, ref=np.max),
... y_axis='log', x_axis='time', ax=ax[0])
>>> ax[0].set(title='Power spectrogram')
>>> ax[0].label_outer()
>>> fig.colorbar(img1, ax=[ax[0]], format="%+2.f dB")
Construct a standard onset function over four sub-bands
>>> onset_subbands = librosa.onset.onset_strength_multi(y=y, sr=sr,
... channels=[0, 32, 64, 96, 128])
>>> img2 = librosa.display.specshow(onset_subbands, x_axis='time', ax=ax[1])
>>> ax[1].set(ylabel='Sub-bands', title='Sub-band onset strength')
>>> fig.colorbar(img2, ax=[ax[1]])
"""
if feature is None:
feature = melspectrogram
kwargs.setdefault("fmax", 11025.0)
if aggregate is None:
aggregate = np.mean
if lag < 1 or not isinstance(lag, (int, np.integer)):
raise ParameterError("lag must be a positive integer")
if max_size < 1 or not isinstance(max_size, (int, np.integer)):
raise ParameterError("max_size must be a positive integer")
# First, compute mel spectrogram
if S is None:
S = np.abs(feature(y=y, sr=sr, n_fft=n_fft, hop_length=hop_length, **kwargs))
# Convert to dBs
S = core.power_to_db(S)
# Ensure that S is at least 2-d
S = np.atleast_2d(S)
# Compute the reference spectrogram.
# Efficiency hack: skip filtering step and pass by reference
# if max_size will produce a no-op.
if ref is None:
if max_size == 1:
ref = S
else:
ref = scipy.ndimage.maximum_filter1d(S, max_size, axis=0)
elif ref.shape != S.shape:
raise ParameterError(
"Reference spectrum shape {} must match input spectrum {}".format(
ref.shape, S.shape
)
)
# Compute difference to the reference, spaced by lag
onset_env = S[:, lag:] - ref[:, :-lag]
# Discard negatives (decreasing amplitude)
onset_env = np.maximum(0.0, onset_env)
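    # Shape note (illustrative): with the default lag=1 and S of shape (d, m),
    # the lagged difference above has m - 1 columns; the constant padding
    # applied further below shifts the envelope back into alignment with the
    # original m frames (plus the optional centering offset).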
# Aggregate within channels
pad = True
if channels is None:
channels = [slice(None)]
else:
pad = False
if aggregate:
onset_env = util.sync(onset_env, channels, aggregate=aggregate, pad=pad, axis=0)
# compensate for lag
pad_width = lag
if center:
# Counter-act framing effects. Shift the onsets by n_fft / hop_length
pad_width += n_fft // (2 * hop_length)
onset_env = np.pad(onset_env, ([0, 0], [int(pad_width), 0]), mode="constant")
# remove the DC component
if detrend:
onset_env = scipy.signal.lfilter([1.0, -1.0], [1.0, -0.99], onset_env, axis=-1)
# Trim to match the input duration
if center:
onset_env = onset_env[:, : S.shape[1]]
return onset_env
| bmcfee/librosa | librosa/onset.py | Python | isc | 19,286 | 0.001141 |
import hashlib
import os
from database import Database
from email import send_welcome_email
from flask.ext.login import UserMixin
def get_hash(data):
hash = hashlib.sha256()
hash.update(os.environ['PASSWORD_TOKEN_SALT'])
hash.update(data)
return hash.hexdigest()
class User(UserMixin):
def __init__(self, user):
self.id = user['email']
self.user = user
@classmethod
def get(cls, id):
return User(Database.get_user(id))
@classmethod
def registered(cls, email):
return Database.get_user(email) != None
@classmethod
def create(cls, email):
Database.add_user(email)
send_welcome_email(email)
@classmethod
def update_password(cls, email, password):
Database.set_user_password(email, get_hash(password))
@classmethod
def login(cls, email, password):
user = Database.confirm_credentials(email, get_hash(password))
return None if user is None else User(user)
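# Minimal usage sketch (assumes the Database helpers and the
# PASSWORD_TOKEN_SALT environment variable are configured; the address and
# password are made up):
#   if not User.registered('a@example.com'):
#       User.create('a@example.com')
#   User.update_password('a@example.com', 's3cret')
#   user = User.login('a@example.com', 's3cret')  # returns None on bad credentials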
| bschoenfeld/va-court-scraper | courtutils/user.py | Python | mit | 990 | 0.00303 |
# Ant
#
# Copyright (c) 2012, Gustav Tiger <gustav@tiger.name>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import array
import collections
import struct
import threading
import time
import Queue
import logging
import usb
from message import Message
from commons import format_list
from driver import find_driver
_logger = logging.getLogger("garmin.ant.base.ant")
class Ant():
_RESET_WAIT = 1
def __init__(self):
self._driver = find_driver()
self._message_queue_cond = threading.Condition()
self._message_queue = collections.deque()
self._events = Queue.Queue()
self._buffer = array.array('B', [])
self._burst_data = array.array('B', [])
self._last_data = array.array('B', [])
self._running = True
self._driver.open()
self._worker_thread = threading.Thread(target=self._worker, name="ant.base")
self._worker_thread.start()
self.reset_system()
def start(self):
self._main()
def stop(self):
if self._running:
            _logger.debug("Stopping ant.base")
self._running = False
self._worker_thread.join()
def _on_broadcast(self, message):
self._events.put(('event', (message._data[0],
Message.Code.EVENT_RX_BROADCAST, message._data[1:])))
def _on_acknowledge(self, message):
self._events.put(('event', (message._data[0],
Message.Code.EVENT_RX_ACKNOWLEDGED, message._data[1:])))
def _on_burst_data(self, message):
sequence = message._data[0] >> 5
channel = message._data[0] & 0b00011111
data = message._data[1:]
# First sequence
if sequence == 0:
self._burst_data = data
# Other
else:
self._burst_data.extend(data)
# Last sequence (indicated by bit 3)
if sequence & 0b100 != 0:
self._events.put(('event', (channel,
Message.Code.EVENT_RX_BURST_PACKET, self._burst_data)))
def _worker(self):
_logger.debug("Ant runner started")
while self._running:
try:
message = self.read_message()
if message == None:
break
# TODO: flag and extended for broadcast, acknowledge, and burst
# Only do callbacks for new data. Resent data only indicates
# a new channel timeslot.
if not (message._id == Message.ID.BROADCAST_DATA and
message._data == self._last_data):
# Notifications
if message._id in [Message.ID.STARTUP_MESSAGE, \
Message.ID.SERIAL_ERROR_MESSAGE]:
self._events.put(('response', (None, message._id,
message._data)))
# Response (no channel)
elif message._id in [Message.ID.RESPONSE_VERSION, \
Message.ID.RESPONSE_CAPABILITIES, \
Message.ID.RESPONSE_SERIAL_NUMBER]:
self._events.put(('response', (None, message._id,
message._data)))
# Response (channel)
elif message._id in [Message.ID.RESPONSE_CHANNEL_STATUS, \
Message.ID.RESPONSE_CHANNEL_ID]:
self._events.put(('response', (message._data[0],
message._id, message._data[1:])))
# Response (other)
elif (message._id == Message.ID.RESPONSE_CHANNEL \
and message._data[1] != 0x01):
self._events.put(('response', (message._data[0],
message._data[1], message._data[2:])))
# Channel event
elif message._id == Message.ID.BROADCAST_DATA:
self._on_broadcast(message)
elif message._id == Message.ID.ACKNOWLEDGE_DATA:
self._on_acknowledge(message)
elif message._id == Message.ID.BURST_TRANSFER_DATA:
self._on_burst_data(message)
elif message._id == Message.ID.RESPONSE_CHANNEL:
_logger.debug("Got channel event, %r", message)
self._events.put(('event', (message._data[0],
message._data[1], message._data[2:])))
else:
_logger.warning("Got unknown message, %r", message)
else:
_logger.debug("No new data this period")
# Send messages in queue, on indicated time slot
if message._id == Message.ID.BROADCAST_DATA:
time.sleep(0.1)
_logger.debug("Got broadcast data, examine queue to see if we should send anything back")
if self._message_queue_cond.acquire(blocking=False):
while len(self._message_queue) > 0:
m = self._message_queue.popleft()
self.write_message(m)
_logger.debug(" - sent message from queue, %r", m)
if(m._id != Message.ID.BURST_TRANSFER_DATA or \
m._data[0] & 0b10000000):# or m._data[0] == 0):
break
else:
_logger.debug(" - no messages in queue")
self._message_queue_cond.release()
self._last_data = message._data
except usb.USBError as e:
_logger.warning("%s, %r", type(e), e.args)
_logger.debug("Ant runner stopped")
def _main(self):
while self._running:
try:
(event_type, event) = self._events.get(True, 1.0)
self._events.task_done()
(channel, event, data) = event
if event_type == 'response':
self.response_function(channel, event, data)
elif event_type == 'event':
self.channel_event_function(channel, event, data)
else:
                    _logger.warning("Unknown message type '%s': %r", event_type, event)
except Queue.Empty as e:
pass
def write_message_timeslot(self, message):
with self._message_queue_cond:
self._message_queue.append(message)
def write_message(self, message):
data = message.get()
self._driver.write(data)
_logger.debug("Write data: %s", format_list(data))
def read_message(self):
while self._running:
# If we have a message in buffer already, return it
if len(self._buffer) >= 5 and len(self._buffer) >= self._buffer[1] + 4:
packet = self._buffer[:self._buffer[1] + 4]
self._buffer = self._buffer[self._buffer[1] + 4:]
return Message.parse(packet)
# Otherwise, read some data and call the function again
else:
data = self._driver.read()
self._buffer.extend(data)
if data:
_logger.debug("Read data: %s (now have %s in buffer)",
format_list(data), format_list(self._buffer))
# Ant functions
def unassign_channel(self, channel):
pass
def assign_channel(self, channel, channelType, networkNumber):
message = Message(Message.ID.ASSIGN_CHANNEL, [channel, channelType, networkNumber])
self.write_message(message)
def open_channel(self, channel):
message = Message(Message.ID.OPEN_CHANNEL, [channel])
self.write_message(message)
def set_channel_id(self, channel, deviceNum, deviceType, transmissionType):
data = array.array('B', struct.pack("<BHBB", channel, deviceNum, deviceType, transmissionType))
message = Message(Message.ID.SET_CHANNEL_ID, data)
self.write_message(message)
def set_channel_period(self, channel, messagePeriod):
data = array.array('B', struct.pack("<BH", channel, messagePeriod))
message = Message(Message.ID.SET_CHANNEL_PERIOD, data)
self.write_message(message)
def set_channel_search_timeout(self, channel, timeout):
message = Message(Message.ID.SET_CHANNEL_SEARCH_TIMEOUT, [channel, timeout])
self.write_message(message)
def set_channel_rf_freq(self, channel, rfFreq):
message = Message(Message.ID.SET_CHANNEL_RF_FREQ, [channel, rfFreq])
self.write_message(message)
def set_network_key(self, network, key):
message = Message(Message.ID.SET_NETWORK_KEY, [network] + key)
self.write_message(message)
# This function is a bit of a mystery. It is mentioned in libgant,
# http://sportwatcher.googlecode.com/svn/trunk/libgant/gant.h and is
# also sent from the official ant deamon on windows.
def set_search_waveform(self, channel, waveform):
message = Message(Message.ID.SET_SEARCH_WAVEFORM, [channel] + waveform)
self.write_message(message)
def reset_system(self):
message = Message(Message.ID.RESET_SYSTEM, [0x00])
self.write_message(message)
time.sleep(self._RESET_WAIT)
def request_message(self, channel, messageId):
message = Message(Message.ID.REQUEST_MESSAGE, [channel, messageId])
self.write_message(message)
def send_acknowledged_data(self, channel, data):
assert len(data) == 8
message = Message(Message.ID.ACKNOWLEDGE_DATA,
array.array('B', [channel]) + data)
self.write_message_timeslot(message)
def send_burst_transfer_packet(self, channel_seq, data, first):
assert len(data) == 8
message = Message(Message.ID.BURST_TRANSFER_DATA,
array.array('B', [channel_seq]) + data)
self.write_message_timeslot(message)
def send_burst_transfer(self, channel, data):
assert len(data) % 8 == 0
_logger.debug("Send burst transfer, chan %s, data %s", channel, data)
packets = len(data) / 8
for i in range(packets):
sequence = ((i - 1) % 3) + 1
if i == 0:
sequence = 0
elif i == packets - 1:
sequence = sequence | 0b100
channel_seq = channel | sequence << 5
packet_data = data[i * 8:i * 8 + 8]
_logger.debug("Send burst transfer, packet %d, seq %d, data %s", i, sequence, packet_data)
self.send_burst_transfer_packet(channel_seq, packet_data, first=i==0)
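        # Worked example of the packing above (illustrative): for channel 0 and
        # a 24-byte payload (3 packets) the per-packet channel_seq bytes are
        #   i=0 -> sequence 0         -> 0x00
        #   i=1 -> sequence 1         -> 0x20  (sequence sits in the top 3 bits)
        #   i=2 -> sequence 2 | 0b100 -> 0xC0  (the 0b100 bit marks the last packet)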
def response_function(self, channel, event, data):
pass
def channel_event_function(self, channel, event, data):
pass
| ddboline/Garmin-Forerunner-610-Extractor_fork | ant/base/ant.py | Python | mit | 12,100 | 0.003884 |
# useMissiles
#
# Used by:
# Modules from group: Missile Launcher Heavy (12 of 12)
# Modules from group: Missile Launcher Rocket (15 of 15)
# Modules named like: Launcher (154 of 154)
# Structure Modules named like: Standup Launcher (7 of 7)
type = 'active', "projected"
def handler(fit, src, context):
# Set reload time to 10 seconds
src.reloadTime = 10000
if "projected" in context:
if src.item.group.name == 'Missile Launcher Bomb':
# Bomb Launcher Cooldown Timer
moduleReactivationDelay = src.getModifiedItemAttr("moduleReactivationDelay")
speed = src.getModifiedItemAttr("speed")
# Void and Focused Void Bombs
neutAmount = src.getModifiedChargeAttr("energyNeutralizerAmount")
if moduleReactivationDelay and neutAmount and speed:
fit.addDrain(src, speed + moduleReactivationDelay, neutAmount, 0)
# Lockbreaker Bombs
ecmStrengthBonus = src.getModifiedChargeAttr("scan{0}StrengthBonus".format(fit.scanType))
if ecmStrengthBonus:
strModifier = 1 - ecmStrengthBonus / fit.scanStrength
fit.ecmProjectedStr *= strModifier
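                # Worked example of the scaling above (numbers are illustrative):
                # with a scan strength of 20 and an ecmStrengthBonus of 2,
                # strModifier = 1 - 2/20 = 0.9, i.e. the projected ECM strength
                # is reduced by 10%.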
| bsmr-eve/Pyfa | eos/effects/usemissiles.py | Python | gpl-3.0 | 1,203 | 0.002494 |
"""
Django settings for channel_worm project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd0vy02-g#nq@lg!s%5v$w(jilj@af791#1-3k9y7ea3c)djj!w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'digitizer',
'ion_channel'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'channelworm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), 'templates', )],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'channelworm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# Pycharm detected this
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates').replace('\\', '/'),
)
# TEMPLATE_LOADERS = (
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# )
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(
os.path.dirname(__file__),
'static',
),
)
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'media')
MEDIA_URL = '/media/'
| joebowen/ChannelWormDjango | ChannelWorm/channelworm/settings.py | Python | mit | 3,520 | 0 |
# -*- coding: utf-8 -*-
class TrafficEntry:
def __init__(self, broadcast_count=0L, broadcast_bytes=0L, unicast_count=0L, unicast_bytes=0L):
self.broadcast_count = broadcast_count
self.broadcast_bytes = broadcast_bytes
self.unicast_count = unicast_count
self.unicast_bytes = unicast_bytes
class Traffic:
def __init__(self, send=TrafficEntry(), recv=TrafficEntry()):
self.send = send
self.recv = recv
def in_rpc_traffic(self, pack):
if pack is None:
return
self.recv.broadcast_bytes = pack.get_value("Recv.BroadcastBytes")
self.recv.broadcast_count = pack.get_value("Recv.BroadcastCount")
self.recv.unicast_bytes = pack.get_value("Recv.UnicastBytes")
self.recv.unicast_count = pack.get_value("Recv.UnicastCount")
self.send.broadcast_bytes = pack.get_value("Send.BroadcastBytes")
self.send.broadcast_count = pack.get_value("Send.BroadcastCount")
self.send.unicast_bytes = pack.get_value("Send.UnicastBytes")
self.send.unicast_count = pack.get_value("Send.UnicastCount")
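# Usage sketch (illustrative; `pack` stands for any object exposing the
# get_value() keys read above, e.g. a parsed management RPC pack):
#   t = Traffic()
#   t.in_rpc_traffic(pack)
#   total_recv = t.recv.unicast_bytes + t.recv.broadcast_bytes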
| relman/sevpn-mgmt-py | SevpnMgmtPy/admin_api/traffic.py | Python | mit | 1,143 | 0.000875 |
# coding=utf-8
"""**Tests for safe raster layer class**
contains tests for QGIS specific methods.
See test_io.py also
"""
__author__ = 'Dmitry Kolesov <kolesov.dm@gmail.com>'
__revision__ = '$Format:%H$'
__date__ = '28/12/2013'
__license__ = "GPL"
__copyright__ = 'Copyright 2012, Australia Indonesia Facility for '
__copyright__ += 'Disaster Reduction'
import os
import logging
import unittest
from qgis.core import QgsRasterLayer
from safe.storage.utilities import read_keywords
from safe.storage.raster import Raster
from safe.test.utilities import test_data_path, get_qgis_app
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
LOGGER = logging.getLogger('InaSAFE')
KEYWORD_PATH = test_data_path('hazard', 'jakarta_flood_design.xml')
RASTER_BASE = test_data_path('hazard', 'jakarta_flood_design')
class RasterTest(unittest.TestCase):
def setUp(self):
msg = 'Keyword file does not exist at %s' % KEYWORD_PATH
assert os.path.exists(KEYWORD_PATH), msg
def test_qgis_raster_layer_loading(self):
"""Test that reading from QgsRasterLayer works."""
# This line is the cause of the problem:
qgis_layer = QgsRasterLayer(RASTER_BASE + '.tif', 'test')
layer = Raster(data=qgis_layer)
qgis_extent = qgis_layer.dataProvider().extent()
qgis_extent = [qgis_extent.xMinimum(), qgis_extent.yMinimum(),
qgis_extent.xMaximum(), qgis_extent.yMaximum()]
        layer_extent = layer.get_bounding_box()
        self.assertListEqual(
            layer_extent, qgis_extent,
            'Expected %s extent, got %s' % (qgis_extent, layer_extent))
def test_convert_to_qgis_raster_layer(self):
"""Test that converting to QgsVectorLayer works."""
# Create vector layer
keywords = read_keywords(RASTER_BASE + '.keywords')
layer = Raster(data=RASTER_BASE + '.tif', keywords=keywords)
# Convert to QgsRasterLayer
qgis_layer = layer.as_qgis_native()
qgis_extent = qgis_layer.dataProvider().extent()
qgis_extent = [qgis_extent.xMinimum(), qgis_extent.yMinimum(),
qgis_extent.xMaximum(), qgis_extent.yMaximum()]
        layer_extent = layer.get_bounding_box()
        self.assertListEqual(
            layer_extent, qgis_extent,
            'Expected %s extent, got %s' % (qgis_extent, layer_extent))
if __name__ == '__main__':
suite = unittest.makeSuite(RasterTest, 'test')
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| dynaryu/inasafe | safe/storage/test/test_raster.py | Python | gpl-3.0 | 2,520 | 0.002778 |
#!/usr/bin/python3
#
# This file is part of Progesterone pipeline.
#
# Progesterone pipeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Progesterone pipeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Progesterone pipeline. If not, see <https://www.gnu.org/licenses/>.
#
from utils.mysqldb import *
import os
# UCSC does not have the pointers directly back to ENCODE, so I found them for ESR1 - by hand
# encode_esr1_xps.tsv must contain 3 columns: UCSC id, encode experiment id, and encode file id
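# (each data row is parsed as: ucsc_id <TAB> encode_experiment_id <TAB> encode_file_id)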
#########################################
def main():
conf_file = "/home/ivana/.mysql_conf"
mapping_file = "encode_esr1_xps.tsv"
for dependency in [conf_file, mapping_file]:
if not os.path.exists(dependency):
print(dependency,"not found")
exit()
encode_exp_id = {}
encode_file_id = {}
ucsc_ids = []
with open(mapping_file,"r") as inf:
for line in inf:
if 'UCSC' in line: continue # header
[ucsc, encode_exp, encode_file] = line.split("\t")[:3]
ucsc_ids.append(ucsc)
encode_exp_id[ucsc] = encode_exp
encode_file_id[ucsc] = encode_file
#########################
# plug in to local database
db = connect_to_mysql(conf_file)
cursor = db.cursor()
search_db(cursor,"set autocommit=1")
switch_to_db(cursor,'progesterone')
# this might not be the best idea if the database grows really large
    # first make sure each UCSC id gets its own single entry (split up comma-separated lists)
for line in search_db(cursor,"select id, external_id from xrefs where xtype='ucsc'"):
[xref_id, ucsc_str] = line
ucsc_ids_stored = ucsc_str.split(",")
if len(ucsc_ids_stored) <2: continue
for ucsc_id in ucsc_ids_stored:
store_or_update(cursor, 'xrefs', {'xtype':'ucsc', 'external_id':ucsc_id}, None)
# now for each single entry, make parent point to encode file, and encode file's parent to encode exp
for line in search_db(cursor,"select id, external_id from xrefs where xtype='ucsc' and external_id not like '%,%'"):
[ucsc_xref_id, ucsc_id] = line
if not ucsc_id in ucsc_ids: continue
encode_file_xref_id = store_or_update(cursor, 'xrefs', {'xtype':'encode', 'external_id': encode_file_id[ucsc_id]}, None)
search_db(cursor, "update xrefs set parent_id=%d where id=%d" % (encode_file_xref_id, ucsc_xref_id))
encode_exp_xref_id = store_or_update(cursor, 'xrefs', {'xtype':'encode', 'external_id': encode_exp_id[ucsc_id]}, None)
search_db(cursor, "update xrefs set parent_id=%d where id=%d" % (encode_exp_xref_id, encode_file_xref_id))
cursor.close()
db.close()
return True
#########################################
########################################
if __name__ == '__main__':
main()
| ivanamihalek/progesterone | 16_UCSC_sources_to_ENCODE.py | Python | gpl-2.0 | 3,087 | 0.024619 |
from django.shortcuts import render
from django.contrib.auth.models import User
from django.utils.text import slugify
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.parsers import JSONParser
from .models import Element, Bucket
from .serializers import ElementSerializer
def homepage(request):
if request.user.is_authenticated():
to_watch_slug = slugify("{} {}".format(request.user.username, 'to_watch'))
to_watch = Bucket.objects.get(slug=to_watch_slug)
watched_slug = slugify("{} {}".format(request.user.username, 'watched'))
watched = Bucket.objects.get(slug=watched_slug)
to_watch_elements = to_watch.element_set.all()
watched_elements = watched.element_set.all()
context = {'to_watch': to_watch_elements, 'watched': watched_elements}
else:
context = {}
return render(request, 'core/lists.html', context)
@api_view(['POST'])
def add_element(request):
if request.method == 'POST':
data = JSONParser().parse(request)
bucket_slug = slugify("{} {}".format(request.user.username, data['bucket']))
bucket = Bucket.objects.get(slug=bucket_slug)
data['bucket'] = bucket.id
try:
inst = Element.objects.get(name=data['name'], trakt_id=data['trakt_id'])
serializer = ElementSerializer(inst, data=data)
except ObjectDoesNotExist:
serializer = ElementSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
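# Illustrative JSON body for a POST to add_element (field names are the ones
# read above; the values are made up):
#   {"name": "Some Movie", "trakt_id": 1234, "bucket": "to_watch"}
# 'bucket' is slugified together with the username and resolved to the user's
# Bucket row before the element is saved.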
| skeuomorf/Binge | binge/core/views.py | Python | mit | 1,811 | 0.002209 |
from flask.ext.wtf import Form
from flask.ext.wtf.html5 import EmailField
from wtforms.validators import Required
from wtforms.fields import (
TextAreaField,
HiddenField
)
class LoginForm(Form):
email = EmailField('Email address', validators=[Required()])
next = HiddenField('next')
class FeedbackForm(Form):
feedback = TextAreaField('Your feedback', validators=[Required()])
| crossgovernmentservices/csdigital-prototype | application/frontend/forms.py | Python | mit | 402 | 0 |
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import xml.sax
import time
import uuid
import urllib
import boto
from boto.connection import AWSAuthConnection
from boto import handler
from boto.resultset import ResultSet
import boto.jsonresponse
import exception
import hostedzone
HZXML = """<?xml version="1.0" encoding="UTF-8"?>
<CreateHostedZoneRequest xmlns="%(xmlns)s">
<Name>%(name)s</Name>
<CallerReference>%(caller_ref)s</CallerReference>
<HostedZoneConfig>
<Comment>%(comment)s</Comment>
</HostedZoneConfig>
</CreateHostedZoneRequest>"""
#boto.set_stream_logger('dns')
class Route53Connection(AWSAuthConnection):
DefaultHost = 'route53.amazonaws.com'
"""The default Route53 API endpoint to connect to."""
Version = '2012-02-29'
"""Route53 API version."""
XMLNameSpace = 'https://route53.amazonaws.com/doc/2012-02-29/'
"""XML schema for this Route53 API version."""
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
port=None, proxy=None, proxy_port=None,
host=DefaultHost, debug=0, security_token=None,
validate_certs=True):
AWSAuthConnection.__init__(self, host,
aws_access_key_id, aws_secret_access_key,
True, port, proxy, proxy_port, debug=debug,
security_token=security_token,
validate_certs=validate_certs)
def _required_auth_capability(self):
return ['route53']
def make_request(self, action, path, headers=None, data='', params=None):
if params:
pairs = []
for key, val in params.iteritems():
if val is None:
continue
pairs.append(key + '=' + urllib.quote(str(val)))
path += '?' + '&'.join(pairs)
return AWSAuthConnection.make_request(self, action, path,
headers, data)
# Hosted Zones
def get_all_hosted_zones(self, start_marker=None, zone_list=None):
"""
Returns a Python data structure with information about all
Hosted Zones defined for the AWS account.
:param int start_marker: start marker to pass when fetching additional
results after a truncated list
:param list zone_list: a HostedZones list to prepend to results
"""
params = {}
if start_marker:
params = {'marker': start_marker}
response = self.make_request('GET', '/%s/hostedzone' % self.Version,
params=params)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element(list_marker='HostedZones',
item_marker=('HostedZone',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
if zone_list:
e['ListHostedZonesResponse']['HostedZones'].extend(zone_list)
while 'NextMarker' in e['ListHostedZonesResponse']:
next_marker = e['ListHostedZonesResponse']['NextMarker']
zone_list = e['ListHostedZonesResponse']['HostedZones']
e = self.get_all_hosted_zones(next_marker, zone_list)
return e
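    # Usage sketch (illustrative): iterate over every zone, relying on the
    # pagination handled above.
    #   conn = Route53Connection()
    #   response = conn.get_all_hosted_zones()
    #   for zone in response['ListHostedZonesResponse']['HostedZones']:
    #       print zone['Name'], zone['Id']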
def get_hosted_zone(self, hosted_zone_id):
"""
Get detailed information about a particular Hosted Zone.
:type hosted_zone_id: str
:param hosted_zone_id: The unique identifier for the Hosted Zone
"""
uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
response = self.make_request('GET', uri)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element(list_marker='NameServers',
item_marker=('NameServer',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
def get_hosted_zone_by_name(self, hosted_zone_name):
"""
Get detailed information about a particular Hosted Zone.
:type hosted_zone_name: str
:param hosted_zone_name: The fully qualified domain name for the Hosted
Zone
"""
if hosted_zone_name[-1] != '.':
hosted_zone_name += '.'
all_hosted_zones = self.get_all_hosted_zones()
for zone in all_hosted_zones['ListHostedZonesResponse']['HostedZones']:
#check that they gave us the FQDN for their zone
if zone['Name'] == hosted_zone_name:
return self.get_hosted_zone(zone['Id'].split('/')[-1])
def create_hosted_zone(self, domain_name, caller_ref=None, comment=''):
"""
Create a new Hosted Zone. Returns a Python data structure with
information about the newly created Hosted Zone.
:type domain_name: str
:param domain_name: The name of the domain. This should be a
fully-specified domain, and should end with a final period
as the last label indication. If you omit the final period,
Amazon Route 53 assumes the domain is relative to the root.
This is the name you have registered with your DNS registrar.
It is also the name you will delegate from your registrar to
the Amazon Route 53 delegation servers returned in
            response to this request.
:type caller_ref: str
:param caller_ref: A unique string that identifies the request
and that allows failed CreateHostedZone requests to be retried
without the risk of executing the operation twice. If you don't
provide a value for this, boto will generate a Type 4 UUID and
use that.
:type comment: str
:param comment: Any comments you want to include about the hosted
zone.
"""
if caller_ref is None:
caller_ref = str(uuid.uuid4())
params = {'name': domain_name,
'caller_ref': caller_ref,
'comment': comment,
'xmlns': self.XMLNameSpace}
xml = HZXML % params
uri = '/%s/hostedzone' % self.Version
response = self.make_request('POST', uri,
{'Content-Type': 'text/xml'}, xml)
body = response.read()
boto.log.debug(body)
if response.status == 201:
e = boto.jsonresponse.Element(list_marker='NameServers',
item_marker=('NameServer',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
else:
raise exception.DNSServerError(response.status,
response.reason,
body)
def delete_hosted_zone(self, hosted_zone_id):
uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
response = self.make_request('DELETE', uri)
body = response.read()
boto.log.debug(body)
if response.status not in (200, 204):
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
# Resource Record Sets
def get_all_rrsets(self, hosted_zone_id, type=None,
name=None, identifier=None, maxitems=None):
"""
Retrieve the Resource Record Sets defined for this Hosted Zone.
Returns the raw XML data returned by the Route53 call.
:type hosted_zone_id: str
:param hosted_zone_id: The unique identifier for the Hosted Zone
:type type: str
:param type: The type of resource record set to begin the record
listing from. Valid choices are:
* A
* AAAA
* CNAME
* MX
* NS
* PTR
* SOA
* SPF
* SRV
* TXT
Valid values for weighted resource record sets:
* A
* AAAA
* CNAME
* TXT
Valid values for Zone Apex Aliases:
* A
* AAAA
:type name: str
:param name: The first name in the lexicographic ordering of domain
names to be retrieved
:type identifier: str
:param identifier: In a hosted zone that includes weighted resource
record sets (multiple resource record sets with the same DNS
name and type that are differentiated only by SetIdentifier),
if results were truncated for a given DNS name and type,
the value of SetIdentifier for the next resource record
set that has the current DNS name and type
:type maxitems: int
:param maxitems: The maximum number of records
"""
from boto.route53.record import ResourceRecordSets
params = {'type': type, 'name': name,
'Identifier': identifier, 'maxitems': maxitems}
uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id)
response = self.make_request('GET', uri, params=params)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
rs = ResourceRecordSets(connection=self, hosted_zone_id=hosted_zone_id)
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
return rs
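    # Usage sketch (illustrative names): list up to 10 A records starting from a
    # given name.
    #   rrsets = conn.get_all_rrsets(zone_id, type='A',
    #                                name='www.example.com.', maxitems=10)
    #   for record in rrsets:
    #       print record.name, record.type, record.resource_records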
def change_rrsets(self, hosted_zone_id, xml_body):
"""
Create or change the authoritative DNS information for this
Hosted Zone.
Returns a Python data structure with information about the set of
changes, including the Change ID.
:type hosted_zone_id: str
:param hosted_zone_id: The unique identifier for the Hosted Zone
:type xml_body: str
:param xml_body: The list of changes to be made, defined in the
XML schema defined by the Route53 service.
"""
uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id)
response = self.make_request('POST', uri,
{'Content-Type': 'text/xml'},
xml_body)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
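    # Usage sketch (hedged): the XML body is normally produced by
    # boto.route53.record.ResourceRecordSets rather than written by hand; the
    # record name, type and value below are placeholders.
    #   changes = ResourceRecordSets(conn, hosted_zone_id)
    #   change = changes.add_change('CREATE', 'www.example.com.', 'A', ttl=300)
    #   change.add_value('192.0.2.1')
    #   changes.commit()  # builds the XML and calls change_rrsets()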
def get_change(self, change_id):
"""
Get information about a proposed set of changes, as submitted
by the change_rrsets method.
Returns a Python data structure with status information about the
changes.
:type change_id: str
:param change_id: The unique identifier for the set of changes.
This ID is returned in the response to the change_rrsets method.
"""
uri = '/%s/change/%s' % (self.Version, change_id)
response = self.make_request('GET', uri)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
| nanocell/lsync | python/boto/route53/connection.py | Python | gpl-3.0 | 13,538 | 0.000222 |
# -*- coding: utf-8 -*-
# Natural Language Toolkit: IBM Model 2
#
# Copyright (C) 2001-2013 NLTK Project
# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Lexical translation model that considers word order.
IBM Model 2 improves on Model 1 by accounting for word order.
An alignment probability is introduced, a(i | j,l,m), which predicts
a source word position, given its aligned target word's position.
The EM algorithm used in Model 2 is:
E step - In the training data, collect counts, weighted by prior
probabilities.
(a) count how many times a source language word is translated
into a target language word
(b) count how many times a particular position in the source
sentence is aligned to a particular position in the target
sentence
M step - Estimate new probabilities based on the counts from the E step
Notations:
i: Position in the source sentence
Valid values are 0 (for NULL), 1, 2, ..., length of source sentence
j: Position in the target sentence
Valid values are 1, 2, ..., length of target sentence
l: Number of words in the source sentence, excluding NULL
m: Number of words in the target sentence
s: A word in the source language
t: A word in the target language
References:
Philipp Koehn. 2010. Statistical Machine Translation.
Cambridge University Press, New York.
Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and
Robert L. Mercer. 1993. The Mathematics of Statistical Machine
Translation: Parameter Estimation. Computational Linguistics, 19 (2),
263-311.
"""
from __future__ import division
from collections import defaultdict
from nltk.translate import AlignedSent
from nltk.translate import Alignment
from nltk.translate import IBMModel
from nltk.translate import IBMModel1
from nltk.translate.ibm_model import Counts
import warnings
class IBMModel2(IBMModel):
"""
Lexical translation model that considers word order
>>> bitext = []
>>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small']))
>>> bitext.append(AlignedSent(['das', 'haus', 'ist', 'ja', 'groß'], ['the', 'house', 'is', 'big']))
>>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small']))
>>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house']))
>>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book']))
>>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book']))
>>> ibm2 = IBMModel2(bitext, 5)
>>> print(round(ibm2.translation_table['buch']['book'], 3))
1.0
>>> print(round(ibm2.translation_table['das']['book'], 3))
0.0
>>> print(round(ibm2.translation_table['buch'][None], 3))
0.0
>>> print(round(ibm2.translation_table['ja'][None], 3))
0.0
>>> print(ibm2.alignment_table[1][1][2][2])
0.938...
>>> print(round(ibm2.alignment_table[1][2][2][2], 3))
0.0
>>> print(round(ibm2.alignment_table[2][2][4][5], 3))
1.0
>>> test_sentence = bitext[2]
>>> test_sentence.words
['das', 'buch', 'ist', 'ja', 'klein']
>>> test_sentence.mots
['the', 'book', 'is', 'small']
>>> test_sentence.alignment
Alignment([(0, 0), (1, 1), (2, 2), (3, 2), (4, 3)])
"""
def __init__(self, sentence_aligned_corpus, iterations,
probability_tables=None):
"""
Train on ``sentence_aligned_corpus`` and create a lexical
translation model and an alignment model.
Translation direction is from ``AlignedSent.mots`` to
``AlignedSent.words``.
:param sentence_aligned_corpus: Sentence-aligned parallel corpus
:type sentence_aligned_corpus: list(AlignedSent)
:param iterations: Number of iterations to run training algorithm
:type iterations: int
:param probability_tables: Optional. Use this to pass in custom
probability values. If not specified, probabilities will be
set to a uniform distribution, or some other sensible value.
If specified, all the following entries must be present:
``translation_table``, ``alignment_table``.
See ``IBMModel`` for the type and purpose of these tables.
:type probability_tables: dict[str]: object
"""
super(IBMModel2, self).__init__(sentence_aligned_corpus)
if probability_tables is None:
# Get translation probabilities from IBM Model 1
# Run more iterations of training for Model 1, since it is
# faster than Model 2
ibm1 = IBMModel1(sentence_aligned_corpus, 2 * iterations)
self.translation_table = ibm1.translation_table
self.set_uniform_probabilities(sentence_aligned_corpus)
else:
# Set user-defined probabilities
self.translation_table = probability_tables['translation_table']
self.alignment_table = probability_tables['alignment_table']
for n in range(0, iterations):
self.train(sentence_aligned_corpus)
self.__align_all(sentence_aligned_corpus)
def set_uniform_probabilities(self, sentence_aligned_corpus):
# a(i | j,l,m) = 1 / (l+1) for all i, j, l, m
l_m_combinations = set()
for aligned_sentence in sentence_aligned_corpus:
l = len(aligned_sentence.mots)
m = len(aligned_sentence.words)
if (l, m) not in l_m_combinations:
l_m_combinations.add((l, m))
initial_prob = 1 / (l + 1)
if initial_prob < IBMModel.MIN_PROB:
warnings.warn("A source sentence is too long (" + str(l) +
" words). Results may be less accurate.")
for i in range(0, l + 1):
for j in range(1, m + 1):
self.alignment_table[i][j][l][m] = initial_prob
def train(self, parallel_corpus):
counts = Model2Counts()
for aligned_sentence in parallel_corpus:
src_sentence = [None] + aligned_sentence.mots
trg_sentence = ['UNUSED'] + aligned_sentence.words # 1-indexed
l = len(aligned_sentence.mots)
m = len(aligned_sentence.words)
# E step (a): Compute normalization factors to weigh counts
total_count = self.prob_all_alignments(src_sentence, trg_sentence)
# E step (b): Collect counts
for j in range(1, m + 1):
t = trg_sentence[j]
for i in range(0, l + 1):
s = src_sentence[i]
count = self.prob_alignment_point(
i, j, src_sentence, trg_sentence)
normalized_count = count / total_count[t]
counts.update_lexical_translation(normalized_count, s, t)
counts.update_alignment(normalized_count, i, j, l, m)
# M step: Update probabilities with maximum likelihood estimates
self.maximize_lexical_translation_probabilities(counts)
self.maximize_alignment_probabilities(counts)
def maximize_alignment_probabilities(self, counts):
MIN_PROB = IBMModel.MIN_PROB
for i, j_s in counts.alignment.items():
for j, src_sentence_lengths in j_s.items():
for l, trg_sentence_lengths in src_sentence_lengths.items():
for m in trg_sentence_lengths:
estimate = (counts.alignment[i][j][l][m] /
counts.alignment_for_any_i[j][l][m])
self.alignment_table[i][j][l][m] = max(estimate,
MIN_PROB)
def prob_all_alignments(self, src_sentence, trg_sentence):
"""
Computes the probability of all possible word alignments,
expressed as a marginal distribution over target words t
Each entry in the return value represents the contribution to
the total alignment probability by the target word t.
To obtain probability(alignment | src_sentence, trg_sentence),
simply sum the entries in the return value.
:return: Probability of t for all s in ``src_sentence``
:rtype: dict(str): float
"""
alignment_prob_for_t = defaultdict(lambda: 0.0)
for j in range(1, len(trg_sentence)):
t = trg_sentence[j]
for i in range(0, len(src_sentence)):
alignment_prob_for_t[t] += self.prob_alignment_point(
i, j, src_sentence, trg_sentence)
return alignment_prob_for_t
def prob_alignment_point(self, i, j, src_sentence, trg_sentence):
"""
Probability that position j in ``trg_sentence`` is aligned to
position i in the ``src_sentence``
"""
l = len(src_sentence) - 1
m = len(trg_sentence) - 1
s = src_sentence[i]
t = trg_sentence[j]
return self.translation_table[t][s] * self.alignment_table[i][j][l][m]
def prob_t_a_given_s(self, alignment_info):
"""
Probability of target sentence and an alignment given the
source sentence
"""
prob = 1.0
l = len(alignment_info.src_sentence) - 1
m = len(alignment_info.trg_sentence) - 1
for j, i in enumerate(alignment_info.alignment):
if j == 0:
                continue  # skip the dummy zeroth element
trg_word = alignment_info.trg_sentence[j]
src_word = alignment_info.src_sentence[i]
prob *= (self.translation_table[trg_word][src_word] *
self.alignment_table[i][j][l][m])
return max(prob, IBMModel.MIN_PROB)
def __align_all(self, parallel_corpus):
for sentence_pair in parallel_corpus:
self.__align(sentence_pair)
def __align(self, sentence_pair):
"""
Determines the best word alignment for one sentence pair from
the corpus that the model was trained on.
The best alignment will be set in ``sentence_pair`` when the
method returns. In contrast with the internal implementation of
IBM models, the word indices in the ``Alignment`` are zero-
indexed, not one-indexed.
:param sentence_pair: A sentence in the source language and its
counterpart sentence in the target language
:type sentence_pair: AlignedSent
"""
best_alignment = []
l = len(sentence_pair.mots)
m = len(sentence_pair.words)
for j, trg_word in enumerate(sentence_pair.words):
# Initialize trg_word to align with the NULL token
best_prob = (self.translation_table[trg_word][None] *
self.alignment_table[0][j + 1][l][m])
best_prob = max(best_prob, IBMModel.MIN_PROB)
best_alignment_point = None
for i, src_word in enumerate(sentence_pair.mots):
align_prob = (self.translation_table[trg_word][src_word] *
self.alignment_table[i + 1][j + 1][l][m])
if align_prob >= best_prob:
best_prob = align_prob
best_alignment_point = i
best_alignment.append((j, best_alignment_point))
sentence_pair.alignment = Alignment(best_alignment)
class Model2Counts(Counts):
"""
Data object to store counts of various parameters during training.
Includes counts for alignment.
"""
def __init__(self):
super(Model2Counts, self).__init__()
self.alignment = defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(
lambda: 0.0))))
self.alignment_for_any_i = defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: 0.0)))
def update_lexical_translation(self, count, s, t):
self.t_given_s[t][s] += count
self.any_t_given_s[s] += count
def update_alignment(self, count, i, j, l, m):
self.alignment[i][j][l][m] += count
self.alignment_for_any_i[j][l][m] += count
| sdoran35/hate-to-hugs | venv/lib/python3.6/site-packages/nltk/translate/ibm2.py | Python | mit | 12,271 | 0.000815 |
def index_power(array, n):
    if n >= len(array):
return -1
else:
return array[n]**n
if __name__ == '__main__':
# These "asserts" using only for self-checking and not necessary for auto-testing
assert index_power([1, 2, 3, 4], 2) == 9, "Square"
assert index_power([1, 3, 10, 100], 3) == 1000000, "Cube"
assert index_power([0, 1], 0) == 1, "Zero power"
assert index_power([1, 2], 3) == -1, "IndexError" | nesterione/problem-solving-and-algorithms | problems/Empireofcode/IndexPower.py | Python | apache-2.0 | 441 | 0.018141 |
# Copyright (C) 2001-2005 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Opcodes."""
import dns.exception
QUERY = 0
IQUERY = 1
STATUS = 2
NOTIFY = 4
UPDATE = 5
_by_text = {
'QUERY' : QUERY,
'IQUERY' : IQUERY,
'STATUS' : STATUS,
'NOTIFY' : NOTIFY,
'UPDATE' : UPDATE
}
# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be a true inverse.
_by_value = dict([(y, x) for x, y in _by_text.iteritems()])
class UnknownOpcode(dns.exception.DNSException):
"""Raised if an opcode is unknown."""
pass
def from_text(text):
"""Convert text into an opcode.
@param text: the textual opcode
@type text: string
@raises UnknownOpcode: the opcode is unknown
@rtype: int
"""
if text.isdigit():
value = int(text)
if value >= 0 and value <= 15:
return value
value = _by_text.get(text.upper())
if value is None:
raise UnknownOpcode
return value
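# Worked example (sketch): from_text('NOTIFY') returns 4, and from_text('7')
# returns 7, because numeric strings in the range 0-15 are accepted verbatim.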
def from_flags(flags):
"""Extract an opcode from DNS message flags.
@param flags: int
@rtype: int
"""
return (flags & 0x7800) >> 11
def to_flags(value):
"""Convert an opcode to a value suitable for ORing into DNS message
flags.
@rtype: int
"""
return (value << 11) & 0x7800
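# Worked example (sketch): UPDATE is 5, so to_flags(UPDATE) == (5 << 11) & 0x7800
# == 0x2800, and from_flags(0x2800) == (0x2800 & 0x7800) >> 11 == 5 again.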
def to_text(value):
"""Convert an opcode to text.
    @param value: the opcode
@type value: int
@raises UnknownOpcode: the opcode is unknown
@rtype: string
"""
text = _by_value.get(value)
if text is None:
text = str(value)
return text
def is_update(flags):
"""True if the opcode in flags is UPDATE.
@param flags: DNS flags
@type flags: int
@rtype: bool
"""
if (from_flags(flags) == UPDATE):
return True
return False
| liyongyue/dnsspider | dns/opcode.py | Python | isc | 2,603 | 0.005763 |
import unittest
import lexer
import parser
from tree import *
from tree import _TreeBuilder
class BuilderTest(unittest.TestCase):
''' Test of the _TreeBuilder (and Leaf and Tree) class. '''
def testTreeBuilding(self):
''' Test that the tree builder constructs trees correctly when
parsed. '''
l = lexer.Lexer("(('foo' : 0.1, 'bar' : 1.0) : 2, baz)")
handler = _TreeBuilder()
p = parser._Parser(l,handler)
p.parse()
t = handler.get_result()
self.assertEqual(len(t.get_edges()),2)
(t1,b1,l1), (t2,b2,l2) = t.get_edges()
self.assertEqual(len(t1.get_edges()),2)
self.assertEqual(l1, 2.0)
self.assertEqual(t2.__class__, Leaf)
self.assertEqual(l2, None)
self.assertEqual(t.leaves_identifiers, ['foo','bar','baz'])
class TestParseTree(unittest.TestCase):
''' Test of the parse_tree() function. '''
def testTreeStructure(self):
''' Test that a parsed tree has the right structure. '''
t = parse_tree("(('foo' : 0.1, 'bar' : 1.0) : 2, baz)")
self.assertEqual(len(t.get_edges()),2)
(t1,b1,l1), (t2,b2,l2) = t.get_edges()
self.assertEqual(len(t1.get_edges()),2)
self.assertEqual(l1, 2.0)
self.assertEqual(t2.__class__, Leaf)
self.assertEqual(l2, None)
self.assertEqual(t.leaves_identifiers, ['foo','bar','baz'])
def testSpecialCases(self):
''' Test that we can parse some special cases of trees. '''
tree = parse_tree("(B,(A,C,E),D);")
self.assertEqual(tree.leaves_identifiers,['B','A','C','E','D'])
tree = parse_tree("(,(,,),);")
self.assertEqual(tree.leaves_identifiers,['']*5)
# underscores are considered empty leaf names!
tree = parse_tree("(_,(_,_,_),_);")
self.assertEqual(tree.leaves_identifiers,['']*5)
# the rest is just checking that we do not crash on this input...
parse_tree("""
(
('Chimp':0.052625,
'Human':0.042375):0.007875,
'Gorilla':0.060125,
('Gibbon':0.124833,
'Orangutan':0.0971667):0.038875
);
""")
parse_tree("""
(
('Chimp':0.052625,
'Human':0.042375) 0.71 : 0.007875,
'Gorilla':0.060125,
('Gibbon':0.124833,
'Orangutan':0.0971667) 1.00 :0.038875
);
""")
class TreeTest(unittest.TestCase):
''' Test of the Tree (and Leaf and _TreeBuilder) class. '''
def testProperties(self):
''' Test that the tree properties lets us extract the right
information. '''
t = parse_tree('((A,B),C);')
self.assertEqual(t.leaves_identifiers, ['A','B','C'])
self.assertNotEqual(t.leaves, ['A','B','C'])
self.assertEqual(len(t.edges), 2)
(n1,_,_), (n2,_,_) = t.edges
self.assertEqual(type(n1), Tree)
self.assertEqual(type(n2), Leaf)
self.assertEqual(n2.identifier, 'C')
class TestFunctions(unittest.TestCase):
''' Test of the module-level functions. '''
def testAddParentLink(self):
''' Test the add_parent_links() function. '''
t = parse_tree('((A,B),C);')
add_parent_links(t)
self.assertEqual([str(l.parent) for l in t.leaves],
["('A', 'B')", "('A', 'B')", "(('A', 'B'), 'C')"])
def testLabel(self):
''' Test if trees with labels are parsed correctly. '''
t = parse_tree("(('A', 'B')label, 'C')")
self.assertEqual(str(t), "(('A', 'B')label, 'C')")
t = parse_tree("(('A', 'B')label, 'C')treelabel")
self.assertEqual(t.identifier, "treelabel")
t = parse_tree("(('A', 'B')label, 'C')1")
self.assertEqual(t.identifier, "1")
def testAddDistanceFromRoot(self):
''' Test the add_distance_from_root() function. '''
t = parse_tree('((A,B),C);')
add_distance_from_root(t)
self.assertEqual([l.distance_from_root for l in t.leaves],[0,0,0])
t = parse_tree('((A:2,B:3):1,C:6);')
add_distance_from_root(t)
self.assertEqual([l.distance_from_root for l in t.leaves],[3,4,6])
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(BuilderTest))
test_suite.addTest(unittest.makeSuite(TestParseTree))
test_suite.addTest(unittest.makeSuite(TreeTest))
test_suite.addTest(unittest.makeSuite(TestFunctions))
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(test_suite)
# from tree import TreeVisitor
# def relabel(tree):
# "Relabel the tree's leaves."
# # visitor pattern.
# class V(TreeVisitor):
# def __init__(self):
# self.count = 0
# def visit_leaf(self,leaf):
# leaf.identifier = str(self.count)
# self.count += 1
# # let visitor traverse tree
# tree.dfs_traverse(V())
#
# relabel(t)
# print t
| ptdtan/Ragout | lib/newick/treetest.py | Python | gpl-3.0 | 4,858 | 0.009881 |
from rx import Observable
from rx.testing import TestScheduler, ReactiveTest, is_prime, MockDisposable
from rx.disposables import Disposable, SerialDisposable
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class RxException(Exception):
pass
# Helper function for raising exceptions within lambdas
def _raise(ex):
raise RxException(ex)
class BooleanDisposable(object):
def __init__(self):
self.is_disposed = False
def dispose(self):
self.is_disposed = True
return self.is_disposed
def test_return_basic():
scheduler = TestScheduler()
def factory():
return Observable.return_value(42, scheduler)
results = scheduler.start(factory)
results.messages.assert_equal(
on_next(201, 42),
on_completed(201))
def test_return_disposed():
scheduler = TestScheduler()
def factory():
return Observable.return_value(42, scheduler)
results = scheduler.start(factory, disposed=200)
results.messages.assert_equal()
def test_return_disposed_after_next():
scheduler = TestScheduler()
d = SerialDisposable()
xs = Observable.return_value(42, scheduler)
results = scheduler.create_observer()
def action(scheduler, state):
def on_next(x):
d.dispose()
results.on_next(x)
def on_error(e):
results.on_error(e)
def on_completed():
results.on_completed()
d.disposable = xs.subscribe(on_next, on_error, on_completed)
return d.disposable
scheduler.schedule_absolute(100, action)
scheduler.start()
results.messages.assert_equal(on_next(101, 42))
def test_return_observer_throws():
scheduler1 = TestScheduler()
xs = Observable.return_value(1, scheduler1)
xs.subscribe(lambda x: _raise('ex'))
try:
scheduler1.start()
except RxException:
pass
scheduler2 = TestScheduler()
ys = Observable.return_value(1, scheduler2)
ys.subscribe(lambda x: x, lambda ex: ex, lambda: _raise('ex'))
try:
scheduler2.start()
except RxException:
pass
def test_never_basic():
scheduler = TestScheduler()
xs = Observable.never()
results = scheduler.create_observer()
xs.subscribe(results)
scheduler.start()
results.messages.assert_equal()
def test_throw_exception_basic():
scheduler = TestScheduler()
ex = 'ex'
def factory():
return Observable.throw_exception(ex, scheduler)
results = scheduler.start(factory)
results.messages.assert_equal(on_error(201, ex))
def test_throw_disposed():
scheduler = TestScheduler()
def factory():
return Observable.throw_exception('ex', scheduler)
results = scheduler.start(factory, disposed=200)
results.messages.assert_equal()
def test_throw_observer_throws():
scheduler = TestScheduler()
xs = Observable.throw_exception('ex', scheduler)
xs.subscribe(lambda x: None, lambda ex: _raise('ex'), lambda: None)
try:
return scheduler.start()
except RxException:
pass
def test_empty_basic():
scheduler = TestScheduler()
def factory():
return Observable.empty(scheduler)
results = scheduler.start(factory)
results.messages.assert_equal(on_completed(201))
def test_empty_disposed():
scheduler = TestScheduler()
def factory():
return Observable.empty(scheduler)
results = scheduler.start(factory, disposed=200)
results.messages.assert_equal()
def test_empty_observer_throw_exception():
scheduler = TestScheduler()
xs = Observable.empty(scheduler)
xs.subscribe(lambda x: None, lambda ex: None, lambda: _raise('ex'))
try:
return scheduler.start()
except RxException:
pass
def test__subscribe_to_enumerable_finite():
enumerable_finite = [1, 2, 3, 4, 5]
scheduler = TestScheduler()
def create():
return Observable.from_array(enumerable_finite, scheduler)
results = scheduler.start(create)
results.messages.assert_equal(
on_next(201, 1),
on_next(202, 2),
on_next(203, 3),
on_next(204, 4),
on_next(205, 5),
on_completed(206)
)
def test_generate_finite():
scheduler = TestScheduler()
def create():
return Observable.generate(0,
lambda x: x <= 3,
lambda x: x + 1,
lambda x: x,
scheduler)
results = scheduler.start(create)
results.messages.assert_equal(
on_next(201, 0),
on_next(202, 1),
on_next(203, 2),
on_next(204, 3),
on_completed(205)
)
def test_generate_throw_condition():
scheduler = TestScheduler()
ex = 'ex'
def create():
return Observable.generate(0,
lambda x: _raise('ex'),
lambda x: x + 1,
lambda x: x,
scheduler)
results = scheduler.start(create)
results.messages.assert_equal(on_error(201, ex))
def test_generate_throw_result_selector():
scheduler = TestScheduler()
ex = 'ex'
def create():
return Observable.generate(0,
lambda x: True,
lambda x: x + 1,
lambda x: _raise('ex'),
scheduler)
results = scheduler.start(create)
results.messages.assert_equal(on_error(201, ex))
def test_generate_throw_iterate():
scheduler = TestScheduler()
ex = 'ex'
def create():
return Observable.generate(0,
lambda x: True,
lambda x: _raise(ex),
lambda x: x,
scheduler)
results = scheduler.start(create)
results.messages.assert_equal(
on_next(201, 0),
on_error(202, ex)
)
def test_generate_dispose():
scheduler = TestScheduler()
ex = 'ex'
def create():
return Observable.generate(0,
lambda x: True,
lambda x: x + 1,
lambda x: x,
scheduler)
results = scheduler.start(create, disposed=203)
results.messages.assert_equal(
on_next(201, 0),
on_next(202, 1))
def test_defer_complete():
xs = None
invoked = 0
scheduler = TestScheduler()
def create():
def defer():
nonlocal invoked, xs
invoked += 1
xs = scheduler.create_cold_observable(
on_next(100, scheduler.clock),
on_completed(200)
)
return xs
return Observable.defer(defer)
results = scheduler.start(create)
results.messages.assert_equal(
on_next(300, 200),
on_completed(400)
)
assert(1 == invoked)
return xs.subscriptions.assert_equal(subscribe(200, 400))
def test_defer_error():
scheduler = TestScheduler()
invoked = 0
xs = None
ex = 'ex'
def create():
def defer():
nonlocal invoked, xs
invoked += 1
xs = scheduler.create_cold_observable(on_next(100, scheduler.clock), on_error(200, ex))
return xs
return Observable.defer(defer)
results = scheduler.start(create)
results.messages.assert_equal(on_next(300, 200), on_error(400, ex))
assert (1 == invoked)
return xs.subscriptions.assert_equal(subscribe(200, 400))
def test_defer_dispose():
scheduler = TestScheduler()
invoked = 0
xs = None
def create():
def defer():
nonlocal invoked, xs
invoked += 1
xs = scheduler.create_cold_observable(on_next(100, scheduler.clock), on_next(200, invoked), on_next(1100, 1000))
return xs
return Observable.defer(defer)
results = scheduler.start(create)
results.messages.assert_equal(on_next(300, 200), on_next(400, 1))
assert(1 == invoked)
return xs.subscriptions.assert_equal(subscribe(200, 1000))
def test_defer_throw():
scheduler = TestScheduler()
invoked = 0
ex = 'ex'
def create():
def defer():
nonlocal invoked
invoked += 1
raise Exception(ex)
return Observable.defer(defer)
results = scheduler.start(create)
results.messages.assert_equal(on_error(200, ex))
assert(1 == invoked)
def test_using_null():
disposable = None
xs = None
_d = None
scheduler = TestScheduler()
dispose_invoked = 0
create_invoked = 0
def create():
def create_resources():
nonlocal dispose_invoked
dispose_invoked += 1
disposable = None
return disposable
def create_observable(d):
nonlocal create_invoked, xs, _d
_d = d
create_invoked += 1
xs = scheduler.create_cold_observable(on_next(100, scheduler.clock), on_completed(200))
return xs
return Observable.using(create_resources, create_observable)
results = scheduler.start(create)
assert(disposable == _d)
results.messages.assert_equal(on_next(300, 200), on_completed(400))
assert(1 == create_invoked)
assert(1 == dispose_invoked)
xs.subscriptions.assert_equal(subscribe(200, 400))
assert(disposable == None)
def test_using_complete():
disposable = None
xs = None
_d = None
scheduler = TestScheduler()
dispose_invoked = 0
create_invoked = 0
def create():
def create_resource():
nonlocal dispose_invoked, disposable
dispose_invoked += 1
disposable = MockDisposable(scheduler)
return disposable
def create_observable(d):
nonlocal _d, create_invoked, xs
_d = d
create_invoked += 1
xs = scheduler.create_cold_observable(on_next(100, scheduler.clock), on_completed(200))
return xs
return Observable.using(create_resource, create_observable)
results = scheduler.start(create)
assert(disposable == _d)
results.messages.assert_equal(on_next(300, 200), on_completed(400))
assert(create_invoked == 1)
assert(dispose_invoked == 1)
xs.subscriptions.assert_equal(subscribe(200, 400))
disposable.disposes.assert_equal(200, 400)
def test_using_error():
scheduler = TestScheduler()
dispose_invoked = 0
create_invoked = 0
ex = 'ex'
disposable = None
xs = None
_d = None
def create():
def create_resource():
nonlocal dispose_invoked, disposable
dispose_invoked += 1
disposable = MockDisposable(scheduler)
return disposable
def create_observable(d):
nonlocal _d, create_invoked, xs
_d = d
create_invoked += 1
xs = scheduler.create_cold_observable(on_next(100, scheduler.clock), on_error(200, ex))
return xs
return Observable.using(create_resource, create_observable)
results = scheduler.start(create)
assert (disposable == _d)
results.messages.assert_equal(on_next(300, 200), on_error(400, ex))
assert(create_invoked == 1)
assert(dispose_invoked == 1)
xs.subscriptions.assert_equal(subscribe(200, 400))
disposable.disposes.assert_equal(200, 400)
def test_using_dispose():
disposable = None
xs = None
_d = None
scheduler = TestScheduler()
dispose_invoked = 0
create_invoked = 0
def create():
def create_resource():
nonlocal dispose_invoked, disposable
dispose_invoked += 1
disposable = MockDisposable(scheduler)
return disposable
def create_observable(d):
nonlocal _d, create_invoked, xs
_d = d
create_invoked += 1
xs = scheduler.create_cold_observable(on_next(100, scheduler.clock), on_next(1000, scheduler.clock + 1))
return xs
return Observable.using(create_resource, create_observable)
results = scheduler.start(create)
assert(disposable == _d)
results.messages.assert_equal(on_next(300, 200))
assert(1 == create_invoked)
assert(1 == dispose_invoked)
xs.subscriptions.assert_equal(subscribe(200, 1000))
disposable.disposes.assert_equal(200, 1000)
def test_using_throw_resource_selector():
scheduler = TestScheduler()
dispose_invoked = 0
create_invoked = 0
ex = 'ex'
def create():
def create_resource():
nonlocal dispose_invoked
dispose_invoked += 1
raise _raise(ex)
def create_observable(d):
nonlocal create_invoked
create_invoked += 1
return Observable.never()
return Observable.using(create_resource, create_observable)
results = scheduler.start(create)
results.messages.assert_equal(on_error(200, ex))
assert(0 == create_invoked)
assert(1 == dispose_invoked)
def test_using_throw_resource_usage():
scheduler = TestScheduler()
dispose_invoked = 0
create_invoked = 0
disposable = None
ex = 'ex'
def create():
def create_resource():
nonlocal disposable, dispose_invoked
dispose_invoked += 1
disposable = MockDisposable(scheduler)
return disposable
def create_observable(d):
nonlocal create_invoked
create_invoked += 1
_raise(ex)
return Observable.using(create_resource, create_observable)
results = scheduler.start(create)
results.messages.assert_equal(on_error(200, ex))
assert(1 == create_invoked)
assert(1 == dispose_invoked)
return disposable.disposes.assert_equal(200, 200)
def test_create_next():
scheduler = TestScheduler()
def create():
def subscribe(o):
o.on_next(1)
o.on_next(2)
return lambda: None
return Observable.create(subscribe)
results = scheduler.start(create)
results.messages.assert_equal(on_next(200, 1), on_next(200, 2))
def test_create_completed():
scheduler = TestScheduler()
def create():
def subscribe(o):
o.on_completed()
o.on_next(100)
o.on_error('ex')
o.on_completed()
return lambda: None
return Observable.create(subscribe)
results = scheduler.start(create)
results.messages.assert_equal(on_completed(200))
def test_create_error():
scheduler = TestScheduler()
ex = 'ex'
def create():
def subscribe(o):
o.on_error(ex)
o.on_next(100)
o.on_error('foo')
o.on_completed()
return lambda: None
return Observable.create(subscribe)
results = scheduler.start(create)
results.messages.assert_equal(on_error(200, ex))
def test_create_exception():
try:
return Observable.create(lambda o: _raise('ex')).subscribe()
except RxException:
pass
def test_create_dispose():
scheduler = TestScheduler()
def create():
def subscribe(o):
is_stopped = False
o.on_next(1)
o.on_next(2)
def action1(scheduler, state):
if not is_stopped:
return o.on_next(3)
scheduler.schedule_relative(600, action1)
def action2(scheduler, state):
if not is_stopped:
return o.on_next(4)
scheduler.schedule_relative(700, action2)
def action3(scheduler, state):
if not is_stopped:
return o.on_next(5)
scheduler.schedule_relative(900, action3)
def action4(scheduler, state):
if not is_stopped:
return o.on_next(6)
scheduler.schedule_relative(1100, action4)
def dispose():
nonlocal is_stopped
is_stopped = True
return dispose
return Observable.create(subscribe)
results = scheduler.start(create)
results.messages.assert_equal(on_next(200, 1), on_next(200, 2), on_next(800, 3), on_next(900, 4))
def test_create_observer_throws():
def subscribe(o):
o.on_next(1)
return lambda: None
try:
return Observable.create(subscribe).subscribe(lambda x: _raise('ex'))
except RxException:
pass
def subscribe2(o):
o.on_error('exception')
return lambda: None
try:
return Observable.create(subscribe2).subscribe(on_error=lambda ex: _raise('ex'))
except RxException:
pass
def subscribe3(o):
o.on_completed()
return lambda: None
try:
        return Observable.create(subscribe3).subscribe(on_completed=lambda: _raise('ex'))
except RxException:
pass
def test_create_with_disposable_next():
scheduler = TestScheduler()
def create():
def subscribe(o):
o.on_next(1)
o.on_next(2)
return Disposable.empty()
return Observable.create_with_disposable(subscribe)
results = scheduler.start(create)
results.messages.assert_equal(on_next(200, 1), on_next(200, 2))
def test_create_with_disposable_completed():
scheduler = TestScheduler()
def create():
def subscribe(o):
o.on_completed()
o.on_next(100)
o.on_error('ex')
o.on_completed()
return Disposable.empty()
return Observable.create_with_disposable(subscribe)
results = scheduler.start(create)
results.messages.assert_equal(on_completed(200))
def test_create_with_disposable_error():
scheduler = TestScheduler()
ex = 'ex'
def create():
def subscribe(o):
o.on_error(ex)
o.on_next(100)
o.on_error('foo')
o.on_completed()
return Disposable.empty()
return Observable.create_with_disposable(subscribe)
results = scheduler.start(create)
results.messages.assert_equal(on_error(200, ex))
def test_create_with_disposable_exception():
try:
        return Observable.create_with_disposable(lambda o: _raise('ex')).subscribe()
except RxException:
pass
def test_create_with_disposable_dispose():
scheduler = TestScheduler()
def create():
def subscribe(o):
d = BooleanDisposable()
o.on_next(1)
o.on_next(2)
def action1(scheduler, state):
if not d.is_disposed:
o.on_next(3)
scheduler.schedule_relative(600, action1)
def action2(scheduler, state):
if not d.is_disposed:
o.on_next(4)
scheduler.schedule_relative(700, action2)
def action3(scheduler, state):
if not d.is_disposed:
o.on_next(5)
scheduler.schedule_relative(900, action3)
def action4(scheduler, state):
if not d.is_disposed:
o.on_next(6)
scheduler.schedule_relative(1100, action4)
return d
return Observable.create_with_disposable(subscribe)
results = scheduler.start(create)
results.messages.assert_equal(on_next(200, 1), on_next(200, 2), on_next(800, 3), on_next(900, 4))
def test_create_with_disposable_observer_throws():
def subscribe1(o):
o.on_next(1)
return Disposable.empty()
def on_next(x):
_raise('ex')
try:
return Observable.create_with_disposable(subscribe1).subscribe(on_next)
except RxException:
pass
def subscribe2(o):
o.on_error('exception')
return Disposable.empty()
try:
return Observable.create_with_disposable(subscribe2).subscribe(on_error=lambda ex: _raise('ex'))
except RxException:
pass
def subscribe3(o):
o.on_completed()
return Disposable.empty()
try:
        return Observable.create_with_disposable(subscribe3).subscribe(on_completed=lambda: _raise('ex'))
except RxException:
pass
def test_range_zero():
scheduler = TestScheduler()
def create():
return Observable.range(0, 0, scheduler)
results = scheduler.start(create)
results.messages.assert_equal(on_completed(201))
def test_range_one():
scheduler = TestScheduler()
def create():
return Observable.range(0, 1, scheduler)
results = scheduler.start(create)
results.messages.assert_equal(on_next(201, 0), on_completed(202))
def test_range_five():
scheduler = TestScheduler()
def create():
return Observable.range(10, 5, scheduler)
results = scheduler.start(create)
results.messages.assert_equal(
on_next(201, 10),
on_next(202, 11),
on_next(203, 12),
on_next(204, 13),
on_next(205, 14),
on_completed(206))
def test_range_dispose():
scheduler = TestScheduler()
def create():
return Observable.range(-10, 5, scheduler)
results = scheduler.start(create, disposed=204)
results.messages.assert_equal(on_next(201, -10), on_next(202, -9), on_next(203, -8))
def test_repeat_observable_basic():
scheduler = TestScheduler()
xs = scheduler.create_cold_observable(
on_next(100, 1),
on_next(150, 2),
on_next(200, 3),
on_completed(250))
results = scheduler.start(lambda: xs.repeat())
results.messages.assert_equal(
on_next(300, 1),
on_next(350, 2),
on_next(400, 3),
on_next(550, 1),
on_next(600, 2),
on_next(650, 3),
on_next(800, 1),
on_next(850, 2),
on_next(900, 3))
xs.subscriptions.assert_equal(
subscribe(200, 450),
subscribe(450, 700),
subscribe(700, 950),
subscribe(950, 1000))
def test_repeat_observable_infinite():
scheduler = TestScheduler()
xs = scheduler.create_cold_observable(on_next(100, 1), on_next(150, 2), on_next(200, 3))
results = scheduler.start(lambda: xs.repeat())
results.messages.assert_equal(on_next(300, 1), on_next(350, 2), on_next(400, 3))
return xs.subscriptions.assert_equal(subscribe(200, 1000))
def test_repeat_observable_error():
results = None
scheduler = TestScheduler()
ex = 'ex'
xs = scheduler.create_cold_observable(on_next(100, 1), on_next(150, 2), on_next(200, 3), on_error(250, ex))
results = scheduler.start(lambda: xs.repeat())
results.messages.assert_equal(on_next(300, 1), on_next(350, 2), on_next(400, 3), on_error(450, ex))
return xs.subscriptions.assert_equal(subscribe(200, 450))
def test_repeat_observable_throws():
scheduler1 = TestScheduler()
xs = Observable.return_value(1, scheduler1).repeat()
xs.subscribe(lambda x: _raise('ex'))
try:
return scheduler1.start()
except RxException:
pass
scheduler2 = TestScheduler()
ys = Observable.throw_exception('ex', scheduler2).repeat()
ys.subscribe(lambda ex: _raise('ex'))
try:
return scheduler2.start()
except RxException:
pass
scheduler3 = TestScheduler()
zs = Observable.return_value(1, scheduler3).repeat()
    d = zs.subscribe(on_completed=lambda: _raise('ex'))
scheduler3.schedule_absolute(210, lambda: d.dispose())
scheduler3.start()
xss = Observable.create(lambda o: _raise('ex')).repeat()
try:
return xss.subscribe()
except RxException:
pass
def test_repeat_observable_repeat_count_basic():
scheduler = TestScheduler()
xs = scheduler.create_cold_observable(on_next(5, 1), on_next(10, 2), on_next(15, 3), on_completed(20))
results = scheduler.start(lambda: xs.repeat(3))
results.messages.assert_equal(on_next(205, 1), on_next(210, 2), on_next(215, 3), on_next(225, 1), on_next(230, 2), on_next(235, 3), on_next(245, 1), on_next(250, 2), on_next(255, 3), on_completed(260))
xs.subscriptions.assert_equal(subscribe(200, 220), subscribe(220, 240), subscribe(240, 260))
def test_repeat_observable_repeat_count_dispose():
scheduler = TestScheduler()
xs = scheduler.create_cold_observable(on_next(5, 1), on_next(10, 2), on_next(15, 3), on_completed(20))
results = scheduler.start(lambda: xs.repeat(3), disposed=231)
results.messages.assert_equal(on_next(205, 1), on_next(210, 2), on_next(215, 3), on_next(225, 1), on_next(230, 2))
return xs.subscriptions.assert_equal(subscribe(200, 220), subscribe(220, 231))
def test_repeat_observable_repeat_count_infinite():
scheduler = TestScheduler()
xs = scheduler.create_cold_observable(on_next(100, 1), on_next(150, 2), on_next(200, 3))
results = scheduler.start(lambda: xs.repeat(3))
results.messages.assert_equal(on_next(300, 1), on_next(350, 2), on_next(400, 3))
return xs.subscriptions.assert_equal(subscribe(200, 1000))
def test_repeat_observable_repeat_count_error():
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_cold_observable(on_next(100, 1), on_next(150, 2), on_next(200, 3), on_error(250, ex))
results = scheduler.start(lambda: xs.repeat(3))
results.messages.assert_equal(on_next(300, 1), on_next(350, 2), on_next(400, 3), on_error(450, ex))
return xs.subscriptions.assert_equal(subscribe(200, 450))
def test_repeat_observable_repeat_count_throws():
scheduler1 = TestScheduler()
xs = Observable.return_value(1, scheduler1).repeat(3)
xs.subscribe(lambda x: _raise('ex'))
try:
return scheduler1.start()
except RxException:
pass
scheduler2 = TestScheduler()
    ys = Observable.throw_exception('ex1', scheduler2).repeat(3)
ys.subscribe(lambda ex: _raise('ex2'))
try:
return scheduler2.start()
except RxException:
pass
scheduler3 = TestScheduler()
zs = Observable.return_value(1, scheduler3).repeat(100)
    d = zs.subscribe(on_completed=lambda: _raise('ex3'))
scheduler3.schedule_absolute(10, lambda: d.dispose())
scheduler3.start()
xss = Observable.create(lambda o: _raise('ex4')).repeat(3)
try:
return xss.subscribe()
except RxException:
pass
def test_retry_observable_basic():
scheduler = TestScheduler()
xs = scheduler.create_cold_observable(on_next(100, 1), on_next(150, 2), on_next(200, 3), on_completed(250))
results = scheduler.start(lambda: xs.retry())
results.messages.assert_equal(on_next(300, 1), on_next(350, 2), on_next(400, 3), on_completed(450))
xs.subscriptions.assert_equal(subscribe(200, 450))
def test_retry_observable_infinite():
scheduler = TestScheduler()
xs = scheduler.create_cold_observable(on_next(100, 1), on_next(150, 2), on_next(200, 3))
results = scheduler.start(lambda: xs.retry())
results.messages.assert_equal(on_next(300, 1), on_next(350, 2), on_next(400, 3))
return xs.subscriptions.assert_equal(subscribe(200, 1000))
def test_retry_observable_error():
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_cold_observable(on_next(100, 1), on_next(150, 2), on_next(200, 3), on_error(250, ex))
results = scheduler.start(lambda: xs.retry(), disposed=1100)
results.messages.assert_equal(on_next(300, 1), on_next(350, 2), on_next(400, 3), on_next(550, 1), on_next(600, 2), on_next(650, 3), on_next(800, 1), on_next(850, 2), on_next(900, 3), on_next(1050, 1))
return xs.subscriptions.assert_equal(subscribe(200, 450), subscribe(450, 700), subscribe(700, 950), subscribe(950, 1100))
def test_retry_observable_throws():
scheduler1 = TestScheduler()
xs = Observable.return_value(1, scheduler1).retry()
xs.subscribe(lambda x: _raise('ex'))
try:
return scheduler1.start()
except RxException:
pass
scheduler2 = TestScheduler()
ys = Observable.throw_exception('ex', scheduler2).retry()
d = ys.subscribe(on_error=lambda ex: _raise('ex'))
scheduler2.schedule_absolute(210, lambda: d.dispose())
scheduler2.start()
scheduler3 = TestScheduler()
zs = Observable.return_value(1, scheduler3).retry()
zs.subscribe(on_completed=lambda: _raise('ex'))
try:
return scheduler3.start()
except RxException:
pass
xss = Observable.create(lambda o: _raise('ex')).retry()
try:
return xss.subscribe()
except RxException:
pass
def test_retry_observable_retry_count_basic():
scheduler = TestScheduler()
ex = 'ex'
xs = scheduler.create_cold_observable(on_next(5, 1), on_next(10, 2), on_next(15, 3), on_error(20, ex))
results = scheduler.start(lambda: xs.retry(3))
results.messages.assert_equal(on_next(205, 1), on_next(210, 2), on_next(215, 3), on_next(225, 1), on_next(230, 2), on_next(235, 3), on_next(245, 1), on_next(250, 2), on_next(255, 3), on_error(260, ex))
xs.subscriptions.assert_equal(subscribe(200, 220), subscribe(220, 240), subscribe(240, 260))
def test_retry_observable_retry_count_dispose():
scheduler = TestScheduler()
ex = 'ex'
xs = scheduler.create_cold_observable(on_next(5, 1), on_next(10, 2), on_next(15, 3), on_error(20, ex))
results = scheduler.start(lambda: xs.retry(3), disposed=231)
results.messages.assert_equal(on_next(205, 1), on_next(210, 2), on_next(215, 3), on_next(225, 1), on_next(230, 2))
xs.subscriptions.assert_equal(subscribe(200, 220), subscribe(220, 231))
def test_retry_observable_retry_count_infinite():
scheduler = TestScheduler()
ex = 'ex'
xs = scheduler.create_cold_observable(on_next(100, 1), on_next(150, 2), on_next(200, 3))
results = scheduler.start(lambda: xs.retry(3))
results.messages.assert_equal(on_next(300, 1), on_next(350, 2), on_next(400, 3))
xs.subscriptions.assert_equal(subscribe(200, 1000))
def test_retry_observable_retry_count_completed():
scheduler = TestScheduler()
ex = 'ex'
xs = scheduler.create_cold_observable(on_next(100, 1), on_next(150, 2), on_next(200, 3), on_completed(250))
results = scheduler.start(lambda: xs.retry(3))
results.messages.assert_equal(on_next(300, 1), on_next(350, 2), on_next(400, 3), on_completed(450))
xs.subscriptions.assert_equal(subscribe(200, 450))
def test_retry_observable_retry_count_throws():
scheduler1 = TestScheduler()
xs = Observable.return_value(1, scheduler1).retry(3)
xs.subscribe(lambda x: _raise('ex'))
try:
return scheduler1.start()
except RxException:
pass
scheduler2 = TestScheduler()
    ys = Observable.throw_exception('ex', scheduler2).retry(100)
d = ys.subscribe(on_error=lambda ex: _raise('ex'))
scheduler2.schedule_absolute(10, lambda: d.dispose())
scheduler2.start()
scheduler3 = TestScheduler()
zs = Observable.return_value(1, scheduler3).retry(100)
    zs.subscribe(on_completed=lambda: _raise('ex'))
try:
return scheduler3.start()
except RxException:
pass
xss = Observable.create(lambda o: _raise('ex')).retry(100)
try:
return xss.subscribe()
except RxException:
pass
def test_repeat_value_count_zero():
scheduler = TestScheduler()
def create():
return Observable.repeat(42, 0, scheduler)
results = scheduler.start(create)
results.messages.assert_equal(on_completed(200))
def test_repeat_value_count_one():
scheduler = TestScheduler()
def create():
return Observable.repeat(42, 1, scheduler)
results = scheduler.start(create)
results.messages.assert_equal(on_next(201, 42), on_completed(201))
def test_repeat_value_count_ten():
scheduler = TestScheduler()
def create():
return Observable.repeat(42, 10, scheduler)
results = scheduler.start(create)
results.messages.assert_equal(on_next(201, 42), on_next(202, 42), on_next(203, 42), on_next(204, 42), on_next(205, 42), on_next(206, 42), on_next(207, 42), on_next(208, 42), on_next(209, 42), on_next(210, 42), on_completed(210))
def test_repeat_value_count_dispose():
scheduler = TestScheduler()
def create():
return Observable.repeat(42, 10, scheduler)
results = scheduler.start(create, disposed=207)
results.messages.assert_equal(on_next(201, 42), on_next(202, 42), on_next(203, 42), on_next(204, 42), on_next(205, 42), on_next(206, 42))
def test_repeat_value():
scheduler = TestScheduler()
def create():
return Observable.repeat(42, -1, scheduler)
results = scheduler.start(create, disposed=207)
results.messages.assert_equal(on_next(201, 42), on_next(202, 42), on_next(203, 42), on_next(204, 42), on_next(205, 42), on_next(206, 42))
if __name__ == '__main__':
test_using_throw_resource_usage()
| Reactive-Extensions/RxPy | tests/test_observable_creation.py | Python | apache-2.0 | 34,022 | 0.006937 |
from .ownership import * # noqa
from .powerdns import * # noqa
from .requests import * # noqa
from .templates import * # noqa
from .tsigkeys import * # noqa
| allegro/django-powerdns-dnssec | powerdns/models/__init__.py | Python | bsd-2-clause | 162 | 0 |
import paddle.v2 as paddle
__all__ = ['googlenet']
def inception(name, input, channels, filter1, filter3R, filter3, filter5R,
filter5, proj):
cov1 = paddle.layer.img_conv(
name=name + '_1',
input=input,
filter_size=1,
num_channels=channels,
num_filters=filter1,
stride=1,
padding=0)
cov3r = paddle.layer.img_conv(
name=name + '_3r',
input=input,
filter_size=1,
num_channels=channels,
num_filters=filter3R,
stride=1,
padding=0)
cov3 = paddle.layer.img_conv(
name=name + '_3',
input=cov3r,
filter_size=3,
num_filters=filter3,
stride=1,
padding=1)
cov5r = paddle.layer.img_conv(
name=name + '_5r',
input=input,
filter_size=1,
num_channels=channels,
num_filters=filter5R,
stride=1,
padding=0)
cov5 = paddle.layer.img_conv(
name=name + '_5',
input=cov5r,
filter_size=5,
num_filters=filter5,
stride=1,
padding=2)
pool1 = paddle.layer.img_pool(
name=name + '_max',
input=input,
pool_size=3,
num_channels=channels,
stride=1,
padding=1)
covprj = paddle.layer.img_conv(
name=name + '_proj',
input=pool1,
filter_size=1,
num_filters=proj,
stride=1,
padding=0)
cat = paddle.layer.concat(name=name, input=[cov1, cov3, cov5, covprj])
return cat
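# Note (sketch): the concatenation above yields filter1 + filter3 + filter5 + proj
# output channels; e.g. ince3a in googlenet() below produces
# 64 + 128 + 32 + 32 = 256 channels, the channel count then fed into ince3b.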
def googlenet(input, class_dim):
# stage 1
conv1 = paddle.layer.img_conv(
name="conv1",
input=input,
filter_size=7,
num_channels=3,
num_filters=64,
stride=2,
padding=3)
pool1 = paddle.layer.img_pool(
name="pool1", input=conv1, pool_size=3, num_channels=64, stride=2)
# stage 2
conv2_1 = paddle.layer.img_conv(
name="conv2_1",
input=pool1,
filter_size=1,
num_filters=64,
stride=1,
padding=0)
conv2_2 = paddle.layer.img_conv(
name="conv2_2",
input=conv2_1,
filter_size=3,
num_filters=192,
stride=1,
padding=1)
pool2 = paddle.layer.img_pool(
name="pool2", input=conv2_2, pool_size=3, num_channels=192, stride=2)
# stage 3
ince3a = inception("ince3a", pool2, 192, 64, 96, 128, 16, 32, 32)
ince3b = inception("ince3b", ince3a, 256, 128, 128, 192, 32, 96, 64)
pool3 = paddle.layer.img_pool(
name="pool3", input=ince3b, num_channels=480, pool_size=3, stride=2)
# stage 4
ince4a = inception("ince4a", pool3, 480, 192, 96, 208, 16, 48, 64)
ince4b = inception("ince4b", ince4a, 512, 160, 112, 224, 24, 64, 64)
ince4c = inception("ince4c", ince4b, 512, 128, 128, 256, 24, 64, 64)
ince4d = inception("ince4d", ince4c, 512, 112, 144, 288, 32, 64, 64)
ince4e = inception("ince4e", ince4d, 528, 256, 160, 320, 32, 128, 128)
pool4 = paddle.layer.img_pool(
name="pool4", input=ince4e, num_channels=832, pool_size=3, stride=2)
# stage 5
ince5a = inception("ince5a", pool4, 832, 256, 160, 320, 32, 128, 128)
ince5b = inception("ince5b", ince5a, 832, 384, 192, 384, 48, 128, 128)
pool5 = paddle.layer.img_pool(
name="pool5",
input=ince5b,
num_channels=1024,
pool_size=7,
stride=7,
pool_type=paddle.pooling.Avg())
dropout = paddle.layer.addto(
input=pool5,
layer_attr=paddle.attr.Extra(drop_rate=0.4),
act=paddle.activation.Linear())
out = paddle.layer.fc(input=dropout,
size=class_dim,
act=paddle.activation.Softmax())
# fc for output 1
pool_o1 = paddle.layer.img_pool(
name="pool_o1",
input=ince4a,
num_channels=512,
pool_size=5,
stride=3,
pool_type=paddle.pooling.Avg())
conv_o1 = paddle.layer.img_conv(
name="conv_o1",
input=pool_o1,
filter_size=1,
num_filters=128,
stride=1,
padding=0)
fc_o1 = paddle.layer.fc(name="fc_o1",
input=conv_o1,
size=1024,
layer_attr=paddle.attr.Extra(drop_rate=0.7),
act=paddle.activation.Relu())
out1 = paddle.layer.fc(input=fc_o1,
size=class_dim,
act=paddle.activation.Softmax())
# fc for output 2
pool_o2 = paddle.layer.img_pool(
name="pool_o2",
input=ince4d,
num_channels=528,
pool_size=5,
stride=3,
pool_type=paddle.pooling.Avg())
conv_o2 = paddle.layer.img_conv(
name="conv_o2",
input=pool_o2,
filter_size=1,
num_filters=128,
stride=1,
padding=0)
fc_o2 = paddle.layer.fc(name="fc_o2",
input=conv_o2,
size=1024,
layer_attr=paddle.attr.Extra(drop_rate=0.7),
act=paddle.activation.Relu())
out2 = paddle.layer.fc(input=fc_o2,
size=class_dim,
act=paddle.activation.Softmax())
return out, out1, out2
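# Usage sketch (the 224x224 RGB input size and 102-class output are assumptions,
# not taken from this file):
#   image = paddle.layer.data(
#       name="image", type=paddle.data_type.dense_vector(3 * 224 * 224))
#   out, out1, out2 = googlenet(image, class_dim=102)
# out is the main softmax; out1 and out2 are the two auxiliary classifiers.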
| kuke/models | legacy/image_classification/googlenet.py | Python | apache-2.0 | 5,341 | 0 |
# Copyright 2015 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.common import utils
METADATA_PROXY_HANDLER_OPTS = [
cfg.StrOpt('admin_user',
help=_("Admin user")),
cfg.StrOpt('admin_password',
help=_("Admin password"),
secret=True),
cfg.StrOpt('admin_tenant_name',
help=_("Admin tenant name")),
cfg.StrOpt('auth_url',
help=_("Authentication URL")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('auth_region',
help=_("Authentication region")),
cfg.BoolOpt('auth_insecure',
default=False,
help=_("Turn off verification of the certificate for"
" ssl")),
cfg.StrOpt('auth_ca_cert',
help=_("Certificate Authority public key (CA cert) "
"file for ssl")),
cfg.StrOpt('endpoint_type',
default='adminURL',
help=_("Network service endpoint type to pull from "
"the keystone catalog")),
cfg.StrOpt('nova_metadata_ip', default='127.0.0.1',
help=_("IP address used by Nova metadata server.")),
cfg.IntOpt('nova_metadata_port',
default=8775,
help=_("TCP Port used by Nova metadata server.")),
cfg.StrOpt('metadata_proxy_shared_secret',
default='',
help=_('Shared secret to sign instance-id request'),
secret=True),
cfg.StrOpt('nova_metadata_protocol',
default='http',
choices=['http', 'https'],
help=_("Protocol to access nova metadata, http or https")),
cfg.BoolOpt('nova_metadata_insecure', default=False,
help=_("Allow to perform insecure SSL (https) requests to "
"nova metadata")),
cfg.StrOpt('nova_client_cert',
default='',
help=_("Client certificate for nova metadata api server.")),
cfg.StrOpt('nova_client_priv_key',
default='',
help=_("Private key of client certificate."))
]
UNIX_DOMAIN_METADATA_PROXY_OPTS = [
cfg.StrOpt('metadata_proxy_socket',
default='$state_path/metadata_proxy',
help=_('Location for Metadata Proxy UNIX domain socket')),
cfg.IntOpt('metadata_workers',
default=utils.cpu_count() // 2,
help=_('Number of separate worker processes for metadata '
'server')),
cfg.IntOpt('metadata_backlog',
default=4096,
help=_('Number of backlog requests to configure the '
'metadata server socket with'))
]
| projectcalico/calico-neutron | neutron/agent/metadata/config.py | Python | apache-2.0 | 3,403 | 0 |
"""Add TagAlias table
Revision ID: 9a0f78ff57d6
Revises: d19881f4c045
Create Date: 2017-03-19 15:34:30.271997
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9a0f78ff57d6'
down_revision = 'd19881f4c045'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('tag_alias',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('alias_name', sa.String(), nullable=False),
sa.Column('tag_id', sa.Integer(), nullable=True),
sa.Column('guild_id', sa.BigInteger(), nullable=True),
sa.Column('user_id', sa.BigInteger(), nullable=True),
sa.ForeignKeyConstraint(['guild_id'], ['guild.id'], ),
sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tag_alias')
# ### end Alembic commands ###
| MJB47/Jokusoramame | migrations/versions/9a0f78ff57d6_add_tagalias_table.py | Python | mit | 1,126 | 0.008881 |
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout, update_session_auth_hash
from django.core.exceptions import ObjectDoesNotExist
from django.core.cache import cache
from django.http import HttpResponse, HttpResponseRedirect
from django.db import transaction
from django_redis import get_redis_connection
from users.models import PastebinUser
from users.forms import RegisterForm, LoginForm, ChangePreferencesForm, ChangePasswordForm, VerifyPasswordForm
from users.models import Favorite, SiteSettings
from pastes.models import Paste
from pastebin.util import Paginator
import math
def register_view(request):
"""
Register a new user
"""
# Check if the user is authenticated
if request.user.is_authenticated():
# User is already authenticated
return render(request, 'users/register/already_logged_in.html')
else:
register_form = RegisterForm(request.POST or None)
if request.method == 'POST': # Form data was submitted
if register_form.is_valid(): # Form data is valid
# Create the user
with transaction.atomic():
user = User.objects.create_user(register_form.cleaned_data['username'],
"N/A", # we don't deal with email addresses
register_form.cleaned_data['password'])
PastebinUser.create_user(user)
# TODO: Show a different message if the registration fails
return render(request, 'users/register/register_success.html')
# Show the registration page
return render(request, "users/register/register.html", { "form": register_form })
def login_view(request):
"""
Log the user in
"""
# Check if the user is authenticated
if request.user.is_authenticated():
# User is authenticated
return render(request, "users/login/logged_in.html")
else:
login_form = LoginForm(request.POST or None)
# User is NOT authenticated
if request.method == 'POST': # Form data was submitted
if login_form.is_valid(): # Form data is valid
user = authenticate(username = login_form.cleaned_data['username'],
password = login_form.cleaned_data['password'])
if user is not None and user.is_active:
login(request, user)
return render(request, "users/login/logged_in.html")
else:
# Couldn't authenticate, either the username or password is wrong
error = "User doesn't exist or the password is incorrect."
login_form._errors['password'] = login_form.error_class([error])
# Show the login form
return render(request, "users/login/login.html", { "form": login_form })
def logout_view(request):
"""
Logout the user and show the logout page
"""
if request.user.is_authenticated():
logout(request)
return render(request, 'users/logout/logged_out.html')
def profile(request, username, tab="home", page=1):
"""
Show a publicly visible profile page
"""
page = int(page)
try:
profile_user = cache.get("user:%s" % username)
if profile_user == None:
profile_user = User.objects.get(username=username)
cache.set("user:%s" % username, profile_user)
elif profile_user == False:
return render(request, "users/profile/profile_error.html", {"reason": "not_found"}, status=404)
except ObjectDoesNotExist:
cache.set("user:%s" % username, False)
return render(request, "users/profile/profile_error.html", {"reason": "not_found"}, status=404)
# Get user's settings
profile_settings = cache.get("site_settings:%s" % username)
if profile_settings == None:
try:
profile_settings = SiteSettings.objects.get(user=profile_user)
except ObjectDoesNotExist:
profile_settings = SiteSettings(user=profile_user)
profile_settings.save()
cache.set("site_settings:%s" % username, profile_settings)
if not profile_user.is_active:
return render(request, "users/profile/profile_error.html", {"reason": "not_found"}, status=404)
if request.user != profile_user:
total_paste_count = cache.get("user_public_paste_count:%s" % profile_user.username)
else:
total_paste_count = cache.get("user_paste_count:%s" % profile_user.username)
# If user is viewing his own profile, also include hidden pastes
if total_paste_count == None and request.user != profile_user:
total_paste_count = Paste.objects.filter(user=profile_user, removed=Paste.NO_REMOVAL).filter(hidden=False).count()
cache.set("user_public_paste_count:%s" % profile_user.username, total_paste_count)
elif total_paste_count == None and request.user == profile_user:
total_paste_count = Paste.objects.filter(user=profile_user, removed=Paste.NO_REMOVAL).count()
cache.set("user_paste_count:%s" % profile_user.username, total_paste_count)
total_favorite_count = cache.get("user_favorite_count:%s" % profile_user.username)
if total_favorite_count == None:
total_favorite_count = Favorite.objects.filter(user=profile_user).count()
cache.set("user_favorite_count:%s" % profile_user.username, total_favorite_count)
args = {"profile_user": profile_user,
"profile_settings": profile_settings,
"current_page": page,
"tab": tab,
"total_favorite_count": total_favorite_count,
"total_paste_count": total_paste_count}
if tab == "home":
return home(request, args)
elif tab == "pastes":
return pastes(request, profile_user, args, page)
elif tab == "favorites":
return favorites(request, profile_user, args, page)
# The remaining pages require authentication, so redirect through settings()
else:
return settings(request, profile_user, args, tab)
def settings(request, username, args={}, tab="change_password"):
"""
Show a page which allows the user to change his settings
"""
if not request.user.is_authenticated():
return render(request, "users/settings/settings_error.html", {"reason": "not_logged_in"})
profile_user = User.objects.get(username=username)
if request.user.id != profile_user.id:
return render(request, "users/settings/settings_error.html", {"reason": "incorrect_user"})
if tab == "change_preferences":
return change_preferences(request, args)
if tab == "change_password":
return change_password(request, args)
elif tab == "delete_account":
return delete_account(request, args)
def home(request, args):
"""
Display user profile's home with the most recent pastes and favorites
"""
# Get favorites only if user has made them public
if args["profile_settings"].public_favorites or request.user == args["profile_user"]:
args["favorites"] = cache.get("profile_favorites:%s" % args["profile_user"].username)
if args["favorites"] == None:
args["favorites"] = Favorite.objects.filter(user=args["profile_user"]).order_by('-added').select_related('paste')[:10]
cache.set("profile_favorites:%s" % args["profile_user"].username, args["favorites"])
if request.user == args["profile_user"]:
args["pastes"] = cache.get("profile_pastes:%s" % args["profile_user"].username)
if args["pastes"] == None:
args["pastes"] = Paste.objects.get_pastes(args["profile_user"], include_hidden=True, count=10)
cache.set("profile_pastes:%s" % args["profile_user"].username, args["pastes"])
else:
args["pastes"] = cache.get("profile_public_pastes:%s" % args["profile_user"].username)
if args["pastes"] == None:
args["pastes"] = Paste.objects.get_pastes(args["profile_user"], include_hidden=False, count=10)
cache.set("profile_public_pastes:%s" % args["profile_user"].username, args["pastes"])
return render(request, "users/profile/home/home.html", args)
def pastes(request, user, args, page=1):
"""
Show all of user's pastes
"""
PASTES_PER_PAGE = 15
args["total_pages"] = int(math.ceil(float(args["total_paste_count"]) / float(PASTES_PER_PAGE)))
if page > args["total_pages"]:
page = max(int(args["total_pages"]), 1)
offset = (page-1) * PASTES_PER_PAGE
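    # Listings are cached per user and per page; owners get a separate cache key whose
    # entries also include their hidden pastes.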
if request.user == user:
args["pastes"] = cache.get("user_pastes:%s:%s" % (user.username, page))
if args["pastes"] == None:
args["pastes"] = Paste.objects.get_pastes(user, count=PASTES_PER_PAGE, include_hidden=True, offset=offset)
cache.set("user_pastes:%s:%s" % (user.username, page), args["pastes"])
else:
args["pastes"] = cache.get("user_public_pastes:%s:%s" % (user.username, page))
if args["pastes"] == None:
args["pastes"] = Paste.objects.get_pastes(user, count=PASTES_PER_PAGE, include_hidden=False, offset=offset)
cache.set("user_public_pastes:%s:%s" % (user.username, page), args["pastes"])
args["pages"] = Paginator.get_pages(page, PASTES_PER_PAGE, args["total_paste_count"])
args["current_page"] = page
return render(request, "users/profile/pastes/pastes.html", args)
def favorites(request, user, args, page=1):
"""
Show all of user's favorites
"""
FAVORITES_PER_PAGE = 15
if not args["profile_settings"].public_favorites and request.user != args["profile_user"]:
# Don't show pastes to other users if the user doesn't want to
return render(request, "users/profile/favorites/favorites_hidden.html", args)
args["total_pages"] = int(math.ceil(float(args["total_favorite_count"]) / float(FAVORITES_PER_PAGE)))
if page > args["total_pages"]:
page = max(int(args["total_pages"]), 1)
start = (page-1) * FAVORITES_PER_PAGE
end = start + FAVORITES_PER_PAGE
args["favorites"] = cache.get("user_favorites:%s:%s" % (user.username, page))
if args["favorites"] == None:
args["favorites"] = Favorite.objects.filter(user=user).select_related("paste")[start:end]
cache.set("user_favorites:%s:%s" % (user.username, page), args["favorites"])
args["pages"] = Paginator.get_pages(page, FAVORITES_PER_PAGE, args["total_favorite_count"])
args["current_page"] = page
return render(request, "users/profile/favorites/favorites.html", args)
def remove_favorite(request):
"""
Remove a favorite and redirect the user back to the favorite listing
"""
if "favorite_id" not in request.POST or not int(request.POST["favorite_id"]):
return HttpResponse("Favorite ID was not valid.", status=422)
if "page" not in request.POST or not int(request.POST["page"]):
return HttpResponse("Page was not valid.", status=422)
favorite_id = int(request.POST["favorite_id"])
page = int(request.POST["page"])
favorite = Favorite.objects.get(id=favorite_id)
if not request.user.is_authenticated():
return HttpResponse("You are not authenticated", status=422)
if favorite.user != request.user:
return HttpResponse("You can't delete someone else's favorites.", status=422)
favorite.delete()
cache.delete("profile_favorites:%s" % request.user.username)
cache.delete("user_favorite_count:%s" % request.user.username)
return HttpResponseRedirect(reverse("users:favorites", kwargs={"username": request.user.username,
"page": page}))
def change_preferences(request, args):
"""
Change various profile-related preferences
"""
site_settings = SiteSettings.objects.get(user=request.user)
form = ChangePreferencesForm(request.POST or None, initial={"public_favorites": site_settings.public_favorites})
preferences_changed = False
if form.is_valid():
cleaned_data = form.cleaned_data
site_settings.public_favorites = cleaned_data["public_favorites"]
site_settings.save()
cache.set("site_settings:%s" % request.user.username, site_settings)
preferences_changed = True
args["form"] = form
args["preferences_changed"] = preferences_changed
return render(request, "users/settings/change_preferences/change_preferences.html", args)
def change_password(request, args):
"""
Change the user's password
"""
form = ChangePasswordForm(request.POST or None, user=request.user)
password_changed = False
if form.is_valid():
cleaned_data = form.cleaned_data
request.user.set_password(cleaned_data["new_password"])
request.user.save()
# Session auth hash needs to be updated after changing the password
# or the user will be logged out
update_session_auth_hash(request, request.user)
password_changed = True
args["form"] = form
args["password_changed"] = password_changed
return render(request, "users/settings/change_password/change_password.html", args)
def delete_account(request, args):
"""
Delete the user's account
"""
form = VerifyPasswordForm(request.POST or None, user=request.user)
if form.is_valid():
PastebinUser.delete_user(request.user)
logout(request)
return render(request, "users/settings/delete_account/account_deleted.html")
args["form"] = form
return render(request, "users/settings/delete_account/delete_account.html", args) | Matoking/pastebin-django | users/views.py | Python | unlicense | 14,332 | 0.012071 |
# Imports
from django.conf.urls import url
from .models import OurFoto
from .views import HomeFoto, ShowFoto, DeleteFoto, AddFoto, \
EditFoto, SearchFoto
# Urls for app
urlpatterns = [
url(r'^$', HomeFoto.as_view(model = OurFoto), name = 'index'),
url(r'^foto/(?P<pk>\d+)/$', ShowFoto.as_view(model = OurFoto), name = 'foto'),
url(r'^add_foto/$', AddFoto.as_view(), name = 'add_foto'),
url(r'^edit_foto/(?P<pk>\d+)/$', EditFoto.as_view(model = OurFoto), name = 'edit_foto'),
url(r'^search_foto/$', SearchFoto.as_view(), name = 'search_foto'),
url(r'^delete_foto/(?P<pk>\d+)/$', DeleteFoto.as_view(model = OurFoto), name = 'delete_foto')
]
| Sergey19940808/OurFoto | repository_our_fotos/urls.py | Python | mit | 671 | 0.034277 |
from django.template import Library
from django.conf import settings
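# Pick a domain resolver at import time: the sites framework if installed, then a
# SITE_DOMAIN setting, and finally a placeholder domain as a last resort.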
if "django.contrib.sites" in settings.INSTALLED_APPS:
from django.contrib.sites.models import Site
current_domain = lambda: Site.objects.get_current().domain
elif getattr(settings, "SITE_DOMAIN", None):
current_domain = lambda: settings.SITE_DOMAIN
else:
current_domain = lambda: "example.com"
register = Library()
def fully_qualified(url):
# if it's not a string the rest of this fn will bomb
if not isinstance(url, basestring): return ""
if url.startswith('http'):
return url
elif url.startswith("/"):
return 'http://%s%s' % (current_domain(), url)
else:
return 'http://%s' % url
@register.inclusion_tag('social_tags/twitter.html')
def twitter_share(url=None):
url = fully_qualified(url)
return locals()
@register.inclusion_tag('social_tags/facebook.html')
def facebook_share(url=None):
url = fully_qualified(url)
return locals()
@register.inclusion_tag('social_tags/linkedin.html')
def linkedin_share(url=None):
url = fully_qualified(url)
return locals()
@register.inclusion_tag('social_tags/email.html')
def email_share(url=None):
url = fully_qualified(url)
return locals()
@register.inclusion_tag('social_tags/google.html')
def google_plus(url=None):
url = fully_qualified(url)
return locals() | Rootbuzz/Django-Socialtags | socialtags/templatetags/social_tags.py | Python | mit | 1,400 | 0.007143 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
from userena.models import UserenaBaseProfile
from caching.base import CachingManager, CachingMixin
from apps.utils.db import retrieve_in_order_from_db
from apps.utils import poster
from libs.cassandra import CassandraConnection
class Person(CachingMixin, models.Model):
"""
Person model.
"""
person_id = models.PositiveIntegerField(primary_key=True)
name = models.CharField(max_length=150)
objects = CachingManager()
def __unicode__(self):
return self.name
class Genre(CachingMixin, models.Model):
"""
Film genre model.
"""
genre_id = models.PositiveIntegerField(primary_key=True)
name = models.CharField(max_length=50)
objects = CachingManager()
def __unicode__(self):
return self.name
class Country(CachingMixin, models.Model):
"""
Film country model.
"""
country_id = models.PositiveIntegerField(primary_key=True)
name = models.CharField(max_length=50)
objects = CachingManager()
def __unicode__(self):
return self.name
class Language(CachingMixin, models.Model):
"""
    Film language model.
"""
language_id = models.PositiveIntegerField(primary_key=True)
name = models.CharField(max_length=50)
objects = CachingManager()
def __unicode__(self):
return self.name
class Film(CachingMixin, models.Model):
"""
Film model
"""
film_id = models.PositiveIntegerField(primary_key=True)
imdb_id = models.PositiveIntegerField(unique=True)
netflix_id = models.PositiveIntegerField(null=True, unique=True)
title = models.CharField(max_length=300)
year = models.PositiveSmallIntegerField(null=True)
runtime = models.PositiveSmallIntegerField(null=True)
rating = models.CharField(max_length=24, null=True)
released = models.DateField(null=True)
plot = models.TextField(null=True)
metascore = models.PositiveIntegerField(null=True)
imdb_rating = models.FloatField(null=True, default=0)
imdb_votes = models.PositiveIntegerField(null=True, default=0)
fullplot = models.TextField(null=True)
poster = models.URLField(null=True)
awards = models.PositiveIntegerField(null=True)
updated = models.DateField(null=True)
poster_file = models.ImageField(upload_to='posters', null=True)
n_votes = models.PositiveIntegerField(default=0)
sum_votes = models.FloatField(default=0)
directors = models.ManyToManyField(Person, related_name="director")
writers = models.ManyToManyField(Person, related_name="writer")
casts = models.ManyToManyField(Person, related_name="cast")
genres = models.ManyToManyField(Genre)
countries = models.ManyToManyField(Country)
languages = models.ManyToManyField(Language)
objects = CachingManager()
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('films:details', args=[self.film_id])
def get_poster(self):
if not self.poster_file:
poster.retrieve(self)
return self.poster_file
@property
def score(self):
"""
Calculate film score:
            score = (imdb_votes / total_votes) * (imdb_rating / 2) + sum_votes / total_votes
        where total_votes = imdb_votes + n_votes
:return:
"""
total_votes = self.imdb_votes + self.n_votes
if total_votes:
score = (self.imdb_votes * self.imdb_rating / 2.0 + self.sum_votes) / total_votes
else:
score = 0.0
return score
@property
def similar_films(self):
from libs.lucene import FilmSearcher
with FilmSearcher() as searcher:
return searcher.more_like_this(self)
def set_preference(self, user):
"""
Set the preference rated by the given user to the film.
:param user: user
"""
query = "SELECT score FROM ratings " \
"WHERE user = %(user)s AND item = %(item)s"
parameters = {
'user': user.user.id,
'item': self.film_id
}
# Retrieve ratings from Cassandra
with CassandraConnection() as db:
try:
self.preference = db.execute(query, parameters)[0].score
except IndexError:
self.preference = None
def rate(self, user, score):
"""
        Update the film model with a new rating and remove the recommendation if one exists.
:param user: user
:param score: score
"""
score = float(score)
self.set_preference(user)
insert_query = "INSERT INTO ratings (user, item, score) " \
"VALUES ( %(user)s, %(item)s, %(score)s )"
select_query = "SELECT relevance FROM recommendations " \
"WHERE user = %(user)s AND item = %(item)s"
parameters = {
'user': user.user.id,
'item': self.film_id,
'score': float(score)
}
with CassandraConnection() as db:
db.execute(insert_query, parameters)
result = db.execute(select_query, parameters)
if result:
delete_query = "DELETE FROM recommendations " \
"WHERE user = %(user)s " \
"AND relevance = %(relevance)s " \
"AND item = %(item)s"
parameters['relevance'] = result[0].relevance
db.execute(delete_query, parameters)
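        # If the user had rated this film before, only the difference from the old score
        # is added to sum_votes; otherwise this counts as a brand new vote.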
if self.preference:
score -= self.preference
else:
self.n_votes += 1
self.sum_votes += score
self.save()
class MyUser(CachingMixin, UserenaBaseProfile):
user = models.OneToOneField(User, unique=True, related_name='profile')
objects = CachingManager()
def get_preferences_for_films(self, films):
"""
Get the ratings for the given films
:param films: list of Film objects
:return: list of films with preference attribute set
"""
# query = "SELECT item, score FROM ratings WHERE user = %(user)s AND item IN %(films)s"
query = "SELECT item, score FROM ratings WHERE user = %(user)s AND item IN (" \
+ ", ".join([str(film.film_id) for film in films]) + ")"
parameters = {'user': self.user.id}
# Retrieve ratings from Cassandra
with CassandraConnection() as db:
ratings = db.execute(query, parameters)
# Set rating field
ratings_dict = {item: score for (item, score) in ratings}
for film in films:
film.preference = ratings_dict.get(film.film_id, None)
return films
def get_rated_films(self, last=None, count=12):
"""
Gets a list of rated films by self.
:param last: id of the last film queried or None
:param count: number of elements to be retrieved
:return: list of films with preference attribute set
"""
parameters = {
'user': self.user.id,
'limit': count
}
if last:
query = "SELECT item, score " \
"FROM ratings " \
"WHERE user = %(user)s AND item > %(last)s " \
"LIMIT " \
"%(limit)s"
parameters['last'] = last
else:
query = "SELECT item, score " \
"FROM ratings " \
"WHERE user = %(user)s " \
"LIMIT %(limit)s"
# Retrieve ratings from Cassandra
with CassandraConnection() as db:
ratings = db.execute(query, parameters)
# Retrieve films info from the RDBMS
ids = [item for (item, score) in ratings]
films = retrieve_in_order_from_db(Film, ids)
# Set rating field
ratings_dict = {item: score for (item, score) in ratings}
for film in films:
film.preference = ratings_dict.get(film.film_id, None)
return films
def get_recommendations(self, last=None, count=12):
"""
Gets a list of recommended films for self.
:param last: (relevance, item) tuple of the last film queried or None
:param count: number of elements to be retrieved
:return: list of films where relevance attribute is set
"""
parameters = {
'user': self.user.id,
'limit': count
}
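        # Paging continues from the relevance of the last row already shown; the
        # recommendations table is assumed to be clustered by relevance in Cassandra.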
if last:
query = "SELECT item, relevance " \
"FROM recommendations " \
"WHERE user = %(user)s " \
"AND relevance <= %(last_relevance)s " \
" LIMIT " \
"%(limit)s"
parameters['last_relevance'] = last[0]
parameters['last_item'] = last[1]
else:
query = "SELECT item, relevance " \
"FROM recommendations " \
"WHERE user = %(user)s " \
"LIMIT %(limit)s"
# Retrieve recommendations from Cassandra
with CassandraConnection() as db:
recommendations = db.execute(query, parameters)
# Retrieve films info from the RDBMS
ids = [item for (item, score) in recommendations]
films = retrieve_in_order_from_db(Film, ids)
# Set relevance field
recommendations_dict = {item: score for (item, score) in recommendations}
for film in films:
film.relevance = recommendations_dict.get(film.film_id, None)
return films
| dvalcarce/filmyou-web | src/apps/films/models.py | Python | apache-2.0 | 9,791 | 0.000511 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ._common import unittest
from datetime import datetime, timedelta, date
from dateutil.tz import tzoffset
from dateutil.parser import *
import six
from six import assertRaisesRegex, PY3
from six.moves import StringIO
class ParserTest(unittest.TestCase):
def setUp(self):
self.tzinfos = {"BRST": -10800}
self.brsttz = tzoffset("BRST", -10800)
self.default = datetime(2003, 9, 25)
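        # Most of the "Strip" tests below rely on this default to fill in the date
        # components missing from the parsed string.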
# Parser should be able to handle bytestring and unicode
base_str = '2014-05-01 08:00:00'
try:
# Python 2.x
self.uni_str = unicode(base_str)
self.str_str = str(base_str)
except NameError:
self.uni_str = str(base_str)
self.str_str = bytes(base_str.encode())
def testEmptyString(self):
with self.assertRaises(ValueError):
parse('')
def testNone(self):
with self.assertRaises(TypeError):
parse(None)
def testInvalidType(self):
with self.assertRaises(TypeError):
parse(13)
def testDuckTyping(self):
# We want to support arbitrary classes that implement the stream
# interface.
class StringPassThrough(object):
def __init__(self, stream):
self.stream = stream
def read(self, *args, **kwargs):
return self.stream.read(*args, **kwargs)
dstr = StringPassThrough(StringIO('2014 January 19'))
self.assertEqual(parse(dstr), datetime(2014, 1, 19))
def testParseStream(self):
dstr = StringIO('2014 January 19')
self.assertEqual(parse(dstr), datetime(2014, 1, 19))
def testParseStr(self):
self.assertEqual(parse(self.str_str),
parse(self.uni_str))
def testParserParseStr(self):
from dateutil.parser import parser
self.assertEqual(parser().parse(self.str_str),
parser().parse(self.uni_str))
def testParseUnicodeWords(self):
class rus_parserinfo(parserinfo):
MONTHS = [("янв", "Январь"),
("фев", "Февраль"),
("мар", "Март"),
("апр", "Апрель"),
("май", "Май"),
("июн", "Июнь"),
("июл", "Июль"),
("авг", "Август"),
("сен", "Сентябрь"),
("окт", "Октябрь"),
("ноя", "Ноябрь"),
("дек", "Декабрь")]
self.assertEqual(parse('10 Сентябрь 2015 10:20',
parserinfo=rus_parserinfo()),
datetime(2015, 9, 10, 10, 20))
def testParseWithNulls(self):
# This relies on the from __future__ import unicode_literals, because
# explicitly specifying a unicode literal is a syntax error in Py 3.2
# May want to switch to u'...' if we ever drop Python 3.2 support.
pstring = '\x00\x00August 29, 1924'
self.assertEqual(parse(pstring),
datetime(1924, 8, 29))
def testDateCommandFormat(self):
self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
tzinfos=self.tzinfos),
datetime(2003, 9, 25, 10, 36, 28,
tzinfo=self.brsttz))
def testDateCommandFormatUnicode(self):
self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
tzinfos=self.tzinfos),
datetime(2003, 9, 25, 10, 36, 28,
tzinfo=self.brsttz))
def testDateCommandFormatReversed(self):
self.assertEqual(parse("2003 10:36:28 BRST 25 Sep Thu",
tzinfos=self.tzinfos),
datetime(2003, 9, 25, 10, 36, 28,
tzinfo=self.brsttz))
def testDateCommandFormatWithLong(self):
if not PY3:
self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
tzinfos={"BRST": long(-10800)}),
datetime(2003, 9, 25, 10, 36, 28,
tzinfo=self.brsttz))
def testDateCommandFormatIgnoreTz(self):
self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003",
ignoretz=True),
datetime(2003, 9, 25, 10, 36, 28))
def testDateCommandFormatStrip1(self):
self.assertEqual(parse("Thu Sep 25 10:36:28 2003"),
datetime(2003, 9, 25, 10, 36, 28))
def testDateCommandFormatStrip2(self):
self.assertEqual(parse("Thu Sep 25 10:36:28", default=self.default),
datetime(2003, 9, 25, 10, 36, 28))
def testDateCommandFormatStrip3(self):
self.assertEqual(parse("Thu Sep 10:36:28", default=self.default),
datetime(2003, 9, 25, 10, 36, 28))
def testDateCommandFormatStrip4(self):
self.assertEqual(parse("Thu 10:36:28", default=self.default),
datetime(2003, 9, 25, 10, 36, 28))
def testDateCommandFormatStrip5(self):
self.assertEqual(parse("Sep 10:36:28", default=self.default),
datetime(2003, 9, 25, 10, 36, 28))
def testDateCommandFormatStrip6(self):
self.assertEqual(parse("10:36:28", default=self.default),
datetime(2003, 9, 25, 10, 36, 28))
def testDateCommandFormatStrip7(self):
self.assertEqual(parse("10:36", default=self.default),
datetime(2003, 9, 25, 10, 36))
def testDateCommandFormatStrip8(self):
self.assertEqual(parse("Thu Sep 25 2003"),
datetime(2003, 9, 25))
def testDateCommandFormatStrip9(self):
self.assertEqual(parse("Sep 25 2003"),
datetime(2003, 9, 25))
def testDateCommandFormatStrip10(self):
self.assertEqual(parse("Sep 2003", default=self.default),
datetime(2003, 9, 25))
def testDateCommandFormatStrip11(self):
self.assertEqual(parse("Sep", default=self.default),
datetime(2003, 9, 25))
def testDateCommandFormatStrip12(self):
self.assertEqual(parse("2003", default=self.default),
datetime(2003, 9, 25))
def testDateRCommandFormat(self):
self.assertEqual(parse("Thu, 25 Sep 2003 10:49:41 -0300"),
datetime(2003, 9, 25, 10, 49, 41,
tzinfo=self.brsttz))
def testISOFormat(self):
self.assertEqual(parse("2003-09-25T10:49:41.5-03:00"),
datetime(2003, 9, 25, 10, 49, 41, 500000,
tzinfo=self.brsttz))
def testISOFormatStrip1(self):
self.assertEqual(parse("2003-09-25T10:49:41-03:00"),
datetime(2003, 9, 25, 10, 49, 41,
tzinfo=self.brsttz))
def testISOFormatStrip2(self):
self.assertEqual(parse("2003-09-25T10:49:41"),
datetime(2003, 9, 25, 10, 49, 41))
def testISOFormatStrip3(self):
self.assertEqual(parse("2003-09-25T10:49"),
datetime(2003, 9, 25, 10, 49))
def testISOFormatStrip4(self):
self.assertEqual(parse("2003-09-25T10"),
datetime(2003, 9, 25, 10))
def testISOFormatStrip5(self):
self.assertEqual(parse("2003-09-25"),
datetime(2003, 9, 25))
def testISOStrippedFormat(self):
self.assertEqual(parse("20030925T104941.5-0300"),
datetime(2003, 9, 25, 10, 49, 41, 500000,
tzinfo=self.brsttz))
def testISOStrippedFormatStrip1(self):
self.assertEqual(parse("20030925T104941-0300"),
datetime(2003, 9, 25, 10, 49, 41,
tzinfo=self.brsttz))
def testISOStrippedFormatStrip2(self):
self.assertEqual(parse("20030925T104941"),
datetime(2003, 9, 25, 10, 49, 41))
def testISOStrippedFormatStrip3(self):
self.assertEqual(parse("20030925T1049"),
datetime(2003, 9, 25, 10, 49, 0))
def testISOStrippedFormatStrip4(self):
self.assertEqual(parse("20030925T10"),
datetime(2003, 9, 25, 10))
def testISOStrippedFormatStrip5(self):
self.assertEqual(parse("20030925"),
datetime(2003, 9, 25))
def testPythonLoggerFormat(self):
self.assertEqual(parse("2003-09-25 10:49:41,502"),
datetime(2003, 9, 25, 10, 49, 41, 502000))
def testNoSeparator1(self):
self.assertEqual(parse("199709020908"),
datetime(1997, 9, 2, 9, 8))
def testNoSeparator2(self):
self.assertEqual(parse("19970902090807"),
datetime(1997, 9, 2, 9, 8, 7))
def testDateWithDash1(self):
self.assertEqual(parse("2003-09-25"),
datetime(2003, 9, 25))
def testDateWithDash2(self):
self.assertEqual(parse("2003-Sep-25"),
datetime(2003, 9, 25))
def testDateWithDash3(self):
self.assertEqual(parse("25-Sep-2003"),
datetime(2003, 9, 25))
def testDateWithDash4(self):
self.assertEqual(parse("25-Sep-2003"),
datetime(2003, 9, 25))
def testDateWithDash5(self):
self.assertEqual(parse("Sep-25-2003"),
datetime(2003, 9, 25))
def testDateWithDash6(self):
self.assertEqual(parse("09-25-2003"),
datetime(2003, 9, 25))
def testDateWithDash7(self):
self.assertEqual(parse("25-09-2003"),
datetime(2003, 9, 25))
def testDateWithDash8(self):
self.assertEqual(parse("10-09-2003", dayfirst=True),
datetime(2003, 9, 10))
def testDateWithDash9(self):
self.assertEqual(parse("10-09-2003"),
datetime(2003, 10, 9))
def testDateWithDash10(self):
self.assertEqual(parse("10-09-03"),
datetime(2003, 10, 9))
def testDateWithDash11(self):
self.assertEqual(parse("10-09-03", yearfirst=True),
datetime(2010, 9, 3))
def testDateWithDot1(self):
self.assertEqual(parse("2003.09.25"),
datetime(2003, 9, 25))
def testDateWithDot2(self):
self.assertEqual(parse("2003.Sep.25"),
datetime(2003, 9, 25))
def testDateWithDot3(self):
self.assertEqual(parse("25.Sep.2003"),
datetime(2003, 9, 25))
def testDateWithDot4(self):
self.assertEqual(parse("25.Sep.2003"),
datetime(2003, 9, 25))
def testDateWithDot5(self):
self.assertEqual(parse("Sep.25.2003"),
datetime(2003, 9, 25))
def testDateWithDot6(self):
self.assertEqual(parse("09.25.2003"),
datetime(2003, 9, 25))
def testDateWithDot7(self):
self.assertEqual(parse("25.09.2003"),
datetime(2003, 9, 25))
def testDateWithDot8(self):
self.assertEqual(parse("10.09.2003", dayfirst=True),
datetime(2003, 9, 10))
def testDateWithDot9(self):
self.assertEqual(parse("10.09.2003"),
datetime(2003, 10, 9))
def testDateWithDot10(self):
self.assertEqual(parse("10.09.03"),
datetime(2003, 10, 9))
def testDateWithDot11(self):
self.assertEqual(parse("10.09.03", yearfirst=True),
datetime(2010, 9, 3))
def testDateWithSlash1(self):
self.assertEqual(parse("2003/09/25"),
datetime(2003, 9, 25))
def testDateWithSlash2(self):
self.assertEqual(parse("2003/Sep/25"),
datetime(2003, 9, 25))
def testDateWithSlash3(self):
self.assertEqual(parse("25/Sep/2003"),
datetime(2003, 9, 25))
def testDateWithSlash4(self):
self.assertEqual(parse("25/Sep/2003"),
datetime(2003, 9, 25))
def testDateWithSlash5(self):
self.assertEqual(parse("Sep/25/2003"),
datetime(2003, 9, 25))
def testDateWithSlash6(self):
self.assertEqual(parse("09/25/2003"),
datetime(2003, 9, 25))
def testDateWithSlash7(self):
self.assertEqual(parse("25/09/2003"),
datetime(2003, 9, 25))
def testDateWithSlash8(self):
self.assertEqual(parse("10/09/2003", dayfirst=True),
datetime(2003, 9, 10))
def testDateWithSlash9(self):
self.assertEqual(parse("10/09/2003"),
datetime(2003, 10, 9))
def testDateWithSlash10(self):
self.assertEqual(parse("10/09/03"),
datetime(2003, 10, 9))
def testDateWithSlash11(self):
self.assertEqual(parse("10/09/03", yearfirst=True),
datetime(2010, 9, 3))
def testDateWithSpace1(self):
self.assertEqual(parse("2003 09 25"),
datetime(2003, 9, 25))
def testDateWithSpace2(self):
self.assertEqual(parse("2003 Sep 25"),
datetime(2003, 9, 25))
def testDateWithSpace3(self):
self.assertEqual(parse("25 Sep 2003"),
datetime(2003, 9, 25))
def testDateWithSpace4(self):
self.assertEqual(parse("25 Sep 2003"),
datetime(2003, 9, 25))
def testDateWithSpace5(self):
self.assertEqual(parse("Sep 25 2003"),
datetime(2003, 9, 25))
def testDateWithSpace6(self):
self.assertEqual(parse("09 25 2003"),
datetime(2003, 9, 25))
def testDateWithSpace7(self):
self.assertEqual(parse("25 09 2003"),
datetime(2003, 9, 25))
def testDateWithSpace8(self):
self.assertEqual(parse("10 09 2003", dayfirst=True),
datetime(2003, 9, 10))
def testDateWithSpace9(self):
self.assertEqual(parse("10 09 2003"),
datetime(2003, 10, 9))
def testDateWithSpace10(self):
self.assertEqual(parse("10 09 03"),
datetime(2003, 10, 9))
def testDateWithSpace11(self):
self.assertEqual(parse("10 09 03", yearfirst=True),
datetime(2010, 9, 3))
def testDateWithSpace12(self):
self.assertEqual(parse("25 09 03"),
datetime(2003, 9, 25))
def testStrangelyOrderedDate1(self):
self.assertEqual(parse("03 25 Sep"),
datetime(2003, 9, 25))
def testStrangelyOrderedDate2(self):
self.assertEqual(parse("2003 25 Sep"),
datetime(2003, 9, 25))
def testStrangelyOrderedDate3(self):
self.assertEqual(parse("25 03 Sep"),
datetime(2025, 9, 3))
def testHourWithLetters(self):
self.assertEqual(parse("10h36m28.5s", default=self.default),
datetime(2003, 9, 25, 10, 36, 28, 500000))
def testHourWithLettersStrip1(self):
self.assertEqual(parse("10h36m28s", default=self.default),
datetime(2003, 9, 25, 10, 36, 28))
def testHourWithLettersStrip2(self):
self.assertEqual(parse("10h36m", default=self.default),
datetime(2003, 9, 25, 10, 36))
def testHourWithLettersStrip3(self):
self.assertEqual(parse("10h", default=self.default),
datetime(2003, 9, 25, 10))
def testHourWithLettersStrip4(self):
self.assertEqual(parse("10 h 36", default=self.default),
datetime(2003, 9, 25, 10, 36))
def testAMPMNoHour(self):
with self.assertRaises(ValueError):
parse("AM")
with self.assertRaises(ValueError):
parse("Jan 20, 2015 PM")
def testHourAmPm1(self):
self.assertEqual(parse("10h am", default=self.default),
datetime(2003, 9, 25, 10))
def testHourAmPm2(self):
self.assertEqual(parse("10h pm", default=self.default),
datetime(2003, 9, 25, 22))
def testHourAmPm3(self):
self.assertEqual(parse("10am", default=self.default),
datetime(2003, 9, 25, 10))
def testHourAmPm4(self):
self.assertEqual(parse("10pm", default=self.default),
datetime(2003, 9, 25, 22))
def testHourAmPm5(self):
self.assertEqual(parse("10:00 am", default=self.default),
datetime(2003, 9, 25, 10))
def testHourAmPm6(self):
self.assertEqual(parse("10:00 pm", default=self.default),
datetime(2003, 9, 25, 22))
def testHourAmPm7(self):
self.assertEqual(parse("10:00am", default=self.default),
datetime(2003, 9, 25, 10))
def testHourAmPm8(self):
self.assertEqual(parse("10:00pm", default=self.default),
datetime(2003, 9, 25, 22))
def testHourAmPm9(self):
self.assertEqual(parse("10:00a.m", default=self.default),
datetime(2003, 9, 25, 10))
def testHourAmPm10(self):
self.assertEqual(parse("10:00p.m", default=self.default),
datetime(2003, 9, 25, 22))
def testHourAmPm11(self):
self.assertEqual(parse("10:00a.m.", default=self.default),
datetime(2003, 9, 25, 10))
def testHourAmPm12(self):
self.assertEqual(parse("10:00p.m.", default=self.default),
datetime(2003, 9, 25, 22))
def testAMPMRange(self):
with self.assertRaises(ValueError):
parse("13:44 AM")
with self.assertRaises(ValueError):
parse("January 25, 1921 23:13 PM")
def testPertain(self):
self.assertEqual(parse("Sep 03", default=self.default),
datetime(2003, 9, 3))
self.assertEqual(parse("Sep of 03", default=self.default),
datetime(2003, 9, 25))
def testWeekdayAlone(self):
self.assertEqual(parse("Wed", default=self.default),
datetime(2003, 10, 1))
def testLongWeekday(self):
self.assertEqual(parse("Wednesday", default=self.default),
datetime(2003, 10, 1))
def testLongMonth(self):
self.assertEqual(parse("October", default=self.default),
datetime(2003, 10, 25))
def testZeroYear(self):
self.assertEqual(parse("31-Dec-00", default=self.default),
datetime(2000, 12, 31))
def testFuzzy(self):
s = "Today is 25 of September of 2003, exactly " \
"at 10:49:41 with timezone -03:00."
self.assertEqual(parse(s, fuzzy=True),
datetime(2003, 9, 25, 10, 49, 41,
tzinfo=self.brsttz))
def testFuzzyWithTokens(self):
s = "Today is 25 of September of 2003, exactly " \
"at 10:49:41 with timezone -03:00."
self.assertEqual(parse(s, fuzzy_with_tokens=True),
(datetime(2003, 9, 25, 10, 49, 41,
tzinfo=self.brsttz),
('Today is ', 'of ', ', exactly at ',
' with timezone ', '.')))
def testFuzzyAMPMProblem(self):
# Sometimes fuzzy parsing results in AM/PM flag being set without
# hours - if it's fuzzy it should ignore that.
s1 = "I have a meeting on March 1, 1974."
s2 = "On June 8th, 2020, I am going to be the first man on Mars"
# Also don't want any erroneous AM or PMs changing the parsed time
s3 = "Meet me at the AM/PM on Sunset at 3:00 AM on December 3rd, 2003"
s4 = "Meet me at 3:00AM on December 3rd, 2003 at the AM/PM on Sunset"
self.assertEqual(parse(s1, fuzzy=True), datetime(1974, 3, 1))
self.assertEqual(parse(s2, fuzzy=True), datetime(2020, 6, 8))
self.assertEqual(parse(s3, fuzzy=True), datetime(2003, 12, 3, 3))
self.assertEqual(parse(s4, fuzzy=True), datetime(2003, 12, 3, 3))
def testFuzzyIgnoreAMPM(self):
s1 = "Jan 29, 1945 14:45 AM I going to see you there?"
self.assertEqual(parse(s1, fuzzy=True), datetime(1945, 1, 29, 14, 45))
def testExtraSpace(self):
self.assertEqual(parse(" July 4 , 1976 12:01:02 am "),
datetime(1976, 7, 4, 0, 1, 2))
def testRandomFormat1(self):
self.assertEqual(parse("Wed, July 10, '96"),
datetime(1996, 7, 10, 0, 0))
def testRandomFormat2(self):
self.assertEqual(parse("1996.07.10 AD at 15:08:56 PDT",
ignoretz=True),
datetime(1996, 7, 10, 15, 8, 56))
def testRandomFormat3(self):
self.assertEqual(parse("1996.July.10 AD 12:08 PM"),
datetime(1996, 7, 10, 12, 8))
def testRandomFormat4(self):
self.assertEqual(parse("Tuesday, April 12, 1952 AD 3:30:42pm PST",
ignoretz=True),
datetime(1952, 4, 12, 15, 30, 42))
def testRandomFormat5(self):
self.assertEqual(parse("November 5, 1994, 8:15:30 am EST",
ignoretz=True),
datetime(1994, 11, 5, 8, 15, 30))
def testRandomFormat6(self):
self.assertEqual(parse("1994-11-05T08:15:30-05:00",
ignoretz=True),
datetime(1994, 11, 5, 8, 15, 30))
def testRandomFormat7(self):
self.assertEqual(parse("1994-11-05T08:15:30Z",
ignoretz=True),
datetime(1994, 11, 5, 8, 15, 30))
def testRandomFormat8(self):
self.assertEqual(parse("July 4, 1976"), datetime(1976, 7, 4))
def testRandomFormat9(self):
self.assertEqual(parse("7 4 1976"), datetime(1976, 7, 4))
def testRandomFormat10(self):
self.assertEqual(parse("4 jul 1976"), datetime(1976, 7, 4))
def testRandomFormat11(self):
self.assertEqual(parse("7-4-76"), datetime(1976, 7, 4))
def testRandomFormat12(self):
self.assertEqual(parse("19760704"), datetime(1976, 7, 4))
def testRandomFormat13(self):
self.assertEqual(parse("0:01:02", default=self.default),
datetime(2003, 9, 25, 0, 1, 2))
def testRandomFormat14(self):
self.assertEqual(parse("12h 01m02s am", default=self.default),
datetime(2003, 9, 25, 0, 1, 2))
def testRandomFormat15(self):
self.assertEqual(parse("0:01:02 on July 4, 1976"),
datetime(1976, 7, 4, 0, 1, 2))
def testRandomFormat16(self):
self.assertEqual(parse("0:01:02 on July 4, 1976"),
datetime(1976, 7, 4, 0, 1, 2))
def testRandomFormat17(self):
self.assertEqual(parse("1976-07-04T00:01:02Z", ignoretz=True),
datetime(1976, 7, 4, 0, 1, 2))
def testRandomFormat18(self):
self.assertEqual(parse("July 4, 1976 12:01:02 am"),
datetime(1976, 7, 4, 0, 1, 2))
def testRandomFormat19(self):
self.assertEqual(parse("Mon Jan 2 04:24:27 1995"),
datetime(1995, 1, 2, 4, 24, 27))
def testRandomFormat20(self):
self.assertEqual(parse("Tue Apr 4 00:22:12 PDT 1995", ignoretz=True),
datetime(1995, 4, 4, 0, 22, 12))
def testRandomFormat21(self):
self.assertEqual(parse("04.04.95 00:22"),
datetime(1995, 4, 4, 0, 22))
def testRandomFormat22(self):
self.assertEqual(parse("Jan 1 1999 11:23:34.578"),
datetime(1999, 1, 1, 11, 23, 34, 578000))
def testRandomFormat23(self):
self.assertEqual(parse("950404 122212"),
datetime(1995, 4, 4, 12, 22, 12))
def testRandomFormat24(self):
self.assertEqual(parse("0:00 PM, PST", default=self.default,
ignoretz=True),
datetime(2003, 9, 25, 12, 0))
def testRandomFormat25(self):
self.assertEqual(parse("12:08 PM", default=self.default),
datetime(2003, 9, 25, 12, 8))
def testRandomFormat26(self):
self.assertEqual(parse("5:50 A.M. on June 13, 1990"),
datetime(1990, 6, 13, 5, 50))
def testRandomFormat27(self):
self.assertEqual(parse("3rd of May 2001"), datetime(2001, 5, 3))
def testRandomFormat28(self):
self.assertEqual(parse("5th of March 2001"), datetime(2001, 3, 5))
def testRandomFormat29(self):
self.assertEqual(parse("1st of May 2003"), datetime(2003, 5, 1))
def testRandomFormat30(self):
self.assertEqual(parse("01h02m03", default=self.default),
datetime(2003, 9, 25, 1, 2, 3))
def testRandomFormat31(self):
self.assertEqual(parse("01h02", default=self.default),
datetime(2003, 9, 25, 1, 2))
def testRandomFormat32(self):
self.assertEqual(parse("01h02s", default=self.default),
datetime(2003, 9, 25, 1, 0, 2))
def testRandomFormat33(self):
self.assertEqual(parse("01m02", default=self.default),
datetime(2003, 9, 25, 0, 1, 2))
def testRandomFormat34(self):
self.assertEqual(parse("01m02h", default=self.default),
datetime(2003, 9, 25, 2, 1))
def testRandomFormat35(self):
self.assertEqual(parse("2004 10 Apr 11h30m", default=self.default),
datetime(2004, 4, 10, 11, 30))
def test_99_ad(self):
self.assertEqual(parse('0099-01-01T00:00:00'),
datetime(99, 1, 1, 0, 0))
def test_31_ad(self):
self.assertEqual(parse('0031-01-01T00:00:00'),
datetime(31, 1, 1, 0, 0))
def testInvalidDay(self):
with self.assertRaises(ValueError):
parse("Feb 30, 2007")
def testUnspecifiedDayFallback(self):
# Test that for an unspecified day, the fallback behavior is correct.
self.assertEqual(parse("April 2009", default=datetime(2010, 1, 31)),
datetime(2009, 4, 30))
def testUnspecifiedDayFallbackFebNoLeapYear(self):
self.assertEqual(parse("Feb 2007", default=datetime(2010, 1, 31)),
datetime(2007, 2, 28))
def testUnspecifiedDayFallbackFebLeapYear(self):
self.assertEqual(parse("Feb 2008", default=datetime(2010, 1, 31)),
datetime(2008, 2, 29))
def testErrorType01(self):
self.assertRaises(ValueError,
parse, 'shouldfail')
def testCorrectErrorOnFuzzyWithTokens(self):
assertRaisesRegex(self, ValueError, 'Unknown string format',
parse, '04/04/32/423', fuzzy_with_tokens=True)
assertRaisesRegex(self, ValueError, 'Unknown string format',
parse, '04/04/04 +32423', fuzzy_with_tokens=True)
assertRaisesRegex(self, ValueError, 'Unknown string format',
parse, '04/04/0d4', fuzzy_with_tokens=True)
def testIncreasingCTime(self):
# This test will check 200 different years, every month, every day,
# every hour, every minute, every second, and every weekday, using
# a delta of more or less 1 year, 1 month, 1 day, 1 minute and
# 1 second.
delta = timedelta(days=365+31+1, seconds=1+60+60*60)
dt = datetime(1900, 1, 1, 0, 0, 0, 0)
for i in range(200):
self.assertEqual(parse(dt.ctime()), dt)
dt += delta
def testIncreasingISOFormat(self):
delta = timedelta(days=365+31+1, seconds=1+60+60*60)
dt = datetime(1900, 1, 1, 0, 0, 0, 0)
for i in range(200):
self.assertEqual(parse(dt.isoformat()), dt)
dt += delta
def testMicrosecondsPrecisionError(self):
        # Skip found out about that sad precision problem. :-(
dt1 = parse("00:11:25.01")
dt2 = parse("00:12:10.01")
self.assertEqual(dt1.microsecond, 10000)
self.assertEqual(dt2.microsecond, 10000)
def testMicrosecondPrecisionErrorReturns(self):
# One more precision issue, discovered by Eric Brown. This should
# be the last one, as we're no longer using floating points.
for ms in [100001, 100000, 99999, 99998,
10001, 10000, 9999, 9998,
1001, 1000, 999, 998,
101, 100, 99, 98]:
dt = datetime(2008, 2, 27, 21, 26, 1, ms)
self.assertEqual(parse(dt.isoformat()), dt)
def testHighPrecisionSeconds(self):
self.assertEqual(parse("20080227T21:26:01.123456789"),
datetime(2008, 2, 27, 21, 26, 1, 123456))
def testCustomParserInfo(self):
# Custom parser info wasn't working, as Michael Elsdörfer discovered.
from dateutil.parser import parserinfo, parser
class myparserinfo(parserinfo):
MONTHS = parserinfo.MONTHS[:]
MONTHS[0] = ("Foo", "Foo")
myparser = parser(myparserinfo())
dt = myparser.parse("01/Foo/2007")
self.assertEqual(dt, datetime(2007, 1, 1))
def testNoYearFirstNoDayFirst(self):
dtstr = '090107'
# Should be MMDDYY
self.assertEqual(parse(dtstr),
datetime(2007, 9, 1))
self.assertEqual(parse(dtstr, yearfirst=False, dayfirst=False),
datetime(2007, 9, 1))
def testYearFirst(self):
dtstr = '090107'
# Should be MMDDYY
self.assertEqual(parse(dtstr, yearfirst=True),
datetime(2009, 1, 7))
self.assertEqual(parse(dtstr, yearfirst=True, dayfirst=False),
datetime(2009, 1, 7))
def testDayFirst(self):
dtstr = '090107'
# Should be DDMMYY
self.assertEqual(parse(dtstr, dayfirst=True),
datetime(2007, 1, 9))
self.assertEqual(parse(dtstr, yearfirst=False, dayfirst=True),
datetime(2007, 1, 9))
def testDayFirstYearFirst(self):
dtstr = '090107'
# Should be YYDDMM
self.assertEqual(parse(dtstr, yearfirst=True, dayfirst=True),
datetime(2009, 7, 1))
def testUnambiguousYearFirst(self):
dtstr = '2015 09 25'
self.assertEqual(parse(dtstr, yearfirst=True),
datetime(2015, 9, 25))
def testUnambiguousDayFirst(self):
dtstr = '2015 09 25'
self.assertEqual(parse(dtstr, dayfirst=True),
datetime(2015, 9, 25))
def testUnambiguousDayFirstYearFirst(self):
dtstr = '2015 09 25'
self.assertEqual(parse(dtstr, dayfirst=True, yearfirst=True),
datetime(2015, 9, 25))
| dantebarba/docker-media-server | plex/Sub-Zero.bundle/Contents/Libraries/Shared/dateutil/test/test_parser.py | Python | gpl-3.0 | 31,922 | 0.000377 |
import json
from flask import session
from flask import url_for, redirect, request
from rauth import OAuth1Service, OAuth2Service
from meetneat.config import OAUTH_CREDENTIALS
class OAuthSignIn(object):
providers = None
def __init__(self, provider_name):
self.provider_name = provider_name
credentials = OAUTH_CREDENTIALS[provider_name]
self.consumer_id = credentials['id']
self.consumer_secret = credentials['secret']
def authorize(self):
pass
def callback(self):
pass
def get_callback_url(self):
return url_for('api.oauth_callback', provider=self.provider_name, _external=True)
@classmethod
def get_provider(cls, provider_name):
if cls.providers is None:
cls.providers = {}
for provider_class in cls.__subclasses__():
provider = provider_class()
cls.providers[provider.provider_name] = provider
return cls.providers[provider_name]
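# Facebook uses the OAuth2 authorization-code flow; Twitter (below) uses OAuth1 with a
# request-token/verifier handshake, hence the two different rauth service classes.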
class FacebookSignIn(OAuthSignIn):
def __init__(self):
super(FacebookSignIn, self).__init__('facebook')
self.service = OAuth2Service(
name='facebook',
client_id=self.consumer_id,
client_secret=self.consumer_secret,
authorize_url='https://graph.facebook.com/oauth/authorize',
access_token_url='https://graph.facebook.com/oauth/access_token',
base_url='https://graph.facebook.com/'
)
def authorize(self):
return redirect(self.service.get_authorize_url(
scope='email',
response_type='code',
redirect_uri=self.get_callback_url())
)
def callback(self):
def decode_json(payload):
return json.loads(payload.decode('utf-8'))
if 'code' not in request.args:
return None, None, None
oauth_session = self.service.get_auth_session(
data={'code': request.args['code'],
'grant_type': 'authorization_code',
'redirect_uri': self.get_callback_url()},
decoder=decode_json
)
me = oauth_session.get('me').json()
return (
'facebook$' + me['id'],
me.get('email').split('@')[0], # Facebook does not provide
# username, so the email's user
# is used instead
me.get('email')
)
class TwitterSignIn(OAuthSignIn):
def __init__(self):
super(TwitterSignIn, self).__init__('twitter')
self.service = OAuth1Service(
name='twitter',
consumer_key=self.consumer_id,
consumer_secret=self.consumer_secret,
request_token_url='https://api.twitter.com/oauth/request_token',
authorize_url='https://api.twitter.com/oauth/authorize',
access_token_url='https://api.twitter.com/oauth/access_token',
base_url='https://api.twitter.com/1.1/'
)
def authorize(self):
request_token = self.service.get_request_token(
params={'oauth_callback': self.get_callback_url()}
)
session['request_token'] = request_token
return redirect(self.service.get_authorize_url(request_token[0]))
def callback(self):
request_token = session.pop('request_token')
if 'oauth_verifier' not in request.args:
return None, None, None
oauth_session = self.service.get_auth_session(
request_token[0],
request_token[1],
data={'oauth_verifier': request.args['oauth_verifier']}
)
me = oauth_session.get('account/verify_credentials.json').json()
social_id = 'twitter$' + str(me.get('id'))
username = me.get('screen_name')
return social_id, username, None # Twitter does not provide email
| anthonyabeo/MeetNEat | api/oauth.py | Python | unlicense | 3,846 | 0.00026 |
from abc import ABCMeta,abstractmethod
from my_hue import *
# Would dynamically choose a trigger based on trigger type
def trigger_factory(trigger_type):
return None
class Trigger(object):
__metaclass__ = ABCMeta
def __init__(self):
self.action()
@abstractmethod
def action(self):
pass
class IClickerTrigger(Trigger):
    def __init__(self, clicker_id, response_info, time_of_trigger, sequence_number):
        # Set the attributes before calling the base __init__, which fires action().
        self.clicker_id = clicker_id
        self.response_info = response_info
        self.time_of_trigger = time_of_trigger
        self.sequence_number = sequence_number
        super(IClickerTrigger, self).__init__()
def action(self):
print self.response_info
button = 'a'
if button == 'a':
pass | Caveat4U/home_automation | trigger.py | Python | gpl-2.0 | 793 | 0.011349 |
#!/usr/bin/env python
__author__ = 'mehdi tlili'
import rospy
from tf2_msgs.msg import TFMessage
import tf
class Remapper(object):
def __init__(self):
self.br = tf.TransformBroadcaster()
rospy.Subscriber("/tf", TFMessage, self.tf_remapper)
def tf_remapper(self, msg):
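        # When a transform stamped with the simulator's robot0 frame arrives, broadcast an
        # identity transform attaching base_footprint to robot0, presumably so nodes that
        # expect a base_footprint frame keep working.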
if msg.transforms[0].header.frame_id == "/robot0":
self.br.sendTransform((0, 0, 0),
tf.transformations.quaternion_from_euler(0, 0, 0),
rospy.Time.now(),
"base_footprint",
"robot0")
if __name__ == '__main__':
rospy.init_node('remapper_nav')
remapper = Remapper()
rospy.spin()
| LCAS/teaching | turtlebot_simulator/turtlebot_stdr/nodes/tf_connector.py | Python | mit | 741 | 0.002699 |
import os
import sys
import mock
import logging
import unittest
from inaugurator.server import idlistener
from inaugurator.tests.common import PikaChannelMock
class Test(unittest.TestCase):
def setUp(self):
self.consumeCallback = mock.Mock()
self.channel = PikaChannelMock(self)
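        # The channel test double records declare/bind/consume/delete requests so each
        # test can answer them one step at a time (see the answer* helpers used below).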
self.expectedStatusExchange = idlistener.statusExchange("delta-foxtrot")
self.tested = idlistener.IDListener("delta-foxtrot", self.consumeCallback, self.channel)
def test_Listen(self):
self.validateListenHappyFlow()
def test_StopListening(self):
queue = self.validateListenHappyFlow()
self.tested.stopListening()
self.validateOneStatusQueueIsAllocated(queue, allowOtherRequests=True)
self.channel.answerQueueDelete(queue)
self.validateNoStatusQueueIsAllocated()
self.validateMessages(self.basicConsumeCallback, isArrivalExpected=False)
def test_StopListeningBeforeExchangeDeclared(self):
self.validateNoStatusQueueIsAllocated()
self.tested.stopListening()
self.validateNoStatusQueueIsAllocated()
self.channel.answerExchangeDeclare(self.expectedStatusExchange)
self.validateNoStatusQueueIsAllocated()
def test_StopListeningBeforeQueueDeclared(self):
self.validateListenFlowUntilStatusQueueDeclare()
self.validateOneStatusQueueIsAllocating()
self.tested.stopListening()
self.validateOneStatusQueueIsAllocating()
queue = self.channel.answerQueueDeclare()
self.validateOneStatusQueueIsAllocated(queue, allowOtherRequests=True)
self.channel.answerQueueDelete(queue)
self.validateNoStatusQueueIsAllocated()
def test_StopListeningBeforeQueueBinded(self):
self.validateListenFlowUntilStatusQueueDeclare()
queue = self.channel.answerQueueDeclare()
self.validateOneStatusQueueIsAllocated(queue)
self.tested.stopListening()
self.validateOneStatusQueueIsAllocated(queue, allowOtherRequests=True)
queueBindCallback = self.channel.getQueueBindCallback()
queueBindCallback(queue)
self.validateOneStatusQueueIsAllocated(queue, allowOtherRequests=True)
self.channel.answerQueueDelete(queue)
self.validateNoStatusQueueIsAllocated(allowOtherRequests=True)
def test_StopListeningTwice(self):
queue = self.validateListenHappyFlow()
self.tested.stopListening()
self.channel.answerQueueDelete(queue)
self.validateNoStatusQueueIsAllocated()
self.tested.stopListening()
self.validateNoStatusQueueIsAllocated()
def test_MoreThanOneInstance(self):
for i in xrange(10):
queue = self.validateListenHappyFlow()
self.tested.stopListening()
self.channel.answerQueueDelete(queue)
self.validateNoStatusQueueIsAllocated()
self.tested = idlistener.IDListener("delta-foxtrot", self.consumeCallback, self.channel)
self.validateNoStatusQueueIsAllocated()
def validateListenFlowUntilStatusQueueDeclare(self):
self.validateNoStatusQueueIsAllocated()
self.channel.answerExchangeDeclare(self.expectedStatusExchange)
self.validateOneStatusQueueIsAllocating()
def validateListenFlowAfterQueueDeclare(self, queue):
queueBindCallback = self.channel.getQueueBindCallback()
queueBindCallback(queue)
self.basicConsumeCallback = self.channel.getBasicConsumeCallback()
self.validateMessages(self.basicConsumeCallback)
self.validateOneStatusQueueIsAllocated(queue)
def validateListenHappyFlow(self):
self.validateListenFlowUntilStatusQueueDeclare()
queue = self.channel.answerQueueDeclare()
self.validateListenFlowAfterQueueDeclare(queue)
self.validateOneStatusQueueIsAllocated(queue)
return queue
def validateMessages(self, basicConsumeCallback, isArrivalExpected=True):
message = 'I am a cool message.'
basicConsumeCallback(message)
self.assertEquals(self.consumeCallback.called, isArrivalExpected)
self.consumeCallback.reset_mock()
def validateOneStatusQueueIsAllocated(self, queue, allowOtherRequests=False):
self.assertEquals(set([queue]), self.channel.declaredQueues)
if not allowOtherRequests:
self.assertFalse(self.channel.requests)
def validateOneStatusQueueIsAllocating(self, allowDeleteRequests=False):
self.assertEquals(len(self.channel.requests), 1)
self.assertEquals(self.channel.requests[0][0], "declare")
if not allowDeleteRequests:
self.assertFalse(self.channel.declaredQueues)
def validateNoStatusQueueIsAllocated(self, allowOtherRequests=False):
self.assertFalse(self.channel.declaredQueues)
if not allowOtherRequests:
self.assertFalse(self.channel.requests)
self.assertFalse(self.channel.queue_bind.called)
self.assertFalse(self.channel.basic_consume.called)
if __name__ == '__main__':
_logger = logging.getLogger("inaugurator.server")
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
_logger.addHandler(handler)
_logger.setLevel(logging.DEBUG)
unittest.main()
| Stratoscale/inaugurator | inaugurator/tests/test_idlistener.py | Python | apache-2.0 | 5,274 | 0.000948 |
""" Copyright 2015 Kris Steinhoff, The University of Michigan
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import box
import ConfigParser, csv, optparse, os, re, sys, json
from pprint import pprint
def update_counter(message):
sys.stdout.write("\r"+ message)
sys.stdout.flush()
# sys.stdout.write("\n")
def human_file_size(size): # http://stackoverflow.com/a/1094933/70554
format = "%3.1f %s"
tiers = ["bytes","KB","MB","GB"]
for t in tiers[:-1]:
if size < 1024.0:
return format % (size, t)
size /= 1024.0
return format % (size, tiers[-1])
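# Illustrative values (a hedged sketch, not part of the original module):
#   human_file_size(512)           -> "512.0 bytes"
#   human_file_size(2048)          -> "2.0 KB"
#   human_file_size(3 * 1024 ** 3) -> "3.0 GB"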
def median(values):
values.sort()
count = len(values)
if count % 2 == 1:
return values[count/2]
else:
return ( values[(count/2)-1] + values[count/2] ) / 2.0
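# Illustrative values (hedged):
#   median([3, 1, 2])    -> 2      (odd length: middle element after sorting)
#   median([4, 1, 3, 2]) -> 2.5    (even length: mean of the two middle elements)
# Note that the input list is sorted in place as a side effect.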
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option("-d", "--dry-run", action="store_true", dest="dry_run", default=False, help="simulate changes")
(options, args) = parser.parse_args()
box = box.BoxApi()
config = ConfigParser.ConfigParser()
settings_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "settings.conf")
config.read(settings_file)
try:
group_id = config.get("add_shared_to_group", "group_id")
except:
print "group_id not configured (in add_shared_to_group section)"
sys.exit(1)
if len(args) > 0:
infile = csv.reader(open(args[0], "rb"))
else:
infile = csv.reader(sys.stdin)
headers = infile.next()
role_rules = {
"student": re.compile(r"(Enrolled)?Student(AA|DBRN|FLNT)"),
"staff": re.compile(r"(Regular|Temporary)Staff(AA|DBRN|FLNT)"),
"faculty": re.compile(r"Faculty(AA|DBRN|FLNT)"),
"sponsored": re.compile(r"SponsoredAffiliate(AA|DBNR|FLNT)")
}
types = ("user", "shared")
storage = ([], [])
affiliations = {}
roles = dict.fromkeys(role_rules.keys(), 0)
ids = []
for attr_values in infile:
attrs = dict(zip(headers, attr_values))
id = attrs["box_id"]
if attrs["box_account_type"].lower() == "shared":
ids.append(id)
for id in ids:
data = json.dumps({ "user": {"id": id}, "group": {"id": group_id, "role": "member"}})
if options.dry_run:
print data
else:
r = box.request("POST", "/group_memberships", data=data)
if r.status_code == 201:
print "User ID %s added to group." % id
elif r.status_code == 409:
print "User ID %s NOT added to group already exists." % id
else:
print "WARNING: Received an unexpected response:"
print r.text
| box-community/box-weekly-stats | add_shared_to_group.py | Python | apache-2.0 | 3,242 | 0.005861 |
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .images import (
ImageClassificationModelJob,
GenericImageModelJob,
ImageModelJob,
)
from .job import ModelJob
__all__ = [
'ImageClassificationModelJob',
'GenericImageModelJob',
'ImageModelJob',
'ModelJob',
]
| winnerineast/Origae-6 | origae/model/__init__.py | Python | gpl-3.0 | 352 | 0 |
#coding=utf-8
import codecs
import logging
import numpy as np
import os
from collections import defaultdict
# define a logger
logging.basicConfig(format="%(message)s", level=logging.INFO)
def load_embedding(filename, embedding_size):
"""
load embedding
"""
embeddings = []
word2idx = defaultdict(list)
idx2word = defaultdict(list)
idx = 0
with codecs.open(filename, mode="r", encoding="utf-8") as rf:
try:
for line in rf.readlines():
idx += 1
arr = line.split(" ")
if len(arr) != (embedding_size + 2):
logging.error("embedding error, index is:%s"%(idx))
continue
embedding = [float(val) for val in arr[1 : -1]]
word2idx[arr[0]] = len(word2idx)
idx2word[len(word2idx)] = arr[0]
embeddings.append(embedding)
except Exception as e:
logging.error("load embedding Exception," , e)
finally:
rf.close()
logging.info("load embedding finish!")
return embeddings, word2idx, idx2word
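# A hedged sketch of the embedding file format this loader appears to expect:
# one token per line, "<word> <v1> ... <v_embedding_size> <extra>", i.e.
# embedding_size + 2 space-separated fields, where the first field is the word
# and the trailing field is discarded. Typical usage (file name illustrative):
#
#   embeddings, word2idx, idx2word = load_embedding("vectors.txt", 300)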
def sent_to_idx(sent, word2idx, sequence_len):
"""
convert sentence to index array
"""
unknown_id = word2idx.get("UNKNOWN", 0)
sent2idx = [word2idx.get(word, unknown_id) for word in sent.split("_")[:sequence_len]]
return sent2idx
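# For example (hedged): with word2idx = {"UNKNOWN": 0, "hello": 1, "world": 2},
# sent_to_idx("hello_world_foo", word2idx, sequence_len=5) returns [1, 2, 0]:
# unseen tokens fall back to the UNKNOWN id and the sentence is truncated to
# at most sequence_len underscore-separated tokens.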
def load_train_data(filename, word2idx, sequence_len):
"""
load train data
"""
ori_quests, cand_quests = [], []
with codecs.open(filename, mode="r", encoding="utf-8") as rf:
try:
for line in rf.readlines():
arr = line.strip().split(" ")
if len(arr) != 4 or arr[0] != "1":
logging.error("invalid data:%s"%(line))
continue
ori_quest = sent_to_idx(arr[2], word2idx, sequence_len)
cand_quest = sent_to_idx(arr[3], word2idx, sequence_len)
ori_quests.append(ori_quest)
cand_quests.append(cand_quest)
except Exception as e:
logging.error("load train data Exception," + e)
finally:
rf.close()
logging.info("load train data finish!")
return ori_quests, cand_quests
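# A hedged reading of the training file format implied by the parser above:
# four space-separated fields per line, "<label> <qid-like field> <ori_question>
# <cand_question>", where only positive pairs (label "1") are kept and each
# question is a string of underscore-joined tokens.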
def create_valid(data, proportion=0.1):
if data is None:
logging.error("data is none")
os._exit(1)
data_len = len(data)
shuffle_idx = np.random.permutation(np.arange(data_len))
data = np.array(data)[shuffle_idx]
seperate_idx = int(data_len * (1 - proportion))
return data[:seperate_idx], data[seperate_idx:]
def load_test_data(filename, word2idx, sequence_len):
"""
load test data
"""
ori_quests, cand_quests, labels, qids = [], [], [], []
with codecs.open(filename, mode="r", encoding="utf-8") as rf:
try:
for line in rf.readlines():
arr = line.strip().split(" ")
if len(arr) != 4:
logging.error("invalid data:%s"%(line))
continue
ori_quest = sent_to_idx(arr[2], word2idx, sequence_len)
cand_quest = sent_to_idx(arr[3], word2idx, sequence_len)
label = int(arr[0])
result = int(arr[1].split(":")[1])
ori_quests.append(ori_quest)
cand_quests.append(cand_quest)
labels.append(label)
qids.append(result)
except Exception as e:
logging.error("load test error," , e)
finally:
rf.close()
logging.info("load test data finish!")
return ori_quests, cand_quests, labels, qids
def batch_iter(ori_quests, cand_quests, batch_size, epoches, is_valid=False):
"""
iterate the data
"""
data_len = len(ori_quests)
batch_num = int(data_len / batch_size)
ori_quests = np.array(ori_quests)
cand_quests = np.array(cand_quests)
for epoch in range(epoches):
if is_valid is not True:
shuffle_idx = np.random.permutation(np.arange(batch_num * batch_size))
ori_quests = np.array(ori_quests)[shuffle_idx]
cand_quests = np.array(cand_quests)[shuffle_idx]
for batch in range(batch_num):
start_idx = batch * batch_size
end_idx = min((batch + 1) * batch_size, data_len)
act_batch_size = end_idx - start_idx
# get negative questions
if is_valid:
neg_quests = cand_quests[start_idx : end_idx]
else:
randi_list = []
while len(randi_list) != act_batch_size:
[randi_list.append(idx) for idx in np.random.randint(0, data_len, 5 * act_batch_size) if start_idx < idx < end_idx and len(randi_list) < act_batch_size]
neg_quests = [cand_quests[idx] for idx in randi_list]
yield (ori_quests[start_idx : end_idx], cand_quests[start_idx : end_idx], neg_quests)
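# A minimal usage sketch (hedged; batch_size/epoches values are illustrative):
#
#   for ori_batch, cand_batch, neg_batch in batch_iter(ori_quests, cand_quests,
#                                                      batch_size=64, epoches=10):
#       pass  # feed the three aligned batches to the model
#
# Each yielded tuple holds the original questions, their positive candidates and
# the negative candidates (which are simply the positives again when is_valid=True).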
| gallupliu/QA | preprocess/data_helper.py | Python | apache-2.0 | 4,978 | 0.004219 |
import os
from setuptools import (
find_packages,
setup,
)
from setuptools.command.test import test
PACKAGE_DIR = 'src'
class PyTest(test):
def finalize_options(self):
test.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest, sys
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name='psycopg2-managed-connection',
description='Thread-safe connection manager for psycopg2 connections.',
version='1.0.0',
author='Ted Kaemming, Disqus',
author_email='ted@disqus.com',
license='Apache License 2.0',
setup_requires=(
'setuptools>=8.0',
),
install_requires=(
'psycopg2~=2.6',
),
packages=find_packages(PACKAGE_DIR),
package_dir={
'': PACKAGE_DIR,
},
zip_safe=False,
cmdclass = {
'test': PyTest,
},
tests_require=(
'pytest~=2.7',
),
)
| disqus/psycopg2-managed-connection | setup.py | Python | apache-2.0 | 971 | 0.00309 |
"""
@package ssw_wrap
@brief Simple python wrapper for SSW align library
To use the dynamic library libssw.so you may need to modify the LD_LIBRARY_PATH
environment variable to include the library directory (export LD_LIBRARY_PATH=$PWD).
For permanent inclusion of the lib, edit /etc/ld.so.conf, add the directory containing
the library, and update the cache by running /sbin/ldconfig as root.
@copyright [The MIT licence](http://opensource.org/licenses/MIT)
@author Clement & Adrien Leger - 2014
"""
#~~~~~~~GLOBAL IMPORTS~~~~~~~#
# Standard library packages
from ctypes import *
import os
def _get_libssw_path():
base = os.path.dirname(__file__)
matches = [x for x in os.listdir(base) if (x.startswith("libssw") & x.endswith(".so"))]
if len(matches) < 1:
raise Exception("Couldn't find libssw.so in this directory: '{}'".format(base))
return os.path.join(base, matches[0])
libssw = cdll.LoadLibrary(_get_libssw_path())#os.path.join(os.path.dirname(__file__), 'libssw.so'))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
class CAlignRes(Structure):
"""
@class SSWAlignRes
@brief ctypes Structure with s_align struct mapping returned by SSWAligner.Align func
Correspond to the structure of the query profile
"""
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~Ctype Structure~~~~~~~#
_fields_ = [('score', c_uint16),
('score2', c_uint16),
('ref_begin', c_int32),
('ref_end', c_int32),
('query_begin', c_int32),
('query_end', c_int32),
('ref_end2', c_int32),
('cigar', POINTER(c_uint32)),
('cigarLen', c_int32)]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
class Aligner(object):
"""
@class SSWAligner
@brief Wrapper for SSW align library
"""
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~CLASS VARIABLES~~~~~~~#
# Dictionary to map nucleotides to int as expected by the SSW C library
base_to_int = { 'A':0, 'C':1, 'G':2, 'T':3, 'N':4, 'a':0, 'c':1, 'g':2, 't':3, 'n':4}
int_to_base = { 0:'A', 1:'C', 2:'G', 3:'T', 4:'N'}
# Load the ssw library using ctypes
# libssw = cdll.LoadLibrary('libssw.so')
#libssw = cdll.LoadLibrary(_get_libssw_path())#os.path.join(os.path.dirname(__file__), 'libssw.so'))
# Init and setup the functions pointer to map the one specified in the SSW lib
# ssw_init method
ssw_init = libssw.ssw_init
ssw_init.restype = c_void_p
ssw_init.argtypes = [POINTER(c_int8), c_int32, POINTER(c_int8), c_int32, c_int8]
# init_destroy function
init_destroy = libssw.init_destroy
init_destroy.restype = None
init_destroy.argtypes = [c_void_p]
# ssw_align function
ssw_align = libssw.ssw_align
ssw_align.restype = POINTER(CAlignRes)
ssw_align.argtypes = [c_void_p, POINTER(c_int8), c_int32, c_uint8, c_uint8, c_uint8, c_uint16, c_int32, c_int32]
# align_destroy function
align_destroy = libssw.align_destroy
align_destroy.restype = None
align_destroy.argtypes = [POINTER(CAlignRes)]
#~~~~~~~FONDAMENTAL METHODS~~~~~~~#
def __repr__(self):
msg = self.__str__()
msg += "SCORE PARAMETERS:\n"
msg += " Gap Weight Open: {} Extension: {}\n".format(-self.gap_open, -self.gap_extend)
msg += " Align Weight Match: {} Mismatch: {}\n\n".format(self.match, -self.mismatch)
msg += " Match/mismatch Score matrix\n"
msg += " \tA\tC\tG\tT\tN\n"
msg += " A\t{}\t{}\t{}\t{}\t{}\n".format(self.match, -self.mismatch, -self.mismatch, -self.mismatch, 0)
msg += " C\t{}\t{}\t{}\t{}\t{}\n".format(-self.mismatch, self.match, -self.mismatch, -self.mismatch, 0)
msg += " G\t{}\t{}\t{}\t{}\t{}\n".format(-self.mismatch, -self.mismatch, self.match, -self.mismatch, 0)
msg += " T\t{}\t{}\t{}\t{}\t{}\n".format(-self.mismatch, -self.mismatch, -self.mismatch, self.match, 0)
msg += " N\t{}\t{}\t{}\t{}\t{}\n\n".format(0,0,0,0,0)
msg += "RESULT PARAMETERS:\n"
msg += " Report cigar {}\n".format(self.report_cigar)
msg += " Report secondary match {}\n\n".format(self.report_secondary)
msg += "REFERENCE SEQUENCE :\n"
if self.ref_len <= 50:
msg += "".join([self.int_to_base[i] for i in self.ref_seq])+"\n"
else:
msg += "".join([self.int_to_base[self.ref_seq[i]] for i in range(50)])+"...\n"
msg += " Lenght :{} nucleotides\n".format(self.ref_len)
return msg
def __str__(self):
return "\n<Instance of {} from {} >\n".format(self.__class__.__name__, self.__module__)
def __init__(self,
ref_seq="",
match=2,
mismatch=2,
gap_open=3,
gap_extend=1,
report_secondary=False,
report_cigar=False):
"""
Initialize object by creating an interface with ssw library functions
A reference sequence is also assigned to the object for multiple alignment against queries
with the align function
@param ref_seq Reference sequence as a python string (case insensitive)
@param match Weight for a match
@param mismatch Absolute value of mismatch penalty
@param gap_open Absolute value of gap open penalty
@param gap_extend Absolute value of gap extend penalty
@param report_secondary Report the 2nd best alignment if true
@param report_cigar Report cigar string if true
"""
# Store overall alignment parameters
self.report_secondary = report_secondary
self.report_cigar = report_cigar
# Set gap penalties
self.set_gap(gap_open, gap_extend)
# Set the cost matrix
self.set_mat(match, mismatch)
# Set the reference sequence
self.set_ref(ref_seq)
#~~~~~~~SETTERS METHODS~~~~~~~#
def set_gap(self, gap_open=3, gap_extend=1):
"""
Store gap open and gap extension penalties
"""
self.gap_open = gap_open
self.gap_extend = gap_extend
def set_mat(self, match=2, mismatch=2):
"""
Store match and mismatch scores then initialize a Cost matrix and fill it with match and
mismatch values. Ambiguous base: no penalty
"""
self.match = match
self.mismatch = mismatch
mat_decl = c_int8 * 25
self.mat = mat_decl(match, -mismatch, -mismatch, -mismatch, 0,
-mismatch, match, -mismatch, -mismatch, 0,
-mismatch, -mismatch, match, -mismatch, 0,
-mismatch, -mismatch, -mismatch, match, 0,
0, 0, 0, 0, 0)
def set_ref(self, ref_seq):
"""
Determine the size of the ref sequence and cast it in a c type integer matrix
"""
if ref_seq:
self.ref_len = len(ref_seq)
self.ref_seq = self._DNA_to_int_mat (ref_seq, self.ref_len)
else:
self.ref_len = 0
self.ref_seq = ""
#~~~~~~~PUBLIC METHODS~~~~~~~#
def align(self, query_seq, min_score=0, min_len=0):
"""
Perform the alignment of query against the object reference sequence
@param query_seq Query sequence as a python string (case insensitive)
@param min_score Minimal score of match. None will be returned in case of filtering out
@param min_len Minimal length of match. None will be returned in case of filtering out
@return A SSWAlignRes Object containing information about the alignment.
"""
# Determine the size of the ref sequence and cast it in a c type integer matrix
query_len = len(query_seq)
query_seq = self._DNA_to_int_mat (query_seq, query_len)
# Create the query profile using the query sequence
profile = self.ssw_init(query_seq, # Query seq in c type integers
c_int32(query_len), # Length of the query sequence in bases
self.mat, # Score matrix
5, # Square root of the number of elements in mat
2) # flag = no estimation of the best alignment score
# Setup the mask_len parameters = distance between the optimal and suboptimal alignment
# if < 15, the function will NOT return the suboptimal alignment information
if query_len > 30:
mask_len = query_len//2
else:
mask_len = 15
c_result = self.ssw_align (profile, # Query profile
self.ref_seq, # Ref seq in c type integers
c_int32(self.ref_len), # Length of the ref sequence in bases
self.gap_open, # Absolute value of gap open penalty
self.gap_extend, # absolute value of gap extend penalty
1, # Bitwise FLAG for output values = return all
0, # Score filter = return all
0, # Distance filter = return all
mask_len) # Distance between the optimal and suboptimal alignment
# Transform the C structure into a python object if score and length match the requirements
score = c_result.contents.score
match_len = c_result.contents.query_end - c_result.contents.query_begin + 1
if score >= min_score and match_len >= min_len:
py_result = PyAlignRes(c_result, query_len, self.report_secondary, self.report_cigar)
else:
py_result = None
# Free reserved space by ssw.init and ssw_init methods.
self._init_destroy(profile)
self._align_destroy(c_result)
# Return the object
return py_result
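# A minimal usage sketch (hedged; sequences and thresholds are illustrative):
#
#   aligner = Aligner("ACGTACGTACGT", report_cigar=True)
#   res = aligner.align("CGTACG", min_score=0, min_len=0)
#   if res is not None:
#       print(res.score, res.ref_begin, res.ref_end, res.cigar_string)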
#~~~~~~~PRIVATE METHODS~~~~~~~#
def _DNA_to_int_mat (self, seq, len_seq):
"""
Cast a python DNA string into a Ctype int8 matrix
"""
# Declare the matrix
query_num_decl = c_int8 * len_seq
query_num = query_num_decl()
# for each letter in ATCGN, transform into an integer using self.base_to_int
for i in range(len_seq):
try:
value = self.base_to_int[seq[i]]
# if the base is not one of the canonical DNA bases, assign 4 as for N
except KeyError:
value = 4
finally:
query_num[i] = value
return query_num
def _init_destroy(self, profile):
"""
Free the space allocated for the matrix used by init
"""
self.init_destroy(profile)
def _align_destroy(self, align):
"""
Free the space allocated for the matrix used by align
"""
self.align_destroy(align)
# Load the ssw library using ctypes
#glibssw = cdll.LoadLibrary(os.path.join(os.path.dirname(__file__), 'libssw.so'))
# libssw = cdll.LoadLibrary('libssw.so')
# Init and setup the functions pointer to map the one specified in the SSW lib
# cigar_int_to_len function
cigar_int_to_len = libssw.cigar_int_to_len
cigar_int_to_len.restype = c_int32
cigar_int_to_len.argtypes = [c_int32]
# cigar_int_to_op function
cigar_int_to_op = libssw.cigar_int_to_op
cigar_int_to_op.restype = c_char
cigar_int_to_op.argtypes = [c_int32]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
class PyAlignRes(object):
"""
@class PyAlignRes
@brief Extract and verify result from a CAlignRes structure. A comprehensive python
object is created according to user requirements (+- cigar string and secondary alignment)
"""
def __repr__(self):
msg = self.__str__()
msg += "OPTIMAL MATCH\n"
msg += "Score {}\n".format(self.score)
msg += "Reference begin {}\n".format(self.ref_begin)
msg += "Reference end {}\n".format(self.ref_end)
msg += "Query begin {}\n".format(self.query_begin)
msg += "Query end {}\n".format(self.query_end)
if self.cigar_string:
msg += "Cigar_string {}\n".format(self.cigar_string)
if self.score2:
msg += "SUB-OPTIMAL MATCH\n"
msg += "Score 2 {}\n".format(self.score2)
msg += "Ref_end2 {}\n".format(self.ref_end2)
return msg
def __str__(self):
return "\n<Instance of {} from {} >\n".format(self.__class__.__name__, self.__module__)
def __init__ (self, Res, query_len, report_secondary=False, report_cigar=False):
"""
Parse CAlignRes structure and copy its values in object variables
@param Res A CAlignRes structure
@param query_len length of the query sequence
@param report_secondary Report the 2nd best alignment if true
@param report_cigar Report cigar string if true
"""
# Parse value in the C type structure pointer
# Minimal mandatory parameters
self.score = Res.contents.score
self.ref_begin = Res.contents.ref_begin
self.ref_end = Res.contents.ref_end
self.query_begin = Res.contents.query_begin
self.query_end = Res.contents.query_end
# Information for sub-optimal match, if required and available
score2 = Res.contents.score2
if report_secondary and score2 != 0:
self.score2 = score2
self.ref_end2 = Res.contents.ref_end2
else:
self.score2 = None
self.ref_end2 = None
# CIGAR string information, if required and available
cigar_len = Res.contents.cigarLen
if report_cigar and cigar_len > 0:
self.cigar_string = self._cigar_string (Res.contents.cigar, cigar_len, query_len)
else:
self.cigar_string = None
def _cigar_string(self, cigar, cigar_len, query_len):
"""
Convert cigar and cigarLen into a human-readable CIGAR string as in SAM files
"""
# List of chunks for iteratively building the cigar string
cigar_string = []
# If the query match do not start at its first base
# = introduce a softclip at the begining
if self.query_begin > 0:
op_len = self.query_begin
op_char = "S"
cigar_string.append('{}{}'.format(op_len, op_char))
# Iterate over the cigar (pointer to a vector of int)
for i in range(cigar_len):
op_len = cigar_int_to_len(cigar[i])
op_char = cigar_int_to_op(cigar[i]).decode("utf-8")
cigar_string.append('{}{}'.format(op_len, op_char))
# If the number of aligned bases is smaller than the overall query length
# = introduce a softclip at the end
end_len = query_len - self.query_end - 1
if end_len != 0:
op_len = end_len
op_char = "S"
cigar_string.append('{}{}'.format(op_len, op_char))
return "".join(cigar_string)
| svviz/svviz | src/ssw/ssw_wrap.py | Python | mit | 15,367 | 0.00898 |
# -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
calico.etcddriver.driver
~~~~~~~~~~~~~~~~~~~~~~~~
Contains the logic for the etcd driver process, which monitors etcd for
changes and sends them to Felix over a unix socket.
The driver is responsible for
* loading the configuration from etcd at start-of-day (Felix needs this before
it can receive further updates)
* handling the initial load of data from etcd
* watching etcd for changes
* doing the above in parallel and merging the result into a consistent
sequence of events
* resolving directory deletions so that if a directory is deleted, it tells
Felix about all the individual keys that are deleted.
"""
import logging
import random
import socket
from Queue import Queue, Empty
from functools import partial
from ijson import JSONError
from calico.stats import AggregateStat, RateStat
try:
# simplejson is a faster drop-in replacement.
import simplejson as json
except ImportError:
import json
from threading import Thread, Event, Lock
import time
from urlparse import urlparse
from ijson.backends import yajl2 as ijson
from urllib3 import HTTPConnectionPool, HTTPSConnectionPool
import urllib3.exceptions
import httplib
from calico.etcddriver.protocol import (
MessageReader, MSG_TYPE_INIT, MSG_TYPE_CONFIG, MSG_TYPE_RESYNC,
MSG_KEY_ETCD_URLS, MSG_KEY_HOSTNAME, MSG_KEY_LOG_FILE, MSG_KEY_SEV_FILE,
MSG_KEY_SEV_SYSLOG, MSG_KEY_SEV_SCREEN, STATUS_WAIT_FOR_READY,
STATUS_RESYNC, STATUS_IN_SYNC, MSG_TYPE_CONFIG_LOADED,
MSG_KEY_GLOBAL_CONFIG, MSG_KEY_HOST_CONFIG, MSG_TYPE_UPDATE, MSG_KEY_KEY,
MSG_KEY_VALUE, MessageWriter, MSG_TYPE_STATUS, MSG_KEY_STATUS,
MSG_KEY_KEY_FILE, MSG_KEY_CERT_FILE, MSG_KEY_CA_FILE, WriteFailed,
SocketClosed)
from calico.etcdutils import ACTION_MAPPING
from calico.common import complete_logging
from calico.monotonic import monotonic_time
from calico.datamodel_v1 import (
READY_KEY, CONFIG_DIR, dir_for_per_host_config, VERSION_DIR,
ROOT_DIR)
from calico.etcddriver.hwm import HighWaterTracker
_log = logging.getLogger(__name__)
# Bound on the size of the queue between watcher and resync thread. In
# general, Felix and the resync thread process much more quickly than the
# watcher can read from etcd so this is defensive.
WATCHER_QUEUE_SIZE = 20000
# Threshold in seconds for detecting watcher tight looping on exception.
REQ_TIGHT_LOOP_THRESH = 0.2
# How often to log stats.
STATS_LOG_INTERVAL = 30
class EtcdDriver(object):
def __init__(self, felix_sck):
# Wrap the socket with our protocol reader/writer objects.
self._msg_reader = MessageReader(felix_sck)
self._msg_writer = MessageWriter(felix_sck)
# Global stop event used to signal to all threads to stop.
self._stop_event = Event()
# Threads to own the connection from/to Felix. The resync thread
# is responsible for doing resyncs and merging updates from the
# watcher thread (which it manages).
self._reader_thread = Thread(target=self._read_from_socket,
name="reader-thread")
self._reader_thread.daemon = True
self._resync_thread = Thread(target=self._resync_and_merge,
name="resync-thread")
self._resync_thread.daemon = True
self._watcher_thread = None # Created on demand
self._watcher_stop_event = None
self._watcher_start_index = None
# High-water mark cache. Owned by resync thread.
self._hwms = HighWaterTracker()
self._first_resync = True
self._resync_http_pool = None
self._cluster_id = None
# Resync thread stats.
self._snap_keys_processed = RateStat("snapshot keys processed")
self._event_keys_processed = RateStat("event keys processed")
self._felix_updates_sent = RateStat("felix updates sent")
self._resync_stats = [
self._snap_keys_processed,
self._event_keys_processed,
self._felix_updates_sent,
]
self._last_resync_stat_log_time = monotonic_time()
# Set by the reader thread once the init message has been received
# from Felix.
self._init_received = Event()
# Initial config, received in the init message.
self._etcd_base_url = None
self._etcd_other_urls = []
# Lock for the etcd url fields: this is the only lock, and no thread
# ever recursively acquires it, so it cannot deadlock. Must be locked
# to access the _etcd_base_url and _etcd_other_urls fields (after they
# are initialized).
self._etcd_url_lock = Lock()
self._hostname = None
# Set by the reader thread once the logging config has been received
# from Felix. Triggers the first resync.
self._config_received = Event()
# Flag to request a resync. Set by the reader thread, polled by the
# resync and merge thread.
self._resync_requested = False
def start(self):
"""Starts the driver's reader and resync threads."""
self._reader_thread.start()
self._resync_thread.start()
def join(self, timeout=None):
"""
Blocks until the driver stops or until timeout expires.
:returns True if the driver stopped, False on timeout.
"""
self._stop_event.wait(timeout=timeout)
stopped = self._stop_event.is_set()
if stopped:
self._resync_thread.join(timeout=timeout)
resync_alive = self._resync_thread.is_alive()
stopped &= not resync_alive
_log.debug("Resync thread alive: %s", resync_alive)
self._reader_thread.join(timeout=timeout)
reader_alive = self._reader_thread.is_alive()
stopped &= not reader_alive
_log.debug("Reader thread alive: %s", reader_alive)
try:
self._watcher_thread.join(timeout=timeout)
watcher_alive = self._watcher_thread.is_alive()
stopped &= not watcher_alive
_log.debug("Watcher thread alive: %s", watcher_alive)
except AttributeError:
pass
return stopped
def stop(self):
_log.info("Stopping driver")
self._stop_event.set()
def _read_from_socket(self):
"""
Thread: reader thread. Reads messages from Felix and fans them out.
"""
try:
while not self._stop_event.is_set():
for msg_type, msg in self._msg_reader.new_messages(timeout=1):
if msg_type == MSG_TYPE_INIT:
# Init message, received at start of day.
self._handle_init(msg)
elif msg_type == MSG_TYPE_CONFIG:
# Config message, expected after we send the raw
# config to Felix.
self._handle_config(msg)
elif msg_type == MSG_TYPE_RESYNC:
# Request to do a resync.
self._handle_resync(msg)
else:
_log.error("Unexpected message from Felix: %s", msg)
raise RuntimeError("Unexpected message from Felix")
except SocketClosed:
_log.warning("Felix closed its socket. The driver must exit.")
except DriverShutdown:
_log.warning("Reader thread stopping due to driver shutdown.")
finally:
_log.info("Reader thread shutting down, triggering stop event")
self.stop()
def _handle_init(self, msg):
"""
Handle init message from Felix.
Called from the reader thread.
"""
# OK to dump the msg, it's a one-off.
_log.info("Got init message from Felix %s", msg)
etcd_urls = msg[MSG_KEY_ETCD_URLS]
# Shuffle the etcd URLs so that each client connects to different
# cluster nodes.
random.shuffle(etcd_urls)
self._etcd_base_url = etcd_urls[0].rstrip("/")
self._etcd_url_parts = urlparse(self._etcd_base_url)
self._etcd_other_urls = etcd_urls[1:]
self._etcd_key_file = msg[MSG_KEY_KEY_FILE]
self._etcd_cert_file = msg[MSG_KEY_CERT_FILE]
self._etcd_ca_file = msg[MSG_KEY_CA_FILE]
self._hostname = msg[MSG_KEY_HOSTNAME]
self._init_received.set()
def _handle_config(self, msg):
"""
Handle config message from Felix.
Called from the reader thread.
"""
complete_logging(msg[MSG_KEY_LOG_FILE],
file_level=msg[MSG_KEY_SEV_FILE],
syslog_level=msg[MSG_KEY_SEV_SYSLOG],
stream_level=msg[MSG_KEY_SEV_SCREEN],
gevent_in_use=False)
self._config_received.set()
_log.info("Received config from Felix: %s", msg)
def _handle_resync(self, msg):
_log.info("Got resync message from felix: %s", msg)
self._resync_requested = True
def _resync_and_merge(self):
"""
Thread: Resync-and-merge thread. Loads the etcd snapshot, merges
it with the events going on concurrently and sends the event stream
to Felix.
"""
_log.info("Resync thread started, waiting for config to be loaded...")
self._init_received.wait()
_log.info("Config loaded; continuing.")
while not self._stop_event.is_set():
_log.info("Stop event not set, starting new resync...")
self._reset_resync_thread_stats()
loop_start = monotonic_time()
try:
# Start with a fresh HTTP pool just in case it got into a bad
# state.
self._resync_http_pool = self.get_etcd_connection()
# Before we get to the snapshot, Felix needs the configuration.
self._send_status(STATUS_WAIT_FOR_READY)
self._wait_for_ready()
self._preload_config()
# Wait for config if we have not already received it.
self._wait_for_config()
# Kick off the snapshot request as far as the headers.
self._send_status(STATUS_RESYNC)
resp, snapshot_index = self._start_snapshot_request()
# Before reading from the snapshot, start the watcher thread.
self._ensure_watcher_running(snapshot_index)
# Incrementally process the snapshot, merging in events from
# the queue.
self._process_snapshot_and_events(resp, snapshot_index)
# We're now in-sync. Tell Felix.
self._send_status(STATUS_IN_SYNC)
# Then switch to processing events only.
self._process_events_only()
except WriteFailed:
_log.exception("Write to Felix failed; shutting down.")
self.stop()
except WatcherDied:
_log.warning("Watcher died; resyncing.")
self._stop_watcher() # Clean up the event
except (urllib3.exceptions.HTTPError,
httplib.HTTPException,
socket.error) as e:
_log.error("Request to etcd failed: %r; resyncing.", e)
self._stop_watcher()
self._rotate_etcd_url() # Try a different etcd URL if possible
if monotonic_time() - loop_start < 1:
_log.warning("May be tight looping, sleeping...")
time.sleep(1)
except ResyncRequested:
_log.info("Resync requested, looping to start a new resync. "
"Leaving watcher running if possible.")
except ResyncRequired:
_log.warn("Detected inconsistency requiring a full resync, "
"stopping watcher")
self._stop_watcher()
except DriverShutdown:
_log.info("Driver shut down.")
return
except:
_log.exception("Unexpected exception; shutting down.")
self.stop()
raise
finally:
self._first_resync = False
self._resync_requested = False
_log.info("Stop event set, exiting resync loop.")
def _rotate_etcd_url(self):
"""
Rotate the in use etcd URL if more than one is configured,
"""
if len(self._etcd_other_urls) > 0:
with self._etcd_url_lock:
self._etcd_other_urls.append(self._etcd_base_url)
self._etcd_base_url = self._etcd_other_urls.pop(0).rstrip("/")
self._etcd_url_parts = urlparse(self._etcd_base_url)
_log.info("Rotated etcd URL to: %s", self._etcd_base_url)
def _wait_for_config(self):
while not self._config_received.is_set():
_log.info("Waiting for Felix to process the config...")
self._check_stop_event()
self._config_received.wait(1)
_log.info("Felix sent us the config, continuing.")
def _wait_for_ready(self):
"""
Waits for the global Ready flag to be set. We don't load the first
snapshot until that flag is set.
"""
ready = False
while not ready and not self._stop_event.is_set():
# Read failure here will be handled by outer loop.
resp = self._etcd_request(self._resync_http_pool, READY_KEY)
try:
etcd_resp = json.loads(resp.data)
ready = etcd_resp["node"]["value"] == "true"
mod_idx = etcd_resp["node"]["modifiedIndex"]
except (TypeError, ValueError, KeyError) as e:
_log.warning("Failed to load Ready flag from etcd: %r", e)
time.sleep(1)
else:
_log.info("Ready flag set to %s", etcd_resp["node"]["value"])
self._hwms.update_hwm(READY_KEY, mod_idx)
self._check_stop_event()
def _check_stop_event(self):
if self._stop_event.is_set():
_log.info("Told to stop, raising DriverShutdown.")
raise DriverShutdown()
def _preload_config(self):
"""
Loads the config for Felix from etcd and sends it to Felix as a
dedicated message.
"""
_log.info("Pre-loading config.")
global_config = self._load_config(CONFIG_DIR)
host_config_dir = dir_for_per_host_config(self._hostname)
host_config = self._load_config(host_config_dir)
self._msg_writer.send_message(
MSG_TYPE_CONFIG_LOADED,
{
MSG_KEY_GLOBAL_CONFIG: global_config,
MSG_KEY_HOST_CONFIG: host_config,
}
)
_log.info("Sent config message to Felix.")
def _load_config(self, config_dir):
"""
Loads all the config keys from the given etcd directory.
"""
# Read failure here will be handled by outer loop.
resp = self._etcd_request(self._resync_http_pool,
config_dir, recursive=True)
try:
etcd_resp = json.loads(resp.data)
if etcd_resp.get("errorCode") == 100: # Not found
_log.info("No config found at %s", config_dir)
return {}
config_nodes = etcd_resp["node"]["nodes"]
config = {}
for node in config_nodes:
if "key" in node and "value" in node:
config[node["key"].split("/")[-1]] = node["value"]
except (TypeError, ValueError, KeyError) as e:
_log.warning("Failed to load config from etcd: %r,"
"data %r", e, resp.data)
raise ResyncRequired(e)
return config
def _start_snapshot_request(self):
"""
Issues the HTTP request to etcd to load the snapshot but only
loads it as far as the headers.
:return: tuple of response and snapshot's etcd index.
:raises HTTPException
:raises HTTPError
:raises socket.error
:raises DriverShutdown if the etcd cluster ID changes.
"""
_log.info("Loading snapshot headers...")
resp = self._etcd_request(self._resync_http_pool,
VERSION_DIR,
recursive=True,
timeout=120,
preload_content=False)
snapshot_index = int(resp.getheader("x-etcd-index", 1))
if not self._cluster_id:
_log.error("Snapshot response did not contain cluster ID, "
"resyncing to avoid inconsistency")
raise ResyncRequired()
_log.info("Got snapshot headers, snapshot index is %s; starting "
"watcher...", snapshot_index)
return resp, snapshot_index
def _etcd_request(self, http_pool, key, timeout=5, wait_index=None,
recursive=False, preload_content=None):
"""
Make a request to etcd on the given HTTP pool for the given key
and check the cluster ID.
:param timeout: Read timeout for the request.
:param int wait_index: If set, issues a watch request.
:param recursive: True to request a recursive GET or watch.
:return: The urllib3 Response object.
"""
resp = self._issue_etcd_request(
http_pool, key, timeout, wait_index,
recursive, preload_content
)
self._check_cluster_id(resp)
return resp
def _issue_etcd_request(self, http_pool, key, timeout=5, wait_index=None,
recursive=False, preload_content=None):
fields = {}
if recursive:
_log.debug("Adding recursive=true to request")
fields["recursive"] = "true"
if wait_index is not None:
_log.debug("Request is a watch, adding wait* headers and forcing "
"preload_content to False")
fields["wait"] = "true"
fields["waitIndex"] = wait_index
preload_content = False
if preload_content is None:
preload_content = True
resp = http_pool.request(
"GET",
self._calculate_url(key),
fields=fields or None,
timeout=timeout,
preload_content=preload_content
)
return resp
def _check_cluster_id(self, resp):
"""
Checks the x-etcd-cluster-id header for changes since the last call.
On change, stops the driver and raises DriverShutdown.
:param resp: urllib3 Response object.
"""
cluster_id = resp.getheader("x-etcd-cluster-id")
if cluster_id:
if self._cluster_id:
if self._cluster_id != cluster_id:
_log.error("etcd cluster ID changed from %s to %s. "
"This invalidates our local state so Felix "
"must restart.", self._cluster_id, cluster_id)
self.stop()
raise DriverShutdown()
else:
_log.info("First successful read from etcd. Cluster ID: %s",
cluster_id)
self._cluster_id = cluster_id
else:
# Missing on certain error responses.
_log.warning("etcd response was missing cluster ID header, unable "
"to check cluster ID")
def _process_snapshot_and_events(self, etcd_response, snapshot_index):
"""
Processes the etcd snapshot response incrementally while, concurrently,
merging in updates from the watcher thread.
:param etcd_response: file-like object representing the etcd response.
:param snapshot_index: the etcd index of the response.
"""
self._hwms.start_tracking_deletions()
parse_snapshot(etcd_response,
callback=partial(self._handle_etcd_node,
snapshot_index=snapshot_index))
# Save memory by throwing away the deletion tracking metadata.
self._hwms.stop_tracking_deletions()
# Scan for deletions that happened before the snapshot. We effectively
# mark all the values seen in the current snapshot above and then this
# sweeps the ones we didn't touch.
self._scan_for_deletions(snapshot_index)
def _handle_etcd_node(self, snap_mod, snap_key, snap_value,
snapshot_index=None):
"""
Callback for use with parse_snapshot. Called once for each key/value
pair that is found.
Handles the key/value itself and then checks for work from the
watcher.
:param snap_mod: Modified index of the key.
:param snap_key: The key itself.
:param snap_value: The value attached to the key.
:param snapshot_index: Index of the snapshot as a whole.
"""
assert snapshot_index is not None
self._snap_keys_processed.store_occurence()
old_hwm = self._hwms.update_hwm(snap_key, snapshot_index)
if snap_mod > old_hwm:
# This specific key's HWM is newer than the previous
# version we've seen, send an update.
self._on_key_updated(snap_key, snap_value)
# After we process an update from the snapshot, process several
# updates from the watcher queue (if there are any). We limit the
# number to ensure that we always finish the snapshot eventually.
# The limit isn't too sensitive but values much lower than 100 seemed
# to starve the watcher in testing.
for _ in xrange(100):
if not self._watcher_queue or self._watcher_queue.empty():
# Don't block on the watcher if there's nothing to do.
break
try:
self._handle_next_watcher_event(resync_in_progress=True)
except WatcherDied:
# Continue processing to ensure that we make
# progress.
_log.warning("Watcher thread died, continuing "
"with snapshot")
break
self._check_stop_event()
self._maybe_log_resync_thread_stats()
def _process_events_only(self):
"""
Loops processing the event stream from the watcher thread and feeding
it to Felix.
:raises WatcherDied:
:raises FelixWriteFailed:
:raises DriverShutdown:
"""
_log.info("In sync, now processing events only...")
while not self._stop_event.is_set():
self._handle_next_watcher_event(resync_in_progress=False)
self._msg_writer.flush()
self._check_stop_event()
def _scan_for_deletions(self, snapshot_index):
"""
Scans the high-water mark cache for keys that haven't been seen since
before the snapshot_index and deletes them.
"""
if self._first_resync:
_log.info("First resync: skipping deletion scan")
return
# Find any keys that were deleted while we were unable to
# keep up with etcd.
_log.info("Scanning for deletions")
deleted_keys = self._hwms.remove_old_keys(snapshot_index)
for ev_key in deleted_keys:
# We didn't see the value during the snapshot or via
# the event queue. It must have been deleted.
self._on_key_updated(ev_key, None)
_log.info("Found %d deleted keys", len(deleted_keys))
def _handle_next_watcher_event(self, resync_in_progress):
"""
Waits for an event on the watcher queue and sends it to Felix.
:raises DriverShutdown:
:raises WatcherDied:
:raises FelixWriteFailed:
:raises ResyncRequested:
"""
if self._watcher_queue is None:
raise WatcherDied()
while not self._stop_event.is_set():
# To make sure we always make progress, only trigger a new resync
# if we're not in the middle of one.
if not resync_in_progress and self._resync_requested:
_log.info("Resync requested, triggering one.")
raise ResyncRequested()
self._maybe_log_resync_thread_stats()
try:
event = self._next_watcher_event()
except Empty:
pass
else:
break
else:
raise DriverShutdown()
if event is None:
self._watcher_queue = None
raise WatcherDied()
self._event_keys_processed.store_occurence()
ev_mod, ev_key, ev_val = event
if ev_val is not None:
# Normal update.
self._hwms.update_hwm(ev_key, ev_mod)
self._on_key_updated(ev_key, ev_val)
else:
# Deletion. In case this is a directory deletion, we search the
# trie for anything that is under the deleted key and send
# individual deletions to Felix for each one.
deleted_keys = self._hwms.store_deletion(ev_key,
ev_mod)
for child_key in deleted_keys:
self._on_key_updated(child_key, None)
def _next_watcher_event(self):
"""Get the next event from the watcher queue
This is mostly here to allow it to be hooked in the UTs.
:raises Empty if there is no event within the timeout."""
return self._watcher_queue.get(timeout=1)
def _ensure_watcher_running(self, snapshot_index):
"""
Starts a new watcher from the given snapshot index, if needed.
"""
if (self._watcher_thread is not None and
self._watcher_thread.is_alive() and
self._watcher_stop_event is not None and
not self._watcher_stop_event.is_set() and
self._watcher_queue is not None and
self._watcher_start_index <= snapshot_index):
_log.info("Watcher is still alive and started from a valid index, "
"leaving it running")
return
self._watcher_start_index = snapshot_index
self._watcher_queue = Queue(maxsize=WATCHER_QUEUE_SIZE)
self._watcher_stop_event = Event()
# Note: we pass the queue and event in as arguments so that the thread
# will always access the current queue and event. If it used self.xyz
# to access them then an old thread that is shutting down could access
# a new queue.
self._watcher_thread = Thread(target=self.watch_etcd,
args=(snapshot_index + 1,
self._watcher_queue,
self._watcher_stop_event),
name="watcher-thread")
self._watcher_thread.daemon = True
self._watcher_thread.start()
def _stop_watcher(self):
"""
If it's running, signals the watcher thread to stop.
"""
if self._watcher_stop_event is not None:
_log.info("Watcher was running before, stopping it")
self._watcher_stop_event.set()
self._watcher_stop_event = None
def get_etcd_connection(self):
with self._etcd_url_lock:
port = self._etcd_url_parts.port or 2379
if self._etcd_url_parts.scheme == "https":
_log.debug("Getting new HTTPS connection to %s:%s",
self._etcd_url_parts.hostname, port)
pool = HTTPSConnectionPool(self._etcd_url_parts.hostname,
port,
key_file=self._etcd_key_file,
cert_file=self._etcd_cert_file,
ca_certs=self._etcd_ca_file,
maxsize=1)
else:
_log.debug("Getting new HTTP connection to %s:%s",
self._etcd_url_parts.hostname, port)
pool = HTTPConnectionPool(self._etcd_url_parts.hostname,
port,
maxsize=1)
return pool
def _on_key_updated(self, key, value):
"""
Called when we've worked out that a key has been updated/deleted.
Does any local processing and sends the update to Felix.
:param str key: The etcd key that has changed.
:param str|NoneType value: the new value of the key (None indicates
deletion).
"""
if key == READY_KEY and value != "true":
# Special case: the global Ready flag has been unset, trigger a
# resync, which will poll the Ready flag until it is set to true
# again.
_log.warning("Ready key no longer set to true, triggering resync.")
raise ResyncRequired()
self._msg_writer.send_message(
MSG_TYPE_UPDATE,
{
MSG_KEY_KEY: key,
MSG_KEY_VALUE: value,
},
flush=False
)
self._felix_updates_sent.store_occurence()
def _send_status(self, status):
"""
Queues the given status to felix as a status message.
"""
_log.info("Sending status to Felix: %s", status)
self._msg_writer.send_message(
MSG_TYPE_STATUS,
{
MSG_KEY_STATUS: status,
}
)
def _calculate_url(self, etcd_key):
with self._etcd_url_lock:
url = self._etcd_base_url + "/v2/keys/" + etcd_key.strip("/")
return url
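# For illustration (hedged): with an etcd base URL of "http://127.0.0.1:4001",
# a key such as "/calico/v1/Ready" becomes
# "http://127.0.0.1:4001/v2/keys/calico/v1/Ready".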
def _reset_resync_thread_stats(self):
for stat in self._resync_stats:
stat.reset()
self._last_resync_stat_log_time = monotonic_time()
def _maybe_log_resync_thread_stats(self):
now = monotonic_time()
if now - self._last_resync_stat_log_time > STATS_LOG_INTERVAL:
for stat in self._resync_stats:
_log.info("STAT: Resync thread %s", stat)
stat.reset()
self._last_resync_stat_log_time = now
def watch_etcd(self, next_index, event_queue, stop_event):
"""
Thread: etcd watcher thread. Watches etcd for changes and
sends them over the queue to the resync thread, which owns
the socket to Felix.
Dies if it receives an error from etcd.
Note: it is important that we pass the index, queue and event
as parameters to ensure that each watcher thread only touches
the versions of those values that were created for it as
opposed to a later-created watcher thread.
:param int next_index: The etcd index to start watching from.
:param Queue event_queue: Queue of updates back to the resync thread.
:param Event stop_event: Event used to stop this thread when it is no
longer needed.
"""
_log.info("Watcher thread started with next index %s", next_index)
last_log_time = monotonic_time()
req_end_time = None
non_req_time_stat = AggregateStat("processing time", "ms")
etcd_response_time = None
etcd_response_time_stat = AggregateStat("etcd response time", "ms")
stats = [etcd_response_time_stat,
non_req_time_stat]
http = None
try:
while not self._stop_event.is_set() and not stop_event.is_set():
if not http:
_log.info("No HTTP pool, creating one...")
http = self.get_etcd_connection()
req_start_time = monotonic_time()
if req_end_time is not None:
# Calculate the time since the end of the previous request,
# i.e. the time we spent processing the response. Note:
# start and end are flipped because we just read the start
# time but we have the end time from the last loop.
non_req_time = req_start_time - req_end_time
non_req_time_stat.store_reading(non_req_time * 1000)
_log.debug("Waiting on etcd index %s", next_index)
try:
try:
resp = self._etcd_request(http,
VERSION_DIR,
recursive=True,
wait_index=next_index,
timeout=90)
finally:
# Make sure the time is available to both exception and
# mainline code paths.
req_end_time = monotonic_time()
etcd_response_time = req_end_time - req_start_time
if resp.status != 200:
_log.warning("etcd watch returned bad HTTP status to"
"poll on index %s: %s", next_index,
resp.status)
self._check_cluster_id(resp)
resp_body = resp.data # Force read inside try block.
except urllib3.exceptions.ReadTimeoutError:
# 100% expected when there are no events.
_log.debug("Watch read timed out, restarting watch at "
"index %s", next_index)
# Workaround urllib3 bug #718. After a ReadTimeout, the
# connection is incorrectly recycled.
http = None
continue
except (urllib3.exceptions.HTTPError,
httplib.HTTPException,
socket.error) as e:
# Not so expected but still possible to recover: etcd
# being restarted, for example.
assert etcd_response_time is not None
if etcd_response_time < REQ_TIGHT_LOOP_THRESH:
# Failed fast, make sure we don't tight loop.
delay = REQ_TIGHT_LOOP_THRESH - etcd_response_time
_log.warning("Connection to etcd failed with %r, "
"restarting watch at index %s after "
"delay %.3f", e, next_index, delay)
time.sleep(delay)
else:
_log.warning("Connection to etcd failed with %r, "
"restarting watch at index %s "
"immediately", e, next_index)
# If available, connect to a different etcd URL in case
# only the previous one has failed.
self._rotate_etcd_url()
# Workaround urllib3 bug #718. After a failed request, the
# connection may be incorrectly recycled.
http = None
continue
# If we get to this point, we've got an etcd response to
# process; try to parse it.
try:
etcd_resp = json.loads(resp_body)
if "errorCode" in etcd_resp:
_log.error("Error from etcd for index %s: %s; "
"triggering a resync.",
next_index, etcd_resp)
break
node = etcd_resp["node"]
key = node["key"]
action = ACTION_MAPPING[etcd_resp["action"]]
is_dir = node.get("dir", False)
value = node.get("value")
dir_creation = False
if is_dir:
if action == "delete":
if key.rstrip("/") in (VERSION_DIR, ROOT_DIR):
# Special case: if the whole keyspace is
# deleted, that implies the ready flag is gone
# too. Break out of the loop to trigger a
# resync. This avoids queuing up a bunch of
# events that would be discarded by the
# resync thread.
_log.warning("Whole %s deleted, resyncing",
VERSION_DIR)
break
else:
# Just ignore sets to directories, we only track
# leaves.
_log.debug("Skipping non-delete to dir %s", key)
dir_creation = True
modified_index = node["modifiedIndex"]
except (KeyError, TypeError, ValueError):
_log.exception("Unexpected format for etcd response to"
"index %s: %r; triggering a resync.",
next_index, resp_body)
break
else:
# We successfully parsed the response, hand it off to the
# resync thread. Now we know that we got a response,
# we record that in the stat.
etcd_response_time_stat.store_reading(etcd_response_time *
1000)
if not dir_creation:
# The resync thread doesn't need to know about
# directory creations so we skip them. (It does need
# to know about deletions in order to clean up
# sub-keys.)
event_queue.put((modified_index, key, value))
next_index = modified_index + 1
# Opportunistically log stats.
now = monotonic_time()
if now - last_log_time > STATS_LOG_INTERVAL:
for stat in stats:
_log.info("STAT: Watcher %s", stat)
stat.reset()
_log.info("STAT: Watcher queue length: %s",
event_queue.qsize())
last_log_time = now
except DriverShutdown:
_log.warning("Watcher thread stopping due to driver shutdown.")
except:
_log.exception("Exception finishing watcher thread.")
raise
finally:
# Signal to the resync thread that we've exited.
event_queue.put(None)
# Make sure we get some stats output from the watcher.
for stat in stats:
_log.info("STAT: Final watcher %s", stat)
_log.info("Watcher thread finished. Signalled to resync thread. "
"Was at index %s. Queue length is %s.", next_index,
event_queue.qsize())
def parse_snapshot(resp, callback):
"""
Iteratively parses the response to the etcd snapshot, calling the
callback with each key/value pair found.
:raises ResyncRequired if the snapshot contains an error response.
"""
_log.debug("Parsing snapshot response...")
if resp.status != 200:
raise ResyncRequired("Read from etcd failed. HTTP status code %s",
resp.status)
parser = ijson.parse(resp) # urllib3 response is file-like.
try:
prefix, event, value = next(parser)
_log.debug("Read first token from response %s, %s, %s", prefix, event,
value)
if event == "start_map":
# As expected, response is a map.
_parse_map(parser, callback)
else:
_log.error("Response from etcd did non contain a JSON map.")
raise ResyncRequired("Bad response from etcd")
except JSONError:
_log.exception("Response from etcd containers bad JSON.")
raise ResyncRequired("Bad JSON from etcd")
def _parse_map(parser, callback):
"""
Searches the stream of JSON tokens for key/value pairs.
Calls itself recursively to handle subdirectories.
:param parser: iterator, returning JSON parse event tuples.
:param callback: callback to call when a key/value pair is found.
"""
# Expect a sequence of keys and values terminated by an "end_map" event.
mod_index = None
node_key = None
node_value = None
while True:
prefix, event, value = next(parser)
_log.debug("Parsing %s, %s, %s", prefix, event, value)
if event == "map_key":
map_key = value
prefix, event, value = next(parser)
if map_key == "modifiedIndex":
mod_index = value
elif map_key == "key":
node_key = value
elif map_key == "value":
node_value = value
elif map_key == "errorCode":
raise ResyncRequired("Error from etcd, etcd error code %s",
value)
elif map_key == "nodes":
while True:
prefix, event, value = next(parser)
if event == "start_map":
_parse_map(parser, callback)
elif event == "end_array":
break
else:
raise ValueError("Unexpected: %s" % event)
else:
assert event == "end_map", ("Unexpected JSON event %s %s %s" %
(prefix, event, value))
if (node_key is not None and
node_value is not None and
mod_index is not None):
callback(mod_index, node_key, node_value)
break
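# For reference, a hedged sketch of the etcd v2 payload shape these parsers walk:
#
#   {"action": "get",
#    "node": {"key": "/calico/v1", "dir": true,
#             "nodes": [{"key": "/calico/v1/Ready", "value": "true",
#                        "modifiedIndex": 10}, ...]}}
#
# _parse_map() recurses into each "nodes" array and invokes the callback once per
# leaf carrying "key", "value" and "modifiedIndex"; an "errorCode" field at any
# level aborts the parse with ResyncRequired.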
class WatcherDied(Exception):
pass
class DriverShutdown(Exception):
pass
class ResyncRequired(Exception):
pass
class ResyncRequested(Exception):
pass
| TrimBiggs/calico | calico/etcddriver/driver.py | Python | apache-2.0 | 43,284 | 0.000046 |
# -*- coding: utf-8 -*-
##################################################################
# Example of usage:
##################################################################
from __future__ import print_function, division, absolute_import
import sys
import time
import inspect
from numbers import Number
from datetime import datetime
from contextlib import contextmanager
from collections import OrderedDict, defaultdict
import numpy as np
try:
from tqdm import __version__ as tqdm_version
tqdm_version = int(tqdm_version.split(".")[0])
if tqdm_version < 4:
raise ImportError
from tqdm import tqdm as _tqdm
from tqdm._utils import _environ_cols_wrapper
except ImportError:
sys.stderr.write("[ERROR] Cannot import `tqdm` version >= 4.\n")
exit()
try:
import colorama
colorama.init()
from colorama import Fore as _Fore
_RED = _Fore.RED
_YELLOW = _Fore.YELLOW
_CYAN = _Fore.CYAN
_MAGENTA = _Fore.MAGENTA
_RESET = _Fore.RESET
except ImportError:
_RED, _YELLOW, _CYAN, _MAGENTA, _RESET = '', '', '', '', ''
_NUMBERS_CH = {
ord('0'): 0,
ord('1'): 1,
ord('2'): 2,
ord('3'): 3,
ord('4'): 4,
ord('5'): 5,
ord('6'): 6,
ord('7'): 7,
ord('8'): 8,
ord('9'): 9,
}
# ===========================================================================
# Helper
# ===========================================================================
_LAST_UPDATED_PROG = [None]
def add_notification(msg):
msg = _CYAN + "[%s]Notification:" % \
datetime.now().strftime('%d/%b-%H:%M:%S') + _RESET + msg + ''
_tqdm.write(msg)
class _FuncWrap(object):
def __init__(self, func, default_func=lambda x: x):
super(_FuncWrap, self).__init__()
if func is None:
func = default_func
assert inspect.isfunction(func), \
"Invalid function object of type: %s" % str(type(func))
self.func = func
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def __getstate__(self):
import dill
return dill.dumps(self.func)
def __setstate__(self, states):
import dill
self.func = dill.loads(states)
def _default_dict_list_creator():
return defaultdict(list)
# ===========================================================================
# Progress bar
# ===========================================================================
class Progbar(object):
""" Comprehensive review of any progress, this object is
fully pickle-able, and can be used for storing history,
summaries and report of the progress as well.
Parameters
----------
target: int
total number of steps expected
interval: float
Minimum progress display update interval, in seconds.
keep: bool
whether to keep the progress bar when the epoch finished
print_report: bool
print updated report along with the progress bar for each update
print_summary: bool
print epoch summary after each epoch
count_func: call-able
a function that takes the returned batch and returns an integer for updating
the progress.
report_func: call-able
a function that takes the returned batch and returns a collection of
(key, value) pairs for constructing the report.
progress_func : call-able
for post-processing the returned value into a number representing
how much to add to the progress
name: str or None
specific name for the progress bar
Examples
--------
>>> import numpy as np
>>> from odin.utils import Progbar
>>> x = list(range(10))
>>> for i in Progbar(target=x):
... pass
Note
----
Some special cases:
* any report key containing "confusionmatrix" will be printed out using
`print_confusion`
* any report key
"""
FP = sys.stderr
def __init__(self, target, interval=0.08, keep=False,
print_progress=True, print_report=True, print_summary=False,
count_func=None, report_func=None, progress_func=None,
name=None):
self.__pb = None # tqdm object
if isinstance(target, Number):
self.target = int(target)
self.__iter_obj = None
elif hasattr(target, '__len__'):
self.target = len(target)
self.__iter_obj = target
else:
raise ValueError("Unsupport for `target` type: %s" %
str(target.__class__))
self._seen_so_far = defaultdict(int) # mapping: epoch_idx -> seen_so_far
n = len(str(self.target))
self._counter_fmt = '(%%%dd/%%%dd)' % (n, n)
if name is None:
name = "Progress-%s" % datetime.utcnow()
self._name = name
# ====== flags ====== #
self.__interval = float(interval)
self.__keep = keep
self.print_progress = bool(print_progress)
self.print_report = bool(print_report)
self.print_summary = bool(print_summary)
# ====== for history ====== #
self._report = OrderedDict()
self._last_report = None
self._last_print_time = None
self._epoch_summarizer_func = {}
# ====== recording history ====== #
    # dictionary: {epoch_id: {key: [value1, value2, ...]}}
self._epoch_hist = defaultdict(_default_dict_list_creator)
self._epoch_summary = defaultdict(dict)
self._epoch_idx = 0
self._epoch_start_time = None
# ====== iter information ====== #
if self.__iter_obj is None and \
(count_func is not None or report_func is not None):
raise RuntimeError("`count_func` and `report_func` can only be used "
"when `target` is an iterator with specific length.")
#
self.__count_func = _FuncWrap(func=count_func,
default_func=lambda x: len(x))
self.__report_func = _FuncWrap(func=report_func,
default_func=lambda x: None)
# ====== check progress function ====== #
self._progress_func = _FuncWrap(func=progress_func,
default_func=lambda x: x)
# ====== other ====== #
self._labels = None # labels for printing the confusion matrix
# ==================== History management ==================== #
def __getitem__(self, key):
return self._report.__getitem__(key)
def __setitem__(self, key, val):
self._epoch_hist[self.epoch_idx][key].append(val)
return self._report.__setitem__(key, val)
def __delitem__(self, key):
return self._report.__delitem__(key)
def __iter__(self):
if self.__iter_obj is None:
raise RuntimeError("This Progbar cannot be iterated, "
"the set `target` must be iterable.")
for X in self.__iter_obj:
count = self.__count_func(X)
report = self.__report_func(X)
if report is not None:
for key, val in report:
self[key] = val
self.add(int(count))
yield X
del self.__iter_obj
del self.__count_func
del self.__report_func
# ==================== screen control ==================== #
@property
def epoch_idx(self):
return self._epoch_idx
@property
def nb_epoch(self):
return self._epoch_idx + 1
@property
def name(self):
return self._name
@property
def labels(self):
""" Special labels for printing the confusion matrix. """
return self._labels
@property
def history(self):
""" Return
    dictionary:
{epoch_id : {tensor_name0: [batch_return1, batch_return2, ...],
tensor_name1: [batch_return1, batch_return2, ...],
...},
1 : {tensor_name0: [batch_return1, batch_return2, ...],
tensor_name1: [batch_return1, batch_return2, ...],
...},
... }
Example
-------
>>> for epoch_id, results in task.history.items():
>>> for tensor_name, values in results.items():
>>> print(tensor_name, len(values))
"""
return self._epoch_hist
def get_report(self, epoch=-1, key=None):
if epoch < 0:
epoch = self.nb_epoch + epoch - 1
return self._epoch_hist[epoch] if key is None else \
self._epoch_hist[epoch][key]
def set_summarizer(self, key, fn):
""" Epoch summarizer is a function, searching in the
report for given key, and summarize all the stored values
of each epoch into a readable format
i.e. the input arguments is a list of stored epoch report,
the output is a string.
"""
if not hasattr(fn, '__call__'):
raise ValueError('`fn` must be call-able.')
key = str(key)
self._epoch_summarizer_func[key] = _FuncWrap(func=fn, default_func=None)
return self
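  # Illustrative usage (added sketch, not part of the original code): a
  # summarizer receives the list of values recorded for `key` during one
  # epoch and returns a string, e.g.
  #   prog = Progbar(target=100)
  #   prog.set_summarizer('loss', lambda values: 'mean:%.4f' % np.mean(values))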
def set_name(self, name):
self._name = str(name)
return self
def set_labels(self, labels):
if labels is not None:
self._labels = tuple([str(l) for l in labels])
return self
def _formatted_report(self, report_dict, margin='', inc_name=True):
""" Convert a dictionary of key -> value to well formatted string."""
if inc_name:
text = _MAGENTA + "\t%s" % self.name + _RESET + '\n'
else:
text = ""
report_dict = sorted(report_dict.items(), key=lambda x: str(x[0]))
for i, (key, value) in enumerate(report_dict):
# ====== check value of key and value ====== #
key = margin + str(key).replace('\n', ' ')
# ====== special cases ====== #
if "confusionmatrix" in key.lower() or \
"confusion_matrix" in key.lower() or \
"confusion-matrix" in key.lower() or \
"confusion matrix" in key.lower():
from odin.visual.bashplot import print_confusion
value = print_confusion(value, labels=self.labels,
inc_stats=True)
# just print out string representation
else:
value = str(value)
# ====== multiple lines or not ====== #
if '\n' in value:
text += _YELLOW + key + _RESET + ":\n"
for line in value.split('\n'):
text += margin + ' ' + line + '\n'
else:
text += _YELLOW + key + _RESET + ": " + value + "\n"
return text[:-1]
@property
def progress_bar(self):
if self.__pb is None:
it = range(self.target)
self.__pb = _tqdm(iterable=it,
desc="Epoch%s" % str(self.epoch_idx),
leave=self.__keep, total=self.target,
file=Progbar.FP, unit='obj',
mininterval=self.__interval, maxinterval=10,
miniters=0, position=0)
self.__pb.clear()
self._epoch_start_time = time.time()
return self.__pb
@property
def seen_so_far(self):
return self._seen_so_far[self.epoch_idx]
def _generate_epoch_summary(self, epoch, inc_name=False, inc_counter=True):
seen_so_far = self._seen_so_far[epoch]
if seen_so_far == 0:
return ''
# ====== include name ====== #
if inc_name:
s = _MAGENTA + "%s" % self.name + _RESET
else:
s = ""
# ====== create epoch summary ====== #
if seen_so_far == self.target: # epoch already finished
speed = (1. / self._epoch_summary[epoch]['__avg_time__'])
elapsed = self._epoch_summary[epoch]['__total_time__']
else: # epoch hasn't finished
avg_time = (time.time() - self._epoch_start_time) / self.seen_so_far \
if self.progress_bar.avg_time is None else \
self.progress_bar.avg_time
speed = 1. / avg_time
elapsed = time.time() - self._epoch_start_time
# ====== counter ====== #
if inc_counter:
frac = seen_so_far / self.target
counter_epoch = self._counter_fmt % (seen_so_far, self.target)
percentage = "%6.2f%%%s " % (frac * 100, counter_epoch)
else:
percentage = ''
s += _RED + " Epoch %d " % epoch + _RESET + "%.4f(s) %s%.4f(obj/s)" % \
(elapsed, percentage, speed)
# epoch summary
summary = dict(self._epoch_summary[epoch])
if len(summary) > 2:
summary.pop('__total_time__', None)
summary.pop('__avg_time__', None)
s += '\n' + self._formatted_report(summary, margin=' ', inc_name=False)
return s
@property
def summary(self):
s = _MAGENTA + "Report \"%s\" TotalEpoch: %d\n" % \
(self.name, self.nb_epoch) + _RESET
# ====== create summary for each epoch ====== #
s += '\n'.join([self._generate_epoch_summary(i)
for i in range(self.nb_epoch)])
return s[:-1]
# ==================== same actions ==================== #
def add_notification(self, msg):
msg = _CYAN + "[%s][%s]Notification:" % \
(datetime.now().strftime('%d/%b-%H:%M:%S'),
_MAGENTA + self.name + _CYAN) + _RESET + msg
_tqdm.write(msg)
return self
def _new_epoch(self):
if self.__pb is None:
return
# calculate number of offset lines from last report
if self._last_report is None:
nlines = 0
else:
nlines = len(self._last_report.split('\n'))
# ====== reset progress bar (tqdm) ====== #
if self.__keep: # keep the last progress on screen
self.__pb.moveto(nlines)
else: # clear everything
for i in range(nlines):
Progbar.FP.write('\r')
console_width = _environ_cols_wrapper()(Progbar.FP)
Progbar.FP.write(' ' * (79 if console_width is None else console_width))
Progbar.FP.write('\r') # place cursor back at the beginning of line
self.__pb.moveto(1)
self.__pb.moveto(-(nlines * 2))
self.__pb.close()
# ====== create epoch summary ====== #
for key, values in self._epoch_hist[self._epoch_idx].items():
values = [v for v in values]
# provided summarizer function
if key in self._epoch_summarizer_func:
self._epoch_summary[self._epoch_idx][key] = self._epoch_summarizer_func[key](values)
# very heuristic way to deal with sequence of numbers
elif isinstance(values[0], Number):
self._epoch_summary[self._epoch_idx][key] = np.mean(values)
# numpy array
elif isinstance(values[0], np.ndarray):
self._epoch_summary[self._epoch_idx][key] = sum(v for v in values)
# total epoch time
total_time = time.time() - self._epoch_start_time
self._epoch_summary[self._epoch_idx]['__total_time__'] = total_time
# average time for 1 object
avg_time = self.__pb.avg_time
if avg_time is None:
avg_time = total_time / self.target
self._epoch_summary[self._epoch_idx]['__avg_time__'] = avg_time
# reset all flags
self.__pb = None
self._last_report = None
self._last_print_time = None
self._epoch_start_time = None
self._epoch_idx += 1
return self
@contextmanager
def safe_progress(self):
""" This context manager will automatically call `pause` if the
progress unfinished, hence, it doesn't mesh up the screen. """
yield None
if 0 < self.seen_so_far < self.target:
self.pause()
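  # Illustrative usage (added sketch): run a block of code and automatically
  # `pause` the bar afterwards if it is unfinished, e.g.
  #   with prog.safe_progress():
  #     do_something_that_prints()  # hypothetical helper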
def pause(self):
""" Call `pause` if progress is running, hasn't finish, and
you want to print something else on the scree.
"""
# ====== clear the report ====== #
if self._last_report is not None:
nlines = len(self._last_report.split("\n"))
self.__pb.moveto(-nlines)
for i in range(nlines):
Progbar.FP.write('\r')
console_width = _environ_cols_wrapper()(Progbar.FP)
Progbar.FP.write(' ' * (79 if console_width is None else console_width))
Progbar.FP.write('\r') # place cursor back at the beginning of line
self.__pb.moveto(1)
else:
nlines = 0
# ====== clear the bar ====== #
if self.__pb is not None:
self.__pb.clear()
self.__pb.moveto(-nlines)
# ====== reset the last report ====== #
    # because we have already cleaned everything,
    # setting _last_report=None prevents
    # further moveto(-nlines) in add()
self._last_report = None
return self
def add(self, n=1):
""" You need to call pause if """
n = self._progress_func(n)
if not isinstance(n, Number):
raise RuntimeError(
"Progress return an object, but not given `progress_func` for post-processing")
if n <= 0:
return self
fp = Progbar.FP
# ====== update information ====== #
seen_so_far = min(self._seen_so_far[self.epoch_idx] + n, self.target)
self._seen_so_far[self.epoch_idx] = seen_so_far
# ====== check last updated progress, for automatically pause ====== #
if _LAST_UPDATED_PROG[0] is None:
_LAST_UPDATED_PROG[0] = self
elif _LAST_UPDATED_PROG[0] != self:
_LAST_UPDATED_PROG[0].pause()
_LAST_UPDATED_PROG[0] = self
# ====== show report ====== #
if self.print_report:
curr_time = time.time()
# update the report
if self._last_print_time is None or \
time.time() - self._last_print_time > self.__interval or\
seen_so_far >= self.target:
self._last_print_time = curr_time
# move the cursor to last point
if self._last_report is not None:
nlines = len(self._last_report.split('\n'))
self.progress_bar.moveto(-nlines)
report = self._formatted_report(self._report)
# clear old report
if self._last_report is not None:
for i, l in enumerate(self._last_report.split('\n')):
fp.write('\r')
fp.write(' ' * len(l))
fp.write('\r') # place cursor back at the beginning of line
self.progress_bar.moveto(1)
self.progress_bar.clear()
self.progress_bar.moveto(-i - 1)
fp.write(report)
fp.flush()
self._last_report = report
self.progress_bar.moveto(1)
# ====== show progress ====== #
if self.print_progress:
self.progress_bar.update(n=n)
else:
self.progress_bar
# ====== end of epoch ====== #
if seen_so_far >= self.target:
self._new_epoch()
# print summary of epoch
if self.print_summary:
_tqdm.write(self._generate_epoch_summary(self.epoch_idx - 1,
inc_name=True,
inc_counter=False))
return self
| imito/odin | odin/utils/progbar.py | Python | mit | 17,930 | 0.010095 |
input = """
% Guess colours.
chosenColour(N,C) | notChosenColour(N,C) :- node(N), colour(C).
% Exactly one colour per node.
:- #count{ C : chosenColour(X,C) } > 1, node(X).
:- #count{ C : chosenColour(X,C) } < 1, node(X).
% No two adjacent nodes have the same colour.
:- link(X,Y), X<Y, chosenColour(X,C), chosenColour(Y,C).
node(1).
node(2).
node(3).
node(4).
node(5).
link(1,2).
link(2,1).
link(1,3).
link(3,1).
link(2,3).
link(3,2).
link(3,5).
link(5,3).
link(4,5).
link(5,4).
colour(red0).
colour(green0).
colour(blue0).
"""
output = """
{chosenColour(1,blue0), chosenColour(2,green0), chosenColour(3,red0), chosenColour(4,blue0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,red0)}
{chosenColour(1,blue0), chosenColour(2,green0), chosenColour(3,red0), chosenColour(4,green0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,blue0), chosenColour(2,green0), chosenColour(3,red0), chosenColour(4,red0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,blue0), chosenColour(2,green0), chosenColour(3,red0), chosenColour(4,red0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,blue0), notChosenColour(5,red0)}
{chosenColour(1,blue0), chosenColour(2,red0), chosenColour(3,green0), chosenColour(4,blue0), chosenColour(5,red0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,green0)}
{chosenColour(1,blue0), chosenColour(2,red0), chosenColour(3,green0), chosenColour(4,green0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,blue0), chosenColour(2,red0), chosenColour(3,green0), chosenColour(4,green0), chosenColour(5,red0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,green0)}
{chosenColour(1,blue0), chosenColour(2,red0), chosenColour(3,green0), chosenColour(4,red0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,green0), chosenColour(2,blue0), chosenColour(3,red0), chosenColour(4,blue0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,red0)}
{chosenColour(1,green0), chosenColour(2,blue0), chosenColour(3,red0), chosenColour(4,green0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,green0), chosenColour(2,blue0), chosenColour(3,red0), chosenColour(4,red0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,green0), chosenColour(2,blue0), chosenColour(3,red0), chosenColour(4,red0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,blue0), notChosenColour(5,red0)}
{chosenColour(1,green0), chosenColour(2,red0), chosenColour(3,blue0), chosenColour(4,blue0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,green0), notChosenColour(3,red0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,red0)}
{chosenColour(1,green0), chosenColour(2,red0), chosenColour(3,blue0), chosenColour(4,blue0), chosenColour(5,red0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,green0), notChosenColour(3,red0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,green0)}
{chosenColour(1,green0), chosenColour(2,red0), chosenColour(3,blue0), chosenColour(4,green0), chosenColour(5,red0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,green0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,green0)}
{chosenColour(1,green0), chosenColour(2,red0), chosenColour(3,blue0), chosenColour(4,red0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,green0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,blue0), notChosenColour(5,red0)}
{chosenColour(1,red0), chosenColour(2,blue0), chosenColour(3,green0), chosenColour(4,blue0), chosenColour(5,red0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,green0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,green0)}
{chosenColour(1,red0), chosenColour(2,blue0), chosenColour(3,green0), chosenColour(4,green0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,green0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,red0), chosenColour(2,blue0), chosenColour(3,green0), chosenColour(4,green0), chosenColour(5,red0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,green0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,green0)}
{chosenColour(1,red0), chosenColour(2,blue0), chosenColour(3,green0), chosenColour(4,red0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,green0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,red0), chosenColour(2,green0), chosenColour(3,blue0), chosenColour(4,blue0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,green0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,green0), notChosenColour(3,red0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,red0)}
{chosenColour(1,red0), chosenColour(2,green0), chosenColour(3,blue0), chosenColour(4,blue0), chosenColour(5,red0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,green0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,green0), notChosenColour(3,red0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,green0)}
{chosenColour(1,red0), chosenColour(2,green0), chosenColour(3,blue0), chosenColour(4,green0), chosenColour(5,red0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,green0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,green0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,green0)}
{chosenColour(1,red0), chosenColour(2,green0), chosenColour(3,blue0), chosenColour(4,red0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,green0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,green0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,blue0), notChosenColour(5,red0)}
"""
| Yarrick13/hwasp | tests/wasp1/AllAnswerSets/3col_aggregates_1_5_enc2.test.py | Python | apache-2.0 | 14,386 | 0.001738 |
#!/usr/bin/env python3
import fcntl
import logging
import os
import shlex
import subprocess
import threading
from barython.hooks import HooksPool
from barython.tools import splitted_sleep
logger = logging.getLogger("barython")
def protect_handler(handler):
def handler_wrapper(self, *args, **kwargs):
        # bail out early, without touching the semaphore, if it is already taken
        if not self._refresh_lock.acquire(blocking=False):
            return
        try:
            result = handler(self, *args, **kwargs)
        finally:
            if self._lock_start:
                try:
                    self._refresh_lock.release()
                except RuntimeError:
                    pass
        return result
return handler_wrapper
class Widget():
"""
Basic Widget
"""
#: cache the content after update
_content = None
_icon = None
_refresh = -1
@property
def content(self):
return self._content
@property
def icon(self):
return self._icon
@icon.setter
def icon(self, value):
self._icon = value
@property
def refresh(self):
if self._refresh == -1 and self.screens:
return min([screen.refresh for screen in self.screens])
else:
return max(0, self._refresh)
@refresh.setter
def refresh(self, value):
self._refresh = value
def decorate(self, text, fg=None, bg=None, padding=0, font=None, icon=None,
actions=None):
"""
        Decorate text with custom properties
        :param fg: foreground
        :param bg: background
        :param padding: padding around the text
        :param font: index of the font to use
        :param icon: icon to prepend to the text
        :param actions: dict of actions
"""
try:
joined_actions = "".join(
"%{{A{}:{}:}}".format(a, cmd) for a, cmd in actions.items()
)
except (TypeError, AttributeError):
joined_actions = ""
# if colors are reset in text, padding will not have the good colors
if padding and text:
padding_str = self.decorate(padding * " ", fg=fg, bg=bg, font=font)
else:
padding_str = ""
return (12*"{}").format(
joined_actions,
padding_str,
"%{{B{}}}".format(bg) if bg else "",
"%{{F{}}}".format(fg) if fg else "",
"%{{T{}}}".format(font) if font else "",
icon + " " if icon else "",
text,
"%{{T-}}".format(font) if font else "",
"%{F-}" if fg else "",
"%{B-}" if bg else "",
padding_str,
"%{A}" * len(actions) if actions else "",
)
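    # Illustrative example (added sketch, colours assumed): decorating "42"
    # with fg="#ffffff", bg="#005577" and padding=1 yields a lemonbar-style
    # string roughly like
    #   "%{B#005577}%{F#ffffff} %{F-}%{B-}%{B#005577}%{F#ffffff}42%{F-}%{B-}%{B#005577}%{F#ffffff} %{F-}%{B-}"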
def decorate_with_self_attributes(self, text, *args, **kwargs):
"""
Return self.decorate but uses self attributes for default values
"""
d_kwargs = {
"fg": self.fg, "bg": self.bg, "padding": self.padding,
"font": self.fonts[0] if self.fonts else None,
"actions": self.actions, **kwargs
}
for parameter, value in zip(("fg", "bg", "padding", "font", "actions"),
args):
d_kwargs[parameter] = value
return self.decorate(text, **d_kwargs)
def trigger_global_update(self, output=None, *args, **kwargs):
new_content = self.decorate_with_self_attributes(output)
self._update_screens(new_content)
@protect_handler
def handler(self, *args, **kwargs):
"""
To use with hooks
"""
with self._lock_update:
self.update()
splitted_sleep(self.refresh, stop=self._stop.is_set)
def organize_result(self, *args, **kwargs):
"""
        Organize the info to show from the split infos received, without
        handling the decoration (fg, bg, etc.).
        Override this method to change the way the info is printed.
"""
result = "{} ".format(self.icon) if self.icon else ""
return result + "".join(*args, *kwargs.values())
def _update_screens(self, new_content):
"""
If content has changed, request the screen update
"""
if self._content != new_content:
self._content = new_content
for screen in self.screens:
threading.Thread(target=screen.update).start()
def continuous_update(self):
while not self._stop.is_set():
try:
self.update()
except Exception as e:
logger.error(e)
splitted_sleep(self.refresh, stop=self._stop.is_set)
def update(self):
pass
def propage_hooks_changes(self):
"""
Propage a change in the hooks pool
"""
if getattr(self, "screens", None):
for s in self.screens:
s.hooks.merge(self)
def start(self, *args, **kwargs):
self._stop.clear()
try:
if not self._lock_start.acquire(blocking=False):
return
if self.infinite:
self.continuous_update()
else:
self.update()
finally:
if self._lock_start:
try:
self._lock_start.release()
except RuntimeError:
pass
def stop(self):
self._stop.set()
def __init__(self, bg=None, fg=None, padding=0, fonts=None, icon="",
actions=None, refresh=-1, screens=None, infinite=False):
#: background for the widget
self.bg = bg
#: foreground for the widget
self.fg = fg
#: list of fonts index used
self.fonts = fonts if fonts is not None else tuple()
        #: icon to use. Can be a string or a dict for some widgets, where the
        # icon will depend on the current value.
self._icon = icon
        #: dictionary of actions
self.actions = actions if actions is not None else dict()
#: padding
self.padding = padding
#: refresh rate
self.refresh = refresh
#: screens linked. Used for callbacks
self.screens = screens if screens is not None else set()
#: pool of hooks
self.hooks = HooksPool(parent=self)
#: run in an infinite loop or not
self.infinite = infinite
#: event to stop the widget
self._stop = threading.Event()
self._lock_start = threading.Condition()
self._lock_update = threading.Condition()
self._refresh_lock = threading.Semaphore(2)
class TextWidget(Widget):
text = ""
def update(self):
with self._lock_update:
new_content = self.decorate_with_self_attributes(
self.organize_result(self.text)
)
self._update_screens(new_content)
def start(self):
with self._lock_start:
self.update()
def __init__(self, text=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.text = self.text if text is None else text
self.infinite = False
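# Illustrative usage (added sketch, values assumed): a static label on the bar,
# e.g. TextWidget(text="home", fg="#ffffff", bg="#005577", padding=1)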
class SubprocessWidget(Widget):
"""
Run a subprocess in a loop
"""
_subscribe_subproc = None
_subproc = None
def _no_blocking_read(self, output):
"""
Set the output to be non blockant and read it
"""
fd = output.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
try:
result = output.read()
except:
result = b""
fcntl.fcntl(fd, fcntl.F_SETFL, fl)
return result
def _init_subprocess(self, cmd):
"""
Start cmd in a subprocess, and split it if needed
"""
if self._stop.is_set():
return None
if isinstance(cmd, str):
cmd = shlex.split(cmd)
logger.debug("Launching {}".format(" ".join(cmd)))
return subprocess.Popen(
cmd, stdout=subprocess.PIPE, shell=self.shell, env=self.env
)
def _init_subscribe_subproc(self):
process_dead = (
self._subscribe_subproc is None or
self._subscribe_subproc.poll() is not None
)
if process_dead:
self._subscribe_subproc = self._init_subprocess(
self.subscribe_cmd
)
def notify(self, *args, **kwargs):
if self.subscribe_cmd:
self._init_subscribe_subproc()
self._subscribe_subproc.stdout.readline()
# hack to flush the stdout
try:
self._no_blocking_read(self._subscribe_subproc.stdout)
except:
pass
return True
def continuous_update(self):
while not self._stop.is_set():
try:
self.update()
except Exception as e:
logger.error(e)
try:
self._subproc.terminate()
except:
pass
finally:
splitted_sleep(self.refresh, stop=self._stop.is_set)
self.notify()
try:
self._subproc.terminate()
except:
pass
def update(self, *args, **kwargs):
with self._lock_update:
self._subproc = self._init_subprocess(self.cmd)
output = self._subproc.stdout.readline()
if output != b"":
self.trigger_global_update(self.organize_result(
output.decode().replace('\n', '').replace('\r', '')
))
if self._subproc.poll() is not None:
self._subproc = self._subproc.terminate()
def stop(self, *args, **kwargs):
super().stop(*args, **kwargs)
try:
self._subscribe_subproc.terminate()
self._subscribe_subproc = self._subscribe_subproc.wait()
except:
pass
try:
self._subproc = self._subproc.terminate()
self._subproc = self._subproc.wait()
except:
pass
def __init__(self, cmd, subscribe_cmd=None, shell=False, infinite=True,
*args, **kwargs):
super().__init__(*args, **kwargs, infinite=infinite)
#: override environment variables to get the same output everywhere
self.env = dict(os.environ)
self.env["LANG"] = "en_US"
#: command to run. Can be an iterable or a string
self.cmd = cmd
        #: used as a notifier: run this command, wait for any output, then run
        # cmd.
self.subscribe_cmd = subscribe_cmd
        #: value for the subprocess.Popen shell parameter. Defaults to False
self.shell = shell
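# Illustrative usage (added sketch, commands assumed): poll a command at every
# refresh, or re-run it each time a "subscribe" command prints a line, e.g.
#   SubprocessWidget("date +%H:%M", refresh=5)
#   SubprocessWidget("bspc query -D -d focused --names",
#                    subscribe_cmd="bspc subscribe desktop_focus")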
| Anthony25/barython | barython/widgets/base.py | Python | bsd-3-clause | 10,749 | 0.000558 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
from dateutil.relativedelta import relativedelta
from datetime import datetime, date
import click
from werkzeug.security import generate_password_hash
import newspipe.models
from newspipe.bootstrap import application, db
from newspipe.controllers import UserController, ArticleController
logger = logging.getLogger("commands")
@application.cli.command("db_empty")
def db_empty():
"Will drop every datas stocked in db."
with application.app_context():
newspipe.models.db_empty(db)
@application.cli.command("db_create")
def db_create():
"Will create the database from conf parameters."
with application.app_context():
try:
db.create_all()
except Exception as e:
print(e)
@application.cli.command("create_admin")
@click.option("--nickname", default="admin", help="Nickname")
@click.option("--password", default="password", help="Password")
def create_admin(nickname, password):
"Will create an admin user."
admin = {
"is_admin": True,
"is_api": True,
"is_active": True,
"nickname": nickname,
"pwdhash": generate_password_hash(password),
}
with application.app_context():
try:
UserController(ignore_context=True).create(**admin)
except Exception as e:
print(e)
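# Example invocation (illustrative, assuming the standard Flask CLI entry point):
#   $ flask create_admin --nickname admin --password s3cr3t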
@application.cli.command("delete_user")
@click.option("--user-id", required=True, help="Id of the user to delete.")
def delete_user(user_id=None):
"Delete the user with the id specified in the command line."
try:
user = UserController().delete(user_id)
print("User {} deleted".format(user.nickname))
except Exception as e:
print(e)
@application.cli.command("delete_inactive_users")
@click.option("--last-seen", default=6, help="Number of months since last seen.")
def delete_inactive_users(last_seen):
"Delete inactive users (inactivity is given in parameter and specified in number of months)."
filter = {}
filter["last_seen__lt"] = date.today() - relativedelta(months=last_seen)
users = UserController().read(**filter)
for user in users:
db.session.delete(user)
try:
print("Deleting user {}...".format(user.nickname))
db.session.commit()
except:
db.session.rollback()
print("Inactive users deleted.")
@application.cli.command("disable_inactive_users")
@click.option("--last-seen", default=6, help="Number of months since last seen.")
def disable_inactive_users(last_seen):
"Disable inactive users (inactivity is given in parameter and specified in number of months)."
filter = {}
filter["last_seen__lt"] = date.today() - relativedelta(months=last_seen)
users = UserController().read(**filter)
for user in users:
user.is_active = False
user.is_public_profile = False
user.automatic_crawling = False
try:
print("Updating user {}...".format(user.nickname))
db.session.commit()
except:
db.session.rollback()
print("Inactive users disabled.")
@application.cli.command("delete_read_articles")
def delete_read_articles():
"Delete read articles (and not liked) retrieved since more than 60 days ago."
filter = {}
filter["user_id__ne"] = 1
filter["readed"] = True
filter["like"] = False
filter["retrieved_date__lt"] = date.today() - relativedelta(days=60)
articles = ArticleController().read(**filter).limit(5000)
for article in articles:
try:
db.session.delete(article)
db.session.commit()
except:
db.session.rollback()
print("Read articles deleted.")
@application.cli.command("fix_article_entry_id")
def fix_article_entry_id():
filter = {}
filter["entry_id"] = None
articles = ArticleController().read(**filter).limit(50)
for article in articles:
try:
article.entry_id = str(article.id)
db.session.commit()
except:
db.session.rollback()
@application.cli.command("fetch_asyncio")
@click.option("--user-id", default=None, help="Id of the user")
@click.option("--feed-id", default=None, help="If of the feed")
def fetch_asyncio(user_id=None, feed_id=None):
"Crawl the feeds with asyncio."
import asyncio
with application.app_context():
from newspipe.crawler import default_crawler
filters = {}
filters["is_active"] = True
filters["automatic_crawling"] = True
if None is not user_id:
filters["id"] = user_id
users = UserController().read(**filters).all()
try:
feed_id = int(feed_id)
except:
feed_id = None
loop = asyncio.get_event_loop()
queue = asyncio.Queue(maxsize=3, loop=loop)
producer_coro = default_crawler.retrieve_feed(queue, users, feed_id)
consumer_coro = default_crawler.insert_articles(queue, 1)
logger.info("Starting crawler.")
start = datetime.now()
loop.run_until_complete(asyncio.gather(producer_coro, consumer_coro))
end = datetime.now()
loop.close()
logger.info("Crawler finished in {} seconds.".format((end - start).seconds))
| JARR-aggregator/JARR | newspipe/commands.py | Python | agpl-3.0 | 5,322 | 0.002067 |
""" ------------------------------
bior2_6
cwt
by BEAR, 05/04/14
------------------------------ """
import scipy as sp
import numpy as np
from scipy.signal import convolve
#import pywt
_scale_max = 1024
_scale_max = int(2**(sp.ceil(sp.log2(_scale_max))))
tmp = np.loadtxt('bior2.6_1024.txt')
_x_bior2_6 = tmp[:,0]
_psi_bior2_6 = tmp[:,1]
#_, _psi_bior2_6, _, _, _x_bior2_6 = pywt.Wavelet('bior2.6').wavefun(sp.log2(_scale_max))
def bior2_6(length, width):
length = int(length)
width = int(width)
i = sp.arange(0, 13*width)
    u = _psi_bior2_6[_scale_max*i//width]/sp.sqrt(width)
n = int(abs((length-width*13)/2))
if length > width*13:
u = sp.concatenate((u,sp.zeros(length-width*13)), axis=0)
u = sp.roll(u, n)
elif length < width*13:
u = u[n:n+length]
return u
def cwt(x, scales, wname, bplot=False):
coefs = sp.zeros((len(scales), len(x)))
for i in range(0, len(scales)):
if wname == 'bior2.6':
length = min(13*scales[i], len(x))
wavelet = bior2_6
        coefs[i, :] = convolve(x, wavelet(length, scales[i]), mode='same')
if bplot:
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
plt.ion()
fig = plt.figure(num=None, figsize=(14,5), dpi=100, facecolor='w', edgecolor='k')
plt.clf()
gs = gridspec.GridSpec(3, 1)
ax1 = fig.add_subplot(gs[0,0])
ax1.plot(x,'b-')
ax2 = fig.add_subplot(gs[1:,0])
im = ax2.imshow(coefs[::-1,:], extent=[0, len(x), scales[0], scales[-1]], aspect='auto', cmap='jet')
ax2.invert_yaxis()
ax2.set_xlabel('t')
ax2.set_ylabel('scale')
l, b, w, h = ax2.get_position().bounds
cax = fig.add_axes([l+w+0.01, b, 0.02, h])
plt.colorbar(im, cax=cax)
plt.suptitle('cwt by python')
plt.draw()
plt.show(block=True)
return coefs
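# Illustrative usage (added sketch, not part of the original module): compute
# the CWT of a simple two-tone test signal over a few scales.
if __name__ == '__main__':
    t = np.arange(0, 1, 1.0/1024)
    signal = np.sin(2*np.pi*16*t) + np.sin(2*np.pi*64*t)
    demo_coefs = cwt(signal, scales=[2, 4, 8, 16, 32], wname='bior2.6')
    print(demo_coefs.shape)  # expected: (5, 1024)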
| bearicc/python-wavelet-transform | mycwt.py | Python | agpl-3.0 | 1,948 | 0.010267 |
from . import orderpoint_generator
| Vauxoo/stock-logistics-warehouse | stock_orderpoint_generator/wizard/__init__.py | Python | agpl-3.0 | 35 | 0 |
# coding: utf-8
#
# Copyright © 2010—2014 Andrey Mikhaylenko and contributors
#
# This file is part of Argh.
#
# Argh is free software under terms of the GNU Lesser
# General Public License version 3 (LGPLv3) as published by the Free
# Software Foundation. See the file README.rst for copying conditions.
#
"""
Interaction
~~~~~~~~~~~
"""
from argh.compat import text_type
from argh.io import safe_input
__all__ = ['confirm', 'safe_input']
def confirm(action, default=None, skip=False):
"""
A shortcut for typical confirmation prompt.
:param action:
a string describing the action, e.g. "Apply changes". A question mark
will be appended.
:param default:
`bool` or `None`. Determines what happens when user hits :kbd:`Enter`
without typing in a choice. If `True`, default choice is "yes". If
`False`, it is "no". If `None` the prompt keeps reappearing until user
types in a choice (not necessarily acceptable) or until the number of
iteration reaches the limit. Default is `None`.
:param skip:
`bool`; if `True`, no interactive prompt is used and default choice is
returned (useful for batch mode). Default is `False`.
Usage::
def delete(key, silent=False):
item = db.get(Item, args.key)
if confirm('Delete '+item.title, default=True, skip=silent):
item.delete()
print('Item deleted.')
else:
print('Operation cancelled.')
Returns `None` on `KeyboardInterrupt` event.
"""
MAX_ITERATIONS = 3
if skip:
return default
else:
defaults = {
None: ('y','n'),
True: ('Y','n'),
False: ('y','N'),
}
y, n = defaults[default]
prompt = text_type('{action}? ({y}/{n})').format(**locals())
choice = None
try:
if default is None:
cnt = 1
while not choice and cnt < MAX_ITERATIONS:
choice = safe_input(prompt)
cnt += 1
else:
choice = safe_input(prompt)
except KeyboardInterrupt:
return None
if choice in ('yes', 'y', 'Y'):
return True
if choice in ('no', 'n', 'N'):
return False
if default is not None:
return default
return None
| eschleicher/flask_shopping_list | venv/lib/python3.4/site-packages/argh/interaction.py | Python | mit | 2,403 | 0.00125 |
def catch_all(data):
print(data)
| PythonSanSebastian/python-rtmbot | plugins/ep_volunteer/catch_all.py | Python | mit | 38 | 0 |
from django.conf.urls.defaults import *
from wishlist.models import Item
from wishlist.views import add_item, delete_item, wishlist, bookmarklet
urlpatterns = patterns('',
(r'^add/$', add_item),
(r'^(?P<id>\d+)/delete/$', delete_item),
url(r'^tag/(?P<querytag>[^/]+)/$', view=wishlist, name="items_by_tag"),
(r'^bookmarklet/$', bookmarklet),
(r'^sort/(?P<sort_by>\w+)/(?P<sort>\w+)/$', wishlist),
(r'^$', wishlist),
)
| pigmonkey/django-wishlist | wishlist/urls.py | Python | bsd-3-clause | 443 | 0.002257 |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for vendor methods used by iLO modules."""
import mock
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers.modules.ilo import vendor as ilo_vendor
from ironic.drivers.modules import iscsi_deploy
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
INFO_DICT = db_utils.get_test_ilo_info()
class VendorPassthruTestCase(db_base.DbTestCase):
def setUp(self):
super(VendorPassthruTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="iscsi_ilo")
self.node = obj_utils.create_test_node(self.context,
driver='iscsi_ilo',
driver_info=INFO_DICT)
@mock.patch.object(manager_utils, 'node_power_action', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'setup_vmedia', spec_set=True,
autospec=True)
def test_boot_into_iso(self, setup_vmedia_mock, power_action_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.boot_into_iso(task, boot_iso_href='foo')
setup_vmedia_mock.assert_called_once_with(task, 'foo',
ramdisk_options=None)
power_action_mock.assert_called_once_with(task, states.REBOOT)
@mock.patch.object(ilo_vendor.VendorPassthru, '_validate_boot_into_iso',
spec_set=True, autospec=True)
def test_validate_boot_into_iso(self, validate_boot_into_iso_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
vendor = ilo_vendor.VendorPassthru()
vendor.validate(task, method='boot_into_iso', foo='bar')
validate_boot_into_iso_mock.assert_called_once_with(
vendor, task, {'foo': 'bar'})
def test__validate_boot_into_iso_invalid_state(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.provision_state = states.AVAILABLE
self.assertRaises(
exception.InvalidStateRequested,
task.driver.vendor._validate_boot_into_iso,
task, {})
def test__validate_boot_into_iso_missing_boot_iso_href(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.provision_state = states.MANAGEABLE
self.assertRaises(
exception.MissingParameterValue,
task.driver.vendor._validate_boot_into_iso,
task, {})
@mock.patch.object(deploy_utils, 'validate_image_properties',
spec_set=True, autospec=True)
def test__validate_boot_into_iso_manage(self, validate_image_prop_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
info = {'boot_iso_href': 'foo'}
task.node.provision_state = states.MANAGEABLE
task.driver.vendor._validate_boot_into_iso(
task, info)
validate_image_prop_mock.assert_called_once_with(
task.context, {'image_source': 'foo'}, [])
@mock.patch.object(deploy_utils, 'validate_image_properties',
spec_set=True, autospec=True)
def test__validate_boot_into_iso_maintenance(
self, validate_image_prop_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
info = {'boot_iso_href': 'foo'}
task.node.maintenance = True
task.driver.vendor._validate_boot_into_iso(
task, info)
validate_image_prop_mock.assert_called_once_with(
task.context, {'image_source': 'foo'}, [])
@mock.patch.object(iscsi_deploy.VendorPassthru, 'continue_deploy',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'update_secure_boot_mode', autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', autospec=True)
def test_continue_deploy(self,
func_update_boot_mode,
func_update_secure_boot_mode,
pxe_vendorpassthru_mock):
kwargs = {'address': '123456'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.provision_state = states.DEPLOYWAIT
task.node.target_provision_state = states.ACTIVE
task.driver.vendor.continue_deploy(task, **kwargs)
func_update_boot_mode.assert_called_once_with(task)
func_update_secure_boot_mode.assert_called_once_with(task, True)
pxe_vendorpassthru_mock.assert_called_once_with(
mock.ANY, task, **kwargs)
class IloVirtualMediaAgentVendorInterfaceTestCase(db_base.DbTestCase):
def setUp(self):
super(IloVirtualMediaAgentVendorInterfaceTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="agent_ilo")
self.node = obj_utils.create_test_node(
self.context, driver='agent_ilo', driver_info=INFO_DICT)
@mock.patch.object(agent.AgentVendorInterface, 'reboot_to_instance',
spec_set=True, autospec=True)
@mock.patch.object(agent.AgentVendorInterface, 'check_deploy_success',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'update_secure_boot_mode', spec_set=True,
autospec=True)
def test_reboot_to_instance(self, func_update_secure_boot_mode,
func_update_boot_mode,
check_deploy_success_mock,
agent_reboot_to_instance_mock):
kwargs = {'address': '123456'}
check_deploy_success_mock.return_value = None
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.reboot_to_instance(task, **kwargs)
check_deploy_success_mock.assert_called_once_with(
mock.ANY, task.node)
func_update_boot_mode.assert_called_once_with(task)
func_update_secure_boot_mode.assert_called_once_with(task, True)
agent_reboot_to_instance_mock.assert_called_once_with(
mock.ANY, task, **kwargs)
@mock.patch.object(agent.AgentVendorInterface, 'reboot_to_instance',
spec_set=True, autospec=True)
@mock.patch.object(agent.AgentVendorInterface, 'check_deploy_success',
spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'update_boot_mode', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'update_secure_boot_mode', spec_set=True,
autospec=True)
def test_reboot_to_instance_deploy_fail(self, func_update_secure_boot_mode,
func_update_boot_mode,
check_deploy_success_mock,
agent_reboot_to_instance_mock):
kwargs = {'address': '123456'}
check_deploy_success_mock.return_value = "Error"
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.reboot_to_instance(task, **kwargs)
check_deploy_success_mock.assert_called_once_with(
mock.ANY, task.node)
self.assertFalse(func_update_boot_mode.called)
self.assertFalse(func_update_secure_boot_mode.called)
agent_reboot_to_instance_mock.assert_called_once_with(
mock.ANY, task, **kwargs)
| bacaldwell/ironic | ironic/tests/unit/drivers/modules/ilo/test_vendor.py | Python | apache-2.0 | 9,258 | 0 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns(
'',
# Examples:
# url(r'^$', 'chatter.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'', include('chatter.base.urls')),
url(r'^admin/', include(admin.site.urls)),
)
# The documentation for authentication views can be found at:
# https://docs.djangoproject.com/en/1.7/topics/auth/default/#module-django.contrib.auth.views
urlpatterns += patterns(
'django.contrib.auth.views',
url(r'^login/$', 'login', name='login'),
url(r'^logout/$', 'logout_then_login', name='logout'),
url(r'^reset/$', 'password_reset', name='password_reset'),
url(r'^reset/done/$', 'password_reset_done', name='password_reset_done'),
url(
r'^reset/confirm/'
r'(?P<uidb64>[0-9A-Za-z_\-]+)/'
r'(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
'password_reset_confirm',
name='password_reset-confirm'),
url(
r'^reset/complete/$',
'password_reset_complete',
name='password_reset_complete'),
)
| scott-w/pyne-django-tutorial | chatter/chatter/urls.py | Python | mit | 1,113 | 0 |
# -*- coding: utf-8 -*-
"""Base module for unittesting."""
from plone.app.testing import applyProfile
from plone.app.testing import FunctionalTesting
from plone.app.testing import IntegrationTesting
from plone.app.testing import login
from plone.app.testing import PLONE_FIXTURE
from plone.app.testing import PloneSandboxLayer
from plone.app.testing import setRoles
from plone.app.testing import TEST_USER_ID
from plone.app.testing import TEST_USER_NAME
from plone.testing import z2
import unittest2 as unittest
class neweSitecontentLayer(PloneSandboxLayer):
defaultBases = (PLONE_FIXTURE,)
def setUpZope(self, app, configurationContext):
"""Set up Zope."""
# Load ZCML
import newe.sitecontent
self.loadZCML(package=newe.sitecontent)
z2.installProduct(app, 'newe.sitecontent')
def setUpPloneSite(self, portal):
"""Set up Plone."""
# Install into Plone site using portal_setup
applyProfile(portal, 'newe.sitecontent:default')
# Login and create some test content
setRoles(portal, TEST_USER_ID, ['Manager'])
login(portal, TEST_USER_NAME)
portal.invokeFactory('Folder', 'folder')
# Commit so that the test browser sees these objects
portal.portal_catalog.clearFindAndRebuild()
import transaction
transaction.commit()
def tearDownZope(self, app):
"""Tear down Zope."""
z2.uninstallProduct(app, 'newe.sitecontent')
FIXTURE = neweSitecontentLayer()
INTEGRATION_TESTING = IntegrationTesting(
bases=(FIXTURE,), name="CkSitecontentLayer:Integration")
FUNCTIONAL_TESTING = FunctionalTesting(
bases=(FIXTURE,), name="CkSitecontentLayer:Functional")
class IntegrationTestCase(unittest.TestCase):
"""Base class for integration tests."""
layer = INTEGRATION_TESTING
class FunctionalTestCase(unittest.TestCase):
"""Base class for functional tests."""
layer = FUNCTIONAL_TESTING
| a25kk/newe | src/newe.sitecontent/newe/sitecontent/testing.py | Python | mit | 1,963 | 0 |
from flask import current_app, redirect, url_for, request, session, flash, send_file
from flask.ext import restful
from flask.ext.login import login_required, current_user, login_user, logout_user
import twitter
from request_parsers import *
from datetime import datetime
from models import *
from rauth.service import OAuth1Service
from rauth.utils import parse_utf8_qsl
from twitter_helpers import TwitterUser
import controllers
import traceback
class TwitterAuth(restful.Resource):
def get(self):
twitter_auth_loader = OAuth1Service(
name='twitter',
consumer_key=current_app.config['CONSUMER_KEY'],
consumer_secret=current_app.config['CONSUMER_SECRET'],
request_token_url='https://api.twitter.com/oauth/request_token',
access_token_url='https://api.twitter.com/oauth/access_token',
authorize_url='https://api.twitter.com/oauth/authorize',
base_url='https://api.twitter.com/1.1/'
)
oauth_callback = url_for('twittercallback', _external=True)
params = {'oauth_callback': oauth_callback}
auth_url = twitter_auth_loader.get_raw_request_token(params=params)
data = parse_utf8_qsl(auth_url.content)
session['twitter_oauth'] = (data['oauth_token'],
data['oauth_token_secret'])
return redirect(twitter_auth_loader.get_authorize_url(data['oauth_token'], **params))
class Login(restful.Resource):
def get(self):
return send_file('views/index.html')
#return current_app.send_static_file('views/login.html')
return {'status':'Welcome'}
class TwitterCallback(restful.Resource):
def get(self):
try:
print session
request_token, request_token_secret = session.pop('twitter_oauth')
twitter_auth_loader = OAuth1Service(
name='twitter',
consumer_key=current_app.config['CONSUMER_KEY'],
consumer_secret=current_app.config['CONSUMER_SECRET'],
request_token_url='https://api.twitter.com/oauth/request_token',
access_token_url='https://api.twitter.com/oauth/access_token',
authorize_url='https://api.twitter.com/oauth/authorize',
base_url='https://api.twitter.com/1.1/'
)
if not 'oauth_token' in request.args:
print 'You did not authorize the request'
return redirect(url_for('index'))
try:
creds = {'request_token': request_token,
'request_token_secret': request_token_secret}
params = {'oauth_verifier': request.args['oauth_verifier']}
sess = twitter_auth_loader.get_auth_session(params=params, **creds)
print sess.access_token
except Exception, e:
flash('There was a problem logging into Twitter: ' + str(e))
return redirect(url_for('index'))
api = twitter.Api(
current_app.config['CONSUMER_KEY'],
current_app.config['CONSUMER_SECRET'],
sess.access_token,
sess.access_token_secret
)
u = api.VerifyCredentials()
user = User.objects(twitter_id = u.id).first()
if not user:
user = User(twitter_id = u.id, screen_name = u.screen_name, registered_on = datetime.now(), access_token = sess.access_token, access_token_secret = sess.access_token_secret)
user.save()
else:
user.update(set__access_token = sess.access_token, set__access_token_secret = sess.access_token_secret)
login_user(user)
# return controllers.get_logged_in_users_list(user)
return redirect('http://localhost:8000')
except Exception as e:
import traceback
print traceback.format_exc(e)
restful.abort(500, message = 'Internal Server Error.')
class MyLists(restful.Resource):
@login_required
def get(self):
#args = list_parser.parse_args()
#TODO also return subscribed lists
user = current_user
try:
return controllers.get_logged_in_users_list(user)
except twitter.TwitterError as e:
if e.message[0]['code'] == 88:
                restful.abort(404, message = 'Limit for your access token has been reached. Be patient and see some of the popular timelines')
except Exception as e:
            print traceback.format_exc()
restful.abort(500, message = 'internal server error.')
class CreateList(restful.Resource):
@login_required
def get(self):
args = create_list_parser.parse_args()
user = current_user
try:
return controllers.create_list(user, args['screen_name'])
except twitter.TwitterError as e:
if e.message[0]['code'] == 34:
                restful.abort(404, message = 'Sorry, user not found on Twitter.')
            elif e.message[0]['code'] == 88:
                restful.abort(404, message = 'Limit for your access token has been reached. You can create more timelines later. Try some of the popular timelines for now.')
except Exception as e:
            print traceback.format_exc()
restful.abort(500, message = 'internal server error.')
class SubscribeList(restful.Resource):
@login_required
def get(self):
args = subscribe_list_parser.parse_args()
user = current_user
try:
return controllers.subscribe_list(user, args['list_id'], args['owner_id'])
except twitter.TwitterError as e:
if e.message[0]['code'] == 88:
                restful.abort(404, message = 'Limit for your access token has been reached. You may subscribe to interesting timelines later. Just enjoy popular timelines for now.')
except Exception as e:
            print traceback.format_exc()
restful.abort(500, message = 'internal server error.')
class DiscoverList(restful.Resource):
@login_required
def get(self):
args = discover_list_parser.parse_args()
try:
list_objs = list(TimelineList._get_collection().find({'exists' : True}).skip(args['skip']).limit(args['limit']))
map(lambda x:x.pop('_id'),list_objs)
return list_objs
except Exception as e:
            print traceback.format_exc()
restful.abort(500, message = 'internal server error.')
class ListTimeline(restful.Resource):
@login_required
def get(self):
args = list_timeline_parser.parse_args()
user = current_user
try:
return controllers.list_timeline(user, args['list_id'], args['owner_id'], args['since_id'], args['count'])
except twitter.TwitterError as e:
if e.message[0]['code'] == 34:
controllers.update_list_status(args['list_id'], exists = False)
restful.abort(404, message = 'Sorry page not found')
except Exception as e:
            print traceback.format_exc()
restful.abort(500, message = 'internal server error.')
| airwoot/timeline-hack-core | app/resources.py | Python | mit | 7,302 | 0.015338 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-19 15:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import hindustani.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('data', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Artist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('mbid', models.UUIDField(blank=True, null=True)),
('gender', models.CharField(blank=True, choices=[(b'M', b'Male'), (b'F', b'Female')], max_length=1, null=True)),
('begin', models.CharField(blank=True, max_length=10, null=True)),
('end', models.CharField(blank=True, max_length=10, null=True)),
('artist_type', models.CharField(choices=[(b'P', b'Person'), (b'G', b'Group')], default=b'P', max_length=1)),
('dummy', models.BooleanField(db_index=True, default=False)),
('description_edited', models.BooleanField(default=False)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('group_members', models.ManyToManyField(blank=True, related_name='groups', to='hindustani.Artist')),
('images', models.ManyToManyField(related_name='hindustani_artist_image_set', to='data.Image')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='ArtistAlias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('alias', models.CharField(max_length=100)),
('primary', models.BooleanField(default=False)),
('locale', models.CharField(blank=True, max_length=10, null=True)),
('artist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aliases', to='hindustani.Artist')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='Composer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('mbid', models.UUIDField(blank=True, null=True)),
('gender', models.CharField(blank=True, choices=[(b'M', b'Male'), (b'F', b'Female')], max_length=1, null=True)),
('begin', models.CharField(blank=True, max_length=10, null=True)),
('end', models.CharField(blank=True, max_length=10, null=True)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('images', models.ManyToManyField(related_name='hindustani_composer_image_set', to='data.Image')),
('references', models.ManyToManyField(blank=True, related_name='hindustani_composer_reference_set', to='data.Source')),
('source', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_composer_source_set', to='data.Source')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='ComposerAlias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('alias', models.CharField(max_length=100)),
('primary', models.BooleanField(default=False)),
('locale', models.CharField(blank=True, max_length=10, null=True)),
('composer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aliases', to='hindustani.Composer')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='Form',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('common_name', models.CharField(max_length=50)),
('uuid', models.UUIDField(db_index=True)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('images', models.ManyToManyField(related_name='hindustani_form_image_set', to='data.Image')),
('references', models.ManyToManyField(blank=True, related_name='hindustani_form_reference_set', to='data.Source')),
('source', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_form_source_set', to='data.Source')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='FormAlias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('form', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aliases', to='hindustani.Form')),
],
),
migrations.CreateModel(
name='Instrument',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('percussion', models.BooleanField(default=False)),
('name', models.CharField(max_length=50)),
('mbid', models.UUIDField(blank=True, null=True)),
('hidden', models.BooleanField(default=False)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('images', models.ManyToManyField(related_name='hindustani_instrument_image_set', to='data.Image')),
('references', models.ManyToManyField(blank=True, related_name='hindustani_instrument_reference_set', to='data.Source')),
('source', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_instrument_source_set', to='data.Source')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='InstrumentPerformance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lead', models.BooleanField(default=False)),
('attributes', models.CharField(blank=True, max_length=200, null=True)),
('artist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Artist')),
('instrument', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='hindustani.Instrument')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='Laya',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('common_name', models.CharField(max_length=50)),
('uuid', models.UUIDField(db_index=True)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('images', models.ManyToManyField(related_name='hindustani_laya_image_set', to='data.Image')),
('references', models.ManyToManyField(blank=True, related_name='hindustani_laya_reference_set', to='data.Source')),
('source', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_laya_source_set', to='data.Source')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='LayaAlias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('laya', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aliases', to='hindustani.Laya')),
],
),
migrations.CreateModel(
name='Lyrics',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lyrics', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Raag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('common_name', models.CharField(max_length=50)),
('uuid', models.UUIDField(db_index=True)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('images', models.ManyToManyField(related_name='hindustani_raag_image_set', to='data.Image')),
('references', models.ManyToManyField(blank=True, related_name='hindustani_raag_reference_set', to='data.Source')),
('source', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_raag_source_set', to='data.Source')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='RaagAlias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('raag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aliases', to='hindustani.Raag')),
],
),
migrations.CreateModel(
name='Recording',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('mbid', models.UUIDField(blank=True, null=True)),
('length', models.IntegerField(blank=True, null=True)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='RecordingForm',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sequence', models.IntegerField()),
('form', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Form')),
('recording', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Recording')),
],
),
migrations.CreateModel(
name='RecordingLaya',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sequence', models.IntegerField()),
('laya', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Laya')),
('recording', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Recording')),
],
),
migrations.CreateModel(
name='RecordingRaag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sequence', models.IntegerField()),
('raag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Raag')),
('recording', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Recording')),
],
),
migrations.CreateModel(
name='RecordingSection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sequence', models.IntegerField()),
('recording', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Recording')),
],
),
migrations.CreateModel(
name='RecordingTaal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sequence', models.IntegerField()),
('recording', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Recording')),
],
),
migrations.CreateModel(
name='Release',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mbid', models.UUIDField(blank=True, null=True)),
('title', models.CharField(max_length=100)),
('artistcredit', models.CharField(max_length=255)),
('year', models.IntegerField(blank=True, null=True)),
('status', models.CharField(blank=True, max_length=100, null=True)),
('rel_type', models.CharField(blank=True, max_length=100, null=True)),
('artists', models.ManyToManyField(related_name='primary_concerts', to='hindustani.Artist')),
('collection', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_releases', to='data.Collection')),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('images', models.ManyToManyField(related_name='hindustani_release_image_set', to='data.Image')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='ReleaseRecording',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('track', models.IntegerField()),
('disc', models.IntegerField()),
('disctrack', models.IntegerField()),
('recording', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Recording')),
('release', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Release')),
],
options={
'ordering': ('track',),
},
),
migrations.CreateModel(
name='Section',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('common_name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='SectionAlias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('section', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aliases', to='hindustani.Section')),
],
),
migrations.CreateModel(
name='Taal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('common_name', models.CharField(max_length=50)),
('num_maatras', models.IntegerField(null=True)),
('uuid', models.UUIDField(db_index=True)),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('images', models.ManyToManyField(related_name='hindustani_taal_image_set', to='data.Image')),
('references', models.ManyToManyField(blank=True, related_name='hindustani_taal_reference_set', to='data.Source')),
('source', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_taal_source_set', to='data.Source')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='TaalAlias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('taal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aliases', to='hindustani.Taal')),
],
),
migrations.CreateModel(
name='Work',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('mbid', models.UUIDField(blank=True, null=True)),
('composers', models.ManyToManyField(blank=True, related_name='works', to='hindustani.Composer')),
('description', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='data.Description')),
('images', models.ManyToManyField(related_name='hindustani_work_image_set', to='data.Image')),
('lyricists', models.ManyToManyField(blank=True, related_name='lyric_works', to='hindustani.Composer')),
('lyrics', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='hindustani.Lyrics')),
('references', models.ManyToManyField(blank=True, related_name='hindustani_work_reference_set', to='data.Source')),
('source', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_work_source_set', to='data.Source')),
],
options={
'abstract': False,
},
bases=(hindustani.models.HindustaniStyle, models.Model),
),
migrations.CreateModel(
name='WorkTime',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sequence', models.IntegerField()),
('time', models.IntegerField(blank=True, null=True)),
('recording', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Recording')),
('work', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Work')),
],
),
migrations.AddField(
model_name='release',
name='recordings',
field=models.ManyToManyField(through='hindustani.ReleaseRecording', to='hindustani.Recording'),
),
migrations.AddField(
model_name='release',
name='references',
field=models.ManyToManyField(blank=True, related_name='hindustani_release_reference_set', to='data.Source'),
),
migrations.AddField(
model_name='release',
name='source',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_release_source_set', to='data.Source'),
),
migrations.AddField(
model_name='recordingtaal',
name='taal',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Taal'),
),
migrations.AddField(
model_name='recordingsection',
name='section',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Section'),
),
migrations.AddField(
model_name='recording',
name='forms',
field=models.ManyToManyField(through='hindustani.RecordingForm', to='hindustani.Form'),
),
migrations.AddField(
model_name='recording',
name='images',
field=models.ManyToManyField(related_name='hindustani_recording_image_set', to='data.Image'),
),
migrations.AddField(
model_name='recording',
name='layas',
field=models.ManyToManyField(through='hindustani.RecordingLaya', to='hindustani.Laya'),
),
migrations.AddField(
model_name='recording',
name='performance',
field=models.ManyToManyField(through='hindustani.InstrumentPerformance', to='hindustani.Artist'),
),
migrations.AddField(
model_name='recording',
name='raags',
field=models.ManyToManyField(through='hindustani.RecordingRaag', to='hindustani.Raag'),
),
migrations.AddField(
model_name='recording',
name='references',
field=models.ManyToManyField(blank=True, related_name='hindustani_recording_reference_set', to='data.Source'),
),
migrations.AddField(
model_name='recording',
name='sections',
field=models.ManyToManyField(through='hindustani.RecordingSection', to='hindustani.Section'),
),
migrations.AddField(
model_name='recording',
name='source',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_recording_source_set', to='data.Source'),
),
migrations.AddField(
model_name='recording',
name='taals',
field=models.ManyToManyField(through='hindustani.RecordingTaal', to='hindustani.Taal'),
),
migrations.AddField(
model_name='recording',
name='works',
field=models.ManyToManyField(through='hindustani.WorkTime', to='hindustani.Work'),
),
migrations.AddField(
model_name='instrumentperformance',
name='recording',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hindustani.Recording'),
),
migrations.AddField(
model_name='artist',
name='main_instrument',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='hindustani.Instrument'),
),
migrations.AddField(
model_name='artist',
name='references',
field=models.ManyToManyField(blank=True, related_name='hindustani_artist_reference_set', to='data.Source'),
),
migrations.AddField(
model_name='artist',
name='source',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hindustani_artist_source_set', to='data.Source'),
),
]
| MTG/dunya | hindustani/migrations/0001_initial.py | Python | agpl-3.0 | 25,073 | 0.004706 |
from unittest import TestCase
from mock import patch
from regulations.generator.layers.paragraph_markers import *
class ParagraphMarkersLayerTest(TestCase):
@patch('regulations.generator.layers.paragraph_markers.loader')
def test_apply_layer(self, loader):
pml = ParagraphMarkersLayer({
'1001-12-a': [{'text': '(a)', 'locations': [0]}],
'1001-12-q': [{'text': 'q.', 'locations': [1]}]
})
self.assertEqual([], pml.apply_layer('1002-01-01'))
a = pml.apply_layer('1001-12-a')
self.assertEqual(1, len(a))
self.assertEqual('(a)', a[0][0])
self.assertEqual([0], a[0][2])
call_args = loader.get_template.return_value.render.call_args[0][0]
self.assertEqual('(a)', call_args['paragraph'])
self.assertEqual('a', call_args['paragraph_stripped'])
q = pml.apply_layer('1001-12-q')
self.assertEqual(1, len(q))
self.assertEqual('q.', q[0][0])
self.assertEqual([1], q[0][2])
call_args = loader.get_template.return_value.render.call_args[0][0]
self.assertEqual('q.', call_args['paragraph'])
self.assertEqual('q', call_args['paragraph_stripped'])
| ascott1/regulations-site | regulations/tests/layers_paragraph_markers_tests.py | Python | cc0-1.0 | 1,212 | 0.00165 |
# -*- coding: utf-8 -*-
import re
from module.plugins.internal.Crypter import Crypter
class LetitbitNetFolder(Crypter):
__name__ = "LetitbitNet"
__type__ = "crypter"
__version__ = "0.16"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?letitbit\.net/folder/\w+'
__config__ = [("activated" , "bool" , "Activated" , True ),
("use_premium" , "bool" , "Use premium account if available", True ),
("folder_per_package", "Default;Yes;No", "Create folder for each package" , "Default")]
__description__ = """Letitbit.net folder decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("DHMH", "webmaster@pcProfil.de"),
("z00nx", "z00nx0@gmail.com")]
FOLDER_PATTERN = r'<table>(.*)</table>'
LINK_PATTERN = r'<a href="(.+?)" target="_blank">'
def decrypt(self, pyfile):
html = self.load(pyfile.url)
folder = re.search(self.FOLDER_PATTERN, html, re.S)
if folder is None:
self.error(_("FOLDER_PATTERN not found"))
self.links.extend(re.findall(self.LINK_PATTERN, folder.group(0)))
| Guidobelix/pyload | module/plugins/crypter/LetitbitNetFolder.py | Python | gpl-3.0 | 1,221 | 0.015561 |
# service.py is part of Panopticon.
# Panopticon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Panopticon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Panopticon. If not, see <http://www.gnu.org/licenses/>.
from panopticon.core.base import ServiceAttribute
from panopticon.core.actions.base import (ManagerDependantActionLauncher,
DependantAction, DecoratorAction, ActionManager)
class ServiceAction(ServiceAttribute, DependantAction):
_excluded_values_names = ["manager"]
def __init__(self, name=None, service=None, launcher=None):
super(ServiceAction, self).__init__(name=name, service=service)
DependantAction.__init__(self, launcher=launcher)
def check_required(self, action):
return action.service in self.service.required_services
def set_running_env(self, running_env):
running_env.action = self
running_env.service = self.service
def __repr__(self):
fields = []
if self.service is not None:
fields.append("service:'%s'" % self.service)
if self.name is not None:
fields.append("name:'%s'" % self.name)
return "<%s %s>" % (self.__class__.__name__, " ".join(fields))
def __str__(self):
if self.service is None:
return self.name
else:
return ".".join((str(self.service), self.name))
class ServiceDecoratorAction(DecoratorAction, ServiceAction):
def __init__(self, function, name=None, service=None, launcher=None):
super(ServiceDecoratorAction, self).__init__(function)
ServiceAction.__init__(self, name=name, service=service,
launcher=launcher)
service_action = ServiceDecoratorAction
class ServiceActionLauncher(ManagerDependantActionLauncher, ServiceAction):
def __init__(self, name=None, service=None, launcher=None):
super(ServiceActionLauncher, self).__init__(name, service.roles)
ServiceAction.__init__(self, name=name, service=service,
launcher=launcher)
def launch(self, *args, **kwargs):
super(ServiceActionLauncher, self).launch(*args, **kwargs)
class ServiceActionManager(ActionManager):
action_launcher_class = ServiceActionLauncher
_managed_obj_name = "service"
_manager_attribute_class = ServiceAction
def _get_base_dict(self):
service_action_class = self.action_launcher_class
actions = {}
defined_action_names = []
for aname, action in self.service._meta["actions"]:
defined_action_names.append(aname)
actions[aname] = action
for rname, role in self.service.roles:
for raname, action in role.actions:
                if raname not in defined_action_names:
new_action = service_action_class(name=raname,
service=self.service)
actions[raname] = new_action
defined_action_names.append(raname)
return actions
| llou/panopticon | panopticon/core/actions/service.py | Python | gpl-3.0 | 3,447 | 0.004062 |
import argparse
import sys
import logging
import os
import csv
class ReadItem:
def __init__(self, sequence, totalCount):
self.Sequence = sequence
self.TotalCount = totalCount
self.SampleMap = {}
class AnnotationItem:
def __init__(self, sequence, totalCount, category, counts):
self.Sequence = sequence
self.TotalCount = totalCount
self.Categories = [category]
self.Counts = counts
def getValue(value):
return value.TotalCount
def getFilename(value):
return value[1]
def match(logger, input, names, annotated, maxMapped, maxNumber, minReadCount, minSampleCount, outputPrefix):
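  #match each abundant short read against the longer annotated (parent) reads and write a per-sample summary table of where each short read likely originated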
logger.info("Reading short reads:" + input + " ...")
shortReadMap = {}
shortReadFiles = []
shortFileList = []
with open(input, 'r') as sr:
for line in sr:
parts = line.rstrip().split('\t')
shortFileList.append(parts)
shortFileList = sorted(shortFileList, key=getFilename)
for parts in shortFileList:
sampleFile = parts[0]
sample = parts[1]
shortReadFiles.append(sample)
logger.info(" Reading " + sampleFile + " ...")
with open(sampleFile, 'r') as fin:
fin.readline()
for line in fin:
reads = line.rstrip().split('\t')
count = int(reads[1])
seq = reads[2].rstrip()
if not seq in shortReadMap:
ri = ReadItem(seq, count)
shortReadMap[seq] = ri
else:
ri = shortReadMap[seq]
ri.TotalCount += count
ri.SampleMap[sample] = count
if minSampleCount > 1 or minReadCount > 1:
shortReads = []
for read in shortReadMap.values():
validSampleCount = len([v for v in read.SampleMap.values() if v >= minReadCount])
if validSampleCount >= minSampleCount:
shortReads.append(read)
else:
shortReads = shortReadMap.values()
shortReads = sorted(shortReads, key=getValue, reverse=True)
if len(shortReads) > maxNumber:
shortReads = shortReads[0:maxNumber]
logger.info("Reading max mapped reads:" + maxMapped + " ...")
maxmappedReads = {}
with open(maxMapped, 'r') as sr:
for line in sr:
parts = line.split('\t')
logger.info(" Reading " + parts[0] + " ...")
with open(parts[0], 'r') as fin:
while True:
qname = fin.readline().rstrip()
if not qname:
break
seq = fin.readline()
fin.readline()
fin.readline()
if qname.endswith("_"):
maxmappedReads[seq.rstrip()] = 1
cnames = names.split(",")
logger.info("Reading annotated reads:" + annotated + " ...")
annotatedReadMap = {}
annotatedFiles = []
with open(annotated, 'r') as annolist:
iIndex = -1
for row in annolist:
parts = row.split('\t')
annofile = parts[0]
iIndex = iIndex + 1
category = cnames[iIndex]
logger.info(" Reading " + annofile + " ...")
with open(annofile, 'r') as sr:
annotatedFiles = sr.readline().rstrip().split('\t')[1:]
for line in sr:
parts = line.rstrip().split('\t')
seq = parts[0]
if seq not in annotatedReadMap:
totalCount = sum(int(p) for p in parts[1:])
annotatedReadMap[seq] = AnnotationItem(seq, totalCount, category, parts[1:])
else:
annotatedReadMap[seq].Categories.append(category)
annotatedReads = sorted(annotatedReadMap.values(), key=getValue, reverse=True)
output = outputPrefix + ".tsv"
logger.info("Writing explain result:" + output + " ...")
with open(output, "w") as sw:
sw.write("ShortRead\tShortReadCount\tShortReadLength\t" + "\t".join(["SRS_" + f for f in shortReadFiles]) + "\tIsMaxMapped\tParentRead\tParentReadCount\tParentReadCategory\t" + "\t".join(["PRS_" + f for f in annotatedFiles]) + "\n")
emptyAnnotation = "\t\t\t\t" + "\t".join(["" for af in annotatedFiles]) + "\n"
for shortRead in shortReads:
shortSeq = shortRead.Sequence
shortSeqCount = shortRead.TotalCount
seqMap = shortRead.SampleMap
sw.write("%s\t%s\t%d" % (shortSeq, shortSeqCount, len(shortSeq)))
for fname in shortReadFiles:
if fname in seqMap:
sw.write("\t%s" % seqMap[fname])
else:
sw.write("\t0")
sw.write("\t" + str(shortSeq in maxmappedReads))
bFound = False
for annotatedRead in annotatedReads:
annoSeq = annotatedRead.Sequence
if shortSeq in annoSeq:
bFound = True
sw.write("\t%s\t%s\t%s\t%s\n" % (annoSeq, annotatedRead.TotalCount, "/".join(annotatedRead.Categories[0]), "\t".join(annotatedRead.Counts)))
break
if not bFound:
sw.write(emptyAnnotation)
logger.info("Done.")
def main():
parser = argparse.ArgumentParser(description="Matching short reads with annotated reads.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
DEBUG=False
NOT_DEBUG = not DEBUG
parser.add_argument('-i', '--input', action='store', nargs='?', help='Input short reads', required=NOT_DEBUG)
parser.add_argument('-m', '--maxMapped', action='store', nargs='?', help='Input reads exceed maximum mapping to genome', required=NOT_DEBUG)
parser.add_argument('-a', '--annotated', action='store', nargs='?', help='Input annotated reads', required=NOT_DEBUG)
  parser.add_argument('-n', '--names', action='store', nargs='?', help='Input annotated reads categories, split by ","', required=NOT_DEBUG)
  parser.add_argument('--maxNumber', action='store', default=100, type=int, nargs='?', help='Input number of top short reads for annotation')
  parser.add_argument('--minReadCount', action='store', default=3, type=int, nargs='?', help='Input minimum copy of short reads in sample for annotation')
  parser.add_argument('--minSampleCount', action='store', default=2, type=int, nargs='?', help='Input minimum number of samples with valid read count')
parser.add_argument('-o', '--output', action='store', nargs='?', default="-", help="Output prefix of matched reads file", required=NOT_DEBUG)
if NOT_DEBUG and len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if DEBUG:
args.input = "T:/Shared/Labs/Vickers Lab/Tiger/projects/20180809_smallRNA_269_933_2002_human/data_visualization/short_reads_source/result/match__fileList1.list"
args.maxMapped = "T:/Shared/Labs/Vickers Lab/Tiger/projects/20180809_smallRNA_269_933_2002_human/data_visualization/short_reads_source/result/match__fileList2.list"
args.annotated = "T:/Shared/Labs/Vickers Lab/Tiger/projects/20180809_smallRNA_269_933_2002_human/data_visualization/short_reads_source/result/match__fileList3.list"
args.names = "Host miRNA,Host tRNA,Host snRNA,Host snoRNA,Host rRNA,Host other small RNA,Host Genome,Microbiome Bacteria,Environment Bacteria,Fungus,Non host tRNA,Non host rRNA"
#args.names = "Host miRNA,Host tRNA"
args.output = "T:/Shared/Labs/Vickers Lab/Tiger/projects/20180809_smallRNA_269_933_2002_human/data_visualization/short_reads_source/result/match2"
logger = logging.getLogger('updateCount')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
match(logger, args.input, args.names, args.annotated, args.maxMapped, args.maxNumber, args.minReadCount, args.minSampleCount, args.output)
if __name__ == "__main__":
main()
| shengqh/ngsperl | lib/SmallRNA/updateShortReadParentCount.py | Python | apache-2.0 | 7,328 | 0.019378 |
default_app_config = 'providers.edu.iowaresearch.apps.AppConfig'
"""
Example Record
<record>
<header>
<identifier>oai:ir.uiowa.edu:iwp_archive-1227</identifier>
<datestamp>2016-07-05T19:23:14Z</datestamp>
<setSpec>publication:iwp</setSpec>
<setSpec>publication:grad</setSpec>
<setSpec>publication:iwp_archive</setSpec>
<setSpec>publication:harvest</setSpec>
<setSpec>publication:fullharvest</setSpec>
</header>
<metadata>
<oai_dc:dc xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/
http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
<dc:title>Writing Sample</dc:title>
<dc:creator>Gamerro, Carlos</dc:creator>
<dc:description>
Excerpts from The Adventure of the Busts of Eva Perón and The Islands.
</dc:description>
<dc:date>2008-10-01T07:00:00Z</dc:date>
<dc:type>text</dc:type>
<dc:format>application/pdf</dc:format>
<dc:identifier>http://ir.uiowa.edu/iwp_archive/228</dc:identifier>
<dc:identifier>
http://ir.uiowa.edu/cgi/viewcontent.cgi?article=1227&context=iwp_archive
</dc:identifier>
<dc:rights>Copyright © 2008 Carlos Gamerro</dc:rights>
<dc:source>
International Writing Program Archive of Residents' Work
</dc:source>
<dc:language>eng</dc:language>
<dc:publisher>Iowa Research Online</dc:publisher>
</oai_dc:dc>
</metadata>
</record>
"""
| zamattiac/SHARE | providers/edu/iowaresearch/__init__.py | Python | apache-2.0 | 1,863 | 0.000537 |
# -*- coding: utf-8 -*-
"""Version information."""
"""
Kontalk XMPP server
Copyright (C) 2014 Kontalk Devteam <devteam@kontalk.org>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
NAME = "Kontalk XMPP server"
IDENTITY = "kontalk"
PACKAGE = "kontalk-xmppserver"
VERSION = "devel"
AUTHORS = (
{
"name": "Daniele Ricci",
"email": "daniele.athome@gmail.com"
},
)
| daniele-athome/kontalk-legacy-xmppserver | kontalk/xmppserver/version.py | Python | gpl-3.0 | 967 | 0 |
#!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import pymongo
import sys
import cPickle as pickle
import bisect
import random
import csv
import matplotlib.pyplot as plt
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
def index(a, x):
'Locate the leftmost value exactly equal to x'
i = bisect.bisect_left(a, x)
if i != len(a) and a[i] == x:
return i
raise ValueError
sys.path.append(base_directory+"/github/reduction/experimental/classifier")
sys.path.append(base_directory+"/github/pyIBCC/python")
import ibcc
from iterativeEM import IterativeEM
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
client = pymongo.MongoClient()
db = client['condor_2014-11-23']
classification_collection = db["condor_classifications"]
subject_collection = db["condor_subjects"]
gold = pickle.load(open(base_directory+"/condor_gold.pickle","rb"))
gold.sort(key = lambda x:x[1])
to_sample_from = (zip(*gold)[0])[1301:]
sample = random.sample(to_sample_from,100)
big_userList = []
big_subjectList = []
animal_count = 0
f = open(base_directory+"/Databases/condor_ibcc.csv","wb")
f.write("a,b,c\n")
alreadyDone = []
subjectVote = {}
gold_condor = []
only_one = []
vote_list = []
for count,zooniverse_id in enumerate(sample):
subject = subject_collection.find_one({"zooniverse_id":zooniverse_id})
if subject["classification_count"] < 3:
print "**"
only_one.append(zooniverse_id)
continue
print count
#gold standard
gold_classification = classification_collection.find_one({"user_name":"wreness", "subjects.zooniverse_id":zooniverse_id})
assert gold_classification["tutorial"] == False
found_condor = False
try:
mark_index = [ann.keys() for ann in gold_classification["annotations"]].index(["marks",])
markings = gold_classification["annotations"][mark_index].values()[0]
try:
for animal in markings.values():
                animal_type = animal["animal"]
                if animal_type == "condor":
                    found_condor = True
                    break
except KeyError:
continue
except ValueError:
pass
if found_condor:
gold_condor.append(1)
else:
gold_condor.append(0)
alreadyDone = []
classification_count = 0
for classification in classification_collection.find({"subjects.zooniverse_id":zooniverse_id}):
if "user_name" in classification:
user = classification["user_name"]
else:
user = classification["user_ip"]
#print user
if ("user_name" in classification) and (classification["user_name"] == "wreness"):
continue
if user in alreadyDone:
continue
classification_count += 1
if classification_count == 3:
break
alreadyDone.append(user)
if not(user in big_userList):
big_userList.append(user)
if not(zooniverse_id in big_subjectList):
big_subjectList.append(zooniverse_id)
user_index = big_userList.index(user)
subject_index = big_subjectList.index(zooniverse_id)
try:
mark_index = [ann.keys() for ann in classification["annotations"]].index(["marks",])
markings = classification["annotations"][mark_index].values()[0]
found = False
for animal in markings.values():
animal_type = animal["animal"]
if animal_type in ["condor"]:
found = True
break
if found:
vote_list.append((user_index,subject_index,1))
f.write(str(user_index) + ","+str(subject_index) + ",1\n")
if not(zooniverse_id in subjectVote):
subjectVote[zooniverse_id] = [1]
else:
subjectVote[zooniverse_id].append(1)
else:
vote_list.append((user_index,subject_index,0))
f.write(str(user_index) + ","+str(subject_index) + ",0\n")
if not(zooniverse_id in subjectVote):
subjectVote[zooniverse_id] = [0]
else:
subjectVote[zooniverse_id].append(0)
except (ValueError,KeyError):
f.write(str(user_index) + ","+str(subject_index) + ",0\n")
if not(zooniverse_id in subjectVote):
subjectVote[zooniverse_id] = [0]
else:
subjectVote[zooniverse_id].append(0)
if classification_count == 0:
print subject
assert classification_count > 0
condor_count = 0.
total_count = 0.
false_positives = []
true_positives = []
false_negatives = []
true_negatives = []
confusion = [[0.,0.],[0.,0.]]
for votes in subjectVote.values():
if np.mean(votes) >= 0.5:
condor_count += 1
confusion[1][1] += np.mean(votes)
confusion[1][0] += 1 - np.mean(votes)
true_positives.append(np.mean(votes))
#false_negatives.append(1-np.mean(votes))
else:
#false_positives.append(np.mean(votes))
true_negatives.append(1-np.mean(votes))
confusion[0][0] += 1 - np.mean(votes)
confusion[0][1] += np.mean(votes)
total_count += 1
pp = condor_count / total_count
print confusion
confusion = [[max(int(confusion[0][0]),1),max(int(confusion[0][1]),1)],[max(int(confusion[1][0]),1),max(int(confusion[1][1]),1)]]
print confusion
print pp
f.close()
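#write the pyIBCC config file; the priors nu0/alpha0 are seeded from the majority-vote proportion and confusion counts computed above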
with open(base_directory+"/Databases/condor_ibcc.py","wb") as f:
f.write("import numpy as np\n")
f.write("scores = np.array([0,1])\n")
f.write("nScores = len(scores)\n")
f.write("nClasses = 2\n")
f.write("inputFile = \""+base_directory+"/Databases/condor_ibcc.csv\"\n")
f.write("outputFile = \""+base_directory+"/Databases/condor_ibcc.out\"\n")
f.write("confMatFile = \""+base_directory+"/Databases/condor_ibcc.mat\"\n")
f.write("nu0 = np.array(["+str(int((1-pp)*100))+","+str(int(pp*100))+"])\n")
f.write("alpha0 = np.array("+str(confusion)+")\n")
#f.write("alpha0 = np.array([[185,1],[6,52]])\n")
#f.write("alpha0 = np.array([[3,1],[1,3]])\n")
#start by removing all temp files
try:
os.remove(base_directory+"/Databases/condor_ibcc.out")
except OSError:
pass
try:
os.remove(base_directory+"/Databases/condor_ibcc.mat")
except OSError:
pass
try:
os.remove(base_directory+"/Databases/condor_ibcc.csv.dat")
except OSError:
pass
#pickle.dump((big_subjectList,big_userList),open(base_directory+"/Databases/tempOut.pickle","wb"))
ibcc.runIbcc(base_directory+"/Databases/condor_ibcc.py")
values = []
errors = 0
low = 0
X_positive = []
X_negative = []
with open(base_directory+"/Databases/condor_ibcc.out","rb") as f:
ibcc_results = csv.reader(f, delimiter=' ')
for ii,row in enumerate(ibcc_results):
if ii == 20000:
break
wreness_condor = gold_condor[ii]
ibcc_condor = float(row[2])
if wreness_condor == 0:
X_negative.append(ibcc_condor)
else:
X_positive.append(ibcc_condor)
#print X_negative
# print X_positive
# plt.hist([X_positive,X_negative],10)
# plt.show()
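#sweep a threshold alpha over every observed score to trace the ROC curve (x = false positive rate, y = true positive rate) for the IBCC output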
alpha_list = X_negative[:]
alpha_list.extend(X_positive)
alpha_list.sort()
roc_X = []
roc_Y = []
for alpha in alpha_list:
positive_count = sum([1 for x in X_positive if x >= alpha])
positive_rate = positive_count/float(len(X_positive))
negative_count = sum([1 for x in X_negative if x >= alpha])
negative_rate = negative_count/float(len(X_negative))
roc_X.append(negative_rate)
roc_Y.append(positive_rate)
#print roc_X
plt.plot(roc_X,roc_Y,color="red")
X_positive = []
X_negative = []
#repeat with MV
for subject_index,zooniverse_id in enumerate(big_subjectList):
votes = subjectVote[zooniverse_id]
wreness_condor = gold_condor[subject_index]
if wreness_condor == 0:
X_negative.append(np.mean(votes))
else:
X_positive.append(np.mean(votes))
alpha_list = X_negative[:]
alpha_list.extend(X_positive)
alpha_list.sort()
roc_X = []
roc_Y = []
for alpha in alpha_list:
positive_count = sum([1 for x in X_positive if x >= alpha])
positive_rate = positive_count/float(len(X_positive))
negative_count = sum([1 for x in X_negative if x >= alpha])
negative_rate = negative_count/float(len(X_negative))
roc_X.append(negative_rate)
roc_Y.append(positive_rate)
#print roc_X
plt.plot(roc_X,roc_Y,color="green")
classify = IterativeEM()
classify.__classify__(vote_list,2)
estimates = classify.__getEstimates__()
X_positive = []
X_negative = []
for subject_index,zooniverse_id in enumerate(big_subjectList):
probability = estimates[subject_index]
wreness_condor = gold_condor[subject_index]
if wreness_condor == 0:
X_negative.append(probability)
else:
X_positive.append(probability)
alpha_list = X_negative[:]
alpha_list.extend(X_positive)
alpha_list.sort()
roc_X = []
roc_Y = []
for alpha in alpha_list:
positive_count = sum([1 for x in X_positive if x >= alpha])
positive_rate = positive_count/float(len(X_positive))
negative_count = sum([1 for x in X_negative if x >= alpha])
negative_rate = negative_count/float(len(X_negative))
roc_X.append(negative_rate)
roc_Y.append(positive_rate)
#print roc_X
plt.plot(roc_X,roc_Y,color="blue")
#plt.xlim((0,1.05))
plt.plot((0,1),(0,1),'--')
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
#plt.plot([0.058],[0.875],'o')
plt.show()
| camallen/aggregation | experimental/condor/presentation/condor_IBCC.py | Python | apache-2.0 | 9,662 | 0.00859 |
# -*- coding: utf-8 -*-
from exceptions import DropPage, AbortProcess
| sunlightlabs/wikipedia-dump-tools | wikitools/__init__.py | Python | gpl-3.0 | 71 | 0 |
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2011 Gareth McMullin <gareth@blacksphere.co.nz>
## Copyright (C) 2012-2014 Uwe Hermann <uwe@hermann-uwe.de>
## Copyright (C) 2019 DreamSourceLab <support@dreamsourcelab.com>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
from collections import namedtuple
Data = namedtuple('Data', ['ss', 'es', 'val'])
'''
OUTPUT_PYTHON format:
Packet:
[<ptype>, <data1>, <data2>]
<ptype>:
- 'DATA': <data1> contains the MOSI data, <data2> contains the MISO data.
The data is _usually_ 8 bits (but can also be fewer or more bits).
Both data items are Python numbers (not strings), or None if the respective
channel was not supplied.
- 'BITS': <data1>/<data2> contain a list of bit values in this MOSI/MISO data
item, and for each of those also their respective start-/endsample numbers.
- 'CS-CHANGE': <data1> is the old CS# pin value, <data2> is the new value.
Both data items are Python numbers (0/1), not strings. At the beginning of
the decoding a packet is generated with <data1> = None and <data2> being the
initial state of the CS# pin or None if the chip select pin is not supplied.
- 'TRANSFER': <data1>/<data2> contain a list of Data() namedtuples for each
byte transferred during this block of CS# asserted time. Each Data() has
fields ss, es, and val.
Examples:
['CS-CHANGE', None, 1]
['CS-CHANGE', 1, 0]
['DATA', 0xff, 0x3a]
['BITS', [[1, 80, 82], [1, 83, 84], [1, 85, 86], [1, 87, 88],
[1, 89, 90], [1, 91, 92], [1, 93, 94], [1, 95, 96]],
[[0, 80, 82], [1, 83, 84], [0, 85, 86], [1, 87, 88],
[1, 89, 90], [1, 91, 92], [0, 93, 94], [0, 95, 96]]]
['DATA', 0x65, 0x00]
['DATA', 0xa8, None]
['DATA', None, 0x55]
['CS-CHANGE', 0, 1]
['TRANSFER', [Data(ss=80, es=96, val=0xff), ...],
[Data(ss=80, es=96, val=0x3a), ...]]
'''
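# Illustrative sketch (an assumption, not part of this decoder): a decoder
# stacked on top of SPI would normally declare inputs = ['spi'] and receive
# one decode() call per packet in the format documented above, roughly:
#
#     def decode(self, ss, es, data):
#         ptype = data[0]
#         if ptype == 'DATA':
#             mosi, miso = data[1], data[2]  # either may be None
#             ...                            # handle one dataword
#         elif ptype == 'TRANSFER':
#             mosi_bytes, miso_bytes = data[1], data[2]  # lists of Data()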
# Key: (CPOL, CPHA). Value: SPI mode.
# Clock polarity (CPOL) = 0/1: Clock is low/high when inactive.
# Clock phase (CPHA) = 0/1: Data is valid on the leading/trailing clock edge.
spi_mode = {
(0, 0): 0, # Mode 0
(0, 1): 1, # Mode 1
(1, 0): 2, # Mode 2
(1, 1): 3, # Mode 3
}
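# Example: the option defaults cpol=0, cpha=0 look up spi_mode[(0, 0)] == 0
# (SPI mode 0, bits sampled on the rising CLK edge), while cpol=1, cpha=0
# map to mode 2, sampled on the falling edge (see decode() below).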
class ChannelError(Exception):
pass
class Decoder(srd.Decoder):
api_version = 3
id = '1:spi'
name = '1:SPI'
longname = 'Serial Peripheral Interface'
desc = 'Full-duplex, synchronous, serial bus.'
license = 'gplv2+'
inputs = ['logic']
outputs = ['spi']
tags = ['Embedded/industrial']
channels = (
{'id': 'clk', 'type': 0, 'name': 'CLK', 'desc': 'Clock'},
)
optional_channels = (
{'id': 'miso', 'type': 107, 'name': 'MISO', 'desc': 'Master in, slave out'},
{'id': 'mosi', 'type': 109, 'name': 'MOSI', 'desc': 'Master out, slave in'},
{'id': 'cs', 'type': -1, 'name': 'CS#', 'desc': 'Chip-select'},
)
options = (
{'id': 'cs_polarity', 'desc': 'CS# polarity', 'default': 'active-low',
'values': ('active-low', 'active-high')},
{'id': 'cpol', 'desc': 'Clock polarity (CPOL)', 'default': 0,
'values': (0, 1)},
{'id': 'cpha', 'desc': 'Clock phase (CPHA)', 'default': 0,
'values': (0, 1)},
{'id': 'bitorder', 'desc': 'Bit order',
'default': 'msb-first', 'values': ('msb-first', 'lsb-first')},
{'id': 'wordsize', 'desc': 'Word size', 'default': 8,
'values': tuple(range(5,129,1))},
{'id': 'frame', 'desc': 'Frame Decoder', 'default': 'no',
'values': ('yes', 'no')},
)
annotations = (
('106', 'miso-data', 'MISO data'),
('108', 'mosi-data', 'MOSI data'),
('207', 'miso-bits', 'MISO bits'),
('209', 'mosi-bits', 'MOSI bits'),
('1000', 'warnings', 'Human-readable warnings'),
('6', 'miso-transfer', 'MISO transfer'),
('8', 'mosi-transfer', 'MOSI transfer'),
)
annotation_rows = (
('miso-bits', 'MISO bits', (2,)),
('miso-data', 'MISO data', (0,)),
('miso-transfer', 'MISO transfer', (5,)),
('mosi-bits', 'MOSI bits', (3,)),
('mosi-data', 'MOSI data', (1,)),
('mosi-transfer', 'MOSI transfer', (6,)),
('other', 'Other', (4,)),
)
binary = (
('miso', 'MISO'),
('mosi', 'MOSI'),
)
def __init__(self):
self.reset()
def reset(self):
self.samplerate = None
self.bitcount = 0
self.misodata = self.mosidata = 0
self.misobits = []
self.mosibits = []
self.misobytes = []
self.mosibytes = []
self.ss_block = -1
self.samplenum = -1
self.ss_transfer = -1
self.cs_was_deasserted = False
self.have_cs = self.have_miso = self.have_mosi = None
def start(self):
self.out_python = self.register(srd.OUTPUT_PYTHON)
self.out_ann = self.register(srd.OUTPUT_ANN)
self.out_binary = self.register(srd.OUTPUT_BINARY)
self.out_bitrate = self.register(srd.OUTPUT_META,
meta=(int, 'Bitrate', 'Bitrate during transfers'))
self.bw = (self.options['wordsize'] + 7) // 8
def metadata(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
self.samplerate = value
def putw(self, data):
self.put(self.ss_block, self.samplenum, self.out_ann, data)
def putdata(self, frame):
# Pass MISO and MOSI bits and then data to the next PD up the stack.
so = self.misodata if self.have_miso else None
si = self.mosidata if self.have_mosi else None
so_bits = self.misobits if self.have_miso else None
si_bits = self.mosibits if self.have_mosi else None
if self.have_miso:
ss, es = self.misobits[-1][1], self.misobits[0][2]
bdata = so.to_bytes(self.bw, byteorder='big')
self.put(ss, es, self.out_binary, [0, bdata])
if self.have_mosi:
ss, es = self.mosibits[-1][1], self.mosibits[0][2]
bdata = si.to_bytes(self.bw, byteorder='big')
self.put(ss, es, self.out_binary, [1, bdata])
self.put(ss, es, self.out_python, ['BITS', si_bits, so_bits])
self.put(ss, es, self.out_python, ['DATA', si, so])
if frame:
if self.have_miso:
self.misobytes.append(Data(ss=ss, es=es, val=so))
if self.have_mosi:
self.mosibytes.append(Data(ss=ss, es=es, val=si))
# Bit annotations.
if self.have_miso:
for bit in self.misobits:
self.put(bit[1], bit[2], self.out_ann, [2, ['%d' % bit[0]]])
if self.have_mosi:
for bit in self.mosibits:
self.put(bit[1], bit[2], self.out_ann, [3, ['%d' % bit[0]]])
# Dataword annotations.
if self.have_miso:
self.put(ss, es, self.out_ann, [0, ['%02X' % self.misodata]])
if self.have_mosi:
self.put(ss, es, self.out_ann, [1, ['%02X' % self.mosidata]])
def reset_decoder_state(self):
self.misodata = 0 if self.have_miso else None
self.mosidata = 0 if self.have_mosi else None
self.misobits = [] if self.have_miso else None
self.mosibits = [] if self.have_mosi else None
self.bitcount = 0
def cs_asserted(self, cs):
active_low = (self.options['cs_polarity'] == 'active-low')
return (cs == 0) if active_low else (cs == 1)
def handle_bit(self, miso, mosi, clk, cs, frame):
# If this is the first bit of a dataword, save its sample number.
if self.bitcount == 0:
self.ss_block = self.samplenum
self.cs_was_deasserted = \
not self.cs_asserted(cs) if self.have_cs else False
ws = self.options['wordsize']
bo = self.options['bitorder']
# Receive MISO bit into our shift register.
if self.have_miso:
if bo == 'msb-first':
self.misodata |= miso << (ws - 1 - self.bitcount)
else:
self.misodata |= miso << self.bitcount
# Receive MOSI bit into our shift register.
if self.have_mosi:
if bo == 'msb-first':
self.mosidata |= mosi << (ws - 1 - self.bitcount)
else:
self.mosidata |= mosi << self.bitcount
# Guesstimate the endsample for this bit (can be overridden below).
es = self.samplenum
if self.bitcount > 0:
if self.have_miso:
es += self.samplenum - self.misobits[0][1]
elif self.have_mosi:
es += self.samplenum - self.mosibits[0][1]
if self.have_miso:
self.misobits.insert(0, [miso, self.samplenum, es])
if self.have_mosi:
self.mosibits.insert(0, [mosi, self.samplenum, es])
if self.bitcount > 0 and self.have_miso:
self.misobits[1][2] = self.samplenum
if self.bitcount > 0 and self.have_mosi:
self.mosibits[1][2] = self.samplenum
self.bitcount += 1
# Continue to receive if not enough bits were received, yet.
if self.bitcount != ws:
return
self.putdata(frame)
# Meta bitrate.
if self.samplerate:
elapsed = 1 / float(self.samplerate)
elapsed *= (self.samplenum - self.ss_block + 1)
bitrate = int(1 / elapsed * ws)
self.put(self.ss_block, self.samplenum, self.out_bitrate, bitrate)
if self.have_cs and self.cs_was_deasserted:
self.putw([4, ['CS# was deasserted during this data word!']])
self.reset_decoder_state()
def find_clk_edge(self, miso, mosi, clk, cs, first, frame):
if self.have_cs and (first or (self.matched & (0b1 << self.have_cs))):
# Send all CS# pin value changes.
oldcs = None if first else 1 - cs
self.put(self.samplenum, self.samplenum, self.out_python,
['CS-CHANGE', oldcs, cs])
if frame:
if self.cs_asserted(cs):
self.ss_transfer = self.samplenum
self.misobytes = []
self.mosibytes = []
elif self.ss_transfer != -1:
if self.have_miso:
self.put(self.ss_transfer, self.samplenum, self.out_ann,
[5, [' '.join(format(x.val, '02X') for x in self.misobytes)]])
if self.have_mosi:
self.put(self.ss_transfer, self.samplenum, self.out_ann,
[6, [' '.join(format(x.val, '02X') for x in self.mosibytes)]])
self.put(self.ss_transfer, self.samplenum, self.out_python,
['TRANSFER', self.mosibytes, self.misobytes])
# Reset decoder state when CS# changes (and the CS# pin is used).
self.reset_decoder_state()
# We only care about samples if CS# is asserted.
if self.have_cs and not self.cs_asserted(cs):
return
# Ignore sample if the clock pin hasn't changed.
if first or not (self.matched & (0b1 << 0)):
return
# Found the correct clock edge, now get the SPI bit(s).
self.handle_bit(miso, mosi, clk, cs, frame)
def decode(self):
# The CLK input is mandatory. Other signals are (individually)
# optional. Yet either MISO or MOSI (or both) must be provided.
# Tell stacked decoders when we don't have a CS# signal.
if not self.has_channel(0):
raise ChannelError('CLK pin required.')
self.have_miso = self.has_channel(1)
self.have_mosi = self.has_channel(2)
if not self.have_miso and not self.have_mosi:
raise ChannelError('Either MISO or MOSI (or both) pins required.')
self.have_cs = self.has_channel(3)
if not self.have_cs:
self.put(0, 0, self.out_python, ['CS-CHANGE', None, None])
frame = self.options['frame'] == 'yes'
# We want all CLK changes. We want all CS changes if CS is used.
# Map 'have_cs' from boolean to an integer index. This simplifies
# evaluation in other locations.
# Sample data on rising/falling clock edge (depends on mode).
mode = spi_mode[self.options['cpol'], self.options['cpha']]
if mode == 0 or mode == 3: # Sample on rising clock edge
wait_cond = [{0: 'r'}]
else: # Sample on falling clock edge
wait_cond = [{0: 'f'}]
if self.have_cs:
self.have_cs = len(wait_cond)
wait_cond.append({3: 'e'})
# "Pixel compatibility" with the v2 implementation. Grab and
# process the very first sample before checking for edges. The
# previous implementation did this by seeding old values with
# None, which led to an immediate "change" in comparison.
(clk, miso, mosi, cs) = self.wait({})
self.find_clk_edge(miso, mosi, clk, cs, True, frame)
while True:
(clk, miso, mosi, cs) = self.wait(wait_cond)
self.find_clk_edge(miso, mosi, clk, cs, False, frame)
| DreamSourceLab/DSView | libsigrokdecode4DSL/decoders/1-spi/pd.py | Python | gpl-3.0 | 13,821 | 0.002532 |
import copy
from pdcglobal import *
from .effect import Effect
import dungeon
class FloatingEyeGazeEffect(Effect):
def __init__(self, host, owner):
dur = d(10)
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Stuns the enemy'
def tick(self):
self.host.timer += 1000
if self.host == self.host.game.player:
self.host.game.shout('You are stunned by the Floating Eye`s gaze!')
else:
self.host.game.shout('%s is stunned by the Floating Eye`s gaze!' % (self.host.name))
Effect.tick(self)
class AcidSplatterEffect(Effect):
notrigger=[]
def __init__(self, host, owner):
dur = 1
Effect.__init__(self, dur, host, owner)
actors = owner.game.get_all_srd_actors(owner.pos())
for act in actors:
Effect.__init__(self, dur, act, owner)
weaponinfotext = 'Splatters the enemy'
def tick(self):
self.host.game.do_damage(self.host, d(3), D_ACID, self.owner)
if self.host == self.host.game.player:
self.host.game.shout('You are splashed by acid!')
else:
self.host.game.shout('%s is splashed by acid!' % (self.host.name))
Effect.tick(self)
class FrostEffect(Effect):
def __init__(self, host, owner):
dur = 1
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Freezes the enemy'
def tick(self):
self.host.game.do_damage(self.host, d(3), D_COLD, self.owner)
if self.host == self.host.game.player:
self.host.game.shout('You are freezing!')
else:
self.host.game.shout('%s is freezing!' % (self.host.name))
Effect.tick(self)
class HeatEffect(Effect):
def __init__(self, host, owner):
dur = 1
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Burns the enemy'
def tick(self):
self.host.game.do_damage(self.host, d(3), D_FIRE, self.owner)
if self.host == self.host.game.player:
self.host.game.shout('You are getting burned!')
else:
self.host.game.shout('%s is getting burned!' % (self.host.name))
Effect.tick(self)
class SplitEffect(Effect):
notrigger=[]
def __init__(self, host, owner):
dur = 1
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'You should not read this'
def tick(self):
new_pos = self.host.game.get_free_adj(self.owner.pos())
if new_pos != None:
self.owner.game.shout('%s splits in half!' % (self.owner.name))
new = dungeon.Populator.create_creature(self.owner.pop_name, self.owner.filename)
new.set_pos(new_pos)
new.game.add_actor(new)
self.owner.health = self.owner.health / 2 + 1
self.owner.cur_health = self.owner.cur_health / 2 + 1
new.health = self.owner.health
new.cur_health = self.owner.cur_health
new.xp_value = self.owner.xp_value / 3 + 2
Effect.tick(self)
class DazzleEffect(Effect):
def __init__(self, host, owner):
dur = d(4)
Effect.__init__(self, dur, host, owner)
weaponinfotext = 'Blinds the enemy'
def tick(self):
self.host.dazzled = True
if self.host == self.host.game.player:
self.host.game.shout('You are blinded!')
else:
self.host.game.shout('%s is blinded!' % (self.host.name))
Effect.tick(self)
| cycladesnz/chambersAndCreatures | src/effects/dv_effects.py | Python | gpl-2.0 | 3,581 | 0.006981 |
import re
import os.path
from operator import or_
from functools import reduce
from datetime import date
import logging
import urllib.parse
from django.conf import settings
from django.urls import reverse
from django.db.models.functions import ExtractYear
from django.db.models import Sum, Count
from django.template.loader import render_to_string
from django.utils.translation import gettext_lazy as _, get_language
from elasticsearch_dsl import (
DocType,
Object,
Keyword,
MetaField,
Text,
Completion,
Nested,
Date,
Boolean,
Search,
Double,
Index,
analyzer,
tokenizer,
)
from elasticsearch_dsl.query import Q
import jmespath
from procurements.models import Transactions
from .constants import (
CATALOG_INDICES,
BANK_EDRPOUS,
INCOME_TYPES,
MONETARY_ASSETS_TYPES,
OLD_DECLARATION_INDEX,
NACP_DECLARATION_INDEX,
NACP_DECLARATION_NEW_FORMAT_INDEX,
NUMBER_OF_SHARDS,
NUMBER_OF_REPLICAS,
NACP_SELECTORS_TO_TRANSLATE,
PAPER_SELECTORS_TO_TRANSLATE,
)
from .utils import parse_fullname, blacklist
from .templatetags.catalog import parse_raw_family_string
from .converters import PaperToNACPConverter, ConverterError
from .translator import HTMLTranslator
logger = logging.getLogger(__name__)
class NoneAwareDate(Date):
"""Elasticsearch DSL Date field chokes on None values and parses empty
strings as current date, hence the workaround.
TODO: move this upstream in some form."""
def _to_python(self, data):
if data is None:
return data
return super(NoneAwareDate, self)._to_python(data)
namesAutocompleteAnalyzer = analyzer(
"namesAutocompleteAnalyzer",
tokenizer=tokenizer(
"autocompleteTokenizer",
type="edge_ngram",
min_gram=1,
max_gram=25,
token_chars=["letter", "digit"],
),
filter=["lowercase"],
)
namesAutocompleteSearchAnalyzer = analyzer(
"namesAutocompleteSearchAnalyzer", tokenizer=tokenizer("whitespace"), filter=["lowercase"]
)
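# Illustrative note (assumption about stock Elasticsearch behaviour, not taken
# from this file): the edge_ngram tokenizer above (min_gram=1, max_gram=25,
# letters/digits only) indexes a value such as "Петренко" as the lowercased
# prefix tokens "п", "пе", "пет", ..., "петренко", while the search analyzer
# merely splits the query on whitespace and lowercases it. That asymmetry is
# what lets the names_autocomplete field match partially typed names.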
class AbstractDeclaration(object):
def infocard(self):
        raise NotImplementedError()
def raw_source(self):
        raise NotImplementedError()
def unified_source(self):
        raise NotImplementedError()
def related_entities(self):
        raise NotImplementedError()
@property
def original_url(self):
        raise NotImplementedError()
def _is_change_form(self):
        raise NotImplementedError
def related_documents(self):
return [
document.api_response(fields=["related_entities", "guid", "aggregated_data"])
for document in self.similar_declarations(limit=100)
if not document._is_change_form()
]
def guid(self):
return self.meta.id
def extra_phrases(self):
return [
self.general.post.post,
self.general.post.office,
self.general.post.region,
getattr(self.general.post, "actual_region", ""),
self.intro.doc_type,
]
def prepare_translations(self, language, infocard_only=False):
assert self.CONTENT_SELECTORS, "You should define CONTENT_SELECTORS first"
if language == "en":
extra_phrases = self.extra_phrases()
if infocard_only:
self.translator = HTMLTranslator(html=None, selectors=[], extra_phrases=extra_phrases)
else:
self.translator = HTMLTranslator(
html=self.raw_html(),
selectors=self.CONTENT_SELECTORS,
extra_phrases=extra_phrases,
)
def raw_en_html(self):
assert hasattr(self, "translator"), "You should call prepare_translations first"
return self.translator.get_translated_html()
def _name_search_query(self):
name = "{} {} {}".format(self.general.last_name, self.general.name, self.general.patronymic).strip()
return urllib.parse.quote(name)
def _full_name(self, language):
name = "{} {} {}".format(self.general.last_name, self.general.name, self.general.patronymic).strip()
if language == "en":
assert hasattr(self, "translator"), "You should call prepare_translations first"
phrase = self.translator.translate(name, just_transliterate=True)
return phrase["translation"]
else:
return name
def _translate_one_field(self, field, language):
if field:
if language == "en":
assert hasattr(self, "translator"), "You should call prepare_translations first"
phrase = self.translator.translate(field)
return phrase["translation"]
else:
return field
else:
return ""
def _position(self, language):
return self._translate_one_field(self.general.post.post, language)
def _office(self, language):
return self._translate_one_field(self.general.post.office, language)
def _region(self, language):
return self._translate_one_field(self.general.post.region, language)
def _actual_region(self, language):
return self._translate_one_field(self.general.post.actual_region, language)
def _declaration_type(self, language):
return self._translate_one_field(self.intro.doc_type, language)
def api_response(self, fields=None):
all_fields = ["guid", "infocard", "raw_source", "unified_source", "related_entities"]
if fields is None:
fields = all_fields
else:
fields = [f for f in fields if f in set(all_fields + ["guid", "aggregated_data", "related_documents"])]
return {f: getattr(self, f)() for f in fields}
def similar_declarations(self, language=None, limit=12):
res = {"exact": [], "maybe": []}
if getattr(self.intro, "user_declarant_id", None):
index = OLD_DECLARATION_INDEX
res["exact"] = (
NACPDeclaration.search()
.filter("term", **{"intro.user_declarant_id": self.intro.user_declarant_id})
.query(~Q("term", _id=self.meta.id))
.sort("-intro.doc_type")
)
else:
index = CATALOG_INDICES
fields = [
"general.last_name",
"general.name",
"general.patronymic",
"general.full_name",
]
res["maybe"] = (
Search(index=index)
.query(
"multi_match",
query=self.general.full_name,
operator="and",
fields=fields,
)
.query(~Q("term", _id=self.meta.id))
)
for k, s in res.items():
if not s:
continue
s = s.doc_type(NACPDeclaration, Declaration)
if k == "maybe":
s = s[:limit]
else:
s = s[:30]
res[k] = s.execute()
if language is not None:
for d in res[k]:
d.prepare_translations(language, infocard_only=True)
return res
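    # Usage sketch (hypothetical call, variable names invented): for a loaded
    # declaration `decl`,
    #     groups = decl.similar_declarations(language="uk")
    #     groups["exact"]  # other filings with the same NACP declarant id
    #     groups["maybe"]  # name-based matches across the catalog indices
    # "exact" is only populated for documents carrying intro.user_declarant_id;
    # paper-era declarations fall back to the name-based "maybe" search.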
def family_declarations(self, language=None, limit=12, return_full_body=False):
def filter_silly_names(name):
if not name:
return False
last_name, first_name, patronymic = parse_fullname(name)
if len(first_name) == 1 or first_name.endswith("."):
return False
if len(patronymic) == 1 or patronymic.endswith("."):
return False
return True
s = Search(index=CATALOG_INDICES)
family_members = self.get_family_members()
subqs = []
for name in filter(filter_silly_names, family_members):
subqs.append(
Q(
"multi_match",
query=name,
operator="and",
fields=[
"general.last_name",
"general.name",
"general.patronymic",
"general.full_name",
],
)
)
if subqs:
s = s.query(reduce(or_, subqs)).query(~Q("term", _id=self.meta.id))
s = s.doc_type(NACPDeclaration, Declaration)
docs = s[:limit].execute()
if language is not None:
for d in docs:
d.prepare_translations(language, infocard_only=True)
return docs
else:
return None
def get_family_members(self):
"""
Should return list of family member names
"""
family = getattr(self.general, "family", None)
if family:
for member in family:
if hasattr(member, "family_name"):
yield member.family_name
else:
for member in parse_raw_family_string(getattr(self.general, "family_raw", "")):
if "family_name" in member:
yield member["family_name"]
declarations_idx = Index(OLD_DECLARATION_INDEX)
declarations_idx.settings(number_of_shards=NUMBER_OF_SHARDS, number_of_replicas=NUMBER_OF_REPLICAS)
declarations_idx.analyzer(namesAutocompleteAnalyzer)
declarations_idx.analyzer(namesAutocompleteSearchAnalyzer)
@declarations_idx.doc_type
class Declaration(DocType, AbstractDeclaration):
"""Declaration document.
Assumes there's a dynamic mapping with all fields not indexed by default."""
persons = Text(analyzer="ukrainian", copy_to="all")
countries = Text(analyzer="ukrainian", copy_to="all")
companies = Text(analyzer="ukrainian", copy_to="all")
names_autocomplete = Text(
analyzer="namesAutocompleteAnalyzer",
search_analyzer="namesAutocompleteSearchAnalyzer",
fields={"raw": Text(index=True)},
term_vector="with_positions_offsets",
)
all = Text(analyzer="ukrainian")
general = Object(
properties={
"full_name_suggest": Completion(preserve_separators=False),
"full_name": Text(index=True, analyzer="ukrainian"),
"full_name_for_sorting": Keyword(index=True, ignore_above=100), # only for sorting purposes
"name": Text(index=True, analyzer="ukrainian"),
"patronymic": Text(index=True, analyzer="ukrainian"),
"last_name": Text(index=True, analyzer="ukrainian"),
"family_raw": Text(index=True, analyzer="ukrainian"),
"family": Nested(
properties={
"name": Text(index=True, analyzer="ukrainian"),
"relations": Keyword(index=False),
"inn": Keyword(index=False),
}
),
"post_raw": Text(index=True, analyzer="ukrainian"),
"post": Object(
properties={
"region": Text(
index=True,
analyzer="ukrainian",
fields={"raw": Keyword(index=True)},
),
"office": Text(
index=True,
analyzer="ukrainian",
fields={"raw": Keyword(index=True)},
),
"post": Text(
index=True,
analyzer="ukrainian",
fields={"raw": Keyword(index=True)},
),
}
),
"addresses": Nested(
properties={
"place": Text(index=False),
"place_hidden": Boolean(index=False),
"place_district": Text(index=False),
"place_district_hidden": Boolean(index=False),
"place_city": Text(index=False),
"place_city_hidden": Boolean(index=False),
"place_city_type": Keyword(index=False),
"place_city_type_hidden": Boolean(index=False),
"place_address": Text(index=False),
"place_address_hidden": Boolean(index=False),
"place_address_type": Keyword(index=False),
}
),
}
)
declaration = Object(
properties={
"date": NoneAwareDate(),
"notfull": Boolean(index=False),
"notfull_lostpages": Keyword(index=False),
"additional_info": Boolean(index=False),
"additional_info_text": Text(index=False),
"needs_scancopy_check": Boolean(index=False),
}
)
intro = Object(
properties={
"declaration_year": Keyword(index=True),
"doc_type": Keyword(index=True),
"date": NoneAwareDate(index=True),
}
)
ft_src = Text(index=True, analyzer="ukrainian", copy_to="all")
    # concatenated from a set of fields for regular search (not deepsearch mode)
index_card = Text(index=True, analyzer="ukrainian")
INDEX_CARD_FIELDS = [
"general.last_name",
"general.name",
"general.patronymic",
"general.full_name",
"general.post.post",
"general.post.office",
"general.post.region",
"general.post.actual_region",
"intro.declaration_year",
"intro.doc_type",
"declaration.source",
"declaration.url",
]
CONTENT_SELECTORS = PAPER_SELECTORS_TO_TRANSLATE
INCOME_SINGLE_PROPERTIES = {
"value": Keyword(index=False),
"value_unclear": Boolean(index=False),
"comment": Text(index=False),
"family": Keyword(index=False),
"family_unclear": Boolean(index=False),
"family_comment": Text(index=False),
}
INCOME_LIST_PROPERTIES = {
"country": Keyword(index=False),
"country_comment": Text(index=False),
"cur": Keyword(index=False),
"cur_units": Keyword(index=False),
"uah_equal": Keyword(index=False),
}
income = Object(
properties={
"5": Object(properties=INCOME_SINGLE_PROPERTIES),
"6": Object(properties=INCOME_SINGLE_PROPERTIES),
"7": Object(properties=INCOME_SINGLE_PROPERTIES),
"8": Object(properties=INCOME_SINGLE_PROPERTIES),
"9": Object(properties=INCOME_SINGLE_PROPERTIES),
"10": Object(properties=INCOME_SINGLE_PROPERTIES),
"11": Object(properties=INCOME_SINGLE_PROPERTIES),
"12": Object(properties=INCOME_SINGLE_PROPERTIES),
"13": Object(properties=INCOME_SINGLE_PROPERTIES),
"14": Object(properties=INCOME_SINGLE_PROPERTIES),
"15": Object(properties=INCOME_SINGLE_PROPERTIES),
"16": Object(properties=INCOME_SINGLE_PROPERTIES),
"17": Object(properties=INCOME_SINGLE_PROPERTIES),
"18": Object(properties=INCOME_SINGLE_PROPERTIES),
"19": Object(properties=INCOME_SINGLE_PROPERTIES),
"20": Object(properties=INCOME_SINGLE_PROPERTIES),
"21": Nested(properties=INCOME_LIST_PROPERTIES),
"22": Nested(properties=INCOME_LIST_PROPERTIES),
}
)
ESTATE_PROPERTIES = {
"region": Text(index=False),
"address": Text(index=False),
"space": Keyword(index=False),
"space_units": Keyword(index=False),
"space_comment": Text(index=False),
"costs": Keyword(index=False),
"costs_comment": Text(index=False),
"costs_rent": Keyword(index=False),
"costs_rent_comment": Text(index=False),
"costs_property": Keyword(index=False),
"costs_property_comment": Text(index=False),
}
estate = Object(
properties={
"23": Nested(properties=ESTATE_PROPERTIES),
"24": Nested(properties=ESTATE_PROPERTIES),
"25": Nested(properties=ESTATE_PROPERTIES),
"26": Nested(properties=ESTATE_PROPERTIES),
"27": Nested(properties=ESTATE_PROPERTIES),
"28": Nested(properties=ESTATE_PROPERTIES),
"29": Nested(properties=ESTATE_PROPERTIES),
"30": Nested(properties=ESTATE_PROPERTIES),
"31": Nested(properties=ESTATE_PROPERTIES),
"32": Nested(properties=ESTATE_PROPERTIES),
"33": Nested(properties=ESTATE_PROPERTIES),
"34": Nested(properties=ESTATE_PROPERTIES),
}
)
VEHICLE_PROPERTIES = {
"brand": Text(index=False),
"brand_info": Text(index=False),
"year": Keyword(index=False),
"sum": Keyword(index=False),
"sum_comment": Text(index=False),
"sum_rent": Keyword(index=False),
"sum_rent_comment": Text(index=False),
"brand_hidden": Boolean(index=False),
"brand_info_hidden": Boolean(index=False),
"brand_info_unclear": Boolean(index=False),
}
vehicle = Object(
properties={
"35": Nested(properties=VEHICLE_PROPERTIES),
"36": Nested(properties=VEHICLE_PROPERTIES),
"37": Nested(properties=VEHICLE_PROPERTIES),
"38": Nested(properties=VEHICLE_PROPERTIES),
"39": Nested(properties=VEHICLE_PROPERTIES),
"40": Nested(properties=VEHICLE_PROPERTIES),
"41": Nested(properties=VEHICLE_PROPERTIES),
"42": Nested(properties=VEHICLE_PROPERTIES),
"43": Nested(properties=VEHICLE_PROPERTIES),
"44": Nested(properties=VEHICLE_PROPERTIES),
}
)
BANKS_PROPERTIES = {
"sum": Keyword(index=False),
"sum_hidden": Boolean(index=False),
"sum_units": Keyword(index=False),
"sum_comment": Text(index=False),
"sum_foreign": Keyword(index=False),
"sum_foreign_units": Keyword(index=False),
"sum_foreign_comment": Text(index=False),
}
banks = Object(
properties={
"45": Nested(properties=BANKS_PROPERTIES),
"46": Nested(properties=BANKS_PROPERTIES),
"47": Nested(properties=BANKS_PROPERTIES),
"48": Nested(properties=BANKS_PROPERTIES),
"49": Nested(properties=BANKS_PROPERTIES),
"50": Nested(properties=BANKS_PROPERTIES),
"51": Nested(properties=BANKS_PROPERTIES),
"52": Nested(properties=BANKS_PROPERTIES),
"53": Nested(properties=BANKS_PROPERTIES),
}
)
LIABILITIES_PROPERTIES = {
"sum": Keyword(index=False),
"sum_comment": Text(index=False),
"sum_units": Keyword(index=False),
"sum_foreign": Keyword(index=False),
"sum_foreign_comment": Text(index=False),
}
liabilities = Object(
properties={
"54": Nested(properties=LIABILITIES_PROPERTIES),
"55": Nested(properties=LIABILITIES_PROPERTIES),
"56": Nested(properties=LIABILITIES_PROPERTIES),
"57": Nested(properties=LIABILITIES_PROPERTIES),
"58": Nested(properties=LIABILITIES_PROPERTIES),
"59": Nested(properties=LIABILITIES_PROPERTIES),
"60": Nested(properties=LIABILITIES_PROPERTIES),
"61": Nested(properties=LIABILITIES_PROPERTIES),
"62": Nested(properties=LIABILITIES_PROPERTIES),
"63": Nested(properties=LIABILITIES_PROPERTIES),
"64": Nested(properties=LIABILITIES_PROPERTIES),
}
)
def raw_source(self):
src = self.to_dict()
return blacklist(src, ["ft_src", "index_card", "translator"])
def infocard(self):
return {
"first_name": self.general.name,
"patronymic": self.general.patronymic,
"last_name": self.general.last_name,
"office": self.general.post.office,
"position": self.general.post.post,
"source": getattr(self.declaration, "source", getattr(self, "source", "")),
"id": self.meta.id,
"url": settings.SITE_URL + reverse("details", kwargs={"declaration_id": self.meta.id}),
"document_type": self.intro.doc_type,
"is_corrected": False,
"declaration_year": getattr(self.intro, "declaration_year"),
"created_date": getattr(self.intro, "date", getattr(self.declaration, "date", "")),
}
def related_entities(self):
return {
"people": {"family": list(self.get_family_members())},
"documents": {"corrected": [], "originals": []},
"companies": {"owned": [], "related": [], "all": []},
}
def unified_source(self):
try:
doc = self.to_dict()
doc["id"] = self.meta.id
converter = PaperToNACPConverter(doc)
return converter.convert()
except ConverterError:
return None
def _is_change_form(self):
return False
@property
def original_url(self):
return self.declaration.url
def aggregated_data(self):
return self.aggregated
def extra_phrases(self):
res = super().extra_phrases()
for vehicle in self.get_vehicles():
res += vehicle
return res
def red_flags(self):
return []
def get_vehicles(self):
res = []
if hasattr(self, "vehicle"):
for field in [
"34",
"35",
"36",
"37",
"38",
"39",
"40",
"41",
"42",
"43",
"44",
]:
car_infos = getattr(self.vehicle, field, [])
for car_info in car_infos:
res.append([car_info["brand"], car_info["brand_info"]])
return res
# Temporary solution to provide enough aggregated data
# to make it possible to compare old and new declarations
# TODO: REPLACE ME
@property
def aggregated(self):
language = get_language()
if hasattr(self, "_aggregated"):
return self._aggregated
def to_float(doc, key):
try:
return float(str(getattr(doc, key, "0") or "0").replace(",", "."))
except ValueError:
return 0.0
def get_exchange_rate(year, curr):
rates = {
"2011": {"USD": 7.98, "EUR": 10.29, "RUB": 0.250}, # As on 2011/12/30
"2012": {"USD": 7.99, "EUR": 10.53, "RUB": 0.263}, # As on 2012/12/29
"2013": {"USD": 7.99, "EUR": 11.04, "RUB": 0.244}, # As on 2013/12/30
"2014": {"USD": 15.76, "EUR": 19.23, "RUB": 0.303}, # As on 2014/12/29
"2015": {"USD": 24.00, "EUR": 26.22, "RUB": 0.329}, # As on 2015/12/31
"2016": { # As on 2016/12/31
"USD": 27.1908,
"EUR": 28.4226,
"RUB": 0.4511,
},
"2017": { # As on 2017/12/31
"USD": 28.0672,
"EUR": 33.4954,
"RUB": 0.4870,
},
}
if year not in rates:
return
if curr not in rates[year]:
return
return rates[year][curr]
def to_space(space):
areas_koef = {"га": 10000, "cоток": 100, "соток": 100, "м²": 1}
units = getattr(space, "space_units", "")
return to_float(space, "space") * areas_koef.get(units, 1)
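        # Worked example (figures invented, for illustration only): a bank
        # account declared for 2016 as 1000 USD converts via the table above
        # to 1000 * 27.1908 = 27190.8 UAH before being added to the assets
        # totals, and a land plot declared as 2 "га" is normalised by
        # to_space() to 2 * 10000 = 20000 sq. m.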
resp = {
"incomes.presents.all": 0,
"incomes.family": 0,
"incomes.declarant": 0,
"assets.cash.total": 0,
"assets.family": 0,
"assets.declarant": 0,
"incomes.total": 0,
"assets.total": 0,
"expenses.total": 0,
"liabilities.total": 0,
"estate.family_land": 0,
"estate.declarant_land": 0,
"estate.family_other": 0,
"estate.declarant_other": 0,
"vehicles.all_names": "",
}
if hasattr(self, "income"):
resp["incomes.declarant"] = to_float(self.income["5"], "value")
resp["incomes.family"] = to_float(self.income["5"], "family")
resp["incomes.presents.all"] = to_float(self.income["11"], "value") + to_float(self.income["11"], "family")
resp["incomes.total"] = resp["incomes.declarant"] + resp["incomes.family"]
if hasattr(self, "liabilities"):
for field in [
"54",
"55",
"56",
"57",
"58",
"59",
"60",
"61",
"62",
"63",
"64",
]:
if hasattr(self.liabilities, field):
resp["liabilities.total"] += to_float(getattr(self.liabilities, field), "sum")
if hasattr(self, "banks"):
for d_key, k in (("45", "declarant"), ("51", "family")):
for a in getattr(self.banks, d_key, []):
try:
currency = getattr(a, "sum_units", "UAH") or "UAH"
amount = to_float(a, "sum")
if currency == "грн":
currency = "UAH"
if currency != "UAH":
rate = get_exchange_rate(str(self.intro.declaration_year), currency)
if rate is None:
continue
amount *= rate
resp["assets.{}".format(k)] += amount
except ValueError:
continue
resp["assets.total"] = resp["assets.family"] + resp["assets.declarant"]
vehicles = []
for brand, brand_info in self.get_vehicles():
if language == "en" and hasattr(self, "translator"):
vehicles.append(
"{} {}".format(
self._translate_one_field(brand, language), self._translate_one_field(brand_info, language)
).replace(";", "")
)
else:
vehicles.append("{} {}".format(brand, brand_info).replace(";", ""))
resp["vehicles.all_names"] += "; ".join(vehicles)
if hasattr(self, "estate"):
for d_key, k in (
("24", "declarant_other"),
("30", "family_other"),
("25", "declarant_other"),
("31", "family_other"),
("26", "declarant_other"),
("32", "family_other"),
("27", "declarant_other"),
("33", "family_other"),
("28", "declarant_other"),
("34", "family_other"),
):
estate_infos = getattr(self.estate, d_key, [])
for space in estate_infos:
resp["estate.{}".format(k)] += to_space(space)
for d_key, k in (("23", "declarant_land"), ("29", "family_land")):
estate_infos = getattr(self.estate, d_key, [])
for space in estate_infos:
resp["estate.{}".format(k)] += to_space(space)
self._aggregated = resp
return resp
def raw_html(self):
doc = render_to_string("decl_form.jinja", {"declaration": self})
return doc
class Meta:
pass
# commenting it out for now to not to ruin existing index
# doc_type = "paper_declaration_doctype"
nacp_declarations_idx = Index(NACP_DECLARATION_INDEX)
nacp_declarations_idx.settings(number_of_shards=NUMBER_OF_SHARDS, number_of_replicas=NUMBER_OF_REPLICAS)
nacp_declarations_idx.analyzer(namesAutocompleteAnalyzer)
nacp_declarations_idx.analyzer(namesAutocompleteSearchAnalyzer)
@nacp_declarations_idx.doc_type
class NACPDeclaration(DocType, AbstractDeclaration):
"""NACP Declaration document.
Assumes there's a dynamic mapping with all fields not indexed by default."""
persons = Text(analyzer="ukrainian", copy_to="all")
countries = Text(analyzer="ukrainian", copy_to="all")
companies = Text(analyzer="ukrainian", copy_to="all")
names_autocomplete = Text(
analyzer="namesAutocompleteAnalyzer",
search_analyzer="namesAutocompleteSearchAnalyzer",
fields={"raw": Text(index=True)},
term_vector="with_positions_offsets",
)
all = Text(analyzer="ukrainian")
general = Object(
properties={
"full_name": Text(index=True, analyzer="ukrainian"),
"full_name_for_sorting": Keyword(index=True, ignore_above=100), # only for sorting purposes
"name": Text(index=True, analyzer="ukrainian"),
"patronymic": Text(index=True, analyzer="ukrainian"),
"last_name": Text(index=True, analyzer="ukrainian"),
"post": Object(
properties={
"actual_region": Text(
index=True,
analyzer="ukrainian",
fields={"raw": Keyword(index=True)},
),
"region": Text(
index=True,
analyzer="ukrainian",
fields={"raw": Keyword(index=True)},
),
"office": Text(
index=True,
analyzer="ukrainian",
fields={"raw": Keyword(index=True)},
),
"post_type": Text(
index=True,
analyzer="ukrainian",
fields={"raw": Keyword(index=True)},
),
"post": Text(
index=True,
analyzer="ukrainian",
fields={"raw": Keyword(index=True)},
),
}
),
}
)
declaration = Object(properties={"date": NoneAwareDate()})
estate = Object(properties={"region": Text(index=True, analyzer="ukrainian", fields={"raw": Keyword(index=True)})})
intro = Object(
properties={
"declaration_year": Keyword(index=True),
"declaration_year_to": NoneAwareDate(),
"declaration_year_from": NoneAwareDate(),
"doc_type": Keyword(index=True),
"date": NoneAwareDate(index=True),
}
)
ft_src = Text(index=True, analyzer="ukrainian", copy_to="all")
nacp_orig = Object(include_in_all=False, enabled=False)
    # concatenated from a set of fields for regular search (not deepsearch mode)
index_card = Text(index=True, analyzer="ukrainian")
INDEX_CARD_FIELDS = [
"general.last_name",
"general.name",
"general.patronymic",
"general.full_name",
"general.post.post",
"general.post.office",
"general.post.region",
"general.post.actual_region",
"intro.declaration_year",
"intro.doc_type",
"declaration.source",
"declaration.url",
]
CONTENT_SELECTORS = NACP_SELECTORS_TO_TRANSLATE
ENABLED_FLAGS = {
# "untimely_submission": {
# "name": _("Повідомлення про суттєві зміни в майновому стані подано пізніше встановленого законом терміну"),
# "description": _(
# """Суб’єкт декларування не подав повідомлення про суттєві зміни в майновому стані у десятиденний
# строк з моменту отримання доходу, придбання майна або здійснення видатку"""
# ),
# },
"assets_to_income_flag": {
"name": _("Активи в 10+ разів перевищують доходи"),
"description": _(
"""Сума грошових активів (розділ 12) декларанта і родини в 10+ разів вища за суму всіх доходів
(розділ 11) декларанта і родини. Суми, що зазначені у іноземній валюті, переведені в грн за
курсом на 31 грудня звітного року"""
),
},
"liabilities_to_inc_and_assets_flag": {
"name": _("Зобов’язання в 2+ разів перевищують активи і доходи"),
"description": _(
"""Сума фінансових зобов'язань (розділ 13) декларанта і родини в 2+ рази перевищує суму грошових
активів (розділ 12) та доходів (розділ 11) декларанта і родини. Суми, що зазначені у іноземній
валюті, переведені в грн за курсом на 31 грудня звітного року"""
),
},
"estate_purch_no_cost_flag": {
"name": _("Не вказано вартість нерухомості, права на яку набуті в поточному році"),
"description": _(
"""В декларації вказано об'єкт нерухомості (розділ 3), що належить декларанту чи члену родини
на праві власності (зокрема сумісної), при цьому дата набуття права співпадає зі звітним роком,
а показник вартості нерухомості на дату набуття права не вказано або дорівнює 0"""
),
},
"vehicle_purch_no_cost_flag": {
"name": _("Не вказано вартість ТЗ, права на який набуто в поточному році"),
"description": _(
"""В декларації вказано транспортний засіб (розділ 6), що належить декларанту чи члену родини
на праві власності (зокрема сумісної), при цьому дата набуття права співпадає зі звітним роком,
а показник вартості ТЗ не вказано або дорівнює 0"""
),
},
"cash_flag": {
"name": _("Готівка > 5 млн.грн."),
"description": _(
"""Сума грошових активів (розділ 12) декларанта і родини, вид активів "готівкові кошти"
перевищує 5 млн.грн Суми, що зазначені у іноземній валюті, переведені в грн за курсом на
31 грудня звітного року"""
),
},
"expenses_to_inc_and_assets_flag": {
"name": _("Видатки в 3+ разів перевищують активи і доходи"),
"description": _(
"""Сума видатків (розділ 14) декларанта в 3+ рази перевищує суму грошових активів (розділ 12)
та доходів (розділ 11) декларанта і родини. Суми, що зазначені у іноземній валюті, переведені
в грн за курсом на 31 грудня звітного року"""
),
},
"income_presents_to_total_flag": {
"name": _("Подарунки, призи, благодійна допомога складають >75% доходу"),
"description": _(
"""Сума доходів (розділ 11) декларанта і родини, вид доходів "Подарунок у негрошовій формі",
"Подарунок у грошовій формі", "Благодійна допомога", "Приз" складають > 75% загальної суми
доходів декларанта і родини. Суми, що зазначені у іноземній валюті, переведені в грн за
курсом на 31 грудня звітного року"""
),
},
"house_no_land_flag": {
"name": _("Є будинок/ дача, немає землі"),
"description": _(
"""В декларації вказано об'єкт нерухомості (розділ 3), вид об'єкту "Житловий будинок" або
"Садовий (дачний) будинок", що належить декларанту чи члену родини на праві власності
(в т.ч. сумісної). При цьому в декларації не вказано об'єкт нерухомості типу "Земельна ділянка",
що належить декларанту чи члену родини на праві власності (зокрема сумісної)"""
),
},
"garage_wo_car_flag": {
"name": _("Є гараж, немає авто"),
"description": _(
"""В декларації вказано об'єкт нерухомості (розділ 3) виду "Гараж", що належить декларанту чи
члену родини на праві власності (зокрема сумісної). При цьому в декларації не вказано жодного
транспортного засобу (розділ 6), який належить декларанту чи члену родини на праві власності
(зокрема сумісної)"""
),
},
"lux_cars_flag_v2": {
"name": _('Є "люксові" авто*'),
"description": _(
"""В декларації вказано транспортний засіб (розділ 6), рік випуску якого більше за 2011
(або рік випуску не вказано), марка і модель яких належить
<a href="https://docs.google.com/spreadsheets/d/1DJj2Ms5ivi7m61m5X_ib0XqCvZQRIDrbN3FYX1A_-HE/edit#gid=0">до наступного списку</a>."""
),
},
"estate_purch_no_cost_flag": {
"name": _("Не вказано вартість нерухомості, права на яку набуті в поточному році"),
"description": _(
"""В декларації вказано об'єкт нерухомості (розділ 3), що належить декларанту чи члену
родини на праві власності (Власність \ Спільна власність), при цьому дата набуття права співпадає зі звітним роком,
а показники вартості нерухомості на дату набуття права та на дату останньої оцінки не вказано або дорівнюють 0"""
),
},
"vehicle_purch_no_cost_flag": {
"name": _("Не вказано вартість ТЗ, право на який набуто в поточному році"),
"description": _(
"""В декларації вказано транспортний засіб (розділ 6), що належить декларанту чи члену
родини на праві власності (Власність \ Спільна власність), при цьому дата набуття права співпадає зі звітним роком, а показник
вартості ТЗ не вказано або дорівнює 0"""
),
},
"has_huge_prize": {
"name": _("Призи / лотереї більше 10 тис"),
"description": _(
"""Задекларовано дохід (декларантом або членом родини) виграші в лотерею чи призи загальною
сумою >= 10 тис. грн. Перевіряється тип доходу “Приз”, а також тип доходу “Інше” з пошуком в описі за ключовими словами
типу “виграш” і т.п."""
),
},
"has_foreign_real_estate": {
"name": _("Є нерухомість за кордоном"),
"description": _(
"""Задекларовано об'єкт нерухомості будь-якого типу, що належить декларанту чи члену родини
(Власність / Спільна власність), і країною місцезнаходження об'єкту є не Україна"""
),
},
"has_non_bank_liabilities": {
"name": _(
"Має фінансові зобов’язання по відношенню не до банків, страхових чи лізингових компаній або пенсійних фондів"
),
"description": _(
"""Перевіряється назва особи, щодо якої є зобов’язання, порівнюється з списком назв банків та їхніх кодів ЄДРПОУ.
Також перевіряється опис зобов’язання на предмет страхових чи лізингових компаній та пенсійних фондів"""
),
},
"has_aircraft_flag": {
"name": _("Має повітряний транспортний засіб"),
"description": _("""Задекларовано ТЗ виду “Повітряний засіб”, на будь-якому праві власності"""),
},
"has_major_real_estate": {
"name": _("Має нерухомість площею більше 300 м2"),
"description": _(
"""Задекларовано нерухомість (тип 'Житловий будинок', 'Квартира', 'Кімната', 'Садовий
(дачний) будинок') площею більше 300 м2 на будь-якому праві власності"""
),
},
"family_member_did_not_provide_information": {
"name": _("Член сім’ї не надав інформацію"),
"description": _(
"""Щодо будь-якого задекларованого об’єкта є дані, які відсутні, бо член сім’ї не надав
інформацію. Частина менш важливих даних, які в принципі можуть бути невідомі члену родини (податковий номер компанії,
і т.п.) не перевіряється."""
),
},
}
COUNTRIES = {
"0": _("Без громадянства"),
"1": _("Україна"),
"2": _("Австралія"),
"3": _("Австрія"),
"4": _("Азербайджан"),
"5": _("Аландські острови"),
"6": _("Албанія"),
"7": _("Алжир"),
"8": _("Американське Самоа"),
"9": _("Американські Віргінські острови"),
"10": _("Ангілья"),
"11": _("Ангола"),
"12": _("Андорра"),
"13": _("Антарктида"),
"14": _("Антигуа і Барбуда"),
"15": _("Аргентина"),
"16": _("Аруба"),
"17": _("Афганістан"),
"18": _("Багамські Острови"),
"19": _("Бангладеш"),
"20": _("Барбадос"),
"21": _("Бахрейн"),
"23": _("Бельгія"),
"22": _("Беліз"),
"24": _("Бенін"),
"25": _("Бермудські острови"),
"26": _("Білорусь"),
"27": _("Болгарія"),
"28": _("Болівія"),
"29": _("Боснія і Герцеговина"),
"30": _("Ботсвана"),
"31": _("Бразилія"),
"32": _("Британська територія в Індійському океані"),
"33": _("Британські Віргінські острови"),
"34": _("Бруней"),
"35": _("Буркіна-Фасо"),
"36": _("Бурунді"),
"37": _("Бутан"),
"38": _("Вануату"),
"39": _("Ватикан"),
"40": _("Велика Британія"),
"41": _("Венесуела"),
"42": _("В'єтнам"),
"43": _("Вірменія"),
"44": _("Волліс і Футуна"),
"45": _("Габон"),
"46": _("Гаїті"),
"48": _("Гамбія"),
"49": _("Гана"),
"47": _("Гаяна"),
"50": _("Гваделупа"),
"51": _("Гватемала"),
"52": _("Гвінея"),
"53": _("Гвінея-Бісау"),
"54": _("Гернсі"),
"61": _("Гібралтар"),
"55": _("Гондурас"),
"56": _("Гонконг"),
"57": _("Гренада"),
"58": _("Греція"),
"59": _("Грузія"),
"60": _("Гуам"),
"62": _("Ґренландія"),
"63": _("Данія"),
"65": _("Джерсі"),
"66": _("Джибуті"),
"67": _("Домініка"),
"68": _("Домініканська Республіка"),
"64": _("ДР Конго"),
"75": _("Єгипет"),
"70": _("Еквадор"),
"71": _("Екваторіальна Гвінея"),
"76": _("Ємен"),
"72": _("Еритрея"),
"73": _("Естонія"),
"74": _("Ефіопія"),
"77": _("Замбія"),
"78": _("Західна Сахара"),
"79": _("Зімбабве"),
"69": _("Зовнішні малі острови США"),
"80": _("Ізраїль"),
"81": _("Індія"),
"82": _("Індонезія"),
"83": _("Ірак"),
"84": _("Іран"),
"85": _("Ірландія"),
"86": _("Ісландія"),
"87": _("Іспанія"),
"88": _("Італія"),
"89": _("Йорданія"),
"90": _("Кабо-Верде"),
"91": _("Казахстан"),
"92": _("Кайманові острови"),
"93": _("Камбоджа"),
"94": _("Камерун"),
"95": _("Канада"),
"96": _("Катар"),
"97": _("Кенія"),
"98": _("Киргизстан"),
"101": _("Кіпр"),
"102": _("Кірибаті"),
"99": _("КНР"),
"103": _("Кокосові острови (Кілінг)"),
"104": _("Колумбія"),
"105": _("Коморські Острови"),
"106": _("Конго"),
"107": _("Коста-Рика"),
"108": _("Кот-д'Івуар"),
"109": _("Куба"),
"110": _("Кувейт"),
"111": _("Лаос"),
"112": _("Латвія"),
"113": _("Лесото"),
"114": _("Литва"),
"115": _("Ліберія"),
"116": _("Ліван"),
"117": _("Лівія"),
"118": _("Ліхтенштейн"),
"119": _("Люксембург"),
"120": _("Маврикій"),
"121": _("Мавританія"),
"122": _("Мадагаскар"),
"123": _("Майотта"),
"124": _("Макао"),
"125": _("Македонія"),
"126": _("Малаві"),
"127": _("Малайзія"),
"129": _("Мальдіви"),
"128": _("Малі"),
"130": _("Мальта"),
"131": _("Марокко"),
"132": _("Мартиніка"),
"133": _("Маршаллові Острови"),
"134": _("Мексика"),
"135": _("Мозамбік"),
"136": _("Молдова"),
"137": _("Монако"),
"138": _("Монголія"),
"139": _("Монтсеррат"),
"140": _("М'янма"),
"141": _("Намібія"),
"142": _("Науру"),
"143": _("Непал"),
"144": _("Нігер"),
"145": _("Нігерія"),
"146": _("Нідерланди"),
"147": _("Нідерландські Антильські острови"),
"148": _("Нікарагуа"),
"149": _("Німеччина"),
"150": _("Ніуе"),
"151": _("Нова Зеландія"),
"152": _("Нова Каледонія"),
"153": _("Норвегія"),
"154": _("ОАЕ"),
"155": _("Оман"),
"156": _("Острів Буве"),
"157": _("Острів Мен"),
"158": _("Острів Норфолк"),
"159": _("Острів Різдва"),
"161": _("Острови Герд і Макдональд"),
"162": _("Острови Кука"),
"160": _("Острови Святої Єлени, Вознесіння і Тристан-да-Кунья"),
"214": _("Острови Теркс і Кайкос"),
"163": _("Пакистан"),
"164": _("Палау"),
"165": _("Палестина"),
"166": _("Панама"),
"167": _("Папуа - Нова Гвінея"),
"173": _("ПАР"),
"168": _("Парагвай"),
"169": _("Перу"),
"170": _("Південна Джорджія та Південні Сандвічеві острови"),
"171": _("Південна Корея"),
"172": _("Південний Судан"),
"100": _("Північна Корея"),
"174": _("Північні Маріанські острови"),
"175": _("Піткерн"),
"176": _("Польща"),
"177": _("Португалія"),
"178": _("Пуерто-Рико"),
"179": _("Реюньйон"),
"180": _("Росія"),
"181": _("Руанда"),
"182": _("Румунія"),
"183": _("Сальвадор"),
"184": _("Самоа"),
"185": _("Сан-Марино"),
"186": _("Сан-Томе і Принсіпі"),
"187": _("Саудівська Аравія"),
"188": _("Свазіленд"),
"189": _("Свальбард і Ян-Маєн"),
"190": _("Сейшельські Острови"),
"191": _("Сен-Бартельмі"),
"192": _("Сенегал"),
"193": _("Сен-Мартін"),
"194": _("Сен-П'єр і Мікелон"),
"195": _("Сент-Вінсент і Гренадини"),
"196": _("Сент-Кіттс і Невіс"),
"197": _("Сент-Люсія"),
"198": _("Сербія"),
"209": _("Сьєрра-Леоне"),
"199": _("Сирія"),
"200": _("Сінгапур"),
"201": _("Словаччина"),
"202": _("Словенія"),
"203": _("Соломонові Острови"),
"204": _("Сомалі"),
"206": _("Судан"),
"207": _("Суринам"),
"208": _("Східний Тимор"),
"205": _("США"),
"210": _("Таджикистан"),
"211": _("Таїланд"),
"212": _("Тайвань"),
"213": _("Танзанія"),
"215": _("Того"),
"216": _("Токелау"),
"217": _("Тонга"),
"218": _("Тринідад і Тобаго"),
"219": _("Тувалу"),
"220": _("Туніс"),
"221": _("Туреччина"),
"222": _("Туркменістан"),
"223": _("Уганда"),
"224": _("Угорщина"),
"225": _("Узбекистан"),
"227": _("Уругвай"),
"228": _("Фарерські острови"),
"229": _("Федеративні Штати Мікронезії"),
"230": _("Фіджі"),
"231": _("Філіппіни"),
"232": _("Фінляндія"),
"233": _("Фолклендські (Мальвінські) острови"),
"234": _("Франція"),
"235": _("Французька Гвіана"),
"236": _("Французька Полінезія"),
"237": _("Французькі Південні території"),
"238": _("Хорватія"),
"239": _("Центральноафриканська Республіка"),
"240": _("Чад"),
"241": _("Чехія"),
"242": _("Чилі"),
"243": _("Чорногорія"),
"244": _("Швейцарія"),
"245": _("Швеція"),
"246": _("Шрі-Ланка"),
"247": _("Ямайка"),
"248": _("Японія"),
}
def get_country(self, country_id):
country_id = str(country_id)
if country_id not in self.COUNTRIES:
return _("Невідома країна ") + country_id
else:
return self.COUNTRIES[country_id]
def _parse_relatives(self):
if hasattr(self.nacp_orig, "step_2"):
return {
str(person.get("id", person_id)): person for person_id, person in self.nacp_orig.step_2.to_dict().items()
}
else:
return {}
def resolve_relative(self, person_id):
parsed = self._parse_relatives()
return parsed.get(str(person_id), None)
def raw_html(self):
fname = os.path.join(
settings.NACP_DECLARATIONS_PATH,
self.meta.id[5:7],
os.path.basename(self.declaration.basename) + ".html",
)
try:
with open(fname, "r") as fp:
d = fp.read()
except FileNotFoundError:
logger.error("Cannot find declaration {}".format(self.meta.id))
return _("<h2>Вибачте, декларація тимчасово відсутня, але ми вже працюємо над вирішенням проблеми</h2>")
m = re.search(r"<\/style>(.*)</body>", d)
try:
declaration_html = m.group(1)
except (AttributeError, IndexError):
logger.error("Cannot parse declaration {}".format(self.meta.id))
return _("<h2>Вибачте, декларація тимчасово відсутня, але ми вже працюємо над вирішенням проблеми</h2>")
# OH LORD, THAT'S NOT WHAT I'VE BEEN TAUGHT IN UNIVERSITY
doc = declaration_html.replace("</div></div></div><header><h2>", "</div></div><header><h2>")
# MY ASS IS ON FIRE
doc = re.sub(r"</table>\s*<header>", "</table></div><header>", doc)
doc = re.sub(r"</h2>\s*<div", "</h2></header><div", doc)
doc = re.sub(r"</span></h2>У", "</span></h2></header>У", doc)
companies = self._all_companies()
codes = [c.lstrip("0") for c in companies if c.isdigit() and 4 < len(c) < 9]
for c in set(codes):
if c:
full_code = c.rjust(8, "0")
doc = re.sub(
r"\b0*{}\b".format(c),
' <a href="https://ring.org.ua/edr/uk/company/{}" target="_blank">{}</a>'.format(
full_code, full_code
),
doc,
)
return doc
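    # Example of the linkification above (company code invented): if the HTML
    # mentions EDRPOU code "00341711", the code is stripped of leading zeros
    # for the word-boundary regex and re-padded to 8 digits for the link, so
    # the bare number is replaced with
    #     <a href="https://ring.org.ua/edr/uk/company/00341711" target="_blank">00341711</a>
    # in the rendered declaration.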
af_paths = [
jmespath.compile("step_7.*.emitent_ua_company_code"),
jmespath.compile("step_7.*.rights[].*.ua_company_code[]"),
jmespath.compile("step_8.*.corporate_rights_company_code"),
jmespath.compile("step_8.*.rights[].*.ua_company_code[]"),
jmespath.compile("step_9.*.beneficial_owner_company_code"),
]
def _is_change_form(self):
return self.intro.doc_type and self.intro.doc_type == "Форма змін"
def _affiliated_companies(self, src=None):
        # For now, change forms are skipped
if self._is_change_form():
return []
results = []
if src is None:
src = self.nacp_orig.to_dict()
for path in self.af_paths:
results += path.search(src) or []
return set(filter(None, results))
rl_paths = {
"step_11": jmespath.compile("step_11.*"),
"step_12": jmespath.compile("step_12.*"),
}
def _related_companies(self, src=None):
        # For now, change forms are skipped
if self._is_change_form():
return []
results = []
if src is None:
src = self.nacp_orig.to_dict()
for section in self.rl_paths["step_11"].search(src) or []:
try:
section = section or {}
obj_type = section.get("objectType", "").lower()
other_obj_type = section.get("otherObjectType", "").lower()
if obj_type in INCOME_TYPES or other_obj_type in INCOME_TYPES:
results += [section.get("source_ua_company_code", "")]
except AttributeError:
pass
for section in self.rl_paths["step_12"].search(src) or []:
try:
section = section or {}
obj_type = section.get("objectType", "").lower()
if obj_type in MONETARY_ASSETS_TYPES:
results += [section.get("organization_ua_company_code", "")]
except AttributeError:
pass
return set(filter(None, results))
ac_paths = [
jmespath.compile("step_2.*.source_ua_company_code[]"),
jmespath.compile("step_3.*.beneficial_owner_company_code[]"),
jmespath.compile("step_3.*.rights[].*.ua_company_code[]"),
jmespath.compile("step_4.*.addition_company_code[]"),
jmespath.compile("step_4.*.rights[].*.ua_company_code[]"),
jmespath.compile("step_4.undefined.rights[].*.ua_company_code[]"),
jmespath.compile("step_5.*.emitent_ua_company_code[]"),
jmespath.compile("step_5.*.rights[].*.ua_company_code[]"),
jmespath.compile("step_6.*.corporate_rights_company_code[]"),
jmespath.compile("step_6.*.rights[].*.ua_company_code[]"),
jmespath.compile("step_10.*.rights[].*.ua_company_code[]"),
jmespath.compile("step_11.*.rights[].*.ua_company_code[]"),
jmespath.compile("step_11.*.rights[].*.ua_company_name[]"),
jmespath.compile("step_12.*.rights[].*.ua_company_code[]"),
jmespath.compile("step_13.*.emitent_ua_company_code[]"),
jmespath.compile("step_13.*.emitent_ua_company_name[]"),
jmespath.compile("step_13.*.guarantor[].*.guarantor_ua_company_code[]"),
jmespath.compile("step_13.*.guarantor_realty[].*.realty_rights_ua_company_code[]"),
jmespath.compile("step_13.*.guarantor_realty[].*.realty_rights_ua_company_code[]"),
jmespath.compile("step_15.*.emitent_ua_company_code[]"),
jmespath.compile("step_16.org.*.reestrCode[]"),
jmespath.compile("step_16.part_org.*.reestrCode[]"),
jmespath.compile("step_7.*.emitent_ua_company_code"),
jmespath.compile("step_7.*.rights[].*.ua_company_code[]"),
jmespath.compile("step_8.*.corporate_rights_company_code"),
jmespath.compile("step_8.*.rights[].*.ua_company_code[]"),
jmespath.compile("step_9.*.beneficial_owner_company_code"),
jmespath.compile("step_11.*.source_ua_company_code"),
jmespath.compile("step_12.*.organization_ua_company_code"),
]
def _all_companies(self, src=None):
        # For now, change forms are skipped
if self._is_change_form():
return []
results = []
if src is None:
src = self.nacp_orig.to_dict()
for path in self.ac_paths:
results += path.search(src) or []
return set(filter(None, results))
def related_companies(self, affiliated_only=True):
"""
Prepares data to use with procurement dataset
"""
src = self.nacp_orig.to_dict()
res = self._affiliated_companies(src)
if not affiliated_only:
res += self._related_companies(src)
res = filter(None, map(lambda x: x.strip().lstrip("0"), set(res)))
return list(set(res) - BANK_EDRPOUS)
def get_procurement_earnings_by_year(self, affiliated_only=True):
# Safety valve against transactions with malformed dates
next_year_dt = date(date.today().year + 1, 1, 1)
return (
Transactions.objects.select_related("seller")
.filter(
seller__code__in=self.related_companies(affiliated_only),
date__lt=next_year_dt,
)
.annotate(year=ExtractYear("date"))
.values("year")
.annotate(count=Count("pk"), sum_uah=Sum("volume_uah"))
)
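    # Result sketch (numbers invented): the queryset yields one row per year,
    # e.g. [{"year": 2016, "count": 12, "sum_uah": 1543210.5}, ...], showing
    # how much the companies related to this declarant won in public
    # procurement each year; real values come from the Transactions model.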
def get_procurement_earnings_by_company(self, affiliated_only=True):
# Safety valve against transactions with malformed dates
next_year_dt = date(date.today().year + 1, 1, 1)
return (
Transactions.objects.select_related("seller")
.filter(
seller__code__in=self.related_companies(affiliated_only),
date__lt=next_year_dt,
)
.values("seller__code", "seller__pk", "seller__name")
.annotate(count=Count("pk"), sum_uah=Sum("volume_uah"))
)
def infocard(self):
return {
"first_name": self.general.name,
"patronymic": self.general.patronymic,
"last_name": self.general.last_name,
"office": self.general.post.office,
"position": self.general.post.post,
"source": self.declaration.source,
"id": self.meta.id,
"user_declarant_id": getattr(self.intro, "user_declarant_id"),
"url": settings.SITE_URL + reverse("details", kwargs={"declaration_id": self.meta.id}),
"document_type": self.intro.doc_type,
"is_corrected": self.intro.corrected,
"created_date": self.intro.date,
"declaration_year": getattr(self.intro, "declaration_year"),
}
def raw_source(self):
return {"url": "https://public-api.nazk.gov.ua/v2/documents/%s" % self.meta.id.replace("nacp_", "")}
@property
def original_url(self):
return "https://public.nazk.gov.ua/documents/%s" % self.meta.id.replace("nacp_", "")
def related_entities(self):
src = self.nacp_orig.to_dict()
owned_companies = self._affiliated_companies(src)
related_companies = self._related_companies(src)
all_companies = self._all_companies(src)
return {
"people": {"family": list(self.get_family_members())},
"documents": {
"corrected": list(getattr(self, "corrected_declarations", []) or []),
"originals": list(getattr(self, "original_declarations", []) or []),
},
"companies": {
"owned": list(owned_companies),
"related": list(related_companies),
"all": list(all_companies),
},
}
def unified_source(self):
return self.nacp_orig.to_dict()
def aggregated_data(self):
if hasattr(self, "aggregated"):
return self.aggregated.to_dict()
else:
return {}
def red_flags(self):
res = []
if hasattr(self, "aggregated"):
for f, flag in self.ENABLED_FLAGS.items():
if str(getattr(self.aggregated, f, "false")).lower() == "true":
res.append(
{
"flag": f,
"text": flag["name"],
"description": flag["description"],
}
)
return res
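    # Usage sketch: if the precomputed `aggregated` document stores, for
    # example, aggregated.cash_flag == "true", red_flags() returns
    #     [{"flag": "cash_flag", "text": <name>, "description": <description>}]
    # built from the ENABLED_FLAGS metadata above; how the entries are shown
    # to visitors is up to the templates and is not defined in this file.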
class Meta:
doc_type = "nacp_declaration_doctype"
nacp_declarations_new_format_idx = Index(NACP_DECLARATION_NEW_FORMAT_INDEX)
nacp_declarations_new_format_idx.settings(number_of_shards=NUMBER_OF_SHARDS, number_of_replicas=NUMBER_OF_REPLICAS)
nacp_declarations_new_format_idx.analyzer(namesAutocompleteAnalyzer)
nacp_declarations_new_format_idx.analyzer(namesAutocompleteSearchAnalyzer)
@nacp_declarations_new_format_idx.doc_type
class NACPDeclarationNewFormat(NACPDeclaration):
def raw_html(self):
doc = render_to_string("nacp_declaration_form.jinja", {"declaration": self})
return doc
class Meta:
doc_type = "nacp_declaration_doctype"
| dchaplinsky/declarations.com.ua | declarations_site/catalog/elastic_models.py | Python | mit | 66,029 | 0.001889 |
# -*- coding: utf-8 -*-
#
## This file is part of Invenio.
## Copyright (C) 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebMessage database models.
"""
# General imports
from invenio.base.globals import cfg
from invenio.ext.sqlalchemy import db
# Create your models here.
from string import strip
from invenio.modules.accounts.models import User, Usergroup
from sqlalchemy.ext.associationproxy import association_proxy
class MsgMESSAGE(db.Model):
"""Represents a MsgMESSAGE record."""
def __str__(self):
return "From: %s<%s>, Subject: <%s> %s" % \
(self.user_from.nickname or _('None'),
self.user_from.email or _('unknown'),
self.subject, self.body)
__tablename__ = 'msgMESSAGE'
id = db.Column(db.Integer(15, unsigned=True), nullable=False,
primary_key=True,
autoincrement=True)
id_user_from = db.Column(db.Integer(15, unsigned=True),
db.ForeignKey(User.id),
nullable=True, server_default='0')
_sent_to_user_nicks = db.Column(db.Text, name='sent_to_user_nicks',
nullable=False)
_sent_to_group_names = db.Column(db.Text, name='sent_to_group_names',
nullable=False)
subject = db.Column(db.Text, nullable=False)
body = db.Column(db.Text, nullable=True)
sent_date = db.Column(db.DateTime, nullable=False,
server_default='1900-01-01 00:00:00') # db.func.now() -> 'NOW()'
received_date = db.Column(db.DateTime,
server_default='1900-01-01 00:00:00')
user_from = db.relationship(User, backref='sent_messages')
#recipients = db.relationship(User,
# secondary=lambda: UserMsgMESSAGE.__table__,
# collection_class=set)
recipients = association_proxy('sent_to_users', 'user_to',
creator=lambda u:UserMsgMESSAGE(user_to=u))
@db.hybrid_property
def sent_to_user_nicks(self):
""" Alias for column 'sent_to_user_nicks'. """
return self._sent_to_user_nicks
@db.hybrid_property
def sent_to_group_names(self):
""" Alias for column 'sent_to_group_names'. """
return self._sent_to_group_names
@db.validates('_sent_to_user_nicks')
def validate_sent_to_user_nicks(self, key, value):
user_nicks = filter(len, map(strip,
value.split(cfg['CFG_WEBMESSAGE_SEPARATOR'])))
assert len(user_nicks) == len(set(user_nicks))
if len(user_nicks) > 0:
assert len(user_nicks) == \
User.query.filter(User.nickname.in_(user_nicks)).count()
return cfg['CFG_WEBMESSAGE_SEPARATOR'].join(user_nicks)
@db.validates('_sent_to_group_names')
def validate_sent_to_group_names(self, key, value):
group_names = filter(len, map(strip,
value.split(cfg['CFG_WEBMESSAGE_SEPARATOR'])))
assert len(group_names) == len(set(group_names))
if len(group_names) > 0:
assert len(group_names) == \
Usergroup.query.filter(Usergroup.name.in_(group_names)).count()
return cfg['CFG_WEBMESSAGE_SEPARATOR'].join(group_names)
@sent_to_user_nicks.setter
def sent_to_user_nicks(self, value):
old_user_nicks = self.user_nicks
self._sent_to_user_nicks = value
to_add = set(self.user_nicks)-set(old_user_nicks)
to_del = set(old_user_nicks)-set(self.user_nicks)
if len(self.group_names):
to_del = to_del-set([u.nickname for u in User.query.\
join(User.usergroups).filter(
Usergroup.name.in_(self.group_names)).\
all()])
if len(to_del):
is_to_del = lambda u: u.nickname in to_del
remove_old = filter(is_to_del, self.recipients)
for u in remove_old:
self.recipients.remove(u)
if len(to_add):
for u in User.query.filter(User.nickname.\
in_(to_add)).all():
if u not in self.recipients:
self.recipients.append(u)
@sent_to_group_names.setter
def sent_to_group_names(self, value):
old_group_names = self.group_names
self._sent_to_group_names = value
groups_to_add = set(self.group_names)-set(old_group_names)
groups_to_del = set(old_group_names)-set(self.group_names)
if len(groups_to_del):
to_del = set([u.nickname for u in User.query.\
join(User.usergroups).filter(
Usergroup.name.in_(groups_to_del)).\
all()])-set(self.user_nicks)
is_to_del = lambda u: u.nickname in to_del
remove_old = filter(is_to_del, self.recipients)
for u in remove_old:
self.recipients.remove(u)
if len(groups_to_add):
for u in User.query.join(User.usergroups).filter(db.and_(
Usergroup.name.in_(groups_to_add),
db.not_(User.nickname.in_(self.user_nicks)))).all():
if u not in self.recipients:
self.recipients.append(u)
@property
def user_nicks(self):
if not self._sent_to_user_nicks:
return []
return filter(len, map(strip,
self._sent_to_user_nicks.split(cfg['CFG_WEBMESSAGE_SEPARATOR'])))
@property
def group_names(self):
if not self._sent_to_group_names:
return []
return filter(len, map(strip,
self.sent_to_group_names.split(cfg['CFG_WEBMESSAGE_SEPARATOR'])))
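# Usage sketch (hypothetical values; the real separator comes from
# CFG_WEBMESSAGE_SEPARATOR): assigning to the hybrid properties keeps the
# `recipients` association proxy in sync, e.g.
#     msg = MsgMESSAGE(subject="Hello", body="...")
#     msg.sent_to_user_nicks = "alice,bob"    # nicks validated, users added
#     msg.sent_to_group_names = "editors"     # every member of "editors" added
#     db.session.add(msg); db.session.commit()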
#TODO consider moving following lines to separate file.
from invenio.modules.messages.config import CFG_WEBMESSAGE_EMAIL_ALERT
from invenio.config import CFG_WEBCOMMENT_ALERT_ENGINE_EMAIL
from invenio.utils.date import datetext_format
from datetime import datetime
def email_alert(mapper, connection, target):
""" Sends email alerts to message recipients. """
from invenio.ext.template import render_template_to_string
from invenio.ext.email import send_email, scheduled_send_email
m = target
is_reminder = m.received_date is not None \
and m.received_date > datetime.now()
alert = send_email
if is_reminder:
alert = lambda *args, **kwargs: scheduled_send_email(*args,
other_bibtasklet_arguments=[
m.received_date.strftime(datetext_format)],
**kwargs)
for u in m.recipients:
if isinstance(u.settings, dict) and \
u.settings.get('webmessage_email_alert', True):
try:
alert(
cfg['CFG_WEBCOMMENT_ALERT_ENGINE_EMAIL'],
u.email,
subject = m.subject,
content = render_template_to_string(
'messages/email_alert.html',
message=m, user=u))
except:
# FIXME tests are not in request context
pass
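# Behaviour note (summary of email_alert above): ordinary messages trigger an
# immediate send_email(), while reminders (received_date in the future) are
# queued through scheduled_send_email() with the reminder date passed as an
# extra bibtasklet argument, so the alert goes out on that date instead.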
# Registration of email_alert invoked from blueprint
# in order to use before_app_first_request.
# Reading config CFG_WEBMESSAGE_EMAIL_ALERT
# required app context.
def email_alert_register():
if cfg['CFG_WEBMESSAGE_EMAIL_ALERT']:
from sqlalchemy import event
# Register after insert callback.
event.listen(MsgMESSAGE, 'after_insert', email_alert)
class UserMsgMESSAGE(db.Model):
"""Represents a UserMsgMESSAGE record."""
__tablename__ = 'user_msgMESSAGE'
id_user_to = db.Column(db.Integer(15, unsigned=True),
db.ForeignKey(User.id), nullable=False,
server_default='0', primary_key=True)
id_msgMESSAGE = db.Column(db.Integer(15, unsigned=True),
db.ForeignKey(MsgMESSAGE.id),
nullable=False, server_default='0',
primary_key=True)
status = db.Column(db.Char(1), nullable=False,
server_default='N')
user_to = db.relationship(User, backref='received_messages',
collection_class=set)
message = db.relationship(MsgMESSAGE, backref='sent_to_users',
collection_class=set)
__all__ = ['MsgMESSAGE',
'UserMsgMESSAGE']
| lnielsen/invenio | invenio/modules/messages/models.py | Python | gpl-2.0 | 8,905 | 0.007748 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import date, datetime
from dateutil import relativedelta
import json
import time
from openerp.osv import fields, osv
from openerp.tools.float_utils import float_compare, float_round
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from openerp.exceptions import Warning
from openerp import SUPERUSER_ID, api
import openerp.addons.decimal_precision as dp
from openerp.addons.procurement import procurement
import logging
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# Incoterms
#----------------------------------------------------------
class stock_incoterms(osv.osv):
_name = "stock.incoterms"
_description = "Incoterms"
_columns = {
'name': fields.char('Name', required=True, help="Incoterms are series of sales terms. They are used to divide transaction costs and responsibilities between buyer and seller and reflect state-of-the-art transportation practices."),
'code': fields.char('Code', size=3, required=True, help="Incoterm Standard Code"),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide an INCOTERM you will not use."),
}
_defaults = {
'active': True,
}
#----------------------------------------------------------
# Stock Location
#----------------------------------------------------------
class stock_location(osv.osv):
_name = "stock.location"
_description = "Inventory Locations"
_parent_name = "location_id"
_parent_store = True
_parent_order = 'name'
_order = 'parent_left'
_rec_name = 'complete_name'
def _location_owner(self, cr, uid, location, context=None):
''' Return the company owning the location if any '''
return location and (location.usage == 'internal') and location.company_id or False
def _complete_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = m.name
parent = m.location_id
while parent:
res[m.id] = parent.name + ' / ' + res[m.id]
parent = parent.location_id
return res
def _get_sublocations(self, cr, uid, ids, context=None):
""" return all sublocations of the given stock locations (included) """
if context is None:
context = {}
context_with_inactive = context.copy()
context_with_inactive['active_test'] = False
return self.search(cr, uid, [('id', 'child_of', ids)], context=context_with_inactive)
def _name_get(self, cr, uid, location, context=None):
name = location.name
while location.location_id and location.usage != 'view':
location = location.location_id
name = location.name + '/' + name
return name
def name_get(self, cr, uid, ids, context=None):
res = []
for location in self.browse(cr, uid, ids, context=context):
res.append((location.id, self._name_get(cr, uid, location, context=context)))
return res
_columns = {
'name': fields.char('Location Name', required=True, translate=True),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide a location without deleting it."),
'usage': fields.selection([
('supplier', 'Supplier Location'),
('view', 'View'),
('internal', 'Internal Location'),
('customer', 'Customer Location'),
('inventory', 'Inventory'),
('procurement', 'Procurement'),
('production', 'Production'),
('transit', 'Transit Location')],
'Location Type', required=True,
help="""* Supplier Location: Virtual location representing the source location for products coming from your suppliers
                       \n* View: Virtual location used to create a hierarchical structure for your warehouse, aggregating its child locations; can't directly contain products
\n* Internal Location: Physical locations inside your own warehouses,
\n* Customer Location: Virtual location representing the destination location for products sent to your customers
\n* Inventory: Virtual location serving as counterpart for inventory operations used to correct stock levels (Physical inventories)
\n* Procurement: Virtual location serving as temporary counterpart for procurement operations when the source (supplier or production) is not known yet. This location should be empty when the procurement scheduler has finished running.
\n* Production: Virtual counterpart location for production operations: this location consumes the raw material and produces finished products
\n* Transit Location: Counterpart location that should be used in inter-companies or inter-warehouses operations
""", select=True),
'complete_name': fields.function(_complete_name, type='char', string="Location Name",
store={'stock.location': (_get_sublocations, ['name', 'location_id', 'active'], 10)}),
'location_id': fields.many2one('stock.location', 'Parent Location', select=True, ondelete='cascade'),
'child_ids': fields.one2many('stock.location', 'location_id', 'Contains'),
'partner_id': fields.many2one('res.partner', 'Owner', help="Owner of the location if not internal"),
'comment': fields.text('Additional Information'),
'posx': fields.integer('Corridor (X)', help="Optional localization details, for information purpose only"),
'posy': fields.integer('Shelves (Y)', help="Optional localization details, for information purpose only"),
'posz': fields.integer('Height (Z)', help="Optional localization details, for information purpose only"),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
        'company_id': fields.many2one('res.company', 'Company', select=1, help='Leave this field empty if this location is shared between companies'),
'scrap_location': fields.boolean('Is a Scrap Location?', help='Check this box to allow using this location to put scrapped/damaged goods.'),
'removal_strategy_id': fields.many2one('product.removal', 'Removal Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to take the products from, which lot etc. for this location. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."),
'putaway_strategy_id': fields.many2one('product.putaway', 'Put Away Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to store the products. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."),
'loc_barcode': fields.char('Location Barcode'),
}
_defaults = {
'active': True,
'usage': 'internal',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location', context=c),
'posx': 0,
'posy': 0,
'posz': 0,
'scrap_location': False,
}
_sql_constraints = [('loc_barcode_company_uniq', 'unique (loc_barcode,company_id)', 'The barcode for a location must be unique per company !')]
def create(self, cr, uid, default, context=None):
if not default.get('loc_barcode', False):
default.update({'loc_barcode': default.get('complete_name', False)})
return super(stock_location, self).create(cr, uid, default, context=context)
def get_putaway_strategy(self, cr, uid, location, product, context=None):
''' Returns the location where the product has to be put, if any compliant putaway strategy is found. Otherwise returns None.'''
putaway_obj = self.pool.get('product.putaway')
loc = location
while loc:
if loc.putaway_strategy_id:
res = putaway_obj.putaway_apply(cr, uid, loc.putaway_strategy_id, product, context=context)
if res:
return res
loc = loc.location_id
def _default_removal_strategy(self, cr, uid, context=None):
return 'fifo'
def get_removal_strategy(self, cr, uid, location, product, context=None):
''' Returns the removal strategy to consider for the given product and location.
:param location: browse record (stock.location)
:param product: browse record (product.product)
:rtype: char
'''
if product.categ_id.removal_strategy_id:
return product.categ_id.removal_strategy_id.method
loc = location
while loc:
if loc.removal_strategy_id:
return loc.removal_strategy_id.method
loc = loc.location_id
return self._default_removal_strategy(cr, uid, context=context)
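    # Illustrative note (assumption, not in the original source): the removal strategy
    # is resolved in this order of precedence:
    #   1. product.categ_id.removal_strategy_id (product category level)
    #   2. the first removal_strategy_id found while walking up the location chain
    #   3. the default returned by _default_removal_strategy(), i.e. 'fifo'
    # Hypothetical call:
    #   strategy = location_obj.get_removal_strategy(cr, uid, shelf_loc, product)
    #   # -> e.g. 'lifo' if enforced on the product category, otherwise the location's
    #   #    own strategy, otherwise 'fifo'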
def get_warehouse(self, cr, uid, location, context=None):
"""
Returns warehouse id of warehouse that contains location
:param location: browse record (stock.location)
"""
wh_obj = self.pool.get("stock.warehouse")
whs = wh_obj.search(cr, uid, [('view_location_id.parent_left', '<=', location.parent_left),
('view_location_id.parent_right', '>=', location.parent_left)], context=context)
return whs and whs[0] or False
#----------------------------------------------------------
# Routes
#----------------------------------------------------------
class stock_location_route(osv.osv):
_name = 'stock.location.route'
_description = "Inventory Routes"
_order = 'sequence'
_columns = {
'name': fields.char('Route Name', required=True),
'sequence': fields.integer('Sequence'),
'pull_ids': fields.one2many('procurement.rule', 'route_id', 'Pull Rules', copy=True),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the route without removing it."),
'push_ids': fields.one2many('stock.location.path', 'route_id', 'Push Rules', copy=True),
'product_selectable': fields.boolean('Applicable on Product'),
'product_categ_selectable': fields.boolean('Applicable on Product Category'),
'warehouse_selectable': fields.boolean('Applicable on Warehouse'),
'supplied_wh_id': fields.many2one('stock.warehouse', 'Supplied Warehouse'),
'supplier_wh_id': fields.many2one('stock.warehouse', 'Supplier Warehouse'),
        'company_id': fields.many2one('res.company', 'Company', select=1, help='Leave this field empty if this route is shared between all companies'),
}
_defaults = {
'sequence': lambda self, cr, uid, ctx: 0,
'active': True,
'product_selectable': True,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location.route', context=c),
}
def write(self, cr, uid, ids, vals, context=None):
'''when a route is deactivated, deactivate also its pull and push rules'''
if isinstance(ids, (int, long)):
ids = [ids]
res = super(stock_location_route, self).write(cr, uid, ids, vals, context=context)
if 'active' in vals:
push_ids = []
pull_ids = []
for route in self.browse(cr, uid, ids, context=context):
if route.push_ids:
push_ids += [r.id for r in route.push_ids if r.active != vals['active']]
if route.pull_ids:
pull_ids += [r.id for r in route.pull_ids if r.active != vals['active']]
if push_ids:
self.pool.get('stock.location.path').write(cr, uid, push_ids, {'active': vals['active']}, context=context)
if pull_ids:
self.pool.get('procurement.rule').write(cr, uid, pull_ids, {'active': vals['active']}, context=context)
return res
#----------------------------------------------------------
# Quants
#----------------------------------------------------------
class stock_quant(osv.osv):
"""
Quants are the smallest unit of stock physical instances
"""
_name = "stock.quant"
_description = "Quants"
def _get_quant_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for q in self.browse(cr, uid, ids, context=context):
res[q.id] = q.product_id.code or ''
if q.lot_id:
res[q.id] = q.lot_id.name
res[q.id] += ': ' + str(q.qty) + q.product_id.uom_id.name
return res
def _calc_inventory_value(self, cr, uid, ids, name, attr, context=None):
context = dict(context or {})
res = {}
uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
for quant in self.browse(cr, uid, ids, context=context):
context.pop('force_company', None)
if quant.company_id.id != uid_company_id:
#if the company of the quant is different than the current user company, force the company in the context
#then re-do a browse to read the property fields for the good company.
context['force_company'] = quant.company_id.id
quant = self.browse(cr, uid, quant.id, context=context)
res[quant.id] = self._get_inventory_value(cr, uid, quant, context=context)
return res
def _get_inventory_value(self, cr, uid, quant, context=None):
return quant.product_id.standard_price * quant.qty
_columns = {
'name': fields.function(_get_quant_name, type='char', string='Identifier'),
'product_id': fields.many2one('product.product', 'Product', required=True, ondelete="restrict", readonly=True, select=True),
'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="restrict", readonly=True, select=True, auto_join=True),
'qty': fields.float('Quantity', required=True, help="Quantity of products in this quant, in the default unit of measure of the product", readonly=True, select=True),
'package_id': fields.many2one('stock.quant.package', string='Package', help="The package containing this quant", readonly=True, select=True),
'packaging_type_id': fields.related('package_id', 'packaging_id', type='many2one', relation='product.packaging', string='Type of packaging', readonly=True, store=True),
'reservation_id': fields.many2one('stock.move', 'Reserved for Move', help="The move the quant is reserved for", readonly=True, select=True),
'lot_id': fields.many2one('stock.production.lot', 'Lot', readonly=True, select=True, ondelete="restrict"),
'cost': fields.float('Unit Cost'),
'owner_id': fields.many2one('res.partner', 'Owner', help="This is the owner of the quant", readonly=True, select=True),
'create_date': fields.datetime('Creation Date', readonly=True),
'in_date': fields.datetime('Incoming Date', readonly=True, select=True),
'history_ids': fields.many2many('stock.move', 'stock_quant_move_rel', 'quant_id', 'move_id', 'Moves', help='Moves that operate(d) on this quant', copy=False),
'company_id': fields.many2one('res.company', 'Company', help="The company to which the quants belong", required=True, readonly=True, select=True),
'inventory_value': fields.function(_calc_inventory_value, string="Inventory Value", type='float', readonly=True),
# Used for negative quants to reconcile after compensated by a new positive one
'propagated_from_id': fields.many2one('stock.quant', 'Linked Quant', help='The negative quant this is coming from', readonly=True, select=True),
'negative_move_id': fields.many2one('stock.move', 'Move Negative Quant', help='If this is a negative quant, this will be the move that caused this negative quant.', readonly=True),
'negative_dest_location_id': fields.related('negative_move_id', 'location_dest_id', type='many2one', relation='stock.location', string="Negative Destination Location", readonly=True,
help="Technical field used to record the destination location of a move that created a negative quant"),
}
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.quant', context=c),
}
def init(self, cr):
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('stock_quant_product_location_index',))
if not cr.fetchone():
cr.execute('CREATE INDEX stock_quant_product_location_index ON stock_quant (product_id, location_id, company_id, qty, in_date, reservation_id)')
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
''' Overwrite the read_group in order to sum the function field 'inventory_value' in group by'''
res = super(stock_quant, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
if 'inventory_value' in fields:
for line in res:
if '__domain' in line:
lines = self.search(cr, uid, line['__domain'], context=context)
inv_value = 0.0
for line2 in self.browse(cr, uid, lines, context=context):
inv_value += line2.inventory_value
line['inventory_value'] = inv_value
return res
def action_view_quant_history(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays the history of the quant, i.e.
        all the stock moves that led to the creation of this quant, with this quant's quantity.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'stock', 'action_move_form2')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context={})[0]
move_ids = []
for quant in self.browse(cr, uid, ids, context=context):
move_ids += [move.id for move in quant.history_ids]
result['domain'] = "[('id','in',[" + ','.join(map(str, move_ids)) + "])]"
return result
def quants_reserve(self, cr, uid, quants, move, link=False, context=None):
        '''This function reserves quants for the given move (and optionally the given link). If the total quantity reserved is enough, the move's state
        is also set to 'assigned'
        :param quants: list of tuple(quant browse record or None, qty to reserve). If None is given as first tuple element, the item will be ignored. Negative quants must not be passed as arguments
:param move: browse record
:param link: browse record (stock.move.operation.link)
'''
toreserve = []
reserved_availability = move.reserved_availability
#split quants if needed
for quant, qty in quants:
if qty <= 0.0 or (quant and quant.qty <= 0.0):
raise osv.except_osv(_('Error!'), _('You can not reserve a negative quantity or a negative quant.'))
if not quant:
continue
self._quant_split(cr, uid, quant, qty, context=context)
toreserve.append(quant.id)
reserved_availability += quant.qty
#reserve quants
if toreserve:
self.write(cr, SUPERUSER_ID, toreserve, {'reservation_id': move.id}, context=context)
#if move has a picking_id, write on that picking that pack_operation might have changed and need to be recomputed
if move.picking_id:
self.pool.get('stock.picking').write(cr, uid, [move.picking_id.id], {'recompute_pack_op': True}, context=context)
#check if move'state needs to be set as 'assigned'
rounding = move.product_id.uom_id.rounding
if float_compare(reserved_availability, move.product_qty, precision_rounding=rounding) == 0 and move.state in ('confirmed', 'waiting') :
self.pool.get('stock.move').write(cr, uid, [move.id], {'state': 'assigned'}, context=context)
elif float_compare(reserved_availability, 0, precision_rounding=rounding) > 0 and not move.partially_available:
self.pool.get('stock.move').write(cr, uid, [move.id], {'partially_available': True}, context=context)
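    # Hedged usage sketch (not part of the original module): quants_reserve expects the
    # list of (quant_or_None, qty) tuples typically produced by quants_get_prefered_domain;
    # quant_obj, move and ctx below are assumed to exist in the caller:
    #   quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id,
    #                                                 move.product_id, move.product_qty,
    #                                                 context=ctx)
    #   quant_obj.quants_reserve(cr, uid, quants, move, context=ctx)
    # If the reserved total covers move.product_qty the move becomes 'assigned';
    # if only part of it is covered, the move is flagged as partially_available.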
def quants_move(self, cr, uid, quants, move, location_to, location_from=False, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False, context=None):
"""Moves all given stock.quant in the given destination location. Unreserve from current move.
:param quants: list of tuple(browse record(stock.quant) or None, quantity to move)
:param move: browse record (stock.move)
:param location_to: browse record (stock.location) depicting where the quants have to be moved
        :param location_from: optional browse record (stock.location) explaining where the quant has to be taken (may differ from the move source location in case a removal strategy applies). This parameter is only used to pass to _quant_create if a negative quant must be created
:param lot_id: ID of the lot that must be set on the quants to move
:param owner_id: ID of the partner that must own the quants to move
:param src_package_id: ID of the package that contains the quants to move
:param dest_package_id: ID of the package that must be set on the moved quant
"""
quants_reconcile = []
to_move_quants = []
self._check_location(cr, uid, location_to, context=context)
for quant, qty in quants:
if not quant:
#If quant is None, we will create a quant to move (and potentially a negative counterpart too)
quant = self._quant_create(cr, uid, qty, move, lot_id=lot_id, owner_id=owner_id, src_package_id=src_package_id, dest_package_id=dest_package_id, force_location_from=location_from, force_location_to=location_to, context=context)
else:
self._quant_split(cr, uid, quant, qty, context=context)
to_move_quants.append(quant)
quants_reconcile.append(quant)
if to_move_quants:
to_recompute_move_ids = [x.reservation_id.id for x in to_move_quants if x.reservation_id and x.reservation_id.id != move.id]
self.move_quants_write(cr, uid, to_move_quants, move, location_to, dest_package_id, context=context)
self.pool.get('stock.move').recalculate_move_state(cr, uid, to_recompute_move_ids, context=context)
if location_to.usage == 'internal':
# Do manual search for quant to avoid full table scan (order by id)
cr.execute("""
SELECT 0 FROM stock_quant, stock_location WHERE product_id = %s AND stock_location.id = stock_quant.location_id AND
((stock_location.parent_left >= %s AND stock_location.parent_left < %s) OR stock_location.id = %s) AND qty < 0.0 LIMIT 1
""", (move.product_id.id, location_to.parent_left, location_to.parent_right, location_to.id))
if cr.fetchone():
for quant in quants_reconcile:
self._quant_reconcile_negative(cr, uid, quant, move, context=context)
def move_quants_write(self, cr, uid, quants, move, location_dest_id, dest_package_id, context=None):
context=context or {}
vals = {'location_id': location_dest_id.id,
'history_ids': [(4, move.id)],
'reservation_id': False}
if not context.get('entire_pack'):
vals.update({'package_id': dest_package_id})
self.write(cr, SUPERUSER_ID, [q.id for q in quants], vals, context=context)
def quants_get_prefered_domain(self, cr, uid, location, product, qty, domain=None, prefered_domain_list=[], restrict_lot_id=False, restrict_partner_id=False, context=None):
        ''' This function tries to find quants in the given location for the given domain, first limiting
            the choice to the quants that also match the first item of prefered_domain_list. If the requested qty is not reached,
            it tries to find the remaining quantity by looping over prefered_domain_list (trying the second item and so on).
            Make sure the quants aren't found twice => all the domains of prefered_domain_list should be orthogonal
'''
if domain is None:
domain = []
quants = [(None, qty)]
#don't look for quants in location that are of type production, supplier or inventory.
if location.usage in ['inventory', 'production', 'supplier']:
return quants
res_qty = qty
if not prefered_domain_list:
return self.quants_get(cr, uid, location, product, qty, domain=domain, restrict_lot_id=restrict_lot_id, restrict_partner_id=restrict_partner_id, context=context)
for prefered_domain in prefered_domain_list:
res_qty_cmp = float_compare(res_qty, 0, precision_rounding=product.uom_id.rounding)
if res_qty_cmp > 0:
                #try to replace the last tuple (None, res_qty) with something that wasn't chosen at first because of the preferred order
quants.pop()
tmp_quants = self.quants_get(cr, uid, location, product, res_qty, domain=domain + prefered_domain, restrict_lot_id=restrict_lot_id, restrict_partner_id=restrict_partner_id, context=context)
for quant in tmp_quants:
if quant[0]:
res_qty -= quant[1]
quants += tmp_quants
return quants
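    # Illustrative note (assumption): the domains in prefered_domain_list must be
    # orthogonal, i.e. no quant may match two of them. A hypothetical orthogonal pair:
    #   prefered_domain_list = [[('reservation_id', '=', False)],
    #                           [('reservation_id', '!=', False)]]
    # Quants matching the first domain are consumed first; whatever quantity is still
    # missing after the whole list is exhausted stays as a trailing (None, qty) tuple.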
def quants_get(self, cr, uid, location, product, qty, domain=None, restrict_lot_id=False, restrict_partner_id=False, context=None):
"""
Use the removal strategies of product to search for the correct quants
If you inherit, put the super at the end of your method.
:location: browse record of the parent location where the quants have to be found
:product: browse record of the product to find
:qty in UoM of product
"""
result = []
domain = domain or [('qty', '>', 0.0)]
if restrict_partner_id:
domain += [('owner_id', '=', restrict_partner_id)]
if restrict_lot_id:
domain += [('lot_id', '=', restrict_lot_id)]
if location:
removal_strategy = self.pool.get('stock.location').get_removal_strategy(cr, uid, location, product, context=context)
result += self.apply_removal_strategy(cr, uid, location, product, qty, domain, removal_strategy, context=context)
return result
def apply_removal_strategy(self, cr, uid, location, product, quantity, domain, removal_strategy, context=None):
if removal_strategy == 'fifo':
order = 'in_date, id'
return self._quants_get_order(cr, uid, location, product, quantity, domain, order, context=context)
elif removal_strategy == 'lifo':
order = 'in_date desc, id desc'
return self._quants_get_order(cr, uid, location, product, quantity, domain, order, context=context)
        raise osv.except_osv(_('Error!'), _('Removal strategy %s not implemented.') % (removal_strategy,))
def _quant_create(self, cr, uid, qty, move, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False,
force_location_from=False, force_location_to=False, context=None):
'''Create a quant in the destination location and create a negative quant in the source location if it's an internal location.
'''
if context is None:
context = {}
price_unit = self.pool.get('stock.move').get_price_unit(cr, uid, move, context=context)
location = force_location_to or move.location_dest_id
rounding = move.product_id.uom_id.rounding
vals = {
'product_id': move.product_id.id,
'location_id': location.id,
'qty': float_round(qty, precision_rounding=rounding),
'cost': price_unit,
'history_ids': [(4, move.id)],
'in_date': datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'company_id': move.company_id.id,
'lot_id': lot_id,
'owner_id': owner_id,
'package_id': dest_package_id,
}
if move.location_id.usage == 'internal':
#if we were trying to move something from an internal location and reach here (quant creation),
#it means that a negative quant has to be created as well.
negative_vals = vals.copy()
negative_vals['location_id'] = force_location_from and force_location_from.id or move.location_id.id
negative_vals['qty'] = float_round(-qty, precision_rounding=rounding)
negative_vals['cost'] = price_unit
negative_vals['negative_move_id'] = move.id
negative_vals['package_id'] = src_package_id
negative_quant_id = self.create(cr, SUPERUSER_ID, negative_vals, context=context)
vals.update({'propagated_from_id': negative_quant_id})
#create the quant as superuser, because we want to restrict the creation of quant manually: we should always use this method to create quants
quant_id = self.create(cr, SUPERUSER_ID, vals, context=context)
return self.browse(cr, uid, quant_id, context=context)
def _quant_split(self, cr, uid, quant, qty, context=None):
context = context or {}
rounding = quant.product_id.uom_id.rounding
if float_compare(abs(quant.qty), abs(qty), precision_rounding=rounding) <= 0: # if quant <= qty in abs, take it entirely
return False
qty_round = float_round(qty, precision_rounding=rounding)
new_qty_round = float_round(quant.qty - qty, precision_rounding=rounding)
# Fetch the history_ids manually as it will not do a join with the stock moves then (=> a lot faster)
cr.execute("""SELECT move_id FROM stock_quant_move_rel WHERE quant_id = %s""", (quant.id,))
res = cr.fetchall()
new_quant = self.copy(cr, SUPERUSER_ID, quant.id, default={'qty': new_qty_round, 'history_ids': [(4, x[0]) for x in res]}, context=context)
self.write(cr, SUPERUSER_ID, quant.id, {'qty': qty_round}, context=context)
return self.browse(cr, uid, new_quant, context=context)
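    # Hedged note (assumption): _quant_split(quant, qty) keeps `qty` units on the
    # original quant and moves the remainder onto a copy, e.g. splitting a 10-unit
    # quant with qty=3 leaves the original at 3 and returns a new quant holding 7;
    # when abs(qty) >= abs(quant.qty) nothing is split and False is returned.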
def _get_latest_move(self, cr, uid, quant, context=None):
move = False
for m in quant.history_ids:
if not move or m.date > move.date:
move = m
return move
@api.cr_uid_ids_context
def _quants_merge(self, cr, uid, solved_quant_ids, solving_quant, context=None):
path = []
for move in solving_quant.history_ids:
path.append((4, move.id))
self.write(cr, SUPERUSER_ID, solved_quant_ids, {'history_ids': path}, context=context)
def _quant_reconcile_negative(self, cr, uid, quant, move, context=None):
"""
        When a new quant arrives in a location, try to reconcile it with
        negative quants. If possible, apply the cost of the new
        quant to the counterpart of the negative quant.
"""
solving_quant = quant
dom = [('qty', '<', 0)]
if quant.lot_id:
dom += [('lot_id', '=', quant.lot_id.id)]
dom += [('owner_id', '=', quant.owner_id.id)]
dom += [('package_id', '=', quant.package_id.id)]
dom += [('id', '!=', quant.propagated_from_id.id)]
quants = self.quants_get(cr, uid, quant.location_id, quant.product_id, quant.qty, dom, context=context)
product_uom_rounding = quant.product_id.uom_id.rounding
for quant_neg, qty in quants:
if not quant_neg or not solving_quant:
continue
to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id)], context=context)
if not to_solve_quant_ids:
continue
solving_qty = qty
solved_quant_ids = []
for to_solve_quant in self.browse(cr, uid, to_solve_quant_ids, context=context):
if float_compare(solving_qty, 0, precision_rounding=product_uom_rounding) <= 0:
continue
solved_quant_ids.append(to_solve_quant.id)
self._quant_split(cr, uid, to_solve_quant, min(solving_qty, to_solve_quant.qty), context=context)
solving_qty -= min(solving_qty, to_solve_quant.qty)
remaining_solving_quant = self._quant_split(cr, uid, solving_quant, qty, context=context)
remaining_neg_quant = self._quant_split(cr, uid, quant_neg, -qty, context=context)
#if the reconciliation was not complete, we need to link together the remaining parts
if remaining_neg_quant:
remaining_to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id), ('id', 'not in', solved_quant_ids)], context=context)
if remaining_to_solve_quant_ids:
self.write(cr, SUPERUSER_ID, remaining_to_solve_quant_ids, {'propagated_from_id': remaining_neg_quant.id}, context=context)
if solving_quant.propagated_from_id and solved_quant_ids:
self.write(cr, SUPERUSER_ID, solved_quant_ids, {'propagated_from_id': solving_quant.propagated_from_id.id}, context=context)
#delete the reconciled quants, as it is replaced by the solved quants
self.unlink(cr, SUPERUSER_ID, [quant_neg.id], context=context)
if solved_quant_ids:
#price update + accounting entries adjustments
self._price_update(cr, uid, solved_quant_ids, solving_quant.cost, context=context)
#merge history (and cost?)
self._quants_merge(cr, uid, solved_quant_ids, solving_quant, context=context)
self.unlink(cr, SUPERUSER_ID, [solving_quant.id], context=context)
solving_quant = remaining_solving_quant
def _price_update(self, cr, uid, ids, newprice, context=None):
self.write(cr, SUPERUSER_ID, ids, {'cost': newprice}, context=context)
def quants_unreserve(self, cr, uid, move, context=None):
related_quants = [x.id for x in move.reserved_quant_ids]
if related_quants:
#if move has a picking_id, write on that picking that pack_operation might have changed and need to be recomputed
if move.picking_id:
self.pool.get('stock.picking').write(cr, uid, [move.picking_id.id], {'recompute_pack_op': True}, context=context)
if move.partially_available:
self.pool.get("stock.move").write(cr, uid, [move.id], {'partially_available': False}, context=context)
self.write(cr, SUPERUSER_ID, related_quants, {'reservation_id': False}, context=context)
    def _quants_get_order(self, cr, uid, location, product, quantity, domain=None, orderby='in_date', context=None):
        ''' Implementation of removal strategies
            If it cannot reserve, it will return a tuple (None, qty)
        '''
        if context is None:
            context = {}
        # work on a copy so that neither the caller's list nor a shared default is mutated in place
        domain = list(domain) if domain else []
        domain += location and [('location_id', 'child_of', location.id)] or []
domain += [('product_id', '=', product.id)]
if context.get('force_company'):
domain += [('company_id', '=', context.get('force_company'))]
else:
domain += [('company_id', '=', self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id)]
res = []
offset = 0
while float_compare(quantity, 0, precision_rounding=product.uom_id.rounding) > 0:
quants = self.search(cr, uid, domain, order=orderby, limit=10, offset=offset, context=context)
if not quants:
res.append((None, quantity))
break
for quant in self.browse(cr, uid, quants, context=context):
rounding = product.uom_id.rounding
if float_compare(quantity, abs(quant.qty), precision_rounding=rounding) >= 0:
res += [(quant, abs(quant.qty))]
quantity -= abs(quant.qty)
elif float_compare(quantity, 0.0, precision_rounding=rounding) != 0:
res += [(quant, quantity)]
quantity = 0
break
offset += 10
return res
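    # Illustrative sketch (assumption): the result pairs each quant with the quantity
    # taken from it, in removal-strategy order, and ends with (None, missing_qty) when
    # the location cannot cover the request, e.g. asking for 7.0 with only 6.0 on hand:
    #   [(quant_4_units, 4.0), (quant_2_units, 2.0), (None, 1.0)]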
def _check_location(self, cr, uid, location, context=None):
if location.usage == 'view':
raise osv.except_osv(_('Error'), _('You cannot move to a location of type view %s.') % (location.name))
return True
#----------------------------------------------------------
# Stock Picking
#----------------------------------------------------------
class stock_picking(osv.osv):
_name = "stock.picking"
_inherit = ['mail.thread']
_description = "Picking List"
_order = "priority desc, date asc, id desc"
def _set_min_date(self, cr, uid, id, field, value, arg, context=None):
move_obj = self.pool.get("stock.move")
if value:
move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines]
move_obj.write(cr, uid, move_ids, {'date_expected': value}, context=context)
def _set_priority(self, cr, uid, id, field, value, arg, context=None):
move_obj = self.pool.get("stock.move")
if value:
move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines]
move_obj.write(cr, uid, move_ids, {'priority': value}, context=context)
def get_min_max_date(self, cr, uid, ids, field_name, arg, context=None):
""" Finds minimum and maximum dates for picking.
@return: Dictionary of values
"""
res = {}
for id in ids:
res[id] = {'min_date': False, 'max_date': False, 'priority': '1'}
if not ids:
return res
cr.execute("""select
picking_id,
min(date_expected),
max(date_expected),
max(priority)
from
stock_move
where
picking_id IN %s
group by
picking_id""", (tuple(ids),))
for pick, dt1, dt2, prio in cr.fetchall():
res[pick]['min_date'] = dt1
res[pick]['max_date'] = dt2
res[pick]['priority'] = prio
return res
def create(self, cr, user, vals, context=None):
context = context or {}
if ('name' not in vals) or (vals.get('name') in ('/', False)):
ptype_id = vals.get('picking_type_id', context.get('default_picking_type_id', False))
sequence_id = self.pool.get('stock.picking.type').browse(cr, user, ptype_id, context=context).sequence_id.id
vals['name'] = self.pool.get('ir.sequence').get_id(cr, user, sequence_id, 'id', context=context)
return super(stock_picking, self).create(cr, user, vals, context)
def _state_get(self, cr, uid, ids, field_name, arg, context=None):
'''The state of a picking depends on the state of its related stock.move
draft: the picking has no line or any one of the lines is draft
done, draft, cancel: all lines are done / draft / cancel
confirmed, waiting, assigned, partially_available depends on move_type (all at once or partial)
'''
res = {}
for pick in self.browse(cr, uid, ids, context=context):
if (not pick.move_lines) or any([x.state == 'draft' for x in pick.move_lines]):
res[pick.id] = 'draft'
continue
if all([x.state == 'cancel' for x in pick.move_lines]):
res[pick.id] = 'cancel'
continue
if all([x.state in ('cancel', 'done') for x in pick.move_lines]):
res[pick.id] = 'done'
continue
order = {'confirmed': 0, 'waiting': 1, 'assigned': 2}
order_inv = {0: 'confirmed', 1: 'waiting', 2: 'assigned'}
lst = [order[x.state] for x in pick.move_lines if x.state not in ('cancel', 'done')]
if pick.move_type == 'one':
res[pick.id] = order_inv[min(lst)]
else:
                #we are in the case of partial delivery, so if all moves are assigned, the picking
                #should be assigned too; else if one of the moves is assigned or partially available, the picking should be
                #in the partially available state; otherwise, the picking is in the waiting or confirmed state
res[pick.id] = order_inv[max(lst)]
if not all(x == 2 for x in lst):
if any(x == 2 for x in lst):
res[pick.id] = 'partially_available'
else:
                    #if no move is assigned, check whether at least one product is partially available
for move in pick.move_lines:
if move.partially_available:
res[pick.id] = 'partially_available'
break
return res
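    # Hedged example (not in the original source) of the computation above: with
    # move_type='direct' (partial) and move states ['assigned', 'confirmed'],
    # order_inv[max(lst)] gives 'assigned' but, since not every move is assigned,
    # the picking is downgraded to 'partially_available'; with move_type='one' the
    # same picking would report order_inv[min(lst)] == 'confirmed'.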
def _get_pickings(self, cr, uid, ids, context=None):
res = set()
for move in self.browse(cr, uid, ids, context=context):
if move.picking_id:
res.add(move.picking_id.id)
return list(res)
def _get_pickings_dates_priority(self, cr, uid, ids, context=None):
res = set()
for move in self.browse(cr, uid, ids, context=context):
if move.picking_id and (not (move.picking_id.min_date < move.date_expected < move.picking_id.max_date) or move.priority > move.picking_id.priority):
res.add(move.picking_id.id)
return list(res)
def _get_pack_operation_exist(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for pick in self.browse(cr, uid, ids, context=context):
res[pick.id] = False
if pick.pack_operation_ids:
res[pick.id] = True
return res
def _get_quant_reserved_exist(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for pick in self.browse(cr, uid, ids, context=context):
res[pick.id] = False
for move in pick.move_lines:
if move.reserved_quant_ids:
res[pick.id] = True
continue
return res
def check_group_lot(self, cr, uid, context=None):
""" This function will return true if we have the setting to use lots activated. """
return self.pool.get('res.users').has_group(cr, uid, 'stock.group_production_lot')
def check_group_pack(self, cr, uid, context=None):
""" This function will return true if we have the setting to use package activated. """
return self.pool.get('res.users').has_group(cr, uid, 'stock.group_tracking_lot')
def action_assign_owner(self, cr, uid, ids, context=None):
for picking in self.browse(cr, uid, ids, context=context):
packop_ids = [op.id for op in picking.pack_operation_ids]
self.pool.get('stock.pack.operation').write(cr, uid, packop_ids, {'owner_id': picking.owner_id.id}, context=context)
_columns = {
'name': fields.char('Reference', select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=False),
'origin': fields.char('Source Document', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Reference of the document", select=True),
'backorder_id': fields.many2one('stock.picking', 'Back Order of', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True, copy=False),
'note': fields.text('Notes'),
        'move_type': fields.selection([('direct', 'Partial'), ('one', 'All at once')], 'Delivery Method', required=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="It specifies whether the goods are to be delivered partially or all at once"),
'state': fields.function(_state_get, type="selection", copy=False,
store={
'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_type'], 20),
'stock.move': (_get_pickings, ['state', 'picking_id', 'partially_available'], 20)},
selection=[
('draft', 'Draft'),
('cancel', 'Cancelled'),
('waiting', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('partially_available', 'Partially Available'),
('assigned', 'Ready to Transfer'),
('done', 'Transferred'),
], string='Status', readonly=True, select=True, track_visibility='onchange',
help="""
* Draft: not confirmed yet and will not be scheduled until confirmed\n
* Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
* Waiting Availability: still waiting for the availability of products\n
* Partially Available: some products are available and reserved\n
* Ready to Transfer: products reserved, simply waiting for confirmation.\n
* Transferred: has been processed, can't be modified or cancelled anymore\n
* Cancelled: has been cancelled, can't be confirmed anymore"""
),
'priority': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_priority, type='selection', selection=procurement.PROCUREMENT_PRIORITIES, string='Priority',
store={'stock.move': (_get_pickings_dates_priority, ['priority', 'picking_id'], 20)}, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, select=1, help="Priority for this picking. Setting manually a value here would set it as priority for all the moves",
track_visibility='onchange', required=True),
'min_date': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_min_date,
store={'stock.move': (_get_pickings_dates_priority, ['date_expected', 'picking_id'], 20)}, type='datetime', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Scheduled Date', select=1, help="Scheduled time for the first part of the shipment to be processed. Setting manually a value here would set it as expected date for all the stock moves.", track_visibility='onchange'),
'max_date': fields.function(get_min_max_date, multi="min_max_date",
store={'stock.move': (_get_pickings_dates_priority, ['date_expected', 'picking_id'], 20)}, type='datetime', string='Max. Expected Date', select=2, help="Scheduled time for the last part of the shipment to be processed"),
'date': fields.datetime('Creation Date', help="Creation Date, usually the time of the order", select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, track_visibility='onchange'),
'date_done': fields.datetime('Date of Transfer', help="Date of Completion", states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=False),
'move_lines': fields.one2many('stock.move', 'picking_id', 'Internal Moves', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=True),
'quant_reserved_exist': fields.function(_get_quant_reserved_exist, type='boolean', string='Quant already reserved ?', help='technical field used to know if there is already at least one quant reserved on moves of a given picking'),
'partner_id': fields.many2one('res.partner', 'Partner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
'pack_operation_ids': fields.one2many('stock.pack.operation', 'picking_id', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Related Packing Operations'),
'pack_operation_exist': fields.function(_get_pack_operation_exist, type='boolean', string='Pack Operation Exists?', help='technical field for attrs in view'),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, required=True),
'picking_type_code': fields.related('picking_type_id', 'code', type='char', string='Picking Type Code', help="Technical field used to display the correct label on print button in the picking view"),
'owner_id': fields.many2one('res.partner', 'Owner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Default Owner"),
# Used to search on pickings
'product_id': fields.related('move_lines', 'product_id', type='many2one', relation='product.product', string='Product'),
        'recompute_pack_op': fields.boolean('Recompute pack operation?', help='True if reserved quants changed, which means we might need to recompute the package operations', copy=False),
'location_id': fields.related('move_lines', 'location_id', type='many2one', relation='stock.location', string='Location', readonly=True),
'location_dest_id': fields.related('move_lines', 'location_dest_id', type='many2one', relation='stock.location', string='Destination Location', readonly=True),
'group_id': fields.related('move_lines', 'group_id', type='many2one', relation='procurement.group', string='Procurement Group', readonly=True,
store={
'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_lines'], 10),
'stock.move': (_get_pickings, ['group_id', 'picking_id'], 10),
}),
}
_defaults = {
'name': '/',
'state': 'draft',
'move_type': 'direct',
'priority': '1', # normal
'date': fields.datetime.now,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.picking', context=c),
'recompute_pack_op': True,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per company!'),
]
def do_print_picking(self, cr, uid, ids, context=None):
'''This function prints the picking list'''
context = dict(context or {}, active_ids=ids)
return self.pool.get("report").get_action(cr, uid, ids, 'stock.report_picking', context=context)
def action_confirm(self, cr, uid, ids, context=None):
todo = []
todo_force_assign = []
for picking in self.browse(cr, uid, ids, context=context):
if picking.location_id.usage in ('supplier', 'inventory', 'production'):
todo_force_assign.append(picking.id)
for r in picking.move_lines:
if r.state == 'draft':
todo.append(r.id)
if len(todo):
self.pool.get('stock.move').action_confirm(cr, uid, todo, context=context)
if todo_force_assign:
self.force_assign(cr, uid, todo_force_assign, context=context)
return True
def action_assign(self, cr, uid, ids, context=None):
""" Check availability of picking moves.
This has the effect of changing the state and reserve quants on available moves, and may
also impact the state of the picking as it is computed based on move's states.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
if pick.state == 'draft':
self.action_confirm(cr, uid, [pick.id], context=context)
#skip the moves that don't need to be checked
move_ids = [x.id for x in pick.move_lines if x.state not in ('draft', 'cancel', 'done')]
if not move_ids:
raise osv.except_osv(_('Warning!'), _('Nothing to check the availability for.'))
self.pool.get('stock.move').action_assign(cr, uid, move_ids, context=context)
return True
def force_assign(self, cr, uid, ids, context=None):
""" Changes state of picking to available if moves are confirmed or waiting.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
move_ids = [x.id for x in pick.move_lines if x.state in ['confirmed', 'waiting']]
self.pool.get('stock.move').force_assign(cr, uid, move_ids, context=context)
#pack_operation might have changed and need to be recomputed
self.write(cr, uid, ids, {'recompute_pack_op': True}, context=context)
return True
def action_cancel(self, cr, uid, ids, context=None):
for pick in self.browse(cr, uid, ids, context=context):
ids2 = [move.id for move in pick.move_lines]
self.pool.get('stock.move').action_cancel(cr, uid, ids2, context)
return True
def action_done(self, cr, uid, ids, context=None):
"""Changes picking state to done by processing the Stock Moves of the Picking
Normally that happens when the button "Done" is pressed on a Picking view.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
todo = []
for move in pick.move_lines:
if move.state == 'draft':
todo.extend(self.pool.get('stock.move').action_confirm(cr, uid, [move.id], context=context))
elif move.state in ('assigned', 'confirmed'):
todo.append(move.id)
if len(todo):
self.pool.get('stock.move').action_done(cr, uid, todo, context=context)
return True
def unlink(self, cr, uid, ids, context=None):
#on picking deletion, cancel its move then unlink them too
move_obj = self.pool.get('stock.move')
context = context or {}
for pick in self.browse(cr, uid, ids, context=context):
move_ids = [move.id for move in pick.move_lines]
move_obj.action_cancel(cr, uid, move_ids, context=context)
move_obj.unlink(cr, uid, move_ids, context=context)
return super(stock_picking, self).unlink(cr, uid, ids, context=context)
def write(self, cr, uid, ids, vals, context=None):
if vals.get('move_lines') and not vals.get('pack_operation_ids'):
# pack operations are directly dependant of move lines, it needs to be recomputed
pack_operation_obj = self.pool['stock.pack.operation']
existing_package_ids = pack_operation_obj.search(cr, uid, [('picking_id', 'in', ids)], context=context)
if existing_package_ids:
pack_operation_obj.unlink(cr, uid, existing_package_ids, context)
res = super(stock_picking, self).write(cr, uid, ids, vals, context=context)
#if we changed the move lines or the pack operations, we need to recompute the remaining quantities of both
if 'move_lines' in vals or 'pack_operation_ids' in vals:
self.do_recompute_remaining_quantities(cr, uid, ids, context=context)
return res
def _create_backorder(self, cr, uid, picking, backorder_moves=[], context=None):
""" Move all non-done lines into a new backorder picking. If the key 'do_only_split' is given in the context, then move all lines not in context.get('split', []) instead of all non-done lines.
"""
if not backorder_moves:
backorder_moves = picking.move_lines
backorder_move_ids = [x.id for x in backorder_moves if x.state not in ('done', 'cancel')]
if 'do_only_split' in context and context['do_only_split']:
backorder_move_ids = [x.id for x in backorder_moves if x.id not in context.get('split', [])]
if backorder_move_ids:
backorder_id = self.copy(cr, uid, picking.id, {
'name': '/',
'move_lines': [],
'pack_operation_ids': [],
'backorder_id': picking.id,
})
backorder = self.browse(cr, uid, backorder_id, context=context)
self.message_post(cr, uid, picking.id, body=_("Back order <em>%s</em> <b>created</b>.") % (backorder.name), context=context)
move_obj = self.pool.get("stock.move")
move_obj.write(cr, uid, backorder_move_ids, {'picking_id': backorder_id}, context=context)
self.write(cr, uid, [picking.id], {'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
self.action_confirm(cr, uid, [backorder_id], context=context)
return backorder_id
return False
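    # Hedged usage sketch (assumption): _create_backorder is typically called while
    # transferring a picking that was only partially processed; picking_obj and ctx
    # below are hypothetical names in the caller:
    #   backorder_id = picking_obj._create_backorder(cr, uid, picking, context=ctx)
    #   # moves of `picking` that are neither done nor cancelled are re-attached to
    #   # the new picking, which is then confirmed and linked through backorder_id.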
@api.cr_uid_ids_context
def recheck_availability(self, cr, uid, picking_ids, context=None):
self.action_assign(cr, uid, picking_ids, context=context)
self.do_prepare_partial(cr, uid, picking_ids, context=context)
def _get_top_level_packages(self, cr, uid, quants_suggested_locations, context=None):
"""This method searches for the higher level packages that can be moved as a single operation, given a list of quants
to move and their suggested destination, and returns the list of matching packages.
"""
# Try to find as much as possible top-level packages that can be moved
pack_obj = self.pool.get("stock.quant.package")
quant_obj = self.pool.get("stock.quant")
top_lvl_packages = set()
quants_to_compare = quants_suggested_locations.keys()
for pack in list(set([x.package_id for x in quants_suggested_locations.keys() if x and x.package_id])):
loop = True
test_pack = pack
good_pack = False
pack_destination = False
while loop:
pack_quants = pack_obj.get_content(cr, uid, [test_pack.id], context=context)
all_in = True
for quant in quant_obj.browse(cr, uid, pack_quants, context=context):
# If the quant is not in the quants to compare and not in the common location
                    if quant not in quants_to_compare:
all_in = False
break
else:
                        #if a putaway strategy applies, the destination location of each quant may be different (and thus the package should not be taken as a single operation)
if not pack_destination:
pack_destination = quants_suggested_locations[quant]
elif pack_destination != quants_suggested_locations[quant]:
all_in = False
break
if all_in:
good_pack = test_pack
if test_pack.parent_id:
test_pack = test_pack.parent_id
else:
#stop the loop when there's no parent package anymore
loop = False
else:
#stop the loop when the package test_pack is not totally reserved for moves of this picking
#(some quants may be reserved for other picking or not reserved at all)
loop = False
if good_pack:
top_lvl_packages.add(good_pack)
return list(top_lvl_packages)
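    # Illustrative note (assumption): a package qualifies only if every quant it
    # contains (recursively) is part of this picking's quants and shares the same
    # suggested destination; the loop then climbs to the highest such parent package,
    # so e.g. a whole pallet is returned instead of its individual boxes.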
def _prepare_pack_ops(self, cr, uid, picking, quants, forced_qties, context=None):
""" returns a list of dict, ready to be used in create() of stock.pack.operation.
:param picking: browse record (stock.picking)
:param quants: browse record list (stock.quant). List of quants associated to the picking
:param forced_qties: dictionary showing for each product (keys) its corresponding quantity (value) that is not covered by the quants associated to the picking
"""
def _picking_putaway_apply(product):
location = False
# Search putaway strategy
if product_putaway_strats.get(product.id):
location = product_putaway_strats[product.id]
else:
location = self.pool.get('stock.location').get_putaway_strategy(cr, uid, picking.location_dest_id, product, context=context)
product_putaway_strats[product.id] = location
return location or picking.location_dest_id.id
        # If we encounter a UoM that is smaller than the default UoM or the one already chosen, use the new one instead.
product_uom = {} # Determines UoM used in pack operations
location_dest_id = None
location_id = None
for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]:
if not product_uom.get(move.product_id.id):
product_uom[move.product_id.id] = move.product_id.uom_id
if move.product_uom.id != move.product_id.uom_id.id and move.product_uom.factor > product_uom[move.product_id.id].factor:
product_uom[move.product_id.id] = move.product_uom
if not move.scrapped:
if location_dest_id and move.location_dest_id.id != location_dest_id:
raise Warning(_('The destination location must be the same for all the moves of the picking.'))
location_dest_id = move.location_dest_id.id
if location_id and move.location_id.id != location_id:
raise Warning(_('The source location must be the same for all the moves of the picking.'))
location_id = move.location_id.id
pack_obj = self.pool.get("stock.quant.package")
quant_obj = self.pool.get("stock.quant")
vals = []
qtys_grouped = {}
#for each quant of the picking, find the suggested location
quants_suggested_locations = {}
product_putaway_strats = {}
for quant in quants:
if quant.qty <= 0:
continue
suggested_location_id = _picking_putaway_apply(quant.product_id)
quants_suggested_locations[quant] = suggested_location_id
        #find the packages we can move as a whole
top_lvl_packages = self._get_top_level_packages(cr, uid, quants_suggested_locations, context=context)
# and then create pack operations for the top-level packages found
for pack in top_lvl_packages:
pack_quant_ids = pack_obj.get_content(cr, uid, [pack.id], context=context)
pack_quants = quant_obj.browse(cr, uid, pack_quant_ids, context=context)
vals.append({
'picking_id': picking.id,
'package_id': pack.id,
'product_qty': 1.0,
'location_id': pack.location_id.id,
'location_dest_id': quants_suggested_locations[pack_quants[0]],
'owner_id': pack.owner_id.id,
})
#remove the quants inside the package so that they are excluded from the rest of the computation
for quant in pack_quants:
del quants_suggested_locations[quant]
# Go through all remaining reserved quants and group by product, package, lot, owner, source location and dest location
for quant, dest_location_id in quants_suggested_locations.items():
key = (quant.product_id.id, quant.package_id.id, quant.lot_id.id, quant.owner_id.id, quant.location_id.id, dest_location_id)
if qtys_grouped.get(key):
qtys_grouped[key] += quant.qty
else:
qtys_grouped[key] = quant.qty
        # Do the same for the forced quantities (in cases of force_assign or incoming shipment for example)
for product, qty in forced_qties.items():
if qty <= 0:
continue
suggested_location_id = _picking_putaway_apply(product)
key = (product.id, False, False, picking.owner_id.id, picking.location_id.id, suggested_location_id)
if qtys_grouped.get(key):
qtys_grouped[key] += qty
else:
qtys_grouped[key] = qty
# Create the necessary operations for the grouped quants and remaining qtys
uom_obj = self.pool.get('product.uom')
prevals = {}
for key, qty in qtys_grouped.items():
product = self.pool.get("product.product").browse(cr, uid, key[0], context=context)
uom_id = product.uom_id.id
qty_uom = qty
if product_uom.get(key[0]):
uom_id = product_uom[key[0]].id
qty_uom = uom_obj._compute_qty(cr, uid, product.uom_id.id, qty, uom_id)
val_dict = {
'picking_id': picking.id,
'product_qty': qty_uom,
'product_id': key[0],
'package_id': key[1],
'lot_id': key[2],
'owner_id': key[3],
'location_id': key[4],
'location_dest_id': key[5],
'product_uom_id': uom_id,
}
if key[0] in prevals:
prevals[key[0]].append(val_dict)
else:
prevals[key[0]] = [val_dict]
        # prevals holds the operations so that they can be created in the same order as the picking stock moves, if possible
processed_products = set()
for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]:
if move.product_id.id not in processed_products:
vals += prevals.get(move.product_id.id, [])
processed_products.add(move.product_id.id)
return vals
@api.cr_uid_ids_context
def open_barcode_interface(self, cr, uid, picking_ids, context=None):
        final_url = "/barcode/web/#action=stock.ui&picking_id=" + str(picking_ids[0])
        return {'type': 'ir.actions.act_url', 'url': final_url, 'target': 'self'}
@api.cr_uid_ids_context
def do_partial_open_barcode(self, cr, uid, picking_ids, context=None):
self.do_prepare_partial(cr, uid, picking_ids, context=context)
return self.open_barcode_interface(cr, uid, picking_ids, context=context)
@api.cr_uid_ids_context
def do_prepare_partial(self, cr, uid, picking_ids, context=None):
context = context or {}
pack_operation_obj = self.pool.get('stock.pack.operation')
#used to avoid recomputing the remaining quantities at each new pack operation created
ctx = context.copy()
ctx['no_recompute'] = True
#get list of existing operations and delete them
existing_package_ids = pack_operation_obj.search(cr, uid, [('picking_id', 'in', picking_ids)], context=context)
if existing_package_ids:
pack_operation_obj.unlink(cr, uid, existing_package_ids, context)
for picking in self.browse(cr, uid, picking_ids, context=context):
forced_qties = {} # Quantity remaining after calculating reserved quants
picking_quants = []
#Calculate packages, reserved quants, qtys of this picking's moves
for move in picking.move_lines:
if move.state not in ('assigned', 'confirmed', 'waiting'):
continue
move_quants = move.reserved_quant_ids
picking_quants += move_quants
forced_qty = (move.state == 'assigned') and move.product_qty - sum([x.qty for x in move_quants]) or 0
#if we used force_assign() on the move, or if the move is incoming, forced_qty > 0
if float_compare(forced_qty, 0, precision_rounding=move.product_id.uom_id.rounding) > 0:
if forced_qties.get(move.product_id):
forced_qties[move.product_id] += forced_qty
else:
forced_qties[move.product_id] = forced_qty
for vals in self._prepare_pack_ops(cr, uid, picking, picking_quants, forced_qties, context=context):
pack_operation_obj.create(cr, uid, vals, context=ctx)
#recompute the remaining quantities all at once
self.do_recompute_remaining_quantities(cr, uid, picking_ids, context=context)
self.write(cr, uid, picking_ids, {'recompute_pack_op': False}, context=context)
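    # Minimal usage sketch (assumes an Odoo 8 registry/cursor/uid available in old-API code;
    # the picking id 42 is hypothetical):
    #
    #   picking_obj = self.pool.get('stock.picking')
    #   picking_obj.do_prepare_partial(cr, uid, [42], context=context)
    #
    # This deletes any existing pack operations, recreates them from reserved quants and
    # forced quantities, and recomputes the remaining quantities once at the end.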
@api.cr_uid_ids_context
def do_unreserve(self, cr, uid, picking_ids, context=None):
"""
Will remove all quants for picking in picking_ids
"""
moves_to_unreserve = []
pack_line_to_unreserve = []
for picking in self.browse(cr, uid, picking_ids, context=context):
moves_to_unreserve += [m.id for m in picking.move_lines if m.state not in ('done', 'cancel')]
pack_line_to_unreserve += [p.id for p in picking.pack_operation_ids]
if moves_to_unreserve:
if pack_line_to_unreserve:
self.pool.get('stock.pack.operation').unlink(cr, uid, pack_line_to_unreserve, context=context)
self.pool.get('stock.move').do_unreserve(cr, uid, moves_to_unreserve, context=context)
def recompute_remaining_qty(self, cr, uid, picking, context=None):
def _create_link_for_index(operation_id, index, product_id, qty_to_assign, quant_id=False):
move_dict = prod2move_ids[product_id][index]
qty_on_link = min(move_dict['remaining_qty'], qty_to_assign)
self.pool.get('stock.move.operation.link').create(cr, uid, {'move_id': move_dict['move'].id, 'operation_id': operation_id, 'qty': qty_on_link, 'reserved_quant_id': quant_id}, context=context)
if move_dict['remaining_qty'] == qty_on_link:
prod2move_ids[product_id].pop(index)
else:
move_dict['remaining_qty'] -= qty_on_link
return qty_on_link
def _create_link_for_quant(operation_id, quant, qty):
"""create a link for given operation and reserved move of given quant, for the max quantity possible, and returns this quantity"""
if not quant.reservation_id.id:
return _create_link_for_product(operation_id, quant.product_id.id, qty)
qty_on_link = 0
for i in range(0, len(prod2move_ids[quant.product_id.id])):
if prod2move_ids[quant.product_id.id][i]['move'].id != quant.reservation_id.id:
continue
qty_on_link = _create_link_for_index(operation_id, i, quant.product_id.id, qty, quant_id=quant.id)
break
return qty_on_link
def _create_link_for_product(operation_id, product_id, qty):
            '''Create the link(s) between a given operation and the move(s) of the given product, for the given quantity.
            Return True if links could be created for the whole requested quantity (False if there was not enough quantity on the stock moves)'''
qty_to_assign = qty
prod_obj = self.pool.get("product.product")
product = prod_obj.browse(cr, uid, product_id)
rounding = product.uom_id.rounding
qtyassign_cmp = float_compare(qty_to_assign, 0.0, precision_rounding=rounding)
if prod2move_ids.get(product_id):
while prod2move_ids[product_id] and qtyassign_cmp > 0:
qty_on_link = _create_link_for_index(operation_id, 0, product_id, qty_to_assign, quant_id=False)
qty_to_assign -= qty_on_link
qtyassign_cmp = float_compare(qty_to_assign, 0.0, precision_rounding=rounding)
return qtyassign_cmp == 0
uom_obj = self.pool.get('product.uom')
package_obj = self.pool.get('stock.quant.package')
quant_obj = self.pool.get('stock.quant')
link_obj = self.pool.get('stock.move.operation.link')
quants_in_package_done = set()
prod2move_ids = {}
still_to_do = []
        #build a dictionary giving, for each product, the moves and related quantities that can be used in operation links
for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]:
if not prod2move_ids.get(move.product_id.id):
prod2move_ids[move.product_id.id] = [{'move': move, 'remaining_qty': move.product_qty}]
else:
prod2move_ids[move.product_id.id].append({'move': move, 'remaining_qty': move.product_qty})
need_rereserve = False
#sort the operations in order to give higher priority to those with a package, then a serial number
operations = picking.pack_operation_ids
operations = sorted(operations, key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0))
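        # Example of the resulting priority (lower key sorts first): an operation moving an
        # entire package (package_id set, no product_id) gets -4 + -2 = -6; a product taken
        # from a package with a lot gets -2 + -1 = -3; a product with only a lot gets -1;
        # a plain product operation gets 0.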
#delete existing operations to start again from scratch
links = link_obj.search(cr, uid, [('operation_id', 'in', [x.id for x in operations])], context=context)
if links:
link_obj.unlink(cr, uid, links, context=context)
#1) first, try to create links when quants can be identified without any doubt
for ops in operations:
            #for each operation, create the links with the stock move by looking at the matching reserved quants,
            #and defer the operation if there is some ambiguity on the move to select
if ops.package_id and not ops.product_id:
#entire package
quant_ids = package_obj.get_content(cr, uid, [ops.package_id.id], context=context)
for quant in quant_obj.browse(cr, uid, quant_ids, context=context):
remaining_qty_on_quant = quant.qty
if quant.reservation_id:
#avoid quants being counted twice
quants_in_package_done.add(quant.id)
qty_on_link = _create_link_for_quant(ops.id, quant, quant.qty)
remaining_qty_on_quant -= qty_on_link
if remaining_qty_on_quant:
still_to_do.append((ops, quant.product_id.id, remaining_qty_on_quant))
need_rereserve = True
elif ops.product_id.id:
#Check moves with same product
qty_to_assign = uom_obj._compute_qty_obj(cr, uid, ops.product_uom_id, ops.product_qty, ops.product_id.uom_id, context=context)
for move_dict in prod2move_ids.get(ops.product_id.id, []):
move = move_dict['move']
for quant in move.reserved_quant_ids:
if not qty_to_assign > 0:
break
if quant.id in quants_in_package_done:
continue
#check if the quant is matching the operation details
if ops.package_id:
flag = quant.package_id and bool(package_obj.search(cr, uid, [('id', 'child_of', [ops.package_id.id])], context=context)) or False
else:
flag = not quant.package_id.id
flag = flag and ((ops.lot_id and ops.lot_id.id == quant.lot_id.id) or not ops.lot_id)
flag = flag and (ops.owner_id.id == quant.owner_id.id)
if flag:
max_qty_on_link = min(quant.qty, qty_to_assign)
qty_on_link = _create_link_for_quant(ops.id, quant, max_qty_on_link)
qty_to_assign -= qty_on_link
qty_assign_cmp = float_compare(qty_to_assign, 0, precision_rounding=ops.product_id.uom_id.rounding)
if qty_assign_cmp > 0:
                    #the reserved qty is less than the qty put in operations. We still need to create a link, but it is deferred until after
                    #all the quants have been processed (because they leave no choice on their related move and need to be processed with higher priority)
still_to_do += [(ops, ops.product_id.id, qty_to_assign)]
need_rereserve = True
#2) then, process the remaining part
all_op_processed = True
for ops, product_id, remaining_qty in still_to_do:
all_op_processed = _create_link_for_product(ops.id, product_id, remaining_qty) and all_op_processed
return (need_rereserve, all_op_processed)
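    # Worked example of the two-pass linking above (hypothetical figures): for a move of 10
    # units with a single reserved quant of 6, the first pass creates a 6-unit link via
    # _create_link_for_quant(); the missing 4 units are appended to still_to_do and the second
    # pass tries _create_link_for_product() against the move's remaining quantity. If not
    # everything can be linked, all_op_processed is False and do_transfer() creates extra moves.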
def picking_recompute_remaining_quantities(self, cr, uid, picking, context=None):
need_rereserve = False
all_op_processed = True
if picking.pack_operation_ids:
need_rereserve, all_op_processed = self.recompute_remaining_qty(cr, uid, picking, context=context)
return need_rereserve, all_op_processed
@api.cr_uid_ids_context
def do_recompute_remaining_quantities(self, cr, uid, picking_ids, context=None):
for picking in self.browse(cr, uid, picking_ids, context=context):
if picking.pack_operation_ids:
self.recompute_remaining_qty(cr, uid, picking, context=context)
def _prepare_values_extra_move(self, cr, uid, op, product, remaining_qty, context=None):
"""
Creates an extra move when there is no corresponding original move to be copied
"""
uom_obj = self.pool.get("product.uom")
uom_id = product.uom_id.id
qty = remaining_qty
if op.product_id and op.product_uom_id and op.product_uom_id.id != product.uom_id.id:
            if op.product_uom_id.factor > product.uom_id.factor: #If the pack operation's UoM is a smaller unit
uom_id = op.product_uom_id.id
                #HALF-UP rounding, as the only rounding errors come from the propagation of the default UoM's rounding
qty = uom_obj._compute_qty_obj(cr, uid, product.uom_id, remaining_qty, op.product_uom_id, rounding_method='HALF-UP')
picking = op.picking_id
ref = product.default_code
name = '[' + ref + ']' + ' ' + product.name if ref else product.name
res = {
'picking_id': picking.id,
'location_id': picking.location_id.id,
'location_dest_id': picking.location_dest_id.id,
'product_id': product.id,
'product_uom': uom_id,
'product_uom_qty': qty,
'name': _('Extra Move: ') + name,
'state': 'draft',
'restrict_partner_id': op.owner_id,
}
return res
def _create_extra_moves(self, cr, uid, picking, context=None):
'''This function creates move lines on a picking, at the time of do_transfer, based on
unexpected product transfers (or exceeding quantities) found in the pack operations.
'''
move_obj = self.pool.get('stock.move')
operation_obj = self.pool.get('stock.pack.operation')
moves = []
for op in picking.pack_operation_ids:
for product_id, remaining_qty in operation_obj._get_remaining_prod_quantities(cr, uid, op, context=context).items():
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
if float_compare(remaining_qty, 0, precision_rounding=product.uom_id.rounding) > 0:
vals = self._prepare_values_extra_move(cr, uid, op, product, remaining_qty, context=context)
moves.append(move_obj.create(cr, uid, vals, context=context))
if moves:
move_obj.action_confirm(cr, uid, moves, context=context)
return moves
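    # Sketch of when an extra move appears (hypothetical scenario): a pack operation records
    # 5 units of a product that no move of the picking covers; its remaining quantity is
    # therefore 5 > 0, so a draft stock.move of 5 units is created from
    # _prepare_values_extra_move() and confirmed right away.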
def rereserve_pick(self, cr, uid, ids, context=None):
"""
This can be used to provide a button that rereserves taking into account the existing pack operations
"""
for pick in self.browse(cr, uid, ids, context=context):
self.rereserve_quants(cr, uid, pick, move_ids = [x.id for x in pick.move_lines], context=context)
def rereserve_quants(self, cr, uid, picking, move_ids=[], context=None):
""" Unreserve quants then try to reassign quants."""
stock_move_obj = self.pool.get('stock.move')
if not move_ids:
self.do_unreserve(cr, uid, [picking.id], context=context)
self.action_assign(cr, uid, [picking.id], context=context)
else:
stock_move_obj.do_unreserve(cr, uid, move_ids, context=context)
stock_move_obj.action_assign(cr, uid, move_ids, context=context)
@api.cr_uid_ids_context
def do_enter_transfer_details(self, cr, uid, picking, context=None):
if not context:
context = {}
context.update({
'active_model': self._name,
'active_ids': picking,
'active_id': len(picking) and picking[0] or False
})
created_id = self.pool['stock.transfer_details'].create(cr, uid, {'picking_id': len(picking) and picking[0] or False}, context)
return self.pool['stock.transfer_details'].wizard_view(cr, uid, created_id, context)
@api.cr_uid_ids_context
def do_transfer(self, cr, uid, picking_ids, context=None):
"""
        If there is no pack operation, we simply call action_done on the picking.
        Otherwise, process the pack operations.
"""
if not context:
context = {}
stock_move_obj = self.pool.get('stock.move')
for picking in self.browse(cr, uid, picking_ids, context=context):
if not picking.pack_operation_ids:
self.action_done(cr, uid, [picking.id], context=context)
continue
else:
need_rereserve, all_op_processed = self.picking_recompute_remaining_quantities(cr, uid, picking, context=context)
#create extra moves in the picking (unexpected product moves coming from pack operations)
todo_move_ids = []
if not all_op_processed:
todo_move_ids += self._create_extra_moves(cr, uid, picking, context=context)
#split move lines if needed
toassign_move_ids = []
for move in picking.move_lines:
remaining_qty = move.remaining_qty
if move.state in ('done', 'cancel'):
#ignore stock moves cancelled or already done
continue
elif move.state == 'draft':
toassign_move_ids.append(move.id)
if float_compare(remaining_qty, 0, precision_rounding = move.product_id.uom_id.rounding) == 0:
if move.state in ('draft', 'assigned', 'confirmed'):
todo_move_ids.append(move.id)
elif float_compare(remaining_qty,0, precision_rounding = move.product_id.uom_id.rounding) > 0 and \
float_compare(remaining_qty, move.product_qty, precision_rounding = move.product_id.uom_id.rounding) < 0:
new_move = stock_move_obj.split(cr, uid, move, remaining_qty, context=context)
todo_move_ids.append(move.id)
#Assign move as it was assigned before
toassign_move_ids.append(new_move)
if need_rereserve or not all_op_processed:
if not picking.location_id.usage in ("supplier", "production", "inventory"):
self.rereserve_quants(cr, uid, picking, move_ids=todo_move_ids, context=context)
self.do_recompute_remaining_quantities(cr, uid, [picking.id], context=context)
if todo_move_ids and not context.get('do_only_split'):
self.pool.get('stock.move').action_done(cr, uid, todo_move_ids, context=context)
elif context.get('do_only_split'):
context = dict(context, split=todo_move_ids)
self._create_backorder(cr, uid, picking, context=context)
if toassign_move_ids:
stock_move_obj.action_assign(cr, uid, toassign_move_ids, context=context)
return True
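    # Rough outline of do_transfer() above: recompute the operation/move links, create extra
    # moves for unmatched operation quantities, split moves that are only partially covered,
    # re-reserve quants when needed, then either mark the todo moves as done or, when
    # context['do_only_split'] is set, only create a backorder via _create_backorder().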
@api.cr_uid_ids_context
def do_split(self, cr, uid, picking_ids, context=None):
""" just split the picking (create a backorder) without making it 'done' """
if context is None:
context = {}
ctx = context.copy()
ctx['do_only_split'] = True
return self.do_transfer(cr, uid, picking_ids, context=ctx)
def get_next_picking_for_ui(self, cr, uid, context=None):
""" returns the next pickings to process. Used in the barcode scanner UI"""
if context is None:
context = {}
domain = [('state', 'in', ('assigned', 'partially_available'))]
if context.get('default_picking_type_id'):
domain.append(('picking_type_id', '=', context['default_picking_type_id']))
return self.search(cr, uid, domain, context=context)
def action_done_from_ui(self, cr, uid, picking_id, context=None):
""" called when button 'done' is pushed in the barcode scanner UI """
        #write qty_done into the product_qty field of every pack operation before doing the transfer
pack_op_obj = self.pool.get('stock.pack.operation')
for operation in self.browse(cr, uid, picking_id, context=context).pack_operation_ids:
pack_op_obj.write(cr, uid, operation.id, {'product_qty': operation.qty_done}, context=context)
self.do_transfer(cr, uid, [picking_id], context=context)
#return id of next picking to work on
return self.get_next_picking_for_ui(cr, uid, context=context)
@api.cr_uid_ids_context
def action_pack(self, cr, uid, picking_ids, operation_filter_ids=None, context=None):
""" Create a package with the current pack_operation_ids of the picking that aren't yet in a pack.
Used in the barcode scanner UI and the normal interface as well.
operation_filter_ids is used by barcode scanner interface to specify a subset of operation to pack"""
        if operation_filter_ids is None:
operation_filter_ids = []
stock_operation_obj = self.pool.get('stock.pack.operation')
package_obj = self.pool.get('stock.quant.package')
stock_move_obj = self.pool.get('stock.move')
package_id = False
for picking_id in picking_ids:
operation_search_domain = [('picking_id', '=', picking_id), ('result_package_id', '=', False)]
if operation_filter_ids != []:
operation_search_domain.append(('id', 'in', operation_filter_ids))
operation_ids = stock_operation_obj.search(cr, uid, operation_search_domain, context=context)
pack_operation_ids = []
if operation_ids:
for operation in stock_operation_obj.browse(cr, uid, operation_ids, context=context):
                    #If we haven't done the full qty of the operation, we have to split it into 2 operations
op = operation
if (operation.qty_done < operation.product_qty):
new_operation = stock_operation_obj.copy(cr, uid, operation.id, {'product_qty': operation.qty_done,'qty_done': operation.qty_done}, context=context)
stock_operation_obj.write(cr, uid, operation.id, {'product_qty': operation.product_qty - operation.qty_done,'qty_done': 0, 'lot_id': False}, context=context)
op = stock_operation_obj.browse(cr, uid, new_operation, context=context)
pack_operation_ids.append(op.id)
if op.product_id and op.location_id and op.location_dest_id:
stock_move_obj.check_tracking_product(cr, uid, op.product_id, op.lot_id.id, op.location_id, op.location_dest_id, context=context)
package_id = package_obj.create(cr, uid, {}, context=context)
stock_operation_obj.write(cr, uid, pack_operation_ids, {'result_package_id': package_id}, context=context)
return package_id
def process_product_id_from_ui(self, cr, uid, picking_id, product_id, op_id, increment=True, context=None):
return self.pool.get('stock.pack.operation')._search_and_increment(cr, uid, picking_id, [('product_id', '=', product_id),('id', '=', op_id)], increment=increment, context=context)
def process_barcode_from_ui(self, cr, uid, picking_id, barcode_str, visible_op_ids, context=None):
        '''This function is called each time the barcode scanner reads an input'''
lot_obj = self.pool.get('stock.production.lot')
package_obj = self.pool.get('stock.quant.package')
product_obj = self.pool.get('product.product')
stock_operation_obj = self.pool.get('stock.pack.operation')
stock_location_obj = self.pool.get('stock.location')
answer = {'filter_loc': False, 'operation_id': False}
        #check if the barcode corresponds to a location
matching_location_ids = stock_location_obj.search(cr, uid, [('loc_barcode', '=', barcode_str)], context=context)
if matching_location_ids:
            #if we have a location, return immediately with the location name
location = stock_location_obj.browse(cr, uid, matching_location_ids[0], context=None)
answer['filter_loc'] = stock_location_obj._name_get(cr, uid, location, context=None)
answer['filter_loc_id'] = matching_location_ids[0]
return answer
        #check if the barcode corresponds to a product
matching_product_ids = product_obj.search(cr, uid, ['|', ('ean13', '=', barcode_str), ('default_code', '=', barcode_str)], context=context)
if matching_product_ids:
op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('product_id', '=', matching_product_ids[0])], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context)
answer['operation_id'] = op_id
return answer
        #check if the barcode corresponds to a lot
matching_lot_ids = lot_obj.search(cr, uid, [('name', '=', barcode_str)], context=context)
if matching_lot_ids:
lot = lot_obj.browse(cr, uid, matching_lot_ids[0], context=context)
op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('product_id', '=', lot.product_id.id), ('lot_id', '=', lot.id)], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context)
answer['operation_id'] = op_id
return answer
        #check if the barcode corresponds to a package
matching_package_ids = package_obj.search(cr, uid, [('name', '=', barcode_str)], context=context)
if matching_package_ids:
op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('package_id', '=', matching_package_ids[0])], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context)
answer['operation_id'] = op_id
return answer
return answer
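# Barcode resolution order used by process_barcode_from_ui() (sketch, any scanned value is
# hypothetical): the string is matched first against stock.location.loc_barcode, then against
# product ean13/default_code, then against lot names, and finally against package names; the
# first match wins and either filters the view by location or increments the matching pack
# operation through _search_and_increment().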
class stock_production_lot(osv.osv):
_name = 'stock.production.lot'
_inherit = ['mail.thread']
_description = 'Lot/Serial'
_columns = {
'name': fields.char('Serial Number', required=True, help="Unique Serial Number"),
'ref': fields.char('Internal Reference', help="Internal reference number in case it differs from the manufacturer's serial number"),
'product_id': fields.many2one('product.product', 'Product', required=True, domain=[('type', '<>', 'service')]),
'quant_ids': fields.one2many('stock.quant', 'lot_id', 'Quants', readonly=True),
'create_date': fields.datetime('Creation Date'),
}
_defaults = {
'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'stock.lot.serial'),
'product_id': lambda x, y, z, c: c.get('product_id', False),
}
_sql_constraints = [
('name_ref_uniq', 'unique (name, ref, product_id)', 'The combination of serial number, internal reference and product must be unique !'),
]
def action_traceability(self, cr, uid, ids, context=None):
""" It traces the information of lots
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return: A dictionary of values
"""
quant_obj = self.pool.get("stock.quant")
quants = quant_obj.search(cr, uid, [('lot_id', 'in', ids)], context=context)
moves = set()
for quant in quant_obj.browse(cr, uid, quants, context=context):
moves |= {move.id for move in quant.history_ids}
if moves:
return {
'domain': "[('id','in',[" + ','.join(map(str, list(moves))) + "])]",
'name': _('Traceability'),
'view_mode': 'tree,form',
'view_type': 'form',
'context': {'tree_view_ref': 'stock.view_move_tree'},
'res_model': 'stock.move',
'type': 'ir.actions.act_window',
}
return False
# ----------------------------------------------------
# Move
# ----------------------------------------------------
class stock_move(osv.osv):
_name = "stock.move"
_description = "Stock Move"
_order = 'date_expected desc, id'
_log_create = False
def get_price_unit(self, cr, uid, move, context=None):
""" Returns the unit price to store on the quant """
return move.price_unit or move.product_id.standard_price
def name_get(self, cr, uid, ids, context=None):
res = []
for line in self.browse(cr, uid, ids, context=context):
name = line.location_id.name + ' > ' + line.location_dest_id.name
if line.product_id.code:
name = line.product_id.code + ': ' + name
if line.picking_id.origin:
name = line.picking_id.origin + '/ ' + name
res.append((line.id, name))
return res
def _quantity_normalize(self, cr, uid, ids, name, args, context=None):
uom_obj = self.pool.get('product.uom')
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = uom_obj._compute_qty_obj(cr, uid, m.product_uom, m.product_uom_qty, m.product_id.uom_id, context=context)
return res
def _get_remaining_qty(self, cr, uid, ids, field_name, args, context=None):
uom_obj = self.pool.get('product.uom')
res = {}
for move in self.browse(cr, uid, ids, context=context):
qty = move.product_qty
for record in move.linked_move_operation_ids:
qty -= record.qty
# Keeping in product default UoM
res[move.id] = float_round(qty, precision_rounding=move.product_id.uom_id.rounding)
return res
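    # Example (hypothetical numbers): a move of 10.0 in the product's default UoM with two
    # operation links of 4.0 and 3.0 gives remaining_qty = 10.0 - 4.0 - 3.0 = 3.0, rounded
    # with the product UoM's rounding.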
def _get_lot_ids(self, cr, uid, ids, field_name, args, context=None):
res = dict.fromkeys(ids, False)
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
res[move.id] = [q.lot_id.id for q in move.quant_ids if q.lot_id]
else:
res[move.id] = [q.lot_id.id for q in move.reserved_quant_ids if q.lot_id]
return res
def _get_product_availability(self, cr, uid, ids, field_name, args, context=None):
quant_obj = self.pool.get('stock.quant')
res = dict.fromkeys(ids, False)
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
res[move.id] = move.product_qty
else:
sublocation_ids = self.pool.get('stock.location').search(cr, uid, [('id', 'child_of', [move.location_id.id])], context=context)
quant_ids = quant_obj.search(cr, uid, [('location_id', 'in', sublocation_ids), ('product_id', '=', move.product_id.id), ('reservation_id', '=', False)], context=context)
availability = 0
for quant in quant_obj.browse(cr, uid, quant_ids, context=context):
availability += quant.qty
res[move.id] = min(move.product_qty, availability)
return res
def _get_string_qty_information(self, cr, uid, ids, field_name, args, context=None):
settings_obj = self.pool.get('stock.config.settings')
uom_obj = self.pool.get('product.uom')
res = dict.fromkeys(ids, '')
for move in self.browse(cr, uid, ids, context=context):
if move.state in ('draft', 'done', 'cancel') or move.location_id.usage != 'internal':
res[move.id] = '' # 'not applicable' or 'n/a' could work too
continue
total_available = min(move.product_qty, move.reserved_availability + move.availability)
total_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, total_available, move.product_uom, context=context)
info = str(total_available)
            #check in the settings whether we need to display the UoM name or not
config_ids = settings_obj.search(cr, uid, [], limit=1, order='id DESC', context=context)
if config_ids:
stock_settings = settings_obj.browse(cr, uid, config_ids[0], context=context)
if stock_settings.group_uom:
info += ' ' + move.product_uom.name
if move.reserved_availability:
if move.reserved_availability != total_available:
#some of the available quantity is assigned and some are available but not reserved
reserved_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, move.reserved_availability, move.product_uom, context=context)
info += _(' (%s reserved)') % str(reserved_available)
else:
#all available quantity is assigned
info += _(' (reserved)')
res[move.id] = info
return res
def _get_reserved_availability(self, cr, uid, ids, field_name, args, context=None):
res = dict.fromkeys(ids, 0)
for move in self.browse(cr, uid, ids, context=context):
res[move.id] = sum([quant.qty for quant in move.reserved_quant_ids])
return res
def _get_move(self, cr, uid, ids, context=None):
res = set()
for quant in self.browse(cr, uid, ids, context=context):
if quant.reservation_id:
res.add(quant.reservation_id.id)
return list(res)
def _get_move_ids(self, cr, uid, ids, context=None):
res = []
for picking in self.browse(cr, uid, ids, context=context):
res += [x.id for x in picking.move_lines]
return res
def _get_moves_from_prod(self, cr, uid, ids, context=None):
if ids:
return self.pool.get('stock.move').search(cr, uid, [('product_id', 'in', ids)], context=context)
return []
def _set_product_qty(self, cr, uid, id, field, value, arg, context=None):
""" The meaning of product_qty field changed lately and is now a functional field computing the quantity
in the default product UoM. This code has been added to raise an error if a write is made given a value
for `product_qty`, where the same write should set the `product_uom_qty` field instead, in order to
detect errors.
"""
raise osv.except_osv(_('Programming Error!'), _('The requested operation cannot be processed because of a programming error setting the `product_qty` field instead of the `product_uom_qty`.'))
_columns = {
'name': fields.char('Description', required=True, select=True),
'priority': fields.selection(procurement.PROCUREMENT_PRIORITIES, 'Priority'),
'create_date': fields.datetime('Creation Date', readonly=True, select=True),
'date': fields.datetime('Date', required=True, select=True, help="Move date: scheduled date until move is done, then date of actual move processing", states={'done': [('readonly', True)]}),
'date_expected': fields.datetime('Expected Date', states={'done': [('readonly', True)]}, required=True, select=True, help="Scheduled date for the processing of this move"),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True, domain=[('type', '<>', 'service')], states={'done': [('readonly', True)]}),
'product_qty': fields.function(_quantity_normalize, fnct_inv=_set_product_qty, type='float', digits=0, store={
_name: (lambda self, cr, uid, ids, c={}: ids, ['product_id', 'product_uom', 'product_uom_qty'], 10),
}, string='Quantity',
help='Quantity in the default UoM of the product'),
'product_uom_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
required=True, states={'done': [('readonly', True)]},
help="This is the quantity of products from an inventory "
"point of view. For moves in the state 'done', this is the "
"quantity of products that were actually moved. For other "
"moves, this is the quantity of product that is planned to "
"be moved. Lowering this quantity does not generate a "
"backorder. Changing this quantity on assigned moves affects "
"the product reservation, and should be done with care."
),
'product_uom': fields.many2one('product.uom', 'Unit of Measure', required=True, states={'done': [('readonly', True)]}),
'product_uos_qty': fields.float('Quantity (UOS)', digits_compute=dp.get_precision('Product Unit of Measure'), states={'done': [('readonly', True)]}),
'product_uos': fields.many2one('product.uom', 'Product UOS', states={'done': [('readonly', True)]}),
'product_tmpl_id': fields.related('product_id', 'product_tmpl_id', type='many2one', relation='product.template', string='Product Template'),
        'product_packaging': fields.many2one('product.packaging', 'Preferred Packaging', help="It specifies attributes of packaging like type, quantity of packaging, etc."),
'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True, auto_join=True,
states={'done': [('readonly', True)]}, help="Sets a location if you produce at a fixed location. This can be a partner location if you subcontract the manufacturing operations."),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True, states={'done': [('readonly', True)]}, select=True,
auto_join=True, help="Location where the system will stock the finished products."),
'partner_id': fields.many2one('res.partner', 'Destination Address ', states={'done': [('readonly', True)]}, help="Optional address where goods are to be delivered, specifically used for allotment"),
'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Optional: next stock move when chaining them", select=True, copy=False),
'move_orig_ids': fields.one2many('stock.move', 'move_dest_id', 'Original Move', help="Optional: previous stock move when chaining them", select=True),
'picking_id': fields.many2one('stock.picking', 'Reference', select=True, states={'done': [('readonly', True)]}),
'note': fields.text('Notes'),
'state': fields.selection([('draft', 'New'),
('cancel', 'Cancelled'),
('waiting', 'Waiting Another Move'),
('confirmed', 'Waiting Availability'),
('assigned', 'Available'),
('done', 'Done'),
], 'Status', readonly=True, select=True, copy=False,
help= "* New: When the stock move is created and not yet confirmed.\n"\
"* Waiting Another Move: This state can be seen when a move is waiting for another one, for example in a chained flow.\n"\
"* Waiting Availability: This state is reached when the procurement resolution is not straight forward. It may need the scheduler to run, a component to me manufactured...\n"\
"* Available: When products are reserved, it is set to \'Available\'.\n"\
"* Done: When the shipment is processed, the state is \'Done\'."),
'partially_available': fields.boolean('Partially Available', readonly=True, help="Checks if the move has some stock reserved", copy=False),
'price_unit': fields.float('Unit Price', help="Technical field used to record the product cost set by the user during a picking confirmation (when costing method used is 'average price' or 'real'). Value given in company currency and in product uom."), # as it's a technical field, we intentionally don't provide the digits attribute
'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
'split_from': fields.many2one('stock.move', string="Move Split From", help="Technical field used to track the origin of a split move, which can be useful in case of debug", copy=False),
'backorder_id': fields.related('picking_id', 'backorder_id', type='many2one', relation="stock.picking", string="Back Order of", select=True),
'origin': fields.char("Source"),
'procure_method': fields.selection([('make_to_stock', 'Default: Take From Stock'), ('make_to_order', 'Advanced: Apply Procurement Rules')], 'Supply Method', required=True,
help="""By default, the system will take from the stock in the source location and passively wait for availability. The other possibility allows you to directly create a procurement on the source location (and thus ignore its current stock) to gather products. If we want to chain moves and have this one to wait for the previous, this second option should be chosen."""),
# used for colors in tree views:
'scrapped': fields.related('location_dest_id', 'scrap_location', type='boolean', relation='stock.location', string='Scrapped', readonly=True),
'quant_ids': fields.many2many('stock.quant', 'stock_quant_move_rel', 'move_id', 'quant_id', 'Moved Quants', copy=False),
'reserved_quant_ids': fields.one2many('stock.quant', 'reservation_id', 'Reserved quants'),
'linked_move_operation_ids': fields.one2many('stock.move.operation.link', 'move_id', string='Linked Operations', readonly=True, help='Operations that impact this move for the computation of the remaining quantities'),
'remaining_qty': fields.function(_get_remaining_qty, type='float', string='Remaining Quantity', digits=0,
states={'done': [('readonly', True)]}, help="Remaining Quantity in default UoM according to operations matched with this move"),
'procurement_id': fields.many2one('procurement.order', 'Procurement'),
'group_id': fields.many2one('procurement.group', 'Procurement Group'),
'rule_id': fields.many2one('procurement.rule', 'Procurement Rule', help='The pull rule that created this stock move'),
'push_rule_id': fields.many2one('stock.location.path', 'Push Rule', help='The push rule that created this stock move'),
'propagate': fields.boolean('Propagate cancel and split', help='If checked, when this move is cancelled, cancel the linked move too'),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type'),
'inventory_id': fields.many2one('stock.inventory', 'Inventory'),
'lot_ids': fields.function(_get_lot_ids, type='many2many', relation='stock.production.lot', string='Lots'),
'origin_returned_move_id': fields.many2one('stock.move', 'Origin return move', help='move that created the return move', copy=False),
'returned_move_ids': fields.one2many('stock.move', 'origin_returned_move_id', 'All returned moves', help='Optional: all returned moves created from this move'),
'reserved_availability': fields.function(_get_reserved_availability, type='float', string='Quantity Reserved', readonly=True, help='Quantity that has already been reserved for this move'),
'availability': fields.function(_get_product_availability, type='float', string='Quantity Available', readonly=True, help='Quantity in stock that can still be reserved for this move'),
'string_availability_info': fields.function(_get_string_qty_information, type='text', string='Availability', readonly=True, help='Show various information on stock availability for this move'),
'restrict_lot_id': fields.many2one('stock.production.lot', 'Lot', help="Technical field used to depict a restriction on the lot of quants to consider when marking this move as 'done'"),
'restrict_partner_id': fields.many2one('res.partner', 'Owner ', help="Technical field used to depict a restriction on the ownership of quants to consider when marking this move as 'done'"),
'route_ids': fields.many2many('stock.location.route', 'stock_location_route_move', 'move_id', 'route_id', 'Destination route', help="Preferred route to be followed by the procurement order"),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', help="Technical field depicting the warehouse to consider for the route selection on the next procurement (if any)."),
}
def _default_location_destination(self, cr, uid, context=None):
context = context or {}
if context.get('default_picking_type_id', False):
pick_type = self.pool.get('stock.picking.type').browse(cr, uid, context['default_picking_type_id'], context=context)
return pick_type.default_location_dest_id and pick_type.default_location_dest_id.id or False
return False
def _default_location_source(self, cr, uid, context=None):
context = context or {}
if context.get('default_picking_type_id', False):
pick_type = self.pool.get('stock.picking.type').browse(cr, uid, context['default_picking_type_id'], context=context)
return pick_type.default_location_src_id and pick_type.default_location_src_id.id or False
return False
def _default_destination_address(self, cr, uid, context=None):
return False
def _default_group_id(self, cr, uid, context=None):
context = context or {}
if context.get('default_picking_id', False):
picking = self.pool.get('stock.picking').browse(cr, uid, context['default_picking_id'], context=context)
return picking.group_id.id
return False
_defaults = {
'location_id': _default_location_source,
'location_dest_id': _default_location_destination,
'partner_id': _default_destination_address,
'state': 'draft',
'priority': '1',
'product_uom_qty': 1.0,
'scrapped': False,
'date': fields.datetime.now,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.move', context=c),
'date_expected': fields.datetime.now,
'procure_method': 'make_to_stock',
'propagate': True,
'partially_available': False,
'group_id': _default_group_id,
}
def _check_uom(self, cr, uid, ids, context=None):
for move in self.browse(cr, uid, ids, context=context):
if move.product_id.uom_id.category_id.id != move.product_uom.category_id.id:
return False
return True
_constraints = [
(_check_uom,
'You try to move a product using a UoM that is not compatible with the UoM of the product moved. Please use an UoM in the same UoM category.',
['product_uom']),
]
def init(self, cr):
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('stock_move_product_location_index',))
if not cr.fetchone():
cr.execute('CREATE INDEX stock_move_product_location_index ON stock_move (product_id, location_id, location_dest_id, company_id, state)')
@api.cr_uid_ids_context
def do_unreserve(self, cr, uid, move_ids, context=None):
quant_obj = self.pool.get("stock.quant")
for move in self.browse(cr, uid, move_ids, context=context):
if move.state in ('done', 'cancel'):
raise osv.except_osv(_('Operation Forbidden!'), _('Cannot unreserve a done move'))
quant_obj.quants_unreserve(cr, uid, move, context=context)
if self.find_move_ancestors(cr, uid, move, context=context):
self.write(cr, uid, [move.id], {'state': 'waiting'}, context=context)
else:
self.write(cr, uid, [move.id], {'state': 'confirmed'}, context=context)
def _prepare_procurement_from_move(self, cr, uid, move, context=None):
origin = (move.group_id and (move.group_id.name + ":") or "") + (move.rule_id and move.rule_id.name or move.origin or move.picking_id.name or "/")
group_id = move.group_id and move.group_id.id or False
if move.rule_id:
if move.rule_id.group_propagation_option == 'fixed' and move.rule_id.group_id:
group_id = move.rule_id.group_id.id
elif move.rule_id.group_propagation_option == 'none':
group_id = False
return {
'name': move.rule_id and move.rule_id.name or "/",
'origin': origin,
'company_id': move.company_id and move.company_id.id or False,
'date_planned': move.date,
'product_id': move.product_id.id,
'product_qty': move.product_uom_qty,
'product_uom': move.product_uom.id,
'product_uos_qty': (move.product_uos and move.product_uos_qty) or move.product_uom_qty,
'product_uos': (move.product_uos and move.product_uos.id) or move.product_uom.id,
'location_id': move.location_id.id,
'move_dest_id': move.id,
'group_id': group_id,
'route_ids': [(4, x.id) for x in move.route_ids],
'warehouse_id': move.warehouse_id.id or (move.picking_type_id and move.picking_type_id.warehouse_id.id or False),
'priority': move.priority,
}
def _push_apply(self, cr, uid, moves, context=None):
push_obj = self.pool.get("stock.location.path")
for move in moves:
#1) if the move is already chained, there is no need to check push rules
#2) if the move is a returned move, we don't want to check push rules, as returning a returned move is the only decent way
# to receive goods without triggering the push rules again (which would duplicate chained operations)
if not move.move_dest_id and not move.origin_returned_move_id:
domain = [('location_from_id', '=', move.location_dest_id.id)]
#priority goes to the route defined on the product and product category
route_ids = [x.id for x in move.product_id.route_ids + move.product_id.categ_id.total_route_ids]
rules = push_obj.search(cr, uid, domain + [('route_id', 'in', route_ids)], order='route_sequence, sequence', context=context)
if not rules:
#then we search on the warehouse if a rule can apply
wh_route_ids = []
if move.warehouse_id:
wh_route_ids = [x.id for x in move.warehouse_id.route_ids]
elif move.picking_type_id and move.picking_type_id.warehouse_id:
wh_route_ids = [x.id for x in move.picking_type_id.warehouse_id.route_ids]
if wh_route_ids:
rules = push_obj.search(cr, uid, domain + [('route_id', 'in', wh_route_ids)], order='route_sequence, sequence', context=context)
if not rules:
#if no specialized push rule has been found yet, we try to find a general one (without route)
rules = push_obj.search(cr, uid, domain + [('route_id', '=', False)], order='sequence', context=context)
if rules:
rule = push_obj.browse(cr, uid, rules[0], context=context)
push_obj._apply(cr, uid, rule, move, context=context)
return True
def _create_procurement(self, cr, uid, move, context=None):
""" This will create a procurement order """
return self.pool.get("procurement.order").create(cr, uid, self._prepare_procurement_from_move(cr, uid, move, context=context), context=context)
def _create_procurements(self, cr, uid, moves, context=None):
res = []
for move in moves:
res.append(self._create_procurement(cr, uid, move, context=context))
return res
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
# Check that we do not modify a stock.move which is done
frozen_fields = set(['product_qty', 'product_uom', 'product_uos_qty', 'product_uos', 'location_id', 'location_dest_id', 'product_id'])
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
if frozen_fields.intersection(vals):
raise osv.except_osv(_('Operation Forbidden!'),
_('Quantities, Units of Measure, Products and Locations cannot be modified on stock moves that have already been processed (except by the Administrator).'))
propagated_changes_dict = {}
#propagation of quantity change
if vals.get('product_uom_qty'):
propagated_changes_dict['product_uom_qty'] = vals['product_uom_qty']
if vals.get('product_uom_id'):
propagated_changes_dict['product_uom_id'] = vals['product_uom_id']
#propagation of expected date:
propagated_date_field = False
if vals.get('date_expected'):
#propagate any manual change of the expected date
propagated_date_field = 'date_expected'
elif (vals.get('state', '') == 'done' and vals.get('date')):
#propagate also any delta observed when setting the move as done
propagated_date_field = 'date'
if not context.get('do_not_propagate', False) and (propagated_date_field or propagated_changes_dict):
#any propagation is (maybe) needed
for move in self.browse(cr, uid, ids, context=context):
if move.move_dest_id and move.propagate:
if 'date_expected' in propagated_changes_dict:
propagated_changes_dict.pop('date_expected')
if propagated_date_field:
current_date = datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT)
new_date = datetime.strptime(vals.get(propagated_date_field), DEFAULT_SERVER_DATETIME_FORMAT)
delta = new_date - current_date
if abs(delta.days) >= move.company_id.propagation_minimum_delta:
old_move_date = datetime.strptime(move.move_dest_id.date_expected, DEFAULT_SERVER_DATETIME_FORMAT)
new_move_date = (old_move_date + relativedelta.relativedelta(days=delta.days or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
propagated_changes_dict['date_expected'] = new_move_date
#For pushed moves as well as for pulled moves, propagate by recursive call of write().
#Note that, for pulled moves we intentionally don't propagate on the procurement.
if propagated_changes_dict:
self.write(cr, uid, [move.move_dest_id.id], propagated_changes_dict, context=context)
return super(stock_move, self).write(cr, uid, ids, vals, context=context)
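    # Sketch of the date propagation handled in write() above (hypothetical dates): moving
    # date_expected from 2015-01-10 to 2015-01-15 gives a delta of 5 days; if the company's
    # propagation_minimum_delta is <= 5 and the move propagates to a destination move, that
    # destination move's date_expected is shifted by the same 5 days.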
def onchange_quantity(self, cr, uid, ids, product_id, product_qty, product_uom, product_uos):
""" On change of product quantity finds UoM and UoS quantities
@param product_id: Product id
@param product_qty: Changed Quantity of product
@param product_uom: Unit of measure of product
@param product_uos: Unit of sale of product
@return: Dictionary of values
"""
result = {
'product_uos_qty': 0.00
}
warning = {}
if (not product_id) or (product_qty <= 0.0):
result['product_qty'] = 0.0
return {'value': result}
product_obj = self.pool.get('product.product')
uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff'])
# Warn if the quantity was decreased
if ids:
for move in self.read(cr, uid, ids, ['product_qty']):
if product_qty < move['product_qty']:
warning.update({
'title': _('Information'),
'message': _("By changing this quantity here, you accept the "
"new quantity as complete: Odoo will not "
"automatically generate a back order.")})
break
if product_uos and product_uom and (product_uom != product_uos):
result['product_uos_qty'] = product_qty * uos_coeff['uos_coeff']
else:
result['product_uos_qty'] = product_qty
return {'value': result, 'warning': warning}
def onchange_uos_quantity(self, cr, uid, ids, product_id, product_uos_qty,
product_uos, product_uom):
""" On change of product quantity finds UoM and UoS quantities
@param product_id: Product id
@param product_uos_qty: Changed UoS Quantity of product
@param product_uom: Unit of measure of product
@param product_uos: Unit of sale of product
@return: Dictionary of values
"""
result = {
'product_uom_qty': 0.00
}
if (not product_id) or (product_uos_qty <= 0.0):
result['product_uos_qty'] = 0.0
return {'value': result}
product_obj = self.pool.get('product.product')
uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff'])
# No warning if the quantity was decreased to avoid double warnings:
# The clients should call onchange_quantity too anyway
if product_uos and product_uom and (product_uom != product_uos):
result['product_uom_qty'] = product_uos_qty / uos_coeff['uos_coeff']
else:
result['product_uom_qty'] = product_uos_qty
return {'value': result}
def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False, loc_dest_id=False, partner_id=False):
""" On change of product id, if finds UoM, UoS, quantity and UoS quantity.
@param prod_id: Changed Product id
@param loc_id: Source location id
@param loc_dest_id: Destination location id
@param partner_id: Address id of partner
@return: Dictionary of values
"""
if not prod_id:
return {}
user = self.pool.get('res.users').browse(cr, uid, uid)
lang = user and user.lang or False
if partner_id:
addr_rec = self.pool.get('res.partner').browse(cr, uid, partner_id)
if addr_rec:
lang = addr_rec and addr_rec.lang or False
ctx = {'lang': lang}
product = self.pool.get('product.product').browse(cr, uid, [prod_id], context=ctx)[0]
uos_id = product.uos_id and product.uos_id.id or False
result = {
'name': product.partner_ref,
'product_uom': product.uom_id.id,
'product_uos': uos_id,
'product_uom_qty': 1.00,
'product_uos_qty': self.pool.get('stock.move').onchange_quantity(cr, uid, ids, prod_id, 1.00, product.uom_id.id, uos_id)['value']['product_uos_qty'],
}
if loc_id:
result['location_id'] = loc_id
if loc_dest_id:
result['location_dest_id'] = loc_dest_id
return {'value': result}
def _prepare_picking_assign(self, cr, uid, move, context=None):
""" Prepares a new picking for this move as it could not be assigned to
another picking. This method is designed to be inherited.
"""
values = {
'origin': move.origin,
'company_id': move.company_id and move.company_id.id or False,
'move_type': move.group_id and move.group_id.move_type or 'direct',
'partner_id': move.partner_id.id or False,
'picking_type_id': move.picking_type_id and move.picking_type_id.id or False,
}
return values
@api.cr_uid_ids_context
def _picking_assign(self, cr, uid, move_ids, procurement_group, location_from, location_to, context=None):
"""Assign a picking on the given move_ids, which is a list of move supposed to share the same procurement_group, location_from and location_to
(and company). Those attributes are also given as parameters.
"""
pick_obj = self.pool.get("stock.picking")
        # Use a SQL query, as doing this with the ORM would split it into several queries with id IN (...)
        # In the next version, the locations on the picking should be stored again.
query = """
SELECT stock_picking.id FROM stock_picking, stock_move
WHERE
stock_picking.state in ('draft', 'confirmed', 'waiting') AND
stock_move.picking_id = stock_picking.id AND
stock_move.location_id = %s AND
stock_move.location_dest_id = %s AND
"""
params = (location_from, location_to)
if not procurement_group:
query += "stock_picking.group_id IS NULL LIMIT 1"
else:
query += "stock_picking.group_id = %s LIMIT 1"
params += (procurement_group,)
cr.execute(query, params)
[pick] = cr.fetchone() or [None]
if not pick:
move = self.browse(cr, uid, move_ids, context=context)[0]
values = self._prepare_picking_assign(cr, uid, move, context=context)
pick = pick_obj.create(cr, uid, values, context=context)
return self.write(cr, uid, move_ids, {'picking_id': pick}, context=context)
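    # With a procurement group, the query built above ends up as (sketch):
    #   SELECT stock_picking.id FROM stock_picking, stock_move
    #   WHERE stock_picking.state in ('draft', 'confirmed', 'waiting')
    #     AND stock_move.picking_id = stock_picking.id
    #     AND stock_move.location_id = %s
    #     AND stock_move.location_dest_id = %s
    #     AND stock_picking.group_id = %s LIMIT 1
    # i.e. the first compatible not-yet-done picking is reused; otherwise a new one is created.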
def onchange_date(self, cr, uid, ids, date, date_expected, context=None):
""" On change of Scheduled Date gives a Move date.
@param date_expected: Scheduled Date
@param date: Move Date
@return: Move Date
"""
if not date_expected:
date_expected = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return {'value': {'date': date_expected}}
def attribute_price(self, cr, uid, move, context=None):
"""
        Attribute a price to the move; important for inter-company moves or receipts with only one partner
"""
if not move.price_unit:
price = move.product_id.standard_price
self.write(cr, uid, [move.id], {'price_unit': price})
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms stock move or put it in waiting if it's linked to another move.
@return: List of ids.
"""
if not context:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
states = {
'confirmed': [],
'waiting': []
}
to_assign = {}
for move in self.browse(cr, uid, ids, context=context):
self.attribute_price(cr, uid, move, context=context)
state = 'confirmed'
            #if the move is preceded, then it's waiting (if the preceding move is done, then action_assign has been called already and its state is already available)
if move.move_orig_ids:
state = 'waiting'
            #if the move is split and one of its ancestors was preceded, then it's waiting as well
elif move.split_from:
move2 = move.split_from
while move2 and state != 'waiting':
if move2.move_orig_ids:
state = 'waiting'
move2 = move2.split_from
states[state].append(move.id)
if not move.picking_id and move.picking_type_id:
key = (move.group_id.id, move.location_id.id, move.location_dest_id.id)
if key not in to_assign:
to_assign[key] = []
to_assign[key].append(move.id)
moves = [move for move in self.browse(cr, uid, states['confirmed'], context=context) if move.procure_method == 'make_to_order']
self._create_procurements(cr, uid, moves, context=context)
for move in moves:
states['waiting'].append(move.id)
states['confirmed'].remove(move.id)
for state, write_ids in states.items():
if len(write_ids):
self.write(cr, uid, write_ids, {'state': state})
#assign picking in batch for all confirmed move that share the same details
for key, move_ids in to_assign.items():
procurement_group, location_from, location_to = key
self._picking_assign(cr, uid, move_ids, procurement_group, location_from, location_to, context=context)
moves = self.browse(cr, uid, ids, context=context)
self._push_apply(cr, uid, moves, context=context)
return ids
def force_assign(self, cr, uid, ids, context=None):
""" Changes the state to assigned.
@return: True
"""
return self.write(cr, uid, ids, {'state': 'assigned'}, context=context)
def check_tracking_product(self, cr, uid, product, lot_id, location, location_dest, context=None):
check = False
if product.track_all and not location_dest.usage == 'inventory':
check = True
elif product.track_incoming and location.usage in ('supplier', 'transit', 'inventory') and location_dest.usage == 'internal':
check = True
elif product.track_outgoing and location_dest.usage in ('customer', 'transit') and location.usage == 'internal':
check = True
if check and not lot_id:
raise osv.except_osv(_('Warning!'), _('You must assign a serial number for the product %s') % (product.name))
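    # Example of the tracking rules above: a product with track_incoming=True moving from a
    # 'supplier' location to an 'internal' one must carry a lot/serial number, otherwise the
    # warning is raised; a purely internal transfer of that product is not checked (unless
    # track_all is set).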
def check_tracking(self, cr, uid, move, lot_id, context=None):
""" Checks if serial number is assigned to stock move or not and raise an error if it had to.
"""
self.check_tracking_product(cr, uid, move.product_id, lot_id, move.location_id, move.location_dest_id, context=context)
def action_assign(self, cr, uid, ids, context=None):
""" Checks the product type and accordingly writes the state.
"""
context = context or {}
quant_obj = self.pool.get("stock.quant")
to_assign_moves = []
main_domain = {}
todo_moves = []
operations = set()
for move in self.browse(cr, uid, ids, context=context):
if move.state not in ('confirmed', 'waiting', 'assigned'):
continue
if move.location_id.usage in ('supplier', 'inventory', 'production'):
to_assign_moves.append(move.id)
#in case the move is returned, we want to try to find quants before forcing the assignment
if not move.origin_returned_move_id:
continue
if move.product_id.type == 'consu':
to_assign_moves.append(move.id)
continue
else:
todo_moves.append(move)
#we always keep the quants already assigned and try to find the remaining quantity on quants not assigned only
main_domain[move.id] = [('reservation_id', '=', False), ('qty', '>', 0)]
            #if the move is preceded, restrict the choice of quants to the ones moved previously by the original move
ancestors = self.find_move_ancestors(cr, uid, move, context=context)
if move.state == 'waiting' and not ancestors:
                #if the waiting move doesn't have any ancestor yet (PO/MO not confirmed yet), don't find any quant available in stock
main_domain[move.id] += [('id', '=', False)]
elif ancestors:
main_domain[move.id] += [('history_ids', 'in', ancestors)]
#if the move is returned from another, restrict the choice of quants to the ones that follow the returned move
if move.origin_returned_move_id:
main_domain[move.id] += [('history_ids', 'in', move.origin_returned_move_id.id)]
for link in move.linked_move_operation_ids:
operations.add(link.operation_id)
        # Check all ops and sort them: we want to process the packages first, then the operations with a lot, then the rest
operations = list(operations)
operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0))
for ops in operations:
#first try to find quants based on specific domains given by linked operations
for record in ops.linked_move_operation_ids:
move = record.move_id
if move.id in main_domain:
domain = main_domain[move.id] + self.pool.get('stock.move.operation.link').get_specific_domain(cr, uid, record, context=context)
qty = record.qty
if qty:
quants = quant_obj.quants_get_prefered_domain(cr, uid, ops.location_id, move.product_id, qty, domain=domain, prefered_domain_list=[], restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context)
quant_obj.quants_reserve(cr, uid, quants, move, record, context=context)
for move in todo_moves:
if move.linked_move_operation_ids:
continue
#then if the move isn't totally assigned, try to find quants without any specific domain
if move.state != 'assigned':
qty_already_assigned = move.reserved_availability
qty = move.product_qty - qty_already_assigned
quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, qty, domain=main_domain[move.id], prefered_domain_list=[], restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context)
quant_obj.quants_reserve(cr, uid, quants, move, context=context)
        #force assignment of consumable products and of incoming moves from supplier/inventory/production
if to_assign_moves:
self.force_assign(cr, uid, to_assign_moves, context=context)
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels the moves and if all moves are cancelled it cancels the picking.
@return: True
"""
procurement_obj = self.pool.get('procurement.order')
context = context or {}
procs_to_check = []
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
raise osv.except_osv(_('Operation Forbidden!'),
_('You cannot cancel a stock move that has been set to \'Done\'.'))
if move.reserved_quant_ids:
self.pool.get("stock.quant").quants_unreserve(cr, uid, move, context=context)
if context.get('cancel_procurement'):
if move.propagate:
procurement_ids = procurement_obj.search(cr, uid, [('move_dest_id', '=', move.id)], context=context)
procurement_obj.cancel(cr, uid, procurement_ids, context=context)
else:
if move.move_dest_id:
if move.propagate:
self.action_cancel(cr, uid, [move.move_dest_id.id], context=context)
elif move.move_dest_id.state == 'waiting':
#If waiting, the chain will be broken and we are not sure if we can still wait for it (=> could take from stock instead)
self.write(cr, uid, [move.move_dest_id.id], {'state': 'confirmed'}, context=context)
if move.procurement_id:
# Does the same as procurement check, only eliminating a refresh
procs_to_check.append(move.procurement_id.id)
res = self.write(cr, uid, ids, {'state': 'cancel', 'move_dest_id': False}, context=context)
if procs_to_check:
procurement_obj.check(cr, uid, procs_to_check, context=context)
return res
def _check_package_from_moves(self, cr, uid, ids, context=None):
pack_obj = self.pool.get("stock.quant.package")
packs = set()
for move in self.browse(cr, uid, ids, context=context):
packs |= set([q.package_id for q in move.quant_ids if q.package_id and q.qty > 0])
return pack_obj._check_location_constraint(cr, uid, list(packs), context=context)
def find_move_ancestors(self, cr, uid, move, context=None):
'''Find the first-level ancestors of the given move'''
ancestors = []
move2 = move
while move2:
ancestors += [x.id for x in move2.move_orig_ids]
#loop on split_from to find the ancestors of split moves, only if the move has no direct ancestor (priority goes to them)
move2 = not move2.move_orig_ids and move2.split_from or False
return ancestors
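# Illustrative sketch (hypothetical moves, not part of the original code): if move C was split from
# move B (C.split_from == B, C.move_orig_ids empty) and B was created from move A (B.move_orig_ids == [A]),
# then find_move_ancestors(cr, uid, C) returns [A.id]: the loop follows split_from because C has no
# direct ancestor, then stops at B since B does have one.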
@api.cr_uid_ids_context
def recalculate_move_state(self, cr, uid, move_ids, context=None):
'''Recompute the state of moves given because their reserved quants were used to fulfill another operation'''
for move in self.browse(cr, uid, move_ids, context=context):
vals = {}
reserved_quant_ids = move.reserved_quant_ids
if len(reserved_quant_ids) > 0 and not move.partially_available:
vals['partially_available'] = True
if len(reserved_quant_ids) == 0 and move.partially_available:
vals['partially_available'] = False
if move.state == 'assigned':
if self.find_move_ancestors(cr, uid, move, context=context):
vals['state'] = 'waiting'
else:
vals['state'] = 'confirmed'
if vals:
self.write(cr, uid, [move.id], vals, context=context)
def action_done(self, cr, uid, ids, context=None):
""" Process completely the moves given as ids and if all moves are done, it will finish the picking.
"""
context = context or {}
picking_obj = self.pool.get("stock.picking")
quant_obj = self.pool.get("stock.quant")
todo = [move.id for move in self.browse(cr, uid, ids, context=context) if move.state == "draft"]
if todo:
ids = self.action_confirm(cr, uid, todo, context=context)
pickings = set()
procurement_ids = set()
#Search operations that are linked to the moves
operations = set()
move_qty = {}
for move in self.browse(cr, uid, ids, context=context):
move_qty[move.id] = move.product_qty
for link in move.linked_move_operation_ids:
operations.add(link.operation_id)
#Sort operations according to entire packages first, then package + lot, package only, lot only
operations = list(operations)
operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0))
for ops in operations:
if ops.picking_id:
pickings.add(ops.picking_id.id)
main_domain = [('qty', '>', 0)]
for record in ops.linked_move_operation_ids:
move = record.move_id
self.check_tracking(cr, uid, move, not ops.product_id and ops.package_id.id or ops.lot_id.id, context=context)
prefered_domain = [('reservation_id', '=', move.id)]
fallback_domain = [('reservation_id', '=', False)]
fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)]
prefered_domain_list = [prefered_domain] + [fallback_domain] + [fallback_domain2]
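# Note on the domains above: prefered_domain_list makes quants_get_prefered_domain try, in order,
# 1) quants already reserved for this move, 2) quants not reserved at all, and only then
# 3) quants reserved for other moves, so reservations of other moves are taken over last.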
dom = main_domain + self.pool.get('stock.move.operation.link').get_specific_domain(cr, uid, record, context=context)
quants = quant_obj.quants_get_prefered_domain(cr, uid, ops.location_id, move.product_id, record.qty, domain=dom, prefered_domain_list=prefered_domain_list,
restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context)
if ops.product_id:
#If a product is given, the result is always put immediately in the result package (if it is False, the quants are left without a package)
quant_dest_package_id = ops.result_package_id.id
ctx = context
else:
# When a pack is moved entirely, the quants should not get the destination package written on them
quant_dest_package_id = False
ctx = context.copy()
ctx['entire_pack'] = True
quant_obj.quants_move(cr, uid, quants, move, ops.location_dest_id, location_from=ops.location_id, lot_id=ops.lot_id.id, owner_id=ops.owner_id.id, src_package_id=ops.package_id.id, dest_package_id=quant_dest_package_id, context=ctx)
# Handle pack in pack
if not ops.product_id and ops.package_id and ops.result_package_id.id != ops.package_id.parent_id.id:
self.pool.get('stock.quant.package').write(cr, SUPERUSER_ID, [ops.package_id.id], {'parent_id': ops.result_package_id.id}, context=context)
if not move_qty.get(move.id):
raise osv.except_osv(_("Error"), _("The roundings of your Unit of Measures %s on the move vs. %s on the product don't allow to do these operations or you are not transferring the picking at once. ") % (move.product_uom.name, move.product_id.uom_id.name))
move_qty[move.id] -= record.qty
#Check for remaining quantities and unreserve / check move_dest_id at the end
move_dest_ids = set()
for move in self.browse(cr, uid, ids, context=context):
move_qty_cmp = float_compare(move_qty[move.id], 0, precision_rounding=move.product_id.uom_id.rounding)
if move_qty_cmp > 0: # (i.e. there were no pack operations in the picking for this move)
main_domain = [('qty', '>', 0)]
prefered_domain = [('reservation_id', '=', move.id)]
fallback_domain = [('reservation_id', '=', False)]
fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)]
prefered_domain_list = [prefered_domain] + [fallback_domain] + [fallback_domain2]
self.check_tracking(cr, uid, move, move.restrict_lot_id.id, context=context)
qty = move_qty[move.id]
quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, qty, domain=main_domain, prefered_domain_list=prefered_domain_list, restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context)
quant_obj.quants_move(cr, uid, quants, move, move.location_dest_id, lot_id=move.restrict_lot_id.id, owner_id=move.restrict_partner_id.id, context=context)
# If the move has a destination, add it to the list to reserve
if move.move_dest_id and move.move_dest_id.state in ('waiting', 'confirmed'):
move_dest_ids.add(move.move_dest_id.id)
if move.procurement_id:
procurement_ids.add(move.procurement_id.id)
#unreserve the quants and make them available for other operations/moves
quant_obj.quants_unreserve(cr, uid, move, context=context)
# Check the packages have been placed in the correct locations
self._check_package_from_moves(cr, uid, ids, context=context)
#set the move as done
self.write(cr, uid, ids, {'state': 'done', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
self.pool.get('procurement.order').check(cr, uid, list(procurement_ids), context=context)
#assign destination moves
if move_dest_ids:
self.action_assign(cr, uid, list(move_dest_ids), context=context)
#check picking state to set the date_done if needed
done_picking = []
for picking in picking_obj.browse(cr, uid, list(pickings), context=context):
if picking.state == 'done' and not picking.date_done:
done_picking.append(picking.id)
if done_picking:
picking_obj.write(cr, uid, done_picking, {'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
return True
def unlink(self, cr, uid, ids, context=None):
context = context or {}
for move in self.browse(cr, uid, ids, context=context):
if move.state not in ('draft', 'cancel'):
raise osv.except_osv(_('User Error!'), _('You can only delete draft moves.'))
return super(stock_move, self).unlink(cr, uid, ids, context=context)
def action_scrap(self, cr, uid, ids, quantity, location_id, restrict_lot_id=False, restrict_partner_id=False, context=None):
""" Move the scrap/damaged product into scrap location
@param cr: the database cursor
@param uid: the user id
@param ids: ids of stock move object to be scrapped
@param quantity : specify scrap qty
@param location_id : specify scrap location
@param context: context arguments
@return: List of ids of the scrap moves created
"""
quant_obj = self.pool.get("stock.quant")
#quantity should be given in MOVE UOM
if quantity <= 0:
raise osv.except_osv(_('Warning!'), _('Please provide a positive quantity to scrap.'))
res = []
for move in self.browse(cr, uid, ids, context=context):
source_location = move.location_id
if move.state == 'done':
source_location = move.location_dest_id
#Previously used to prevent scrapping from a virtual location but not necessary anymore
#if source_location.usage != 'internal':
#restrict to scrap from a virtual location because it's meaningless and it may introduce errors in stock ('creating' new products from nowhere)
#raise osv.except_osv(_('Error!'), _('Forbidden operation: it is not allowed to scrap products from a virtual location.'))
move_qty = move.product_qty
uos_qty = quantity / move_qty * move.product_uos_qty
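# Worked example (hypothetical figures): scrapping quantity = 2.0 from a move with product_qty = 10.0
# and product_uos_qty = 20.0 gives uos_qty = 2.0 / 10.0 * 20.0 = 4.0, i.e. the UoS quantity is
# scrapped proportionally to the UoM quantity.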
default_val = {
'location_id': source_location.id,
'product_uom_qty': quantity,
'product_uos_qty': uos_qty,
'state': move.state,
'scrapped': True,
'location_dest_id': location_id,
'restrict_lot_id': restrict_lot_id,
'restrict_partner_id': restrict_partner_id,
}
new_move = self.copy(cr, uid, move.id, default_val)
res += [new_move]
product_obj = self.pool.get('product.product')
for product in product_obj.browse(cr, uid, [move.product_id.id], context=context):
if move.picking_id:
uom = product.uom_id.name if product.uom_id else ''
message = _("%s %s %s has been <b>moved to</b> scrap.") % (quantity, uom, product.name)
move.picking_id.message_post(body=message)
# We "flag" the quant from which we want to scrap the products. To do so:
# - we select the quants related to the move we scrap from
# - we reserve the quants with the scrapped move
# See self.action_done, and particularly how the "prefered_domain" is defined, for clarification
scrap_move = self.browse(cr, uid, new_move, context=context)
if move.state == 'done' and scrap_move.location_id.usage not in ('supplier', 'inventory', 'production'):
domain = [('qty', '>', 0), ('history_ids', 'in', [move.id])]
# We use scrap_move data since a reservation makes sense for a move not already done
quants = quant_obj.quants_get_prefered_domain(cr, uid, scrap_move.location_id,
scrap_move.product_id, quantity, domain=domain, prefered_domain_list=[],
restrict_lot_id=scrap_move.restrict_lot_id.id, restrict_partner_id=scrap_move.restrict_partner_id.id, context=context)
quant_obj.quants_reserve(cr, uid, quants, scrap_move, context=context)
self.action_done(cr, uid, res, context=context)
return res
def split(self, cr, uid, move, qty, restrict_lot_id=False, restrict_partner_id=False, context=None):
""" Splits qty from move move into a new move
:param move: browse record
:param qty: float. quantity to split (given in product UoM)
:param restrict_lot_id: optional production lot that can be given in order to force the new move to restrict its choice of quants to this lot.
:param restrict_partner_id: optional partner that can be given in order to force the new move to restrict its choice of quants to the ones belonging to this partner.
:param context: dictionary; can contain the special key 'source_location_id' in order to force the source location when copying the move
returns the ID of the backorder move created
"""
if move.state in ('done', 'cancel'):
raise osv.except_osv(_('Error'), _('You cannot split a move that is already done.'))
if move.state == 'draft':
#we restrict the split of a draft move because, if it is not confirmed yet, it may be replaced by several other moves in
#case of a phantom BoM (with the mrp module), and we don't want to deal with this complexity by copying the product that will explode.
raise osv.except_osv(_('Error'), _('You cannot split a draft move. It needs to be confirmed first.'))
if move.product_qty <= qty or qty == 0:
return move.id
uom_obj = self.pool.get('product.uom')
context = context or {}
#HALF-UP rounding, as rounding errors can only come from the propagation of errors from the default UoM
uom_qty = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, qty, move.product_uom, rounding_method='HALF-UP', context=context)
uos_qty = uom_qty * move.product_uos_qty / move.product_uom_qty
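# Worked example (hypothetical UoMs): for a move of 2 dozen (product_uom_qty = 2, move UoM = Dozen,
# default product UoM = Unit) and qty = 6 units to split, uom_qty = 0.5 dozen after the HALF-UP
# conversion; if product_uos_qty equals product_uom_qty (no separate UoS), uos_qty = 0.5 as well.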
defaults = {
'product_uom_qty': uom_qty,
'product_uos_qty': uos_qty,
'procure_method': 'make_to_stock',
'restrict_lot_id': restrict_lot_id,
'restrict_partner_id': restrict_partner_id,
'split_from': move.id,
'procurement_id': move.procurement_id.id,
'move_dest_id': move.move_dest_id.id,
'origin_returned_move_id': move.origin_returned_move_id.id,
}
if context.get('source_location_id'):
defaults['location_id'] = context['source_location_id']
new_move = self.copy(cr, uid, move.id, defaults, context=context)
ctx = context.copy()
ctx['do_not_propagate'] = True
self.write(cr, uid, [move.id], {
'product_uom_qty': move.product_uom_qty - uom_qty,
'product_uos_qty': move.product_uos_qty - uos_qty,
}, context=ctx)
if move.move_dest_id and move.propagate and move.move_dest_id.state not in ('done', 'cancel'):
new_move_prop = self.split(cr, uid, move.move_dest_id, qty, context=context)
self.write(cr, uid, [new_move], {'move_dest_id': new_move_prop}, context=context)
#returning the first element of the list returned by action_confirm is ok because we checked it wouldn't be exploded (and
#thus the result of action_confirm should always be a list of length 1)
return self.action_confirm(cr, uid, [new_move], context=context)[0]
def get_code_from_locs(self, cr, uid, move, location_id=False, location_dest_id=False, context=None):
"""
Returns the code the picking type should have. This can easily be used
to check if a move is internal or not
move, location_id and location_dest_id are browse records
"""
code = 'internal'
src_loc = location_id or move.location_id
dest_loc = location_dest_id or move.location_dest_id
if src_loc.usage == 'internal' and dest_loc.usage != 'internal':
code = 'outgoing'
if src_loc.usage != 'internal' and dest_loc.usage == 'internal':
code = 'incoming'
return code
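# Illustrative usage (hypothetical browse records, not part of the original code):
#   self.get_code_from_locs(cr, uid, move)                            # internal -> customer: 'outgoing'
#   self.get_code_from_locs(cr, uid, move, supplier_loc, stock_loc)   # supplier -> internal: 'incoming'
#   self.get_code_from_locs(cr, uid, move, stock_loc, stock_loc)      # internal -> internal: 'internal'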
def _get_taxes(self, cr, uid, move, context=None):
return []
class stock_inventory(osv.osv):
_name = "stock.inventory"
_description = "Inventory"
def _get_move_ids_exist(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for inv in self.browse(cr, uid, ids, context=context):
res[inv.id] = False
if inv.move_ids:
res[inv.id] = True
return res
def _get_available_filters(self, cr, uid, context=None):
"""
This function will return the list of filters allowed according to the options checked
in 'Settings > Warehouse'.
:rtype: list of tuple
"""
#default available choices
res_filter = [('none', _('All products')), ('partial', _('Manual Selection of Products')), ('product', _('One product only'))]
settings_obj = self.pool.get('stock.config.settings')
config_ids = settings_obj.search(cr, uid, [], limit=1, order='id DESC', context=context)
#If the configuration has never been updated, all fields are False by default and so should not be displayed
if not config_ids:
return res_filter
stock_settings = settings_obj.browse(cr, uid, config_ids[0], context=context)
if stock_settings.group_stock_tracking_owner:
res_filter.append(('owner', _('One owner only')))
res_filter.append(('product_owner', _('One product for a specific owner')))
if stock_settings.group_stock_tracking_lot:
res_filter.append(('lot', _('One Lot/Serial Number')))
if stock_settings.group_stock_packaging:
res_filter.append(('pack', _('A Pack')))
return res_filter
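# Illustrative sketch (assuming owner and lot tracking are enabled and packaging is not): the returned
# list would be
#   [('none', 'All products'), ('partial', 'Manual Selection of Products'), ('product', 'One product only'),
#    ('owner', 'One owner only'), ('product_owner', 'One product for a specific owner'), ('lot', 'One Lot/Serial Number')]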
def _get_total_qty(self, cr, uid, ids, field_name, args, context=None):
res = {}
for inv in self.browse(cr, uid, ids, context=context):
res[inv.id] = sum([x.product_qty for x in inv.line_ids])
return res
INVENTORY_STATE_SELECTION = [
('draft', 'Draft'),
('cancel', 'Cancelled'),
('confirm', 'In Progress'),
('done', 'Validated'),
]
_columns = {
'name': fields.char('Inventory Reference', required=True, readonly=True, states={'draft': [('readonly', False)]}, help="Inventory Name."),
'date': fields.datetime('Inventory Date', required=True, readonly=True, help="The date that will be used for the stock level check of the products and the validation of the stock move related to this inventory."),
'line_ids': fields.one2many('stock.inventory.line', 'inventory_id', 'Inventories', readonly=False, states={'done': [('readonly', True)]}, help="Inventory Lines.", copy=True),
'move_ids': fields.one2many('stock.move', 'inventory_id', 'Created Moves', help="Inventory Moves.", states={'done': [('readonly', True)]}),
'state': fields.selection(INVENTORY_STATE_SELECTION, 'Status', readonly=True, select=True, copy=False),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True, readonly=True, states={'draft': [('readonly', False)]}),
'location_id': fields.many2one('stock.location', 'Inventoried Location', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_id': fields.many2one('product.product', 'Inventoried Product', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Product to focus your inventory on a particular Product."),
'package_id': fields.many2one('stock.quant.package', 'Inventoried Pack', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Pack to focus your inventory on a particular Pack."),
'partner_id': fields.many2one('res.partner', 'Inventoried Owner', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Owner to focus your inventory on a particular Owner."),
'lot_id': fields.many2one('stock.production.lot', 'Inventoried Lot/Serial Number', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Lot/Serial Number to focus your inventory on a particular Lot/Serial Number.", copy=False),
'move_ids_exist': fields.function(_get_move_ids_exist, type='boolean', string=' Stock Move Exists?', help='technical field for attrs in view'),
'filter': fields.selection(_get_available_filters, 'Inventory of', required=True,
help="If you do an entire inventory, you can choose 'All Products' and it will prefill the inventory with the current stock. If you only do some products "\
"(e.g. Cycle Counting) you can choose 'Manual Selection of Products' and the system won't propose anything. You can also let the "\
"system propose for a single product / lot /... "),
'total_qty': fields.function(_get_total_qty, type="float"),
}
def _default_stock_location(self, cr, uid, context=None):
try:
warehouse = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'warehouse0')
return warehouse.lot_stock_id.id
except:
return False
_defaults = {
'date': fields.datetime.now,
'state': 'draft',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c),
'location_id': _default_stock_location,
'filter': 'none',
}
def reset_real_qty(self, cr, uid, ids, context=None):
inventory = self.browse(cr, uid, ids[0], context=context)
line_ids = [line.id for line in inventory.line_ids]
self.pool.get('stock.inventory.line').write(cr, uid, line_ids, {'product_qty': 0})
return True
def action_done(self, cr, uid, ids, context=None):
""" Finish the inventory
@return: True
"""
for inv in self.browse(cr, uid, ids, context=context):
for inventory_line in inv.line_ids:
if inventory_line.product_qty < 0 and inventory_line.product_qty != inventory_line.theoretical_qty:
raise osv.except_osv(_('Warning'), _('You cannot set a negative product quantity in an inventory line:\n\t%s - qty: %s') % (inventory_line.product_id.name, inventory_line.product_qty))
self.action_check(cr, uid, [inv.id], context=context)
self.write(cr, uid, [inv.id], {'state': 'done'}, context=context)
self.post_inventory(cr, uid, inv, context=context)
return True
def post_inventory(self, cr, uid, inv, context=None):
#The inventory is posted as a single step, which means quants cannot be moved from one internal location to another using an inventory,
#as they will be moved to inventory loss and other quants will be created in the encoded quant location. This is normal behavior,
#as quants cannot be reused from the inventory location (users can still manually move the products before/after the inventory if they want).
move_obj = self.pool.get('stock.move')
move_obj.action_done(cr, uid, [x.id for x in inv.move_ids if x.state != 'done'], context=context)
def action_check(self, cr, uid, ids, context=None):
""" Checks the inventory and computes the stock move to do
@return: True
"""
inventory_line_obj = self.pool.get('stock.inventory.line')
stock_move_obj = self.pool.get('stock.move')
for inventory in self.browse(cr, uid, ids, context=context):
#first remove the existing stock moves linked to this inventory
move_ids = [move.id for move in inventory.move_ids]
stock_move_obj.unlink(cr, uid, move_ids, context=context)
for line in inventory.line_ids:
#compare the checked quantities on inventory lines to the theoretical ones
stock_move = inventory_line_obj._resolve_inventory_line(cr, uid, line, context=context)
def action_cancel_draft(self, cr, uid, ids, context=None):
""" Cancels the stock move and change inventory state to draft.
@return: True
"""
for inv in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [inv.id], {'line_ids': [(5,)]}, context=context)
self.pool.get('stock.move').action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context)
self.write(cr, uid, [inv.id], {'state': 'draft'}, context=context)
return True
def action_cancel_inventory(self, cr, uid, ids, context=None):
self.action_cancel_draft(cr, uid, ids, context=context)
def prepare_inventory(self, cr, uid, ids, context=None):
inventory_line_obj = self.pool.get('stock.inventory.line')
for inventory in self.browse(cr, uid, ids, context=context):
# If there are inventory lines already (e.g. from import), respect those and set their theoretical qty
line_ids = [line.id for line in inventory.line_ids]
if not line_ids and inventory.filter != 'partial':
#compute the inventory lines and create them
vals = self._get_inventory_lines(cr, uid, inventory, context=context)
for product_line in vals:
inventory_line_obj.create(cr, uid, product_line, context=context)
return self.write(cr, uid, ids, {'state': 'confirm', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})
def _get_inventory_lines(self, cr, uid, inventory, context=None):
location_obj = self.pool.get('stock.location')
product_obj = self.pool.get('product.product')
location_ids = location_obj.search(cr, uid, [('id', 'child_of', [inventory.location_id.id])], context=context)
domain = ' location_id in %s'
args = (tuple(location_ids),)
if inventory.partner_id:
domain += ' and owner_id = %s'
args += (inventory.partner_id.id,)
if inventory.lot_id:
domain += ' and lot_id = %s'
args += (inventory.lot_id.id,)
if inventory.product_id:
domain += ' and product_id = %s'
args += (inventory.product_id.id,)
if inventory.package_id:
domain += ' and package_id = %s'
args += (inventory.package_id.id,)
cr.execute('''
SELECT product_id, sum(qty) as product_qty, location_id, lot_id as prod_lot_id, package_id, owner_id as partner_id
FROM stock_quant WHERE''' + domain + '''
GROUP BY product_id, location_id, lot_id, package_id, partner_id
''', args)
vals = []
for product_line in cr.dictfetchall():
#replace the None values in the dictionary by False, because falsy values are tested later on
for key, value in product_line.items():
if not value:
product_line[key] = False
product_line['inventory_id'] = inventory.id
product_line['theoretical_qty'] = product_line['product_qty']
if product_line['product_id']:
product = product_obj.browse(cr, uid, product_line['product_id'], context=context)
product_line['product_uom_id'] = product.uom_id.id
vals.append(product_line)
return vals
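# Illustrative sketch (hypothetical inventory, not part of the original code): for an inventory filtered
# on an owner and a lot, the query built above becomes
#   WHERE location_id in %s and owner_id = %s and lot_id = %s
# with args = (tuple(location_ids), inventory.partner_id.id, inventory.lot_id.id), grouped by product,
# location, lot, package and owner.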
def _check_filter_product(self, cr, uid, ids, context=None):
for inventory in self.browse(cr, uid, ids, context=context):
if inventory.filter == 'none' and inventory.product_id and inventory.location_id and inventory.lot_id:
return True
if inventory.filter not in ('product', 'product_owner') and inventory.product_id:
return False
if inventory.filter != 'lot' and inventory.lot_id:
return False
if inventory.filter not in ('owner', 'product_owner') and inventory.partner_id:
return False
if inventory.filter != 'pack' and inventory.package_id:
return False
return True
def onchange_filter(self, cr, uid, ids, filter, context=None):
to_clean = { 'value': {} }
if filter not in ('product', 'product_owner'):
to_clean['value']['product_id'] = False
if filter != 'lot':
to_clean['value']['lot_id'] = False
if filter not in ('owner', 'product_owner'):
to_clean['value']['partner_id'] = False
if filter != 'pack':
to_clean['value']['package_id'] = False
return to_clean
_constraints = [
(_check_filter_product, 'The selected inventory options are not coherent.',
['filter', 'product_id', 'lot_id', 'partner_id', 'package_id']),
]
class stock_inventory_line(osv.osv):
_name = "stock.inventory.line"
_description = "Inventory Line"
_order = "inventory_id, location_name, product_code, product_name, prodlot_name"
def _get_product_name_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('product_id', 'in', ids)], context=context)
def _get_location_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('location_id', 'in', ids)], context=context)
def _get_prodlot_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('prod_lot_id', 'in', ids)], context=context)
def _get_theoretical_qty(self, cr, uid, ids, name, args, context=None):
res = {}
quant_obj = self.pool["stock.quant"]
uom_obj = self.pool["product.uom"]
for line in self.browse(cr, uid, ids, context=context):
quant_ids = self._get_quants(cr, uid, line, context=context)
quants = quant_obj.browse(cr, uid, quant_ids, context=context)
tot_qty = sum([x.qty for x in quants])
if line.product_uom_id and line.product_id.uom_id.id != line.product_uom_id.id:
tot_qty = uom_obj._compute_qty_obj(cr, uid, line.product_id.uom_id, tot_qty, line.product_uom_id, context=context)
res[line.id] = tot_qty
return res
_columns = {
'inventory_id': fields.many2one('stock.inventory', 'Inventory', ondelete='cascade', select=True),
'location_id': fields.many2one('stock.location', 'Location', required=True, select=True),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True),
'package_id': fields.many2one('stock.quant.package', 'Pack', select=True),
'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_qty': fields.float('Checked Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),
'company_id': fields.related('inventory_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, select=True, readonly=True),
'prod_lot_id': fields.many2one('stock.production.lot', 'Serial Number', domain="[('product_id','=',product_id)]"),
'state': fields.related('inventory_id', 'state', type='char', string='Status', readonly=True),
'theoretical_qty': fields.function(_get_theoretical_qty, type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
store={'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['location_id', 'product_id', 'package_id', 'product_uom_id', 'company_id', 'prod_lot_id', 'partner_id'], 20),},
readonly=True, string="Theoretical Quantity"),
'partner_id': fields.many2one('res.partner', 'Owner'),
'product_name': fields.related('product_id', 'name', type='char', string='Product Name', store={
'product.product': (_get_product_name_change, ['name', 'default_code'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}),
'product_code': fields.related('product_id', 'default_code', type='char', string='Product Code', store={
'product.product': (_get_product_name_change, ['name', 'default_code'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}),
'location_name': fields.related('location_id', 'complete_name', type='char', string='Location Name', store={
'stock.location': (_get_location_change, ['name', 'location_id', 'active'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['location_id'], 20),}),
'prodlot_name': fields.related('prod_lot_id', 'name', type='char', string='Serial Number Name', store={
'stock.production.lot': (_get_prodlot_change, ['name'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['prod_lot_id'], 20),}),
}
_defaults = {
'product_qty': 0,
'product_uom_id': lambda self, cr, uid, ctx=None: self.pool['ir.model.data'].get_object_reference(cr, uid, 'product', 'product_uom_unit')[1]
}
def _get_quants(self, cr, uid, line, context=None):
quant_obj = self.pool["stock.quant"]
dom = [('company_id', '=', line.company_id.id), ('location_id', '=', line.location_id.id), ('lot_id', '=', line.prod_lot_id.id),
('product_id','=', line.product_id.id), ('owner_id', '=', line.partner_id.id), ('package_id', '=', line.package_id.id)]
quants = quant_obj.search(cr, uid, dom, context=context)
return quants
def onchange_createline(self, cr, uid, ids, location_id=False, product_id=False, uom_id=False, package_id=False, prod_lot_id=False, partner_id=False, company_id=False, context=None):
quant_obj = self.pool["stock.quant"]
uom_obj = self.pool["product.uom"]
res = {'value': {}}
# If the given UoM is not in the product's UoM category, fall back to the default UoM of the product
if product_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool['product.uom'].browse(cr, uid, uom_id, context=context)
if product.uom_id.category_id.id != uom.category_id.id:
res['value']['product_uom_id'] = product.uom_id.id
res['domain'] = {'product_uom_id': [('category_id','=',product.uom_id.category_id.id)]}
uom_id = product.uom_id.id
# Calculate theoretical quantity by searching the quants as in quants_get
if product_id and location_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
if not company_id:
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
dom = [('company_id', '=', company_id), ('location_id', '=', location_id), ('lot_id', '=', prod_lot_id),
('product_id','=', product_id), ('owner_id', '=', partner_id), ('package_id', '=', package_id)]
quants = quant_obj.search(cr, uid, dom, context=context)
th_qty = sum([x.qty for x in quant_obj.browse(cr, uid, quants, context=context)])
if product_id and uom_id and product.uom_id.id != uom_id:
th_qty = uom_obj._compute_qty(cr, uid, product.uom_id.id, th_qty, uom_id)
res['value']['theoretical_qty'] = th_qty
res['value']['product_qty'] = th_qty
return res
def _resolve_inventory_line(self, cr, uid, inventory_line, context=None):
stock_move_obj = self.pool.get('stock.move')
quant_obj = self.pool.get('stock.quant')
diff = inventory_line.theoretical_qty - inventory_line.product_qty
if not diff:
return
#each line where the difference between the theoretical and checked quantities is not 0 needs a stock move to be created
vals = {
'name': _('INV:') + (inventory_line.inventory_id.name or ''),
'product_id': inventory_line.product_id.id,
'product_uom': inventory_line.product_uom_id.id,
'date': inventory_line.inventory_id.date,
'company_id': inventory_line.inventory_id.company_id.id,
'inventory_id': inventory_line.inventory_id.id,
'state': 'confirmed',
'restrict_lot_id': inventory_line.prod_lot_id.id,
'restrict_partner_id': inventory_line.partner_id.id,
}
inventory_location_id = inventory_line.product_id.property_stock_inventory.id
if diff < 0:
#found more than expected
vals['location_id'] = inventory_location_id
vals['location_dest_id'] = inventory_line.location_id.id
vals['product_uom_qty'] = -diff
else:
#found less than expected
vals['location_id'] = inventory_line.location_id.id
vals['location_dest_id'] = inventory_location_id
vals['product_uom_qty'] = diff
move_id = stock_move_obj.create(cr, uid, vals, context=context)
move = stock_move_obj.browse(cr, uid, move_id, context=context)
if diff > 0:
domain = [('qty', '>', 0.0), ('package_id', '=', inventory_line.package_id.id), ('lot_id', '=', inventory_line.prod_lot_id.id), ('location_id', '=', inventory_line.location_id.id)]
preferred_domain_list = [[('reservation_id', '=', False)], [('reservation_id.inventory_id', '!=', inventory_line.inventory_id.id)]]
quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, move.product_qty, domain=domain, prefered_domain_list=preferred_domain_list, restrict_partner_id=move.restrict_partner_id.id, context=context)
quant_obj.quants_reserve(cr, uid, quants, move, context=context)
elif inventory_line.package_id:
stock_move_obj.action_done(cr, uid, move_id, context=context)
quants = [x.id for x in move.quant_ids]
quant_obj.write(cr, uid, quants, {'package_id': inventory_line.package_id.id}, context=context)
res = quant_obj.search(cr, uid, [('qty', '<', 0.0), ('product_id', '=', move.product_id.id),
('location_id', '=', move.location_dest_id.id), ('package_id', '!=', False)], limit=1, context=context)
if res:
for quant in move.quant_ids:
if quant.location_id.id == move.location_dest_id.id: #To avoid taking a quant that was already reconciled
quant_obj._quant_reconcile_negative(cr, uid, quant, move, context=context)
return move_id
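# Illustrative sketch (hypothetical quantities): with theoretical_qty = 10 and product_qty = 7,
# diff = 3 > 0, so a move of 3 units is created from the inventoried location to the inventory loss
# location; with product_qty = 12, diff = -2 < 0 and a move of 2 units goes the other way. When
# diff > 0, quants of the inventoried location are additionally reserved for that move.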
# Should be left out in next version
def restrict_change(self, cr, uid, ids, theoretical_qty, context=None):
return {}
# Should be left out in next version
def on_change_product_id(self, cr, uid, ids, product, uom, theoretical_qty, context=None):
""" Changes UoM
@param product: Changed product_id
@param uom: UoM of the product
@return: Dictionary of changed values
"""
if not product:
return {'value': {'product_uom_id': False}}
obj_product = self.pool.get('product.product').browse(cr, uid, product, context=context)
return {'value': {'product_uom_id': uom or obj_product.uom_id.id}}
#----------------------------------------------------------
# Stock Warehouse
#----------------------------------------------------------
class stock_warehouse(osv.osv):
_name = "stock.warehouse"
_description = "Warehouse"
_columns = {
'name': fields.char('Warehouse Name', required=True, select=True),
'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True, select=True),
'partner_id': fields.many2one('res.partner', 'Address'),
'view_location_id': fields.many2one('stock.location', 'View Location', required=True, domain=[('usage', '=', 'view')]),
'lot_stock_id': fields.many2one('stock.location', 'Location Stock', domain=[('usage', '=', 'internal')], required=True),
'code': fields.char('Short Name', size=5, required=True, help="Short name used to identify your warehouse"),
'route_ids': fields.many2many('stock.location.route', 'stock_route_warehouse', 'warehouse_id', 'route_id', 'Routes', domain="[('warehouse_selectable', '=', True)]", help='Default routes through the warehouse'),
'reception_steps': fields.selection([
('one_step', 'Receive goods directly in stock (1 step)'),
('two_steps', 'Unload in input location then go to stock (2 steps)'),
('three_steps', 'Unload in input location, go through a quality control before being admitted in stock (3 steps)')], 'Incoming Shipments',
help="Default incoming route to follow", required=True),
'delivery_steps': fields.selection([
('ship_only', 'Ship directly from stock (Ship only)'),
('pick_ship', 'Bring goods to output location before shipping (Pick + Ship)'),
('pick_pack_ship', 'Make packages in a dedicated location, then bring them to the output location for shipping (Pick + Pack + Ship)')], 'Outgoing Shipments',
help="Default outgoing route to follow", required=True),
'wh_input_stock_loc_id': fields.many2one('stock.location', 'Input Location'),
'wh_qc_stock_loc_id': fields.many2one('stock.location', 'Quality Control Location'),
'wh_output_stock_loc_id': fields.many2one('stock.location', 'Output Location'),
'wh_pack_stock_loc_id': fields.many2one('stock.location', 'Packing Location'),
'mto_pull_id': fields.many2one('procurement.rule', 'MTO rule'),
'pick_type_id': fields.many2one('stock.picking.type', 'Pick Type'),
'pack_type_id': fields.many2one('stock.picking.type', 'Pack Type'),
'out_type_id': fields.many2one('stock.picking.type', 'Out Type'),
'in_type_id': fields.many2one('stock.picking.type', 'In Type'),
'int_type_id': fields.many2one('stock.picking.type', 'Internal Type'),
'crossdock_route_id': fields.many2one('stock.location.route', 'Crossdock Route'),
'reception_route_id': fields.many2one('stock.location.route', 'Receipt Route'),
'delivery_route_id': fields.many2one('stock.location.route', 'Delivery Route'),
'resupply_from_wh': fields.boolean('Resupply From Other Warehouses'),
'resupply_wh_ids': fields.many2many('stock.warehouse', 'stock_wh_resupply_table', 'supplied_wh_id', 'supplier_wh_id', 'Resupply Warehouses'),
'resupply_route_ids': fields.one2many('stock.location.route', 'supplied_wh_id', 'Resupply Routes',
help="Routes will be created for these resupply warehouses and you can select them on products and product categories"),
'default_resupply_wh_id': fields.many2one('stock.warehouse', 'Default Resupply Warehouse', help="Goods will always be resupplied from this warehouse"),
}
def onchange_filter_default_resupply_wh_id(self, cr, uid, ids, default_resupply_wh_id, resupply_wh_ids, context=None):
resupply_wh_ids = set([x['id'] for x in (self.resolve_2many_commands(cr, uid, 'resupply_wh_ids', resupply_wh_ids, ['id']))])
if default_resupply_wh_id: #If we are removing the default resupply, we don't have default_resupply_wh_id
resupply_wh_ids.add(default_resupply_wh_id)
resupply_wh_ids = list(resupply_wh_ids)
return {'value': {'resupply_wh_ids': resupply_wh_ids}}
def _get_external_transit_location(self, cr, uid, warehouse, context=None):
''' returns browse record of inter company transit location, if found'''
data_obj = self.pool.get('ir.model.data')
location_obj = self.pool.get('stock.location')
try:
inter_wh_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_inter_wh')[1]
except:
return False
return location_obj.browse(cr, uid, inter_wh_loc, context=context)
def _get_inter_wh_route(self, cr, uid, warehouse, wh, context=None):
return {
'name': _('%s: Supply Product from %s') % (warehouse.name, wh.name),
'warehouse_selectable': False,
'product_selectable': True,
'product_categ_selectable': True,
'supplied_wh_id': warehouse.id,
'supplier_wh_id': wh.id,
}
def _create_resupply_routes(self, cr, uid, warehouse, supplier_warehouses, default_resupply_wh, context=None):
route_obj = self.pool.get('stock.location.route')
pull_obj = self.pool.get('procurement.rule')
#create route selectable on the product to resupply the warehouse from another one
external_transit_location = self._get_external_transit_location(cr, uid, warehouse, context=context)
internal_transit_location = warehouse.company_id.internal_transit_location_id
input_loc = warehouse.wh_input_stock_loc_id
if warehouse.reception_steps == 'one_step':
input_loc = warehouse.lot_stock_id
for wh in supplier_warehouses:
transit_location = wh.company_id.id == warehouse.company_id.id and internal_transit_location or external_transit_location
if transit_location:
output_loc = wh.wh_output_stock_loc_id
if wh.delivery_steps == 'ship_only':
output_loc = wh.lot_stock_id
# Create extra MTO rule (only for 'ship only' because in the other cases MTO rules already exists)
mto_pull_vals = self._get_mto_pull_rule(cr, uid, wh, [(output_loc, transit_location, wh.out_type_id.id)], context=context)[0]
pull_obj.create(cr, uid, mto_pull_vals, context=context)
inter_wh_route_vals = self._get_inter_wh_route(cr, uid, warehouse, wh, context=context)
inter_wh_route_id = route_obj.create(cr, uid, vals=inter_wh_route_vals, context=context)
values = [(output_loc, transit_location, wh.out_type_id.id, wh), (transit_location, input_loc, warehouse.in_type_id.id, warehouse)]
pull_rules_list = self._get_supply_pull_rules(cr, uid, wh.id, values, inter_wh_route_id, context=context)
for pull_rule in pull_rules_list:
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#if the warehouse is also set as default resupply method, assign this route automatically to the warehouse
if default_resupply_wh and default_resupply_wh.id == wh.id:
self.write(cr, uid, [warehouse.id], {'route_ids': [(4, inter_wh_route_id)]}, context=context)
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c),
'reception_steps': 'one_step',
'delivery_steps': 'ship_only',
}
_sql_constraints = [
('warehouse_name_uniq', 'unique(name, company_id)', 'The name of the warehouse must be unique per company!'),
('warehouse_code_uniq', 'unique(code, company_id)', 'The code of the warehouse must be unique per company!'),
]
def _get_partner_locations(self, cr, uid, ids, context=None):
''' returns a tuple made of the browse record of customer location and the browse record of supplier location'''
data_obj = self.pool.get('ir.model.data')
location_obj = self.pool.get('stock.location')
try:
customer_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_customers')[1]
supplier_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_suppliers')[1]
except:
customer_loc = location_obj.search(cr, uid, [('usage', '=', 'customer')], context=context)
customer_loc = customer_loc and customer_loc[0] or False
supplier_loc = location_obj.search(cr, uid, [('usage', '=', 'supplier')], context=context)
supplier_loc = supplier_loc and supplier_loc[0] or False
if not (customer_loc and supplier_loc):
raise osv.except_osv(_('Error!'), _('Can\'t find any customer or supplier location.'))
return location_obj.browse(cr, uid, [customer_loc, supplier_loc], context=context)
def _location_used(self, cr, uid, location_id, warehouse, context=None):
pull_obj = self.pool['procurement.rule']
push_obj = self.pool['stock.location.path']
pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'not in', [x.id for x in warehouse.route_ids]), '|', ('location_src_id', '=', location_id), ('location_id', '=', location_id)], context=context)
pushs = push_obj.search(cr, uid, ['&', ('route_id', 'not in', [x.id for x in warehouse.route_ids]), '|', ('location_from_id', '=', location_id), ('location_dest_id', '=', location_id)], context=context)
if pulls or pushs:
return True
return False
def switch_location(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
location_obj = self.pool.get('stock.location')
new_reception_step = new_reception_step or warehouse.reception_steps
new_delivery_step = new_delivery_step or warehouse.delivery_steps
if warehouse.reception_steps != new_reception_step:
if not self._location_used(cr, uid, warehouse.wh_input_stock_loc_id.id, warehouse, context=context):
location_obj.write(cr, uid, [warehouse.wh_input_stock_loc_id.id, warehouse.wh_qc_stock_loc_id.id], {'active': False}, context=context)
if new_reception_step != 'one_step':
location_obj.write(cr, uid, warehouse.wh_input_stock_loc_id.id, {'active': True}, context=context)
if new_reception_step == 'three_steps':
location_obj.write(cr, uid, warehouse.wh_qc_stock_loc_id.id, {'active': True}, context=context)
if warehouse.delivery_steps != new_delivery_step:
if not self._location_used(cr, uid, warehouse.wh_output_stock_loc_id.id, warehouse, context=context):
location_obj.write(cr, uid, [warehouse.wh_output_stock_loc_id.id], {'active': False}, context=context)
if not self._location_used(cr, uid, warehouse.wh_pack_stock_loc_id.id, warehouse, context=context):
location_obj.write(cr, uid, [warehouse.wh_pack_stock_loc_id.id], {'active': False}, context=context)
if new_delivery_step != 'ship_only':
location_obj.write(cr, uid, warehouse.wh_output_stock_loc_id.id, {'active': True}, context=context)
if new_delivery_step == 'pick_pack_ship':
location_obj.write(cr, uid, warehouse.wh_pack_stock_loc_id.id, {'active': True}, context=context)
return True
def _get_reception_delivery_route(self, cr, uid, warehouse, route_name, context=None):
return {
'name': self._format_routename(cr, uid, warehouse, route_name, context=context),
'product_categ_selectable': True,
'product_selectable': False,
'sequence': 10,
}
def _get_supply_pull_rules(self, cr, uid, supply_warehouse, values, new_route_id, context=None):
pull_rules_list = []
for from_loc, dest_loc, pick_type_id, warehouse in values:
pull_rules_list.append({
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
'location_src_id': from_loc.id,
'location_id': dest_loc.id,
'route_id': new_route_id,
'action': 'move',
'picking_type_id': pick_type_id,
'procure_method': warehouse.lot_stock_id.id != from_loc.id and 'make_to_order' or 'make_to_stock', # first part of the resupply route is MTS
'warehouse_id': warehouse.id,
'propagate_warehouse_id': supply_warehouse,
})
return pull_rules_list
def _get_push_pull_rules(self, cr, uid, warehouse, active, values, new_route_id, context=None):
first_rule = True
push_rules_list = []
pull_rules_list = []
for from_loc, dest_loc, pick_type_id in values:
push_rules_list.append({
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
'location_from_id': from_loc.id,
'location_dest_id': dest_loc.id,
'route_id': new_route_id,
'auto': 'manual',
'picking_type_id': pick_type_id,
'active': active,
'warehouse_id': warehouse.id,
})
pull_rules_list.append({
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
'location_src_id': from_loc.id,
'location_id': dest_loc.id,
'route_id': new_route_id,
'action': 'move',
'picking_type_id': pick_type_id,
'procure_method': first_rule is True and 'make_to_stock' or 'make_to_order',
'active': active,
'warehouse_id': warehouse.id,
})
first_rule = False
return push_rules_list, pull_rules_list
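# Illustrative sketch (hypothetical locations, not part of the original code): for
#   values = [(loc_input, loc_qc, in_type_id), (loc_qc, loc_stock, int_type_id)]
# this returns one push rule and one pull rule per hop; only the first pull rule is 'make_to_stock',
# the following ones are 'make_to_order', so each step waits for the previous one.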
def _get_mto_route(self, cr, uid, context=None):
route_obj = self.pool.get('stock.location.route')
data_obj = self.pool.get('ir.model.data')
try:
mto_route_id = data_obj.get_object_reference(cr, uid, 'stock', 'route_warehouse0_mto')[1]
except:
mto_route_id = route_obj.search(cr, uid, [('name', 'like', _('Make To Order'))], context=context)
mto_route_id = mto_route_id and mto_route_id[0] or False
if not mto_route_id:
raise osv.except_osv(_('Error!'), _('Can\'t find any generic Make To Order route.'))
return mto_route_id
def _check_remove_mto_resupply_rules(self, cr, uid, warehouse, context=None):
""" Checks that the moves from the different """
pull_obj = self.pool.get('procurement.rule')
mto_route_id = self._get_mto_route(cr, uid, context=context)
rules = pull_obj.search(cr, uid, ['&', ('location_src_id', '=', warehouse.lot_stock_id.id), ('location_id.usage', '=', 'transit')], context=context)
pull_obj.unlink(cr, uid, rules, context=context)
def _get_mto_pull_rule(self, cr, uid, warehouse, values, context=None):
mto_route_id = self._get_mto_route(cr, uid, context=context)
res = []
for value in values:
from_loc, dest_loc, pick_type_id = value
res += [{
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context) + _(' MTO'),
'location_src_id': from_loc.id,
'location_id': dest_loc.id,
'route_id': mto_route_id,
'action': 'move',
'picking_type_id': pick_type_id,
'procure_method': 'make_to_order',
'active': True,
'warehouse_id': warehouse.id,
}]
return res
def _get_crossdock_route(self, cr, uid, warehouse, route_name, context=None):
return {
'name': self._format_routename(cr, uid, warehouse, route_name, context=context),
'warehouse_selectable': False,
'product_selectable': True,
'product_categ_selectable': True,
'active': warehouse.delivery_steps != 'ship_only' and warehouse.reception_steps != 'one_step',
'sequence': 20,
}
def create_routes(self, cr, uid, ids, warehouse, context=None):
wh_route_ids = []
route_obj = self.pool.get('stock.location.route')
pull_obj = self.pool.get('procurement.rule')
push_obj = self.pool.get('stock.location.path')
routes_dict = self.get_routes_dict(cr, uid, ids, warehouse, context=context)
#create reception route and rules
route_name, values = routes_dict[warehouse.reception_steps]
route_vals = self._get_reception_delivery_route(cr, uid, warehouse, route_name, context=context)
reception_route_id = route_obj.create(cr, uid, route_vals, context=context)
wh_route_ids.append((4, reception_route_id))
push_rules_list, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, reception_route_id, context=context)
#create the push/pull rules
for push_rule in push_rules_list:
push_obj.create(cr, uid, vals=push_rule, context=context)
for pull_rule in pull_rules_list:
#all pull rules in reception route are mto, because we don't want to wait for the scheduler to trigger an orderpoint on input location
pull_rule['procure_method'] = 'make_to_order'
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#create MTS route and pull rules for delivery and a specific route MTO to be set on the product
route_name, values = routes_dict[warehouse.delivery_steps]
route_vals = self._get_reception_delivery_route(cr, uid, warehouse, route_name, context=context)
#create the route and its pull rules
delivery_route_id = route_obj.create(cr, uid, route_vals, context=context)
wh_route_ids.append((4, delivery_route_id))
dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, delivery_route_id, context=context)
for pull_rule in pull_rules_list:
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#create MTO pull rule and link it to the generic MTO route
mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, values, context=context)[0]
mto_pull_id = pull_obj.create(cr, uid, mto_pull_vals, context=context)
#create a route for cross dock operations, that can be set on products and product categories
route_name, values = routes_dict['crossdock']
crossdock_route_vals = self._get_crossdock_route(cr, uid, warehouse, route_name, context=context)
crossdock_route_id = route_obj.create(cr, uid, vals=crossdock_route_vals, context=context)
wh_route_ids.append((4, crossdock_route_id))
dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, warehouse.delivery_steps != 'ship_only' and warehouse.reception_steps != 'one_step', values, crossdock_route_id, context=context)
for pull_rule in pull_rules_list:
# Fixed cross-dock is logically mto
pull_rule['procure_method'] = 'make_to_order'
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#create route selectable on the product to resupply the warehouse from another one
self._create_resupply_routes(cr, uid, warehouse, warehouse.resupply_wh_ids, warehouse.default_resupply_wh_id, context=context)
#return routes and mto pull rule to store on the warehouse
return {
'route_ids': wh_route_ids,
'mto_pull_id': mto_pull_id,
'reception_route_id': reception_route_id,
'delivery_route_id': delivery_route_id,
'crossdock_route_id': crossdock_route_id,
}
def change_route(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
picking_type_obj = self.pool.get('stock.picking.type')
pull_obj = self.pool.get('procurement.rule')
push_obj = self.pool.get('stock.location.path')
route_obj = self.pool.get('stock.location.route')
new_reception_step = new_reception_step or warehouse.reception_steps
new_delivery_step = new_delivery_step or warehouse.delivery_steps
#change the default source and destination location and (de)activate picking types
input_loc = warehouse.wh_input_stock_loc_id
if new_reception_step == 'one_step':
input_loc = warehouse.lot_stock_id
output_loc = warehouse.wh_output_stock_loc_id
if new_delivery_step == 'ship_only':
output_loc = warehouse.lot_stock_id
picking_type_obj.write(cr, uid, warehouse.in_type_id.id, {'default_location_dest_id': input_loc.id}, context=context)
picking_type_obj.write(cr, uid, warehouse.out_type_id.id, {'default_location_src_id': output_loc.id}, context=context)
picking_type_obj.write(cr, uid, warehouse.pick_type_id.id, {
'active': new_delivery_step != 'ship_only',
'default_location_dest_id': output_loc.id if new_delivery_step == 'pick_ship' else warehouse.wh_pack_stock_loc_id.id,
}, context=context)
picking_type_obj.write(cr, uid, warehouse.pack_type_id.id, {'active': new_delivery_step == 'pick_pack_ship'}, context=context)
routes_dict = self.get_routes_dict(cr, uid, ids, warehouse, context=context)
#update delivery route and rules: unlink the existing rules of the warehouse delivery route and recreate it
pull_obj.unlink(cr, uid, [pu.id for pu in warehouse.delivery_route_id.pull_ids], context=context)
route_name, values = routes_dict[new_delivery_step]
route_obj.write(cr, uid, warehouse.delivery_route_id.id, {'name': self._format_routename(cr, uid, warehouse, route_name, context=context)}, context=context)
dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, warehouse.delivery_route_id.id, context=context)
#create the pull rules
for pull_rule in pull_rules_list:
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#update receipt route and rules: unlink the existing rules of the warehouse receipt route and recreate it
pull_obj.unlink(cr, uid, [pu.id for pu in warehouse.reception_route_id.pull_ids], context=context)
push_obj.unlink(cr, uid, [pu.id for pu in warehouse.reception_route_id.push_ids], context=context)
route_name, values = routes_dict[new_reception_step]
route_obj.write(cr, uid, warehouse.reception_route_id.id, {'name': self._format_routename(cr, uid, warehouse, route_name, context=context)}, context=context)
push_rules_list, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, warehouse.reception_route_id.id, context=context)
#create the push/pull rules
for push_rule in push_rules_list:
push_obj.create(cr, uid, vals=push_rule, context=context)
for pull_rule in pull_rules_list:
#all pull rules in receipt route are mto, because we don't want to wait for the scheduler to trigger an orderpoint on input location
pull_rule['procure_method'] = 'make_to_order'
pull_obj.create(cr, uid, vals=pull_rule, context=context)
route_obj.write(cr, uid, warehouse.crossdock_route_id.id, {'active': new_reception_step != 'one_step' and new_delivery_step != 'ship_only'}, context=context)
#change MTO rule
dummy, values = routes_dict[new_delivery_step]
mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, values, context=context)[0]
pull_obj.write(cr, uid, warehouse.mto_pull_id.id, mto_pull_vals, context=context)
return True
def create_sequences_and_picking_types(self, cr, uid, warehouse, context=None):
seq_obj = self.pool.get('ir.sequence')
picking_type_obj = self.pool.get('stock.picking.type')
#create new sequences
in_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence in'), 'prefix': warehouse.code + '/IN/', 'padding': 5}, context=context)
out_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence out'), 'prefix': warehouse.code + '/OUT/', 'padding': 5}, context=context)
pack_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence packing'), 'prefix': warehouse.code + '/PACK/', 'padding': 5}, context=context)
pick_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence picking'), 'prefix': warehouse.code + '/PICK/', 'padding': 5}, context=context)
int_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence internal'), 'prefix': warehouse.code + '/INT/', 'padding': 5}, context=context)
wh_stock_loc = warehouse.lot_stock_id
wh_input_stock_loc = warehouse.wh_input_stock_loc_id
wh_output_stock_loc = warehouse.wh_output_stock_loc_id
wh_pack_stock_loc = warehouse.wh_pack_stock_loc_id
#fetch customer and supplier locations, for references
customer_loc, supplier_loc = self._get_partner_locations(cr, uid, warehouse.id, context=context)
#create in, out, internal picking types for warehouse
input_loc = wh_input_stock_loc
if warehouse.reception_steps == 'one_step':
input_loc = wh_stock_loc
output_loc = wh_output_stock_loc
if warehouse.delivery_steps == 'ship_only':
output_loc = wh_stock_loc
#choose the next available color for the picking types of this warehouse
color = 0
available_colors = [c%9 for c in range(3, 12)] # put flashy colors first
all_used_colors = self.pool.get('stock.picking.type').search_read(cr, uid, [('warehouse_id', '!=', False), ('color', '!=', False)], ['color'], order='color')
#don't use sets to preserve the list order
for x in all_used_colors:
if x['color'] in available_colors:
available_colors.remove(x['color'])
if available_colors:
color = available_colors[0]
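# Worked example (hypothetical data): available_colors starts as [3, 4, 5, 6, 7, 8, 0, 1, 2];
# if existing picking types already use colors 3 and 4, those are removed and the new picking
# types of this warehouse get color 5.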
#order the picking types with a sequence so that each warehouse gets the following order: reception, internal, pick, pack, ship.
max_sequence = self.pool.get('stock.picking.type').search_read(cr, uid, [], ['sequence'], order='sequence desc')
max_sequence = max_sequence and max_sequence[0]['sequence'] or 0
in_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Receipts'),
'warehouse_id': warehouse.id,
'code': 'incoming',
'sequence_id': in_seq_id,
'default_location_src_id': supplier_loc.id,
'default_location_dest_id': input_loc.id,
'sequence': max_sequence + 1,
'color': color}, context=context)
out_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Delivery Orders'),
'warehouse_id': warehouse.id,
'code': 'outgoing',
'sequence_id': out_seq_id,
'return_picking_type_id': in_type_id,
'default_location_src_id': output_loc.id,
'default_location_dest_id': customer_loc.id,
'sequence': max_sequence + 4,
'color': color}, context=context)
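#the receipt type could not reference the delivery type at creation time, so set its return picking type now that both exist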
picking_type_obj.write(cr, uid, [in_type_id], {'return_picking_type_id': out_type_id}, context=context)
int_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Internal Transfers'),
'warehouse_id': warehouse.id,
'code': 'internal',
'sequence_id': int_seq_id,
'default_location_src_id': wh_stock_loc.id,
'default_location_dest_id': wh_stock_loc.id,
'active': True,
'sequence': max_sequence + 2,
'color': color}, context=context)
pack_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Pack'),
'warehouse_id': warehouse.id,
'code': 'internal',
'sequence_id': pack_seq_id,
'default_location_src_id': wh_pack_stock_loc.id,
'default_location_dest_id': output_loc.id,
'active': warehouse.delivery_steps == 'pick_pack_ship',
'sequence': max_sequence + 3,
'color': color}, context=context)
pick_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Pick'),
'warehouse_id': warehouse.id,
'code': 'internal',
'sequence_id': pick_seq_id,
'default_location_src_id': wh_stock_loc.id,
'default_location_dest_id': output_loc.id if warehouse.delivery_steps == 'pick_ship' else wh_pack_stock_loc.id,
'active': warehouse.delivery_steps != 'ship_only',
'sequence': max_sequence + 2,
'color': color}, context=context)
#write picking types on WH
vals = {
'in_type_id': in_type_id,
'out_type_id': out_type_id,
'pack_type_id': pack_type_id,
'pick_type_id': pick_type_id,
'int_type_id': int_type_id,
}
super(stock_warehouse, self).write(cr, uid, warehouse.id, vals=vals, context=context)
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals is None:
vals = {}
data_obj = self.pool.get('ir.model.data')
seq_obj = self.pool.get('ir.sequence')
picking_type_obj = self.pool.get('stock.picking.type')
location_obj = self.pool.get('stock.location')
#create view location for warehouse
loc_vals = {
'name': _(vals.get('code')),
'usage': 'view',
'location_id': data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_locations')[1],
}
if vals.get('company_id'):
loc_vals['company_id'] = vals.get('company_id')
wh_loc_id = location_obj.create(cr, uid, loc_vals, context=context)
vals['view_location_id'] = wh_loc_id
#create all location
def_values = self.default_get(cr, uid, {'reception_steps', 'delivery_steps'})
reception_steps = vals.get('reception_steps', def_values['reception_steps'])
delivery_steps = vals.get('delivery_steps', def_values['delivery_steps'])
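#disable active_test so that sub-locations created as inactive (unused steps) are still handled properly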
context_with_inactive = context.copy()
context_with_inactive['active_test'] = False
sub_locations = [
{'name': _('Stock'), 'active': True, 'field': 'lot_stock_id'},
{'name': _('Input'), 'active': reception_steps != 'one_step', 'field': 'wh_input_stock_loc_id'},
{'name': _('Quality Control'), 'active': reception_steps == 'three_steps', 'field': 'wh_qc_stock_loc_id'},
{'name': _('Output'), 'active': delivery_steps != 'ship_only', 'field': 'wh_output_stock_loc_id'},
{'name': _('Packing Zone'), 'active': delivery_steps == 'pick_pack_ship', 'field': 'wh_pack_stock_loc_id'},
]
for values in sub_locations:
loc_vals = {
'name': values['name'],
'usage': 'internal',
'location_id': wh_loc_id,
'active': values['active'],
}
if vals.get('company_id'):
loc_vals['company_id'] = vals.get('company_id')
location_id = location_obj.create(cr, uid, loc_vals, context=context_with_inactive)
vals[values['field']] = location_id
#create WH
new_id = super(stock_warehouse, self).create(cr, uid, vals=vals, context=context)
warehouse = self.browse(cr, uid, new_id, context=context)
self.create_sequences_and_picking_types(cr, uid, warehouse, context=context)
#create routes and push/pull rules
new_objects_dict = self.create_routes(cr, uid, new_id, warehouse, context=context)
self.write(cr, uid, warehouse.id, new_objects_dict, context=context)
return new_id
def _format_rulename(self, cr, uid, obj, from_loc, dest_loc, context=None):
return obj.code + ': ' + from_loc.name + ' -> ' + dest_loc.name
def _format_routename(self, cr, uid, obj, name, context=None):
return obj.name + ': ' + name
def get_routes_dict(self, cr, uid, ids, warehouse, context=None):
#fetch customer and supplier locations, for references
customer_loc, supplier_loc = self._get_partner_locations(cr, uid, ids, context=context)
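#map each reception/delivery step to its translated route name and the list of (source location, destination location, picking type) legs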
return {
'one_step': (_('Receipt in 1 step'), []),
'two_steps': (_('Receipt in 2 steps'), [(warehouse.wh_input_stock_loc_id, warehouse.lot_stock_id, warehouse.int_type_id.id)]),
'three_steps': (_('Receipt in 3 steps'), [(warehouse.wh_input_stock_loc_id, warehouse.wh_qc_stock_loc_id, warehouse.int_type_id.id), (warehouse.wh_qc_stock_loc_id, warehouse.lot_stock_id, warehouse.int_type_id.id)]),
'crossdock': (_('Cross-Dock'), [(warehouse.wh_input_stock_loc_id, warehouse.wh_output_stock_loc_id, warehouse.int_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
'ship_only': (_('Ship Only'), [(warehouse.lot_stock_id, customer_loc, warehouse.out_type_id.id)]),
'pick_ship': (_('Pick + Ship'), [(warehouse.lot_stock_id, warehouse.wh_output_stock_loc_id, warehouse.pick_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
'pick_pack_ship': (_('Pick + Pack + Ship'), [(warehouse.lot_stock_id, warehouse.wh_pack_stock_loc_id, warehouse.pick_type_id.id), (warehouse.wh_pack_stock_loc_id, warehouse.wh_output_stock_loc_id, warehouse.pack_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
}
def _handle_renaming(self, cr, uid, warehouse, name, code, context=None):
location_obj = self.pool.get('stock.location')
route_obj = self.pool.get('stock.location.route')
pull_obj = self.pool.get('procurement.rule')
push_obj = self.pool.get('stock.location.path')
#rename location
location_id = warehouse.lot_stock_id.location_id.id
location_obj.write(cr, uid, location_id, {'name': code}, context=context)
#rename route and push-pull rules
for route in warehouse.route_ids:
route_obj.write(cr, uid, route.id, {'name': route.name.replace(warehouse.name, name, 1)}, context=context)
for pull in route.pull_ids:
pull_obj.write(cr, uid, pull.id, {'name': pull.name.replace(warehouse.name, name, 1)}, context=context)
for push in route.push_ids:
push_obj.write(cr, uid, push.id, {'name': push.name.replace(warehouse.name, name, 1)}, context=context)
#change the mto pull rule name
if warehouse.mto_pull_id.id:
pull_obj.write(cr, uid, warehouse.mto_pull_id.id, {'name': warehouse.mto_pull_id.name.replace(warehouse.name, name, 1)}, context=context)
def _check_delivery_resupply(self, cr, uid, warehouse, new_location, change_to_multiple, context=None):
""" Will check if the resupply routes from this warehouse follow the changes of number of delivery steps """
#Check routes that are being delivered by this warehouse and change the rule going to transit location
route_obj = self.pool.get("stock.location.route")
pull_obj = self.pool.get("procurement.rule")
routes = route_obj.search(cr, uid, [('supplier_wh_id','=', warehouse.id)], context=context)
pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'in', routes), ('location_id.usage', '=', 'transit')], context=context)
if pulls:
pull_obj.write(cr, uid, pulls, {'location_src_id': new_location, 'procure_method': change_to_multiple and "make_to_order" or "make_to_stock"}, context=context)
# Create or clean MTO rules
mto_route_id = self._get_mto_route(cr, uid, context=context)
if not change_to_multiple:
# If single delivery we should create the necessary MTO rules for the resupply
# pulls = pull_obj.search(cr, uid, ['&', ('route_id', '=', mto_route_id), ('location_id.usage', '=', 'transit'), ('location_src_id', '=', warehouse.lot_stock_id.id)], context=context)
pull_recs = pull_obj.browse(cr, uid, pulls, context=context)
transfer_locs = list(set([x.location_id for x in pull_recs]))
vals = [(warehouse.lot_stock_id, x, warehouse.out_type_id.id) for x in transfer_locs]
mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, vals, context=context)
for mto_pull_val in mto_pull_vals:
pull_obj.create(cr, uid, mto_pull_val, context=context)
else:
# We need to delete all the MTO pull rules, otherwise they risk to be used in the system
pulls = pull_obj.search(cr, uid, ['&', ('route_id', '=', mto_route_id), ('location_id.usage', '=', 'transit'), ('location_src_id', '=', warehouse.lot_stock_id.id)], context=context)
if pulls:
pull_obj.unlink(cr, uid, pulls, context=context)
def _check_reception_resupply(self, cr, uid, warehouse, new_location, context=None):
"""
Will check if the resupply routes to this warehouse follow the changes of number of receipt steps
"""
#Check routes that are being delivered by this warehouse and change the rule coming from transit location
route_obj = self.pool.get("stock.location.route")
pull_obj = self.pool.get("procurement.rule")
routes = route_obj.search(cr, uid, [('supplied_wh_id','=', warehouse.id)], context=context)
pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'in', routes), ('location_src_id.usage', '=', 'transit')], context=context)
if pulls:
pull_obj.write(cr, uid, pulls, {'location_id': new_location}, context=context)
def _check_resupply(self, cr, uid, warehouse, reception_new, delivery_new, context=None):
if reception_new:
old_val = warehouse.reception_steps
new_val = reception_new
change_to_one = (old_val != 'one_step' and new_val == 'one_step')
change_to_multiple = (old_val == 'one_step' and new_val != 'one_step')
if change_to_one or change_to_multiple:
new_location = change_to_one and warehouse.lot_stock_id.id or warehouse.wh_input_stock_loc_id.id
self._check_reception_resupply(cr, uid, warehouse, new_location, context=context)
if delivery_new:
old_val = warehouse.delivery_steps
new_val = delivery_new
change_to_one = (old_val != 'ship_only' and new_val == 'ship_only')
change_to_multiple = (old_val == 'ship_only' and new_val != 'ship_only')
if change_to_one or change_to_multiple:
new_location = change_to_one and warehouse.lot_stock_id.id or warehouse.wh_output_stock_loc_id.id
self._check_delivery_resupply(cr, uid, warehouse, new_location, change_to_multiple, context=context)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
seq_obj = self.pool.get('ir.sequence')
route_obj = self.pool.get('stock.location.route')
context_with_inactive = context.copy()
context_with_inactive['active_test'] = False
for warehouse in self.browse(cr, uid, ids, context=context_with_inactive):
#first of all, check if we need to delete and recreate route
if vals.get('reception_steps') or vals.get('delivery_steps'):
#activate and deactivate location according to reception and delivery option
self.switch_location(cr, uid, warehouse.id, warehouse, vals.get('reception_steps', False), vals.get('delivery_steps', False), context=context)
# switch between route
self.change_route(cr, uid, ids, warehouse, vals.get('reception_steps', False), vals.get('delivery_steps', False), context=context_with_inactive)
# Check if we need to change something to resupply warehouses and associated MTO rules
self._check_resupply(cr, uid, warehouse, vals.get('reception_steps'), vals.get('delivery_steps'), context=context)
if vals.get('code') or vals.get('name'):
name = warehouse.name
#rename sequence
if vals.get('name'):
name = vals.get('name', warehouse.name)
self._handle_renaming(cr, uid, warehouse, name, vals.get('code', warehouse.code), context=context_with_inactive)
if warehouse.in_type_id:
seq_obj.write(cr, uid, warehouse.in_type_id.sequence_id.id, {'name': name + _(' Sequence in'), 'prefix': vals.get('code', warehouse.code) + '/IN/'}, context=context)
seq_obj.write(cr, uid, warehouse.out_type_id.sequence_id.id, {'name': name + _(' Sequence out'), 'prefix': vals.get('code', warehouse.code) + '/OUT/'}, context=context)
seq_obj.write(cr, uid, warehouse.pack_type_id.sequence_id.id, {'name': name + _(' Sequence packing'), 'prefix': vals.get('code', warehouse.code) + '/PACK/'}, context=context)
seq_obj.write(cr, uid, warehouse.pick_type_id.sequence_id.id, {'name': name + _(' Sequence picking'), 'prefix': vals.get('code', warehouse.code) + '/PICK/'}, context=context)
seq_obj.write(cr, uid, warehouse.int_type_id.sequence_id.id, {'name': name + _(' Sequence internal'), 'prefix': vals.get('code', warehouse.code) + '/INT/'}, context=context)
if vals.get('resupply_wh_ids') and not vals.get('resupply_route_ids'):
for cmd in vals.get('resupply_wh_ids'):
if cmd[0] == 6:
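#only the 'replace all' command (6) is handled: compare the old and new resupply warehouses to create or drop the corresponding routes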
new_ids = set(cmd[2])
old_ids = set([wh.id for wh in warehouse.resupply_wh_ids])
to_add_wh_ids = new_ids - old_ids
if to_add_wh_ids:
supplier_warehouses = self.browse(cr, uid, list(to_add_wh_ids), context=context)
self._create_resupply_routes(cr, uid, warehouse, supplier_warehouses, warehouse.default_resupply_wh_id, context=context)
to_remove_wh_ids = old_ids - new_ids
if to_remove_wh_ids:
to_remove_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', 'in', list(to_remove_wh_ids))], context=context)
if to_remove_route_ids:
route_obj.unlink(cr, uid, to_remove_route_ids, context=context)
else:
#not implemented
pass
if 'default_resupply_wh_id' in vals:
if vals.get('default_resupply_wh_id') == warehouse.id:
raise osv.except_osv(_('Warning'),_('The default resupply warehouse should be different from the warehouse itself!'))
if warehouse.default_resupply_wh_id:
#remove the existing resupplying route on the warehouse
to_remove_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', '=', warehouse.default_resupply_wh_id.id)], context=context)
for inter_wh_route_id in to_remove_route_ids:
self.write(cr, uid, [warehouse.id], {'route_ids': [(3, inter_wh_route_id)]})
if vals.get('default_resupply_wh_id'):
#assign the new resupplying route on all products
to_assign_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', '=', vals.get('default_resupply_wh_id'))], context=context)
for inter_wh_route_id in to_assign_route_ids:
self.write(cr, uid, [warehouse.id], {'route_ids': [(4, inter_wh_route_id)]})
return super(stock_warehouse, self).write(cr, uid, ids, vals=vals, context=context)
def get_all_routes_for_wh(self, cr, uid, warehouse, context=None):
route_obj = self.pool.get("stock.location.route")
all_routes = [route.id for route in warehouse.route_ids]
all_routes += route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id)], context=context)
all_routes += [warehouse.mto_pull_id.route_id.id]
return all_routes
def view_all_routes_for_wh(self, cr, uid, ids, context=None):
all_routes = []
for wh in self.browse(cr, uid, ids, context=context):
all_routes += self.get_all_routes_for_wh(cr, uid, wh, context=context)
domain = [('id', 'in', all_routes)]
return {
'name': _('Warehouse\'s Routes'),
'domain': domain,
'res_model': 'stock.location.route',
'type': 'ir.actions.act_window',
'view_id': False,
'view_mode': 'tree,form',
'view_type': 'form',
'limit': 20
}
class stock_location_path(osv.osv):
_name = "stock.location.path"
_description = "Pushed Flows"
_order = "name"
def _get_rules(self, cr, uid, ids, context=None):
res = []
for route in self.browse(cr, uid, ids, context=context):
res += [x.id for x in route.push_ids]
return res
_columns = {
'name': fields.char('Operation Name', required=True),
'company_id': fields.many2one('res.company', 'Company'),
'route_id': fields.many2one('stock.location.route', 'Route'),
'location_from_id': fields.many2one('stock.location', 'Source Location', ondelete='cascade', select=1, required=True),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', ondelete='cascade', select=1, required=True),
'delay': fields.integer('Delay (days)', help="Number of days to do this transition"),
'picking_type_id': fields.many2one('stock.picking.type', 'Type of the new Operation', required=True, help="This is the picking type associated with the different pickings"),
'auto': fields.selection(
[('auto','Automatic Move'), ('manual','Manual Operation'),('transparent','Automatic No Step Added')],
'Automatic Move',
required=True, select=1,
help="This is used to define paths the product has to follow within the location tree.\n" \
"The 'Automatic Move' value will create a stock move after the current one that will be "\
"validated automatically. With 'Manual Operation', the stock move has to be validated "\
"by a worker. With 'Automatic No Step Added', the location is replaced in the original move."
),
'propagate': fields.boolean('Propagate cancel and split', help='If checked, when the previous move is cancelled or split, the move generated by this rule will be cancelled or split as well'),
'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the rule without removing it."),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse'),
'route_sequence': fields.related('route_id', 'sequence', string='Route Sequence',
store={
'stock.location.route': (_get_rules, ['sequence'], 10),
'stock.location.path': (lambda self, cr, uid, ids, c={}: ids, ['route_id'], 10),
}),
'sequence': fields.integer('Sequence'),
}
_defaults = {
'auto': 'auto',
'delay': 0,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'procurement.order', context=c),
'propagate': True,
'active': True,
}
def _prepare_push_apply(self, cr, uid, rule, move, context=None):
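#the pushed move is planned rule.delay days after the expected date of the original move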
newdate = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=rule.delay or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return {
'origin': move.origin or move.picking_id.name or "/",
'location_id': move.location_dest_id.id,
'location_dest_id': rule.location_dest_id.id,
'date': newdate,
'company_id': rule.company_id and rule.company_id.id or False,
'date_expected': newdate,
'picking_id': False,
'picking_type_id': rule.picking_type_id and rule.picking_type_id.id or False,
'propagate': rule.propagate,
'push_rule_id': rule.id,
'warehouse_id': rule.warehouse_id and rule.warehouse_id.id or False,
}
def _apply(self, cr, uid, rule, move, context=None):
move_obj = self.pool.get('stock.move')
newdate = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=rule.delay or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
if rule.auto == 'transparent':
old_dest_location = move.location_dest_id.id
move_obj.write(cr, uid, [move.id], {
'date': newdate,
'date_expected': newdate,
'location_dest_id': rule.location_dest_id.id
})
#avoid looping if a push rule is not well configured
if rule.location_dest_id.id != old_dest_location:
#call again push_apply to see if a next step is defined
move_obj._push_apply(cr, uid, [move], context=context)
else:
vals = self._prepare_push_apply(cr, uid, rule, move, context=context)
move_id = move_obj.copy(cr, uid, move.id, vals, context=context)
move_obj.write(cr, uid, [move.id], {
'move_dest_id': move_id,
})
move_obj.action_confirm(cr, uid, [move_id], context=context)
# -------------------------
# Packaging related stuff
# -------------------------
from openerp.report import report_sxw
class stock_package(osv.osv):
"""
These are the packages, containing quants and/or other packages
"""
_name = "stock.quant.package"
_description = "Physical Packages"
_parent_name = "parent_id"
_parent_store = True
_parent_order = 'name'
_order = 'parent_left'
def name_get(self, cr, uid, ids, context=None):
res = self._complete_name(cr, uid, ids, 'complete_name', None, context=context)
return res.items()
def _complete_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = m.name
parent = m.parent_id
while parent:
res[m.id] = parent.name + ' / ' + res[m.id]
parent = parent.parent_id
return res
def _get_packages(self, cr, uid, ids, context=None):
"""Returns packages from quants for store"""
res = set()
for quant in self.browse(cr, uid, ids, context=context):
pack = quant.package_id
while pack:
res.add(pack.id)
pack = pack.parent_id
return list(res)
def _get_package_info(self, cr, uid, ids, name, args, context=None):
quant_obj = self.pool.get("stock.quant")
default_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
res = dict((res_id, {'location_id': False, 'company_id': default_company_id, 'owner_id': False}) for res_id in ids)
for pack in self.browse(cr, uid, ids, context=context):
quants = quant_obj.search(cr, uid, [('package_id', 'child_of', pack.id)], context=context)
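#all quants of a package are expected to share the same location, owner and company (see _check_location_constraint), so the first quant found is representative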
if quants:
quant = quant_obj.browse(cr, uid, quants[0], context=context)
res[pack.id]['location_id'] = quant.location_id.id
res[pack.id]['owner_id'] = quant.owner_id.id
res[pack.id]['company_id'] = quant.company_id.id
else:
res[pack.id]['location_id'] = False
res[pack.id]['owner_id'] = False
res[pack.id]['company_id'] = False
return res
def _get_packages_to_relocate(self, cr, uid, ids, context=None):
res = set()
for pack in self.browse(cr, uid, ids, context=context):
res.add(pack.id)
if pack.parent_id:
res.add(pack.parent_id.id)
return list(res)
_columns = {
'name': fields.char('Package Reference', select=True, copy=False),
'complete_name': fields.function(_complete_name, type='char', string="Package Name",),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
'packaging_id': fields.many2one('product.packaging', 'Packaging', help="This field should be completed only if everything inside the package shares the same product, otherwise it doesn't really make sense.", select=True),
'ul_id': fields.many2one('product.ul', 'Logistic Unit'),
'location_id': fields.function(_get_package_info, type='many2one', relation='stock.location', string='Location', multi="package",
store={
'stock.quant': (_get_packages, ['location_id'], 10),
'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
}, readonly=True, select=True),
'quant_ids': fields.one2many('stock.quant', 'package_id', 'Bulk Content', readonly=True),
'parent_id': fields.many2one('stock.quant.package', 'Parent Package', help="The package containing this item", ondelete='restrict', readonly=True),
'children_ids': fields.one2many('stock.quant.package', 'parent_id', 'Contained Packages', readonly=True),
'company_id': fields.function(_get_package_info, type="many2one", relation='res.company', string='Company', multi="package",
store={
'stock.quant': (_get_packages, ['company_id'], 10),
'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
}, readonly=True, select=True),
'owner_id': fields.function(_get_package_info, type='many2one', relation='res.partner', string='Owner', multi="package",
store={
'stock.quant': (_get_packages, ['owner_id'], 10),
'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
}, readonly=True, select=True),
}
_defaults = {
'name': lambda self, cr, uid, context: self.pool.get('ir.sequence').get(cr, uid, 'stock.quant.package') or _('Unknown Pack')
}
def _check_location_constraint(self, cr, uid, packs, context=None):
'''checks that all quants in a package are stored in the same location. This function cannot be used
as a constraint because it needs to be checked on pack operations (they may not call write on the
package)
'''
quant_obj = self.pool.get('stock.quant')
for pack in packs:
parent = pack
while parent.parent_id:
parent = parent.parent_id
quant_ids = self.get_content(cr, uid, [parent.id], context=context)
quants = [x for x in quant_obj.browse(cr, uid, quant_ids, context=context) if x.qty > 0]
location_id = quants and quants[0].location_id.id or False
if not all(quant.location_id.id == location_id for quant in quants):
raise osv.except_osv(_('Error'), _('Everything inside a package should be in the same location'))
return True
def action_print(self, cr, uid, ids, context=None):
context = dict(context or {}, active_ids=ids)
return self.pool.get("report").get_action(cr, uid, ids, 'stock.report_package_barcode_small', context=context)
def unpack(self, cr, uid, ids, context=None):
quant_obj = self.pool.get('stock.quant')
for package in self.browse(cr, uid, ids, context=context):
quant_ids = [quant.id for quant in package.quant_ids]
quant_obj.write(cr, uid, quant_ids, {'package_id': package.parent_id.id or False}, context=context)
children_package_ids = [child_package.id for child_package in package.children_ids]
self.write(cr, uid, children_package_ids, {'parent_id': package.parent_id.id or False}, context=context)
#delete current package since it contains nothing anymore
self.unlink(cr, uid, ids, context=context)
return self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'stock', 'action_package_view', context=context)
def get_content(self, cr, uid, ids, context=None):
child_package_ids = self.search(cr, uid, [('id', 'child_of', ids)], context=context)
return self.pool.get('stock.quant').search(cr, uid, [('package_id', 'in', child_package_ids)], context=context)
def get_content_package(self, cr, uid, ids, context=None):
quants_ids = self.get_content(cr, uid, ids, context=context)
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'stock', 'quantsact', context=context)
res['domain'] = [('id', 'in', quants_ids)]
return res
def _get_product_total_qty(self, cr, uid, package_record, product_id, context=None):
'''Find the total quantity of the given product 'product_id' inside the given package 'package_record'.'''
quant_obj = self.pool.get('stock.quant')
all_quant_ids = self.get_content(cr, uid, [package_record.id], context=context)
total = 0
for quant in quant_obj.browse(cr, uid, all_quant_ids, context=context):
if quant.product_id.id == product_id:
total += quant.qty
return total
def _get_all_products_quantities(self, cr, uid, package_id, context=None):
'''This function computes the different product quantities for the given package
'''
quant_obj = self.pool.get('stock.quant')
res = {}
for quant in quant_obj.browse(cr, uid, self.get_content(cr, uid, package_id, context=context)):
if quant.product_id.id not in res:
res[quant.product_id.id] = 0
res[quant.product_id.id] += quant.qty
return res
def copy_pack(self, cr, uid, id, default_pack_values=None, default=None, context=None):
stock_pack_operation_obj = self.pool.get('stock.pack.operation')
if default is None:
default = {}
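#duplicate the package, then copy every pack operation that produced it so the copies point to the new package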
new_package_id = self.copy(cr, uid, id, default_pack_values, context=context)
default['result_package_id'] = new_package_id
op_ids = stock_pack_operation_obj.search(cr, uid, [('result_package_id', '=', id)], context=context)
for op_id in op_ids:
stock_pack_operation_obj.copy(cr, uid, op_id, default, context=context)
class stock_pack_operation(osv.osv):
_name = "stock.pack.operation"
_description = "Packing Operation"
def _get_remaining_prod_quantities(self, cr, uid, operation, context=None):
'''Get the remaining quantities per product on an operation with a package. This function returns a dictionary'''
#if the operation doesn't concern a package, it's not relevant to call this function
if not operation.package_id or operation.product_id:
return {operation.product_id.id: operation.remaining_qty}
#get the total of products the package contains
res = self.pool.get('stock.quant.package')._get_all_products_quantities(cr, uid, operation.package_id.id, context=context)
#reduce by the quantities linked to a move
for record in operation.linked_move_operation_ids:
if record.move_id.product_id.id not in res:
res[record.move_id.product_id.id] = 0
res[record.move_id.product_id.id] -= record.qty
return res
def _get_remaining_qty(self, cr, uid, ids, name, args, context=None):
uom_obj = self.pool.get('product.uom')
res = {}
for ops in self.browse(cr, uid, ids, context=context):
res[ops.id] = 0
if ops.package_id and not ops.product_id:
#don't try to compute the remaining quantity for packages because it's not relevant (a package could include different products).
#should use _get_remaining_prod_quantities instead
continue
else:
qty = ops.product_qty
if ops.product_uom_id:
qty = uom_obj._compute_qty_obj(cr, uid, ops.product_uom_id, ops.product_qty, ops.product_id.uom_id, context=context)
for record in ops.linked_move_operation_ids:
qty -= record.qty
res[ops.id] = float_round(qty, precision_rounding=ops.product_id.uom_id.rounding)
return res
def product_id_change(self, cr, uid, ids, product_id, product_uom_id, product_qty, context=None):
res = self.on_change_tests(cr, uid, ids, product_id, product_uom_id, product_qty, context=context)
if product_id and not product_uom_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
res['value']['product_uom_id'] = product.uom_id.id
return res
def on_change_tests(self, cr, uid, ids, product_id, product_uom_id, product_qty, context=None):
res = {'value': {}}
uom_obj = self.pool.get('product.uom')
if product_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
product_uom_id = product_uom_id or product.uom_id.id
selected_uom = uom_obj.browse(cr, uid, product_uom_id, context=context)
if selected_uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {
'title': _('Warning: wrong UoM!'),
'message': _('The selected UoM for product %s is not compatible with the UoM set on the product form. \nPlease choose a UoM within the same UoM category.') % (product.name)
}
if product_qty and 'warning' not in res:
rounded_qty = uom_obj._compute_qty(cr, uid, product_uom_id, product_qty, product_uom_id, round=True)
if rounded_qty != product_qty:
res['warning'] = {
'title': _('Warning: wrong quantity!'),
'message': _('The chosen quantity for product %s is not compatible with the UoM rounding. It will be automatically converted at confirmation') % (product.name)
}
return res
_columns = {
'picking_id': fields.many2one('stock.picking', 'Stock Picking', help='The stock operation where the packing has been made', required=True),
'product_id': fields.many2one('product.product', 'Product', ondelete="CASCADE"), # 1
'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure'),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'qty_done': fields.float('Quantity Processed', digits_compute=dp.get_precision('Product Unit of Measure')),
'package_id': fields.many2one('stock.quant.package', 'Source Package'), # 2
'lot_id': fields.many2one('stock.production.lot', 'Lot/Serial Number'),
'result_package_id': fields.many2one('stock.quant.package', 'Destination Package', help="If set, the operations are packed into this package", required=False, ondelete='cascade'),
'date': fields.datetime('Date', required=True),
'owner_id': fields.many2one('res.partner', 'Owner', help="Owner of the quants"),
#'update_cost': fields.boolean('Need cost update'),
'cost': fields.float("Cost", help="Unit Cost for this product line"),
'currency': fields.many2one('res.currency', string="Currency", help="Currency in which Unit cost is expressed", ondelete='CASCADE'),
'linked_move_operation_ids': fields.one2many('stock.move.operation.link', 'operation_id', string='Linked Moves', readonly=True, help='Moves impacted by this operation for the computation of the remaining quantities'),
'remaining_qty': fields.function(_get_remaining_qty, type='float', digits = 0, string="Remaining Qty", help="Remaining quantity in default UoM according to moves matched with this operation. "),
'location_id': fields.many2one('stock.location', 'Source Location', required=True),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True),
'processed': fields.selection([('true','Yes'), ('false','No')],'Has been processed?', required=True),
}
_defaults = {
'date': fields.date.context_today,
'qty_done': 0,
'processed': lambda *a: 'false',
}
def write(self, cr, uid, ids, vals, context=None):
context = context or {}
res = super(stock_pack_operation, self).write(cr, uid, ids, vals, context=context)
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get("no_recompute"):
pickings = vals.get('picking_id') and [vals['picking_id']] or list(set([x.picking_id.id for x in self.browse(cr, uid, ids, context=context)]))
self.pool.get("stock.picking").do_recompute_remaining_quantities(cr, uid, pickings, context=context)
return res
def create(self, cr, uid, vals, context=None):
context = context or {}
res_id = super(stock_pack_operation, self).create(cr, uid, vals, context=context)
if vals.get("picking_id") and not context.get("no_recompute"):
self.pool.get("stock.picking").do_recompute_remaining_quantities(cr, uid, [vals['picking_id']], context=context)
return res_id
def action_drop_down(self, cr, uid, ids, context=None):
''' Used by barcode interface to say that pack_operation has been moved from src location
to destination location; if qty_done is less than product_qty then we have to split the
operation in two to process the one with the qty moved
'''
processed_ids = []
move_obj = self.pool.get("stock.move")
for pack_op in self.browse(cr, uid, ids, context=None):
if pack_op.product_id and pack_op.location_id and pack_op.location_dest_id:
move_obj.check_tracking_product(cr, uid, pack_op.product_id, pack_op.lot_id.id, pack_op.location_id, pack_op.location_dest_id, context=context)
op = pack_op.id
if pack_op.qty_done < pack_op.product_qty:
# we split the operation in two
op = self.copy(cr, uid, pack_op.id, {'product_qty': pack_op.qty_done, 'qty_done': pack_op.qty_done}, context=context)
self.write(cr, uid, [pack_op.id], {'product_qty': pack_op.product_qty - pack_op.qty_done, 'qty_done': 0, 'lot_id': False}, context=context)
processed_ids.append(op)
self.write(cr, uid, processed_ids, {'processed': 'true'}, context=context)
def create_and_assign_lot(self, cr, uid, id, name, context=None):
''' Used by barcode interface to create a new lot and assign it to the operation
'''
obj = self.browse(cr, uid, id, context=context)
product_id = obj.product_id.id
val = {'product_id': product_id}
new_lot_id = False
if name:
lots = self.pool.get('stock.production.lot').search(cr, uid, ['&', ('name', '=', name), ('product_id', '=', product_id)], context=context)
if lots:
new_lot_id = lots[0]
val.update({'name': name})
if not new_lot_id:
new_lot_id = self.pool.get('stock.production.lot').create(cr, uid, val, context=context)
self.write(cr, uid, id, {'lot_id': new_lot_id}, context=context)
def _search_and_increment(self, cr, uid, picking_id, domain, filter_visible=False, visible_op_ids=False, increment=True, context=None):
'''Search for an operation with the given 'domain' in a picking; if one exists, increment its qty (+1), otherwise create it
:param domain: list of tuples directly reusable as a domain
context can receive a key 'current_package_id' with the package to consider for this operation
returns the id of the operation (existing or created), or False when a zero-quantity line was deleted
'''
if context is None:
context = {}
#if current_package_id is given in the context, we increase the number of items in this package
package_clause = [('result_package_id', '=', context.get('current_package_id', False))]
existing_operation_ids = self.search(cr, uid, [('picking_id', '=', picking_id)] + domain + package_clause, context=context)
todo_operation_ids = []
if existing_operation_ids:
if filter_visible:
todo_operation_ids = [val for val in existing_operation_ids if val in visible_op_ids]
else:
todo_operation_ids = existing_operation_ids
if todo_operation_ids:
#existing operation found for the given domain and picking => increment its quantity
operation_id = todo_operation_ids[0]
op_obj = self.browse(cr, uid, operation_id, context=context)
qty = op_obj.qty_done
if increment:
qty += 1
else:
qty -= 1 if qty >= 1 else 0
if qty == 0 and op_obj.product_qty == 0:
#we have a line with 0 qty set, so delete it
self.unlink(cr, uid, [operation_id], context=context)
return False
self.write(cr, uid, [operation_id], {'qty_done': qty}, context=context)
else:
#no existing operation found for the given domain and picking => create a new one
picking_obj = self.pool.get("stock.picking")
picking = picking_obj.browse(cr, uid, picking_id, context=context)
values = {
'picking_id': picking_id,
'product_qty': 0,
'location_id': picking.location_id.id,
'location_dest_id': picking.location_dest_id.id,
'qty_done': 1,
}
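#copy the searched field/value pairs into the new operation and, for a product, default its UoM to the product's UoM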
for key in domain:
var_name, dummy, value = key
uom_id = False
if var_name == 'product_id':
uom_id = self.pool.get('product.product').browse(cr, uid, value, context=context).uom_id.id
update_dict = {var_name: value}
if uom_id:
update_dict['product_uom_id'] = uom_id
values.update(update_dict)
operation_id = self.create(cr, uid, values, context=context)
return operation_id
class stock_move_operation_link(osv.osv):
"""
Table making the link between stock.moves and stock.pack.operations to compute the remaining quantities on each of these objects
"""
_name = "stock.move.operation.link"
_description = "Link between stock moves and pack operations"
_columns = {
'qty': fields.float('Quantity', help="Quantity of products to consider when talking about the contribution of this pack operation towards the remaining quantity of the move (and vice versa). Given in the product's main UoM."),
'operation_id': fields.many2one('stock.pack.operation', 'Operation', required=True, ondelete="cascade"),
'move_id': fields.many2one('stock.move', 'Move', required=True, ondelete="cascade"),
'reserved_quant_id': fields.many2one('stock.quant', 'Reserved Quant', help="Technical field containing the quant that created this link between an operation and a stock move. Used at the stock_move_obj.action_done() time to avoid seeking a matching quant again"),
}
def get_specific_domain(self, cr, uid, record, context=None):
'''Returns the specific domain to consider for quant selection in action_assign() or action_done() of stock.move,
having the record given as parameter making the link between the stock move and a pack operation'''
op = record.operation_id
domain = []
if op.package_id and op.product_id:
#if removing a product from a box, we restrict the choice of quants to this box
domain.append(('package_id', '=', op.package_id.id))
elif op.package_id:
#if moving a box, we allow to take everything from inside boxes as well
domain.append(('package_id', 'child_of', [op.package_id.id]))
else:
#if not given any information about package, we don't open boxes
domain.append(('package_id', '=', False))
#if lot info is given, we restrict choice to this lot otherwise we can take any
if op.lot_id:
domain.append(('lot_id', '=', op.lot_id.id))
#if owner info is given, we restrict to this owner otherwise we restrict to no owner
if op.owner_id:
domain.append(('owner_id', '=', op.owner_id.id))
else:
domain.append(('owner_id', '=', False))
return domain
class stock_warehouse_orderpoint(osv.osv):
"""
Defines Minimum stock rules.
"""
_name = "stock.warehouse.orderpoint"
_description = "Minimum Inventory Rule"
def subtract_procurements(self, cr, uid, orderpoint, context=None):
'''Returns the quantity of product that needs to be deducted from the orderpoint computed quantity because there is already a procurement created with the aim to fulfill it.
'''
qty = 0
uom_obj = self.pool.get("product.uom")
for procurement in orderpoint.procurement_ids:
if procurement.state in ('cancel', 'done'):
continue
procurement_qty = uom_obj._compute_qty_obj(cr, uid, procurement.product_uom, procurement.product_qty, procurement.product_id.uom_id, context=context)
for move in procurement.move_ids:
#need to add the moves in draft as they aren't in the virtual quantity + moves that have not been created yet
if move.state != 'draft':
#if move is already confirmed, assigned or done, the virtual stock is already taking this into account so it shouldn't be deducted
procurement_qty -= move.product_qty
qty += procurement_qty
return qty
def _check_product_uom(self, cr, uid, ids, context=None):
'''
Check if the UoM has the same category as the product standard UoM
'''
if not context:
context = {}
for rule in self.browse(cr, uid, ids, context=context):
if rule.product_id.uom_id.category_id.id != rule.product_uom.category_id.id:
return False
return True
def action_view_proc_to_process(self, cr, uid, ids, context=None):
act_obj = self.pool.get('ir.actions.act_window')
mod_obj = self.pool.get('ir.model.data')
proc_ids = self.pool.get('procurement.order').search(cr, uid, [('orderpoint_id', 'in', ids), ('state', 'not in', ('done', 'cancel'))], context=context)
result = mod_obj.get_object_reference(cr, uid, 'procurement', 'do_view_procurements')
if not result:
return False
result = act_obj.read(cr, uid, [result[1]], context=context)[0]
result['domain'] = "[('id', 'in', [" + ','.join(map(str, proc_ids)) + "])]"
return result
_columns = {
'name': fields.char('Name', required=True, copy=False),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the orderpoint without removing it."),
'logic': fields.selection([('max', 'Order to Max'), ('price', 'Best price (not yet active!)')], 'Reordering Mode', required=True),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True, ondelete="cascade"),
'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="cascade"),
'product_id': fields.many2one('product.product', 'Product', required=True, ondelete='cascade', domain=[('type', '=', 'product')]),
'product_uom': fields.related('product_id', 'uom_id', type='many2one', relation='product.uom', string='Product Unit of Measure', readonly=True, required=True),
'product_min_qty': fields.float('Minimum Quantity', required=True,
digits_compute=dp.get_precision('Product Unit of Measure'),
help="When the virtual stock goes below the Min Quantity specified for this field, Odoo generates "\
"a procurement to bring the forecasted quantity to the Max Quantity."),
'product_max_qty': fields.float('Maximum Quantity', required=True,
digits_compute=dp.get_precision('Product Unit of Measure'),
help="When the virtual stock goes below the Min Quantity, Odoo generates "\
"a procurement to bring the forecasted quantity to the Quantity specified as Max Quantity."),
'qty_multiple': fields.float('Qty Multiple', required=True,
digits_compute=dp.get_precision('Product Unit of Measure'),
help="The procurement quantity will be rounded up to this multiple. If it is 0, the exact quantity will be used. "),
'procurement_ids': fields.one2many('procurement.order', 'orderpoint_id', 'Created Procurements'),
'group_id': fields.many2one('procurement.group', 'Procurement Group', help="Moves created through this orderpoint will be put in this procurement group. If none is given, the moves generated by procurement rules will be grouped into one big picking.", copy=False),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
_defaults = {
'active': lambda *a: 1,
'logic': lambda *a: 'max',
'qty_multiple': lambda *a: 1,
'name': lambda self, cr, uid, context: self.pool.get('ir.sequence').get(cr, uid, 'stock.orderpoint') or '',
'product_uom': lambda self, cr, uid, context: context.get('product_uom', False),
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.warehouse.orderpoint', context=context)
}
_sql_constraints = [
('qty_multiple_check', 'CHECK( qty_multiple >= 0 )', 'Qty Multiple must be greater than or equal to zero.'),
]
_constraints = [
(_check_product_uom, 'You have to select a product unit of measure in the same category as the default unit of measure of the product', ['product_id', 'product_uom']),
]
def default_get(self, cr, uid, fields, context=None):
warehouse_obj = self.pool.get('stock.warehouse')
res = super(stock_warehouse_orderpoint, self).default_get(cr, uid, fields, context)
# default 'warehouse_id' and 'location_id'
if 'warehouse_id' not in res:
warehouse_ids = res.get('company_id') and warehouse_obj.search(cr, uid, [('company_id', '=', res['company_id'])], limit=1, context=context) or []
res['warehouse_id'] = warehouse_ids and warehouse_ids[0] or False
if 'location_id' not in res:
res['location_id'] = res.get('warehouse_id') and warehouse_obj.browse(cr, uid, res['warehouse_id'], context).lot_stock_id.id or False
return res
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id, context=None):
""" Finds location id for changed warehouse.
@param warehouse_id: Changed id of warehouse.
@return: Dictionary of values.
"""
if warehouse_id:
w = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
v = {'location_id': w.lot_stock_id.id}
return {'value': v}
return {}
def onchange_product_id(self, cr, uid, ids, product_id, context=None):
""" Finds UoM for changed product.
@param product_id: Changed id of product.
@return: Dictionary of values.
"""
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
d = {'product_uom': [('category_id', '=', prod.uom_id.category_id.id)]}
v = {'product_uom': prod.uom_id.id}
return {'value': v, 'domain': d}
return {'domain': {'product_uom': []}}
class stock_picking_type(osv.osv):
_name = "stock.picking.type"
_description = "The picking type determines the picking view"
_order = 'sequence'
def open_barcode_interface(self, cr, uid, ids, context=None):
final_url = "/barcode/web/#action=stock.ui&picking_type_id=" + str(ids[0]) if len(ids) else '0'
return {'type': 'ir.actions.act_url', 'url': final_url, 'target': 'self'}
def _get_tristate_values(self, cr, uid, ids, field_name, arg, context=None):
picking_obj = self.pool.get('stock.picking')
res = {}
for picking_type_id in ids:
#get last 10 pickings of this type
picking_ids = picking_obj.search(cr, uid, [('picking_type_id', '=', picking_type_id), ('state', '=', 'done')], order='date_done desc', limit=10, context=context)
tristates = []
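#-1 = late, 0 = done with a backorder, 1 = on time; inserting at index 0 puts the pickings back in chronological order (oldest first)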
for picking in picking_obj.browse(cr, uid, picking_ids, context=context):
if picking.date_done > picking.date:
tristates.insert(0, {'tooltip': (picking.name or '') + ": " + _('Late'), 'value': -1})
elif picking.backorder_id:
tristates.insert(0, {'tooltip': (picking.name or '') + ": " + _('Backorder exists'), 'value': 0})
else:
tristates.insert(0, {'tooltip': (picking.name or '') + ": " + _('OK'), 'value': 1})
res[picking_type_id] = json.dumps(tristates)
return res
def _get_picking_count(self, cr, uid, ids, field_names, arg, context=None):
obj = self.pool.get('stock.picking')
domains = {
'count_picking_draft': [('state', '=', 'draft')],
'count_picking_waiting': [('state', '=', 'confirmed')],
'count_picking_ready': [('state', 'in', ('assigned', 'partially_available'))],
'count_picking': [('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))],
'count_picking_late': [('min_date', '<', time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)), ('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))],
'count_picking_backorders': [('backorder_id', '!=', False), ('state', 'in', ('confirmed', 'assigned', 'waiting', 'partially_available'))],
}
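#count the pickings of each type per kanban indicator with read_group, then derive the late and backorder rates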
result = {}
for field in domains:
data = obj.read_group(cr, uid, domains[field] +
[('state', 'not in', ('done', 'cancel')), ('picking_type_id', 'in', ids)],
['picking_type_id'], ['picking_type_id'], context=context)
count = dict(map(lambda x: (x['picking_type_id'] and x['picking_type_id'][0], x['picking_type_id_count']), data))
for tid in ids:
result.setdefault(tid, {})[field] = count.get(tid, 0)
for tid in ids:
if result[tid]['count_picking']:
result[tid]['rate_picking_late'] = result[tid]['count_picking_late'] * 100 / result[tid]['count_picking']
result[tid]['rate_picking_backorders'] = result[tid]['count_picking_backorders'] * 100 / result[tid]['count_picking']
else:
result[tid]['rate_picking_late'] = 0
result[tid]['rate_picking_backorders'] = 0
return result
def onchange_picking_code(self, cr, uid, ids, picking_code=False):
if not picking_code:
return False
obj_data = self.pool.get('ir.model.data')
stock_loc = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_stock')
result = {
'default_location_src_id': stock_loc,
'default_location_dest_id': stock_loc,
}
if picking_code == 'incoming':
result['default_location_src_id'] = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_suppliers')
elif picking_code == 'outgoing':
result['default_location_dest_id'] = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_customers')
return {'value': result}
def _get_name(self, cr, uid, ids, field_names, arg, context=None):
return dict(self.name_get(cr, uid, ids, context=context))
def name_get(self, cr, uid, ids, context=None):
"""Overides orm name_get method to display 'Warehouse_name: PickingType_name' """
if context is None:
context = {}
if not isinstance(ids, list):
ids = [ids]
res = []
if not ids:
return res
for record in self.browse(cr, uid, ids, context=context):
name = record.name
if record.warehouse_id:
name = record.warehouse_id.name + ': ' +name
if context.get('special_shortened_wh_name'):
if record.warehouse_id:
name = record.warehouse_id.name
else:
name = _('Customer') + ' (' + record.name + ')'
res.append((record.id, name))
return res
def _default_warehouse(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context)
res = self.pool.get('stock.warehouse').search(cr, uid, [('company_id', '=', user.company_id.id)], limit=1, context=context)
return res and res[0] or False
_columns = {
'name': fields.char('Picking Type Name', translate=True, required=True),
'complete_name': fields.function(_get_name, type='char', string='Name'),
'color': fields.integer('Color'),
'sequence': fields.integer('Sequence', help="Used to order the 'All Operations' kanban view"),
'sequence_id': fields.many2one('ir.sequence', 'Reference Sequence', required=True),
'default_location_src_id': fields.many2one('stock.location', 'Default Source Location'),
'default_location_dest_id': fields.many2one('stock.location', 'Default Destination Location'),
'code': fields.selection([('incoming', 'Suppliers'), ('outgoing', 'Customers'), ('internal', 'Internal')], 'Type of Operation', required=True),
'return_picking_type_id': fields.many2one('stock.picking.type', 'Picking Type for Returns'),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', ondelete='cascade'),
'active': fields.boolean('Active'),
# Statistics for the kanban view
'last_done_picking': fields.function(_get_tristate_values,
type='char',
string='Last 10 Done Pickings'),
'count_picking_draft': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_ready': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_waiting': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_late': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_backorders': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'rate_picking_late': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'rate_picking_backorders': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
}
_defaults = {
'warehouse_id': _default_warehouse,
'active': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: