text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) |
---|---|---|---|---|---|---|
import rhwl_hr
import rhwl_holidays
import controllers
| vnsofthe/odoo-dev | addons/rhwl_hr/__init__.py | Python | agpl-3.0 | 55 | 0 |
#!/usr/bin/python
# Import the necessary package to process data in JSON format
try:
import json
except ImportError:
import simplejson as json
# Import the necessary methods from "twitter" library
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream
# from twython import Twython
from secret import (
TW_CONSUMER_KEY,
TW_CONSUMER_SECRET,
TW_ACCESS_TOKEN_KEY,
TW_ACCESS_TOKEN_SECRET
)
CONSUMER_KEY=TW_CONSUMER_KEY
CONSUMER_SECRET=TW_CONSUMER_SECRET
ACCESS_TOKEN=TW_ACCESS_TOKEN_KEY
ACCESS_SECRET=TW_ACCESS_TOKEN_SECRET
oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
# Initiate the connection to Twitter Streaming API
twitter_stream = TwitterStream(auth=oauth)
def listenTwitter(track, code):
"""
Listen to Twitter for mentions of the keywords given in 'track' and 'code'.
Uses the Twitter streaming API.
Params:
track: message to track in Tweets
code: unique code from CandyBot
Returns:
True or False decision status on candy dispensing
"""
# Listen for tweets with required track (@fun_robots) and #code
iterator = twitter_stream.statuses.filter(track=track)
while True:
for tweet in iterator:
tw_text = json.loads(json.dumps(tweet)).get('text')
# print(tw_text, "\n") ##for debug
if code in tw_text:
print("PLEASE, TAKE YOUR CANDY! :)))))))))")
return(True)
else:
break
return(False)
if __name__ == "__main__":
get_candy = listenTwitter(track='@fun_robots', code='4451')
print(get_candy)
| mnrozhkov/candybot | twitter_stream.py | Python | gpl-2.0 | 1,522 | 0.027595 |
from controllers.board_controller import BoardController
from models.move import Move
from models.board import Board
controller = BoardController()
controller.init_game()
| daniellima/othello | main.py | Python | mit | 204 | 0.014706 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 18 15:53:27 2016
@author: Michael Wu
Benchmark test:
Gives classification performances of:
Random forest (rf), MultitaskDNN (tf),
RobustMultitaskDNN (tf_robust),
Logistic regression (logreg),
Graph convolution (graphconv)
on datasets: muv, pcba, tox21, sider, toxcast
Gives regression performances of:
MultitaskDNN (tf_regression),
Graph convolution regression (graphconvreg)
on datasets: delaney, nci, kaggle, pdbbind
Time estimates are listed in the README file.
Total time of running a benchmark test (for one splitting function): 20h
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import sys
import os
import numpy as np
import shutil
import time
import deepchem as dc
import tensorflow as tf
import argparse
from keras import backend as K
import csv
from sklearn.ensemble import RandomForestClassifier
from muv.muv_datasets import load_muv
from nci.nci_datasets import load_nci
from pcba.pcba_datasets import load_pcba
from tox21.tox21_datasets import load_tox21
from toxcast.toxcast_datasets import load_toxcast
from sider.sider_datasets import load_sider
from kaggle.kaggle_datasets import load_kaggle
from delaney.delaney_datasets import load_delaney
from pdbbind.pdbbind_datasets import load_pdbbind_grid
def benchmark_loading_datasets(hyper_parameters,
dataset='tox21', model='tf', split=None,
reload=True, out_path='.'):
"""
Loading dataset for benchmark test
Parameters
----------
hyper_parameters: dict of list
hyper parameters including dropout rate, learning rate, etc.
dataset: string, optional (default='tox21')
choice of which dataset to use, should be: tox21, muv, sider,
toxcast, pcba, delaney, kaggle, nci
model: string, optional (default='tf')
choice of which model to use, should be: rf, tf, tf_robust, logreg,
graphconv, tf_regression, graphconvreg
split: string, optional (default=None)
choice of splitter function, None = using the default splitter
out_path: string, optional(default='.')
path of result file
"""
if dataset in ['muv', 'pcba', 'tox21', 'sider', 'toxcast']:
mode = 'classification'
elif dataset in ['kaggle', 'delaney', 'nci','pdbbind']:
mode = 'regression'
else:
raise ValueError('Dataset not supported')
#assigning featurizer
if model in ['graphconv', 'graphconvreg']:
featurizer = 'GraphConv'
n_features = 75
elif model in ['tf', 'tf_robust', 'logreg', 'rf', 'tf_regression']:
featurizer = 'ECFP'
n_features = 1024
else:
raise ValueError('Model not supported')
if dataset in ['kaggle']:
featurizer = None  # kaggle dataset uses its own features
if split in ['random', 'scaffold']:
return
else:
split = None  # kaggle dataset is already split
if not model in ['tf_regression']:
return
if dataset in ['pdbbind']:
featurizer = 'grid'  # pdbbind uses the grid featurizer
if split in ['scaffold', 'index']:
return #skip the scaffold and index splitting of pdbbind
if not model in ['tf_regression']:
return
if not split in [None, 'index','random','scaffold']:
raise ValueError('Splitter function not supported')
loading_functions = {'tox21': load_tox21, 'muv': load_muv,
'pcba': load_pcba, 'nci': load_nci,
'sider': load_sider, 'toxcast': load_toxcast,
'kaggle': load_kaggle, 'delaney': load_delaney,
'pdbbind': load_pdbbind_grid}
print('-------------------------------------')
print('Benchmark %s on dataset: %s' % (model, dataset))
print('-------------------------------------')
time_start = time.time()
#loading datasets
if split is not None:
print('Splitting function: %s' % split)
tasks, all_dataset, transformers = loading_functions[dataset](
featurizer=featurizer, split=split)
else:
tasks, all_dataset, transformers = loading_functions[dataset](
featurizer=featurizer)
train_dataset, valid_dataset, test_dataset = all_dataset
time_finish_loading = time.time()
#time_finish_loading-time_start is the time(s) used for dataset loading
if dataset in ['kaggle','pdbbind']:
n_features = train_dataset.get_data_shape()[0]
#kaggle dataset has customized features
#running model
for count, hp in enumerate(hyper_parameters[model]):
time_start_fitting = time.time()
if mode == 'classification':
train_score, valid_score = benchmark_classification(
train_dataset, valid_dataset, tasks,
transformers, hp, n_features,
model=model)
elif mode == 'regression':
train_score, valid_score = benchmark_regression(
train_dataset, valid_dataset, tasks,
transformers, hp, n_features,
model=model)
time_finish_fitting = time.time()
with open(os.path.join(out_path, 'results.csv'),'a') as f:
writer = csv.writer(f)
if mode == 'classification':
for i in train_score:
output_line = [count, dataset, str(split), mode, 'train', i,
train_score[i]['mean-roc_auc_score'], 'valid', i,
valid_score[i]['mean-roc_auc_score'],
'time_for_running',
time_finish_fitting-time_start_fitting]
writer.writerow(output_line)
else:
for i in train_score:
output_line = [count, dataset, str(split), mode, 'train', i,
train_score[i]['mean-pearson_r2_score'], 'valid', i,
valid_score[i]['mean-pearson_r2_score'],
'time_for_running',
time_finish_fitting-time_start_fitting]
writer.writerow(output_line)
def benchmark_classification(train_dataset, valid_dataset, tasks,
transformers, hyper_parameters,
n_features, model='tf', seed=123):
"""
Calculate performance of different models on the specific dataset & tasks
Parameters
----------
train_dataset: dataset struct
loaded dataset using load_* or splitter function
valid_dataset: dataset struct
loaded dataset using load_* or splitter function
tasks: list of string
list of targets(tasks, datasets)
transformers: BalancingTransformer struct
loaded properties of dataset from load_* function
hyper_parameters: dict
hyper parameters including dropout rate, learning rate, etc.
n_features: integer
number of features, or length of binary fingerprints
model: string, optional (default='tf')
choice of which model to use, should be: rf, tf, tf_robust, logreg,
graphconv
Returns
-------
train_scores : dict
predicting results(AUC) on training set
valid_scores : dict
predicting results(AUC) on valid set
"""
train_scores = {}
valid_scores = {}
# Initialize metrics
classification_metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
assert model in ['rf', 'tf', 'tf_robust', 'logreg', 'graphconv']
if model == 'tf':
# Loading hyper parameters
layer_sizes = hyper_parameters['layer_sizes']
weight_init_stddevs = hyper_parameters['weight_init_stddevs']
bias_init_consts = hyper_parameters['bias_init_consts']
dropouts = hyper_parameters['dropouts']
penalty = hyper_parameters['penalty']
penalty_type = hyper_parameters['penalty_type']
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
# Building tensorflow MultiTaskDNN model
model_tf = dc.models.TensorflowMultiTaskClassifier(len(tasks),
n_features, layer_sizes=layer_sizes,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=bias_init_consts, dropouts=dropouts, penalty=penalty,
penalty_type=penalty_type, batch_size=batch_size,
learning_rate=learning_rate, seed=seed)
print('-------------------------------------')
print('Start fitting by multitask DNN')
model_tf.fit(train_dataset, nb_epoch=nb_epoch)
# Evaluating tensorflow MultiTaskDNN model
train_scores['tf'] = model_tf.evaluate(
train_dataset, [classification_metric], transformers)
valid_scores['tf'] = model_tf.evaluate(
valid_dataset, [classification_metric], transformers)
if model == 'tf_robust':
# Loading hyper parameters
layer_sizes = hyper_parameters['layer_sizes']
weight_init_stddevs = hyper_parameters['weight_init_stddevs']
bias_init_consts = hyper_parameters['bias_init_consts']
dropouts = hyper_parameters['dropouts']
bypass_layer_sizes = hyper_parameters['bypass_layer_sizes']
bypass_weight_init_stddevs = hyper_parameters['bypass_weight_init_stddevs']
bypass_bias_init_consts = hyper_parameters['bypass_bias_init_consts']
bypass_dropouts = hyper_parameters['bypass_dropouts']
penalty = hyper_parameters['penalty']
penalty_type = hyper_parameters['penalty_type']
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
# Building tensorflow robust MultiTaskDNN model
model_tf_robust = dc.models.RobustMultitaskClassifier(len(tasks),
n_features, layer_sizes=layer_sizes,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=bias_init_consts, dropouts=dropouts,
bypass_layer_sizes=bypass_layer_sizes,
bypass_weight_init_stddevs=bypass_weight_init_stddevs,
bypass_bias_init_consts=bypass_bias_init_consts,
bypass_dropouts=bypass_dropouts, penalty=penalty,
penalty_type=penalty_type, batch_size=batch_size,
learning_rate=learning_rate, seed=seed)
print('--------------------------------------------')
print('Start fitting by robust multitask DNN')
model_tf_robust.fit(train_dataset, nb_epoch=nb_epoch)
# Evaluating tensorflow robust MultiTaskDNN model
train_scores['tf_robust'] = model_tf_robust.evaluate(
train_dataset, [classification_metric], transformers)
valid_scores['tf_robust'] = model_tf_robust.evaluate(
valid_dataset, [classification_metric], transformers)
if model == 'logreg':
# Loading hyper parameters
penalty = hyper_parameters['penalty']
penalty_type = hyper_parameters['penalty_type']
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
# Building tensorflow logistic regression model
model_logreg = dc.models.TensorflowLogisticRegression(len(tasks),
n_features, penalty=penalty, penalty_type=penalty_type,
batch_size=batch_size, learning_rate=learning_rate,
seed=seed)
print('-------------------------------------')
print('Start fitting by logistic regression')
model_logreg.fit(train_dataset, nb_epoch=nb_epoch)
# Evaluating tensorflow logistic regression model
train_scores['logreg'] = model_logreg.evaluate(
train_dataset, [classification_metric], transformers)
valid_scores['logreg'] = model_logreg.evaluate(
valid_dataset, [classification_metric], transformers)
if model == 'graphconv':
# Initialize model folder
# Loading hyper parameters
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_filters = hyper_parameters['n_filters']
n_fully_connected_nodes = hyper_parameters['n_fully_connected_nodes']
g = tf.Graph()
sess = tf.Session(graph=g)
K.set_session(sess)
# Building graph convolution model
with g.as_default():
tf.set_random_seed(seed)
graph_model = dc.nn.SequentialGraph(n_features)
graph_model.add(dc.nn.GraphConv(int(n_filters), activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphPool())
graph_model.add(dc.nn.GraphConv(int(n_filters), activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphPool())
# Gather Projection
graph_model.add(dc.nn.Dense(int(n_fully_connected_nodes),
activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphGather(batch_size, activation="tanh"))
with tf.Session() as sess:
model_graphconv = dc.models.MultitaskGraphClassifier(
sess, graph_model, len(tasks),
batch_size=batch_size, learning_rate=learning_rate,
optimizer_type="adam", beta1=.9, beta2=.999)
print('-------------------------------------')
print('Start fitting by graph convolution')
# Fit trained model
model_graphconv.fit(train_dataset, nb_epoch=nb_epoch)
# Evaluating graph convolution model
train_scores['graphconv'] = model_graphconv.evaluate(
train_dataset, [classification_metric], transformers)
valid_scores['graphconv'] = model_graphconv.evaluate(
valid_dataset, [classification_metric], transformers)
if model == 'rf':
# Initialize model folder
# Loading hyper parameters
n_estimators = hyper_parameters['n_estimators']
# Building scikit random forest model
def model_builder(model_dir_rf):
sklearn_model = RandomForestClassifier(
class_weight="balanced", n_estimators=n_estimators,n_jobs=-1)
return dc.models.sklearn_models.SklearnModel(sklearn_model, model_dir_rf)
model_rf = dc.models.multitask.SingletaskToMultitask(
tasks, model_builder)
print('-------------------------------------')
print('Start fitting by random forest')
model_rf.fit(train_dataset)
# Evaluating scikit random forest model
train_scores['rf'] = model_rf.evaluate(
train_dataset, [classification_metric], transformers)
valid_scores['rf'] = model_rf.evaluate(
valid_dataset, [classification_metric], transformers)
return train_scores, valid_scores
def benchmark_regression(train_dataset, valid_dataset, tasks,
transformers, hyper_parameters,
n_features, model='tf_regression', seed=123):
"""
Calculate performance of different models on the specific dataset & tasks
Parameters
----------
train_dataset: dataset struct
loaded dataset using load_* or splitter function
valid_dataset: dataset struct
loaded dataset using load_* or splitter function
tasks: list of string
list of targets(tasks, datasets)
transformers: BalancingTransformer struct
loaded properties of dataset from load_* function
hyper_parameters: dict
hyper parameters including dropout rate, learning rate, etc.
n_features: integer
number of features, or length of binary fingerprints
model: string, optional (default='tf_regression')
choice of which model to use, should be: tf_regression, graphconvreg
Returns
-------
train_scores: dict
predicting results(R2) on training set
valid_scores: dict
predicting results(R2) on valid set
"""
train_scores = {}
valid_scores = {}
# Initialize metrics
regression_metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, np.mean)
assert model in ['tf_regression', 'graphconvreg']
if model == 'tf_regression':
# Loading hyper parameters
layer_sizes = hyper_parameters['layer_sizes']
weight_init_stddevs = hyper_parameters['weight_init_stddevs']
bias_init_consts = hyper_parameters['bias_init_consts']
dropouts = hyper_parameters['dropouts']
penalty = hyper_parameters['penalty']
penalty_type = hyper_parameters['penalty_type']
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
# Building tensorflow MultiTaskDNN model
model_tf_regression = dc.models.TensorflowMultiTaskRegressor(len(tasks),
n_features, layer_sizes=layer_sizes,
weight_init_stddevs=weight_init_stddevs,
bias_init_consts=bias_init_consts, dropouts=dropouts, penalty=penalty,
penalty_type=penalty_type, batch_size=batch_size,
learning_rate=learning_rate, seed=seed)
print('-----------------------------------------')
print('Start fitting by multitask DNN regression')
model_tf_regression.fit(train_dataset, nb_epoch=nb_epoch)
# Evaluating tensorflow MultiTaskDNN model
train_scores['tf_regression'] = model_tf_regression.evaluate(
train_dataset, [regression_metric], transformers)
valid_scores['tf_regression'] = model_tf_regression.evaluate(
valid_dataset, [regression_metric], transformers)
if model == 'graphconvreg':
# Initialize model folder
# Loading hyper parameters
batch_size = hyper_parameters['batch_size']
nb_epoch = hyper_parameters['nb_epoch']
learning_rate = hyper_parameters['learning_rate']
n_filters = hyper_parameters['n_filters']
n_fully_connected_nodes = hyper_parameters['n_fully_connected_nodes']
g = tf.Graph()
sess = tf.Session(graph=g)
K.set_session(sess)
# Building graph convolution model
with g.as_default():
tf.set_random_seed(seed)
graph_model = dc.nn.SequentialGraph(n_features)
graph_model.add(dc.nn.GraphConv(int(n_filters), activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphPool())
graph_model.add(dc.nn.GraphConv(int(n_filters), activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphPool())
# Gather Projection
graph_model.add(dc.nn.Dense(int(n_fully_connected_nodes),
activation='relu'))
graph_model.add(dc.nn.BatchNormalization(epsilon=1e-5, mode=1))
graph_model.add(dc.nn.GraphGather(batch_size, activation="tanh"))
with tf.Session() as sess:
model_graphconvreg = dc.models.MultitaskGraphRegressor(
sess, graph_model, len(tasks),
batch_size=batch_size, learning_rate=learning_rate,
optimizer_type="adam", beta1=.9, beta2=.999)
print('-------------------------------------')
print('Start fitting by graph convolution')
# Fit trained model
model_graphconvreg.fit(train_dataset, nb_epoch=nb_epoch)
# Evaluating graph convolution model
train_scores['graphconvreg'] = model_graphconvreg.evaluate(
train_dataset, [regression_metric], transformers)
valid_scores['graphconvreg'] = model_graphconvreg.evaluate(
valid_dataset, [regression_metric], transformers)
return train_scores, valid_scores
if __name__ == '__main__':
# Global variables
np.random.seed(123)
parser = argparse.ArgumentParser(description='Deepchem benchmark: '+
'giving performances of different learning models on datasets')
parser.add_argument('-s', action='append', dest='splitter_args', default=[],
help='Choice of splitting function: index, random, scaffold')
parser.add_argument('-m', action='append', dest='model_args', default=[],
help='Choice of model: tf, tf_robust, logreg, graphconv, ' +
'tf_regression, graphconvreg')
parser.add_argument('-d', action='append', dest='dataset_args', default=[],
help='Choice of dataset: tox21, sider, muv, toxcast, pcba, ' +
'kaggle, delaney, nci, pdbbind')
args = parser.parse_args()
#Datasets and models used in the benchmark test
splitters = args.splitter_args
models = args.model_args
datasets = args.dataset_args
if len(splitters) == 0:
splitters = ['index', 'random', 'scaffold']
if len(models) == 0:
models = ['tf', 'tf_robust', 'logreg', 'graphconv',
'tf_regression', 'graphconvreg']
if len(datasets) == 0:
datasets = ['tox21', 'sider', 'muv', 'toxcast', 'pcba',
'delaney', 'nci', 'kaggle', 'pdbbind']
#input hyperparameters
#tf: dropouts, learning rate, layer_sizes, weight initial stddev,penalty,
# batch_size
hps = {}
hps['tf'] = [{'layer_sizes': [1500], 'weight_init_stddevs': [0.02],
'bias_init_consts': [1.], 'dropouts': [0.5], 'penalty': 0.1,
'penalty_type': 'l2', 'batch_size': 50, 'nb_epoch': 10,
'learning_rate': 0.001}]
hps['tf_robust'] = [{'layer_sizes': [1500], 'weight_init_stddevs': [0.02],
'bias_init_consts': [1.], 'dropouts': [0.5],
'bypass_layer_sizes': [200],
'bypass_weight_init_stddevs': [0.02],
'bypass_bias_init_consts': [1.],
'bypass_dropouts': [0.5], 'penalty': 0.1,
'penalty_type': 'l2', 'batch_size': 50,
'nb_epoch': 10, 'learning_rate': 0.0005}]
hps['logreg'] = [{'penalty': 0.1, 'penalty_type': 'l2', 'batch_size': 50,
'nb_epoch': 10, 'learning_rate': 0.005}]
hps['graphconv'] = [{'batch_size': 50, 'nb_epoch': 15,
'learning_rate': 0.0005, 'n_filters': 64,
'n_fully_connected_nodes': 128, 'seed': 123}]
hps['rf'] = [{'n_estimators': 500}]
hps['tf_regression'] = [{'layer_sizes': [1000, 1000],
'weight_init_stddevs': [0.02, 0.02],
'bias_init_consts': [1., 1.],
'dropouts': [0.25, 0.25],
'penalty': 0.0005, 'penalty_type': 'l2',
'batch_size': 128, 'nb_epoch': 50,
'learning_rate': 0.00008}]
hps['graphconvreg'] = [{'batch_size': 128, 'nb_epoch': 20,
'learning_rate': 0.0005, 'n_filters': 128,
'n_fully_connected_nodes': 256, 'seed': 123}]
for split in splitters:
for dataset in datasets:
if dataset in ['tox21', 'sider', 'muv', 'toxcast', 'pcba']:
for model in models:
if model in ['tf', 'tf_robust', 'logreg', 'graphconv']:
benchmark_loading_datasets(
hps, dataset=dataset, model=model, split=split, out_path='.')
else:
for model in models:
if model in ['tf_regression', 'graphconvreg']:
benchmark_loading_datasets(
hps, dataset=dataset, model=model, split=split, out_path='.')
| bowenliu16/deepchem | examples/benchmark.py | Python | gpl-3.0 | 23,002 | 0.012086 |
#gets COUNT tweets from user's timeline
import os
import tweepy
import cPickle as pickle
from config import Config
#constants
COUNT = 200
#tweepy configuration
keys = file('config.cfg')
cfg = Config(keys)
consumer_key= cfg.consumer_key
consumer_secret= cfg.consumer_secret
access_token= cfg.access_token
access_token_secret= cfg.access_token_secret
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
def get_tweets(username, isVerified):
if isVerified:
file_name = './verified/'+username+'/'+username+'_tweets.pickle'
else:
file_name = './unverified/'+username+'/'+username+'_tweets.pickle'
#save tweets
with open(file_name, 'wb') as f:
pickler = pickle.Pickler(f, -1)
tweet_count = 0
for tweet in tweepy.Cursor(api.user_timeline, screen_name=username).items(COUNT):
pickler.dump(tweet)
tweet_count = tweet_count +1
print tweet_count
if __name__ == "__main__":
for directory in os.listdir("verified/"):
if directory == ".DS_Store":
continue
print directory
get_tweets(directory, True)
for directory in os.listdir("unverified/"):
print directory
get_tweets(directory, False)
| dnr2/fml-twitter | tweets.py | Python | mit | 1,301 | 0.017679 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CSS Compare documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 6 06:29:25 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'CSS Compare'
copyright = '2015, Adrian L. Flanagan'
author = 'Adrian L. Flanagan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'CSSComparedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CSSCompare.tex', 'CSS Compare Documentation',
'Adrian L. Flanagan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'csscompare', 'CSS Compare Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CSSCompare', 'CSS Compare Documentation',
author, 'CSSCompare', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| alflanagan/css_compare | doc/conf.py | Python | agpl-3.0 | 9,525 | 0.005879 |
#!/usr/bin/env python
"""
Example that displays how to switch between Emacs and Vi input mode.
"""
from prompt_toolkit import prompt
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.key_binding.manager import KeyBindingManager
from prompt_toolkit.keys import Keys
from prompt_toolkit.styles import style_from_dict
from prompt_toolkit.token import Token
def run():
# Create a set of key bindings that have Vi mode enabled if
# ``vi_mode_enabled`` is True.
manager = KeyBindingManager.for_prompt()
# Add an additional key binding for toggling this flag.
@manager.registry.add_binding(Keys.F4)
def _(event):
" Toggle between Emacs and Vi mode. "
if event.cli.editing_mode == EditingMode.VI:
event.cli.editing_mode = EditingMode.EMACS
else:
event.cli.editing_mode = EditingMode.VI
# Add a bottom toolbar to display the status.
style = style_from_dict({
Token.Toolbar: 'reverse',
})
def get_bottom_toolbar_tokens(cli):
" Display the current input mode. "
text = 'Vi' if cli.editing_mode == EditingMode.VI else 'Emacs'
return [
(Token.Toolbar, ' [F4] %s ' % text)
]
prompt('> ', key_bindings_registry=manager.registry,
get_bottom_toolbar_tokens=get_bottom_toolbar_tokens,
style=style)
if __name__ == '__main__':
run()
| melund/python-prompt-toolkit | examples/switch-between-vi-emacs.py | Python | bsd-3-clause | 1,411 | 0.000709 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_taboo_contract
short_description: Manage taboo contracts on Cisco ACI fabrics (vz:BrCP)
description:
- Manage taboo contracts on Cisco ACI fabrics.
- More information from the internal APIC class I(vz:BrCP) at
U(https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Dag Wieers (@dagwieers)
version_added: '2.4'
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
options:
taboo_contract:
description:
- The name of the Taboo Contract.
required: yes
aliases: [ name ]
description:
description:
- The description for the Taboo Contract.
aliases: [ descr ]
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ tenant_name ]
scope:
description:
- The scope of a service contract.
- The APIC defaults new Taboo Contracts to C(context).
choices: [ application-profile, context, global, tenant ]
default: context
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
# FIXME: Add more, better examples
EXAMPLES = r'''
- aci_taboo_contract:
host: '{{ inventory_hostname }}'
username: '{{ username }}'
password: '{{ password }}'
taboo_contract: '{{ taboo_contract }}'
description: '{{ descr }}'
tenant: '{{ tenant }}'
'''
RETURN = r'''
#
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
taboo_contract=dict(type='str', required=False, aliases=['name']), # Not required for querying all contracts
tenant=dict(type='str', required=False, aliases=['tenant_name']), # Not required for querying all contracts
scope=dict(type='str', choices=['application-profile', 'context', 'global', 'tenant']),
description=dict(type='str', aliases=['descr']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6
protocol=dict(type='str', removed_in_version='2.6'), # Deprecated in v2.6
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['tenant', 'taboo_contract']],
['state', 'present', ['tenant', 'taboo_contract']],
],
)
taboo_contract = module.params['taboo_contract']
description = module.params['description']
scope = module.params['scope']
state = module.params['state']
tenant = module.params['tenant']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
module_object=tenant,
),
subclass_1=dict(
aci_class='vzTaboo',
aci_rn='taboo-{0}'.format(taboo_contract),
filter_target='eq(vzTaboo.name, "{0}")'.format(taboo_contract),
module_object=taboo_contract,
),
)
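    # Note (assumption based on the RNs above and the usual ACI DN layout): the target
    # object DN resolves to uni/tn-<tenant>/taboo-<taboo_contract>.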
aci.get_existing()
if state == 'present':
# Filter out module parameters with null values
aci.payload(
aci_class='vzTaboo',
class_config=dict(
name=taboo_contract,
descr=description, scope=scope,
),
)
# Generate config diff which will be used as POST request body
aci.get_diff(aci_class='vzTaboo')
# Submit changes if module not in check_mode and the proposed is different than existing
aci.post_config()
elif state == 'absent':
aci.delete_config()
module.exit_json(**aci.result)
if __name__ == "__main__":
main()
| wilvk/ansible | lib/ansible/modules/network/aci/aci_taboo_contract.py | Python | gpl-3.0 | 4,522 | 0.002211 |
import os
import boto3
import json
import urllib.parse
from elasticsearch import ElasticsearchException
from botocore.exceptions import ClientError
from common.elasticsearch_client import *
from common.constants import *
from common.logger_utility import *
class HandleBucketEvent:
def _fetchS3DetailsFromEvent(self, event):
try:
sns_message = json.loads(event["Records"][0]["Sns"]["Message"])
bucket = sns_message["Records"][0]["s3"]["bucket"]["name"]
key = urllib.parse.unquote_plus(sns_message["Records"][0]["s3"]["object"]["key"])
except Exception as e:
LoggerUtility.logError(str(e))
LoggerUtility.logError("Failed to process the event")
raise e
else:
LoggerUtility.logInfo("Bucket name: " + bucket)
LoggerUtility.logInfo("Object key: " + key)
return bucket, key
def _getS3HeadObject(self, bucket_name, object_key):
s3_client = boto3.client(Constants.S3_SERVICE_CLIENT)
try:
response = s3_client.head_object(Bucket=bucket_name, Key=object_key)
except ClientError as e:
LoggerUtility.logError(e)
LoggerUtility.logError('Error getting object {} from bucket {}. Make sure they exist, '
'your bucket is in the same region as this function and necessary permissions '
'have been granted.'.format(object_key, bucket_name))
raise e
else:
return response
def _createMetadataObject(self, s3_head_object, key, bucket_name=None):
metadata = {
Constants.KEY_REFERENCE: key,
Constants.CONTENT_LENGTH_REFERENCE: s3_head_object[Constants.CONTENT_LENGTH_REFERENCE],
Constants.SIZE_MIB_REFERENCE: s3_head_object[Constants.CONTENT_LENGTH_REFERENCE] / 1024**2,
Constants.LAST_MODIFIED_REFERENCE: s3_head_object[Constants.LAST_MODIFIED_REFERENCE].isoformat(),
Constants.CONTENT_TYPE_REFERENCE: s3_head_object[Constants.CONTENT_TYPE_REFERENCE],
Constants.ETAG_REFERENCE: s3_head_object[Constants.ETAG_REFERENCE],
Constants.DATASET_REFERENCE: key.split('/')[0],
Constants.ENVIRONMENT_NAME: os.environ["ENVIRONMENT_NAME"]
}
if key.split('/')[0] == "waze":
if 'type' in key:
type_value = key.split('/type=')[1].split('/')[0]
type_metadata = {
Constants.TRAFFIC_TYPE_REFERENCE: type_value
}
metadata.update(type_metadata)
if 'table' in key:
table_value = key.split('/table=')[1].split('/')[0]
table_metadata = {
Constants.TABLE_NAME_REFERENCE: table_value
}
metadata.update(table_metadata)
if 'state' in key:
state_value = key.split('/state=')[1].split('/')[0]
state_metadata = {
Constants.STATE_REFERENCE: state_value
}
metadata.update(state_metadata)
elif key.split('/')[0] == "cv":
data_provider_type_value = key.split('/')[1]
data_provider_type_metadata = {
Constants.DATA_PROVIDER_REFERENCE: data_provider_type_value
}
metadata.update(data_provider_type_metadata)
data_type_value = key.split('/')[2]
data_type_metadata = {
Constants.DATA_TYPE_REFERENCE: data_type_value
}
metadata.update(data_type_metadata)
LoggerUtility.logInfo("METADATA: "+str(metadata))
return metadata
def _pushMetadataToElasticsearch(self, bucket_name, metadata):
try:
elasticsearch_endpoint = os.environ[Constants.ES_ENDPOINT_ENV_VAR]
except KeyError as e:
LoggerUtility.logError(str(e) + " not configured")
raise e
es_client = ElasticsearchClient.getClient(elasticsearch_endpoint)
try:
es_client.index(index=Constants.DEFAULT_INDEX_ID, doc_type=bucket_name, body=json.dumps(metadata))
except ElasticsearchException as e:
LoggerUtility.logError(e)
LoggerUtility.logError("Could not index in Elasticsearch")
raise e
def _publishCustomMetricsToCloudwatch(self, bucket_name, metadata):
try:
if bucket_name == os.environ["SUBMISSIONS_BUCKET_NAME"] and metadata["Dataset"] == "waze":
cloudwatch_client = boto3.client('cloudwatch')
cloudwatch_client.put_metric_data(
Namespace='dot-sdc-waze-submissions-bucket-metric',
MetricData=[
{
'MetricName' : 'Counts by state and traffic type',
'Dimensions' : [
{
'Name' : 'State',
'Value': metadata["State"]
},
{
'Name' : 'TrafficType',
'Value': metadata["TrafficType"]
}
],
'Value' : 1,
'Unit': 'Count'
},
]
)
if metadata["ContentLength"] == 166:
cloudwatch_client = boto3.client('cloudwatch')
cloudwatch_client.put_metric_data(
Namespace='dot-sdc-waze-zero-byte-submissions-metric',
MetricData=[
{
'MetricName' : 'Zero Byte Submissions by State and traffic type',
'Dimensions' : [
{
'Name' : 'State',
'Value': metadata["State"]
},
{
'Name' : 'TrafficType',
'Value': metadata["TrafficType"]
}
],
'Value' : 1,
'Unit': 'Count'
},
]
)
elif bucket_name == os.environ["SUBMISSIONS_BUCKET_NAME"] and metadata["Dataset"] == "cv":
cloudwatch_client = boto3.client('cloudwatch')
cloudwatch_client.put_metric_data(
Namespace='dot-sdc-cv-submissions-bucket-metric',
MetricData=[
{
'MetricName' : 'Counts by provider and datatype',
'Dimensions' : [
{
'Name' : 'DataProvider',
'Value': metadata["DataProvider"]
},
{
'Name' : 'DataType',
'Value': metadata["DataType"]
}
],
'Value' : 10,
'Unit': 'Count'
},
]
)
elif bucket_name == os.environ["CURATED_BUCKET_NAME"] and metadata["Dataset"] != "manifest":
cloudwatch_client = boto3.client('cloudwatch')
cloudwatch_client.put_metric_data(
Namespace='dot-sdc-waze-curated-bucket-metric',
MetricData=[
{
'MetricName' : 'Counts by state and table name',
'Dimensions' : [
{
'Name' : 'State',
'Value': metadata["State"]
},
{
'Name' : 'TableName',
'Value': metadata["TableName"]
}
],
'Value' : 1,
'Unit': 'Count'
},
]
)
except Exception as e:
LoggerUtility.logError(e)
LoggerUtility.logError("Failed to publish custom cloudwatch metrics")
raise e
def handleBucketEvent(self, event, context):
LoggerUtility.setLevel()
bucket_name, object_key = self._fetchS3DetailsFromEvent(event)
s3_head_object = self._getS3HeadObject(bucket_name, object_key)
metadata = self._createMetadataObject(s3_head_object, object_key)
self._pushMetadataToElasticsearch(bucket_name, metadata)
self._publishCustomMetricsToCloudwatch(bucket_name, metadata)
| VolpeUSDOT/CV-PEP | lambda/cvp-qc/bucket_handler_lambda/bucket_event_lambda_handler.py | Python | mit | 9,274 | 0.003882 |
# coding=utf-8
# Author: Dustyn Gibson <miigotu@gmail.com>
#
# This file is part of Medusa.
#
# Medusa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Medusa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Medusa. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
import traceback
import validators
from requests.compat import urljoin
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickrage.helper.common import convert_size, try_int
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class ThePirateBayProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
"""ThePirateBay Torrent provider"""
def __init__(self):
# Provider Init
TorrentProvider.__init__(self, 'ThePirateBay')
# Credentials
self.public = True
# URLs
self.url = 'https://thepiratebay.org'
self.urls = {
'rss': urljoin(self.url, 'tv/latest'),
'search': urljoin(self.url, 's/'), # Needs trailing /
}
self.custom_url = None
# Proper Strings
# Miscellaneous Options
self.confirmed = True
# Torrent Stats
self.minseed = None
self.minleech = None
# Cache
self.cache = tvcache.TVCache(self, min_time=20) # only poll ThePirateBay every 20 minutes max
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals, too-many-branches, too-many-statements
results = []
"""
205 = SD, 208 = HD, 200 = All Videos
https://pirateproxy.pl/s/?q=Game of Thrones&type=search&orderby=7&page=0&category=200
"""
search_params = {
'q': '',
'type': 'search',
'orderby': 7,
'page': 0,
'category': 200
}
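# Note: the defaults above request category 200 ("All Videos" per the docstring);
# orderby=7 mirrors the sample URL above (assumed to be the seeder-count sort).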
# Units
units = ['B', 'KIB', 'MIB', 'GIB', 'TIB', 'PIB']
def process_column_header(th):
result = ''
if th.a:
result = th.a.get_text(strip=True)
if not result:
result = th.get_text(strip=True)
return result
for mode in search_strings:
items = []
logger.log('Search mode: {0}'.format(mode), logger.DEBUG)
for search_string in search_strings[mode]:
search_url = self.urls['search'] if mode != 'RSS' else self.urls['rss']
if self.custom_url:
if not validators.url(self.custom_url):
logger.log('Invalid custom url: {0}'.format(self.custom_url), logger.WARNING)
return results
search_url = urljoin(self.custom_url, search_url.split(self.url)[1])
if mode != 'RSS':
search_params['q'] = search_string
logger.log('Search string: {search}'.format
(search=search_string), logger.DEBUG)
data = self.get_url(search_url, params=search_params, returns='text')
else:
data = self.get_url(search_url, returns='text')
if not data:
logger.log('No data returned from provider', logger.DEBUG)
continue
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('table', id='searchResult')
torrent_rows = torrent_table('tr') if torrent_table else []
# Continue only if at least one release is found
if len(torrent_rows) < 2:
logger.log('Data returned from provider does not contain any torrents', logger.DEBUG)
continue
labels = [process_column_header(label) for label in torrent_rows[0]('th')]
# Skip column headers
for result in torrent_rows[1:]:
try:
cells = result('td')
title = result.find(class_='detName')
title = title.get_text(strip=True) if title else None
download_url = result.find(title='Download this torrent using magnet')
download_url = download_url['href'] + self._custom_trackers if download_url else None
if download_url and 'magnet:?' not in download_url:
logger.log('Invalid ThePirateBay proxy please try another one', logger.DEBUG)
continue
if not all([title, download_url]):
continue
seeders = try_int(cells[labels.index('SE')].get_text(strip=True))
leechers = try_int(cells[labels.index('LE')].get_text(strip=True))
# Filter unseeded torrent
if seeders < min(self.minseed, 1):
if mode != 'RSS':
logger.log("Discarding torrent because it doesn't meet the "
"minimum seeders: {0}. Seeders: {1}".format
(title, seeders), logger.DEBUG)
continue
# Accept Torrent only from Good People for every Episode Search
if self.confirmed and not result.find(alt=re.compile(r'VIP|Trusted')):
if mode != 'RSS':
logger.log("Found result {0} but that doesn't seem like a trusted"
" result so I'm ignoring it".format(title), logger.DEBUG)
continue
# Convert size after all possible skip scenarios
torrent_size = cells[labels.index('Name')].find(class_='detDesc').get_text(strip=True).split(', ')[1]
torrent_size = re.sub(r'Size ([\d.]+).+([KMGT]iB)', r'\1 \2', torrent_size)
size = convert_size(torrent_size, units=units) or -1
item = {
'title': title,
'link': download_url,
'size': size,
'seeders': seeders,
'leechers': leechers,
'pubdate': None,
'hash': None,
}
if mode != 'RSS':
logger.log('Found result: {0} with {1} seeders and {2} leechers'.format
(title, seeders, leechers), logger.DEBUG)
items.append(item)
except (AttributeError, TypeError, KeyError, ValueError, IndexError):
logger.log('Failed parsing provider. Traceback: {0!r}'.format
(traceback.format_exc()), logger.ERROR)
continue
results += items
return results
provider = ThePirateBayProvider()
| Thraxis/pymedusa | sickbeard/providers/thepiratebay.py | Python | gpl-3.0 | 7,881 | 0.00368 |
from cls_method_decorators import debugmethods
class metatype(type):
def __new__(cls, cls_name, bases, cls_dict):
clsobj = super().__new__(cls, cls_name, bases, cls_dict)
clsobj = debugmethods(clsobj)
return clsobj
class Animal(metaclass = metatype):
def __init__(self, name):
self.name = name
def greet(self):
print("hi, my name is ", self.name)
class Dog(Animal):
def greet(self):
print("Woof!, my name is", self.name)
class Cat(Animal):
def greet(self):
print("meow!, my name is", self.name)
| kmad1729/python_notes | metaprogramming/practice_code/metatype_demo.py | Python | unlicense | 583 | 0.010292 |
'''
Button Behavior
===============
The :class:`~kivy.uix.behaviors.button.ButtonBehavior`
`mixin <https://en.wikipedia.org/wiki/Mixin>`_ class provides
:class:`~kivy.uix.button.Button` behavior. You can combine this class with
other widgets, such as an :class:`~kivy.uix.image.Image`, to provide
alternative buttons that preserve Kivy button behavior.
For an overview of behaviors, please refer to the :mod:`~kivy.uix.behaviors`
documentation.
Example
-------
The following example adds button behavior to an image to make a checkbox that
behaves like a button::
from kivy.app import App
from kivy.uix.image import Image
from kivy.uix.behaviors import ButtonBehavior
class MyButton(ButtonBehavior, Image):
def __init__(self, **kwargs):
super(MyButton, self).__init__(**kwargs)
self.source = 'atlas://data/images/defaulttheme/checkbox_off'
def on_press(self):
self.source = 'atlas://data/images/defaulttheme/checkbox_on'
def on_release(self):
self.source = 'atlas://data/images/defaulttheme/checkbox_off'
class SampleApp(App):
def build(self):
return MyButton()
SampleApp().run()
See :class:`~kivy.uix.behaviors.ButtonBehavior` for details.
'''
__all__ = ('ButtonBehavior', )
from kivy.clock import Clock
from kivy.config import Config
from kivy.properties import OptionProperty, ObjectProperty, \
BooleanProperty, NumericProperty
from time import time
class ButtonBehavior(object):
'''
This `mixin <https://en.wikipedia.org/wiki/Mixin>`_ class provides
:class:`~kivy.uix.button.Button` behavior. Please see the
:mod:`button behaviors module <kivy.uix.behaviors.button>` documentation
for more information.
:Events:
`on_press`
Fired when the button is pressed.
`on_release`
Fired when the button is released (i.e. the touch/click that
pressed the button goes away).
'''
state = OptionProperty('normal', options=('normal', 'down'))
'''The state of the button, must be one of 'normal' or 'down'.
The state is 'down' only when the button is currently touched/clicked,
otherwise it's 'normal'.
:attr:`state` is an :class:`~kivy.properties.OptionProperty` and defaults
to 'normal'.
'''
last_touch = ObjectProperty(None)
'''Contains the last relevant touch received by the Button. This can
be used in `on_press` or `on_release` in order to know which touch
dispatched the event.
.. versionadded:: 1.8.0
:attr:`last_touch` is a :class:`~kivy.properties.ObjectProperty` and
defaults to `None`.
'''
min_state_time = NumericProperty(0)
'''The minimum period of time which the widget must remain in the
`'down'` state.
.. versionadded:: 1.9.1
:attr:`min_state_time` is a float and defaults to 0.035. This value is
taken from :class:`~kivy.config.Config`.
'''
always_release = BooleanProperty(False)
'''This determines whether or not the widget fires an `on_release` event if
the touch_up is outside the widget.
.. versionadded:: 1.9.0
.. versionchanged:: 1.10.0
The default value is now False.
:attr:`always_release` is a :class:`~kivy.properties.BooleanProperty` and
defaults to `False`.
'''
def __init__(self, **kwargs):
self.register_event_type('on_press')
self.register_event_type('on_release')
if 'min_state_time' not in kwargs:
self.min_state_time = float(Config.get('graphics',
'min_state_time'))
super(ButtonBehavior, self).__init__(**kwargs)
self.__state_event = None
self.__touch_time = None
self.fbind('state', self.cancel_event)
def _do_press(self):
self.state = 'down'
def _do_release(self, *args):
self.state = 'normal'
def cancel_event(self, *args):
if self.__state_event:
self.__state_event.cancel()
self.__state_event = None
def on_touch_down(self, touch):
if super(ButtonBehavior, self).on_touch_down(touch):
return True
if touch.is_mouse_scrolling:
return False
if not self.collide_point(touch.x, touch.y):
return False
if self in touch.ud:
return False
touch.grab(self)
touch.ud[self] = True
self.last_touch = touch
self.__touch_time = time()
self._do_press()
self.dispatch('on_press')
return True
def on_touch_move(self, touch):
if touch.grab_current is self:
return True
if super(ButtonBehavior, self).on_touch_move(touch):
return True
return self in touch.ud
def on_touch_up(self, touch):
if touch.grab_current is not self:
return super(ButtonBehavior, self).on_touch_up(touch)
assert(self in touch.ud)
touch.ungrab(self)
self.last_touch = touch
if (not self.always_release and
not self.collide_point(*touch.pos)):
self._do_release()
return
touchtime = time() - self.__touch_time
if touchtime < self.min_state_time:
self.__state_event = Clock.schedule_once(
self._do_release, self.min_state_time - touchtime)
else:
self._do_release()
self.dispatch('on_release')
return True
def on_press(self):
pass
def on_release(self):
pass
def trigger_action(self, duration=0.1):
'''Trigger whatever action(s) have been bound to the button by calling
both the on_press and on_release callbacks.
This simulates a quick button press without using any touch events.
Duration is the length of the press in seconds. Pass 0 if you want
the action to happen instantly.
.. versionadded:: 1.8.0
'''
self._do_press()
self.dispatch('on_press')
def trigger_release(dt):
self._do_release()
self.dispatch('on_release')
if not duration:
trigger_release(0)
else:
Clock.schedule_once(trigger_release, duration)
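# Illustrative usage sketch (hypothetical subclass, mirroring the module docstring above):
#   class IconButton(ButtonBehavior, Image):
#       def on_press(self):
#           print('pressed')
#   IconButton().trigger_action(duration=0)  # fires on_press/on_release without a touch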
| inclement/kivy | kivy/uix/behaviors/button.py | Python | mit | 6,290 | 0 |
# Copyright (c) 2001-2005 Twisted Matrix Laboratories.
# See LICENSE for details.
import sys, os, sha
from twisted.trial import unittest
from twisted.words.protocols.jabber.component import ConnectComponentAuthenticator, ComponentInitiatingInitializer
from twisted.words.protocols import jabber
from twisted.words.protocols.jabber import xmlstream
class DummyTransport:
def __init__(self, list):
self.list = list
def write(self, bytes):
self.list.append(bytes)
class ComponentInitiatingInitializerTest(unittest.TestCase):
def setUp(self):
self.output = []
self.authenticator = xmlstream.Authenticator()
self.authenticator.password = 'secret'
self.xmlstream = xmlstream.XmlStream(self.authenticator)
self.xmlstream.namespace = 'test:component'
self.xmlstream.send = self.output.append
self.xmlstream.connectionMade()
self.xmlstream.dataReceived(
"<stream:stream xmlns='test:component' "
"xmlns:stream='http://etherx.jabber.org/streams' "
"from='example.com' id='12345' version='1.0'>")
self.init = ComponentInitiatingInitializer(self.xmlstream)
def testHandshake(self):
"""
Test basic operations of component handshake.
"""
d = self.init.initialize()
# the initializer should have sent the handshake request
handshake = self.output[0]
self.assertEquals('handshake', handshake.name)
self.assertEquals('test:component', handshake.uri)
self.assertEquals(sha.new("%s%s" % ('12345', 'secret')).hexdigest(),
unicode(handshake))
# successful authentication
handshake.children = []
self.xmlstream.dataReceived(handshake.toXml())
return d
class ComponentAuthTest(unittest.TestCase):
def authPassed(self, stream):
self.authComplete = True
def testAuth(self):
self.authComplete = False
outlist = []
ca = ConnectComponentAuthenticator("cjid", "secret")
xs = xmlstream.XmlStream(ca)
xs.transport = DummyTransport(outlist)
xs.addObserver(xmlstream.STREAM_AUTHD_EVENT,
self.authPassed)
# Go...
xs.connectionMade()
xs.dataReceived("<stream:stream xmlns='jabber:component:accept' xmlns:stream='http://etherx.jabber.org/streams' from='cjid' id='12345'>")
# Calculate what we expect the handshake value to be
hv = sha.new("%s%s" % ("12345", "secret")).hexdigest()
self.assertEquals(outlist[1], "<handshake>%s</handshake>" % (hv))
xs.dataReceived("<handshake/>")
self.assertEquals(self.authComplete, True)
class JabberServiceHarness(jabber.component.Service):
def __init__(self):
self.componentConnectedFlag = False
self.componentDisconnectedFlag = False
self.transportConnectedFlag = False
def componentConnected(self, xmlstream):
self.componentConnectedFlag = True
def componentDisconnected(self):
self.componentDisconnectedFlag = True
def transportConnected(self, xmlstream):
self.transportConnectedFlag = True
class TestJabberServiceManager(unittest.TestCase):
def testSM(self):
        # Setup service manager and test harness
sm = jabber.component.ServiceManager("foo", "password")
svc = JabberServiceHarness()
svc.setServiceParent(sm)
# Create a write list
wlist = []
# Setup a XmlStream
xs = sm.getFactory().buildProtocol(None)
xs.transport = self
xs.transport.write = wlist.append
# Indicate that it's connected
xs.connectionMade()
# Ensure the test service harness got notified
self.assertEquals(True, svc.transportConnectedFlag)
# Jump ahead and pretend like the stream got auth'd
xs.dispatch(xs, xmlstream.STREAM_AUTHD_EVENT)
# Ensure the test service harness got notified
self.assertEquals(True, svc.componentConnectedFlag)
# Pretend to drop the connection
xs.connectionLost(None)
# Ensure the test service harness got notified
self.assertEquals(True, svc.componentDisconnectedFlag)
|
santisiri/popego
|
envs/ALPHA-POPEGO/lib/python2.5/site-packages/twisted/words/test/test_jabbercomponent.py
|
Python
|
bsd-3-clause
| 4,313 | 0.002319 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_lead_report
import crm_phonecall_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
jaggu303619/asylum
|
openerp/addons/crm/report/__init__.py
|
Python
|
agpl-3.0
| 1,107 | 0.00271 |
# Copyright (c) 2014, FTW Forschungszentrum Telekommunikation Wien
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of FTW nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL FTW
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
from collections import defaultdict
import itertools
import logging
import re
import linecache
import netaddr
import GeoIP
geodb_ = GeoIP.open("data/GeoIPASNum.dat", GeoIP.GEOIP_MEMORY_CACHE)
def getTopDomainSuffix(domains, levels=2):
d=defaultdict(int)
for domain in domains:
sdomain=domain.split('.')
if len(sdomain)<levels:
suffix=domain
else:
suffix='.'.join(sdomain[-levels:])
d[suffix]+=1
domainCounts=d.items()
_,counts=zip(*domainCounts)
domainCounts.sort(key=lambda x:x[1])
return (domainCounts[-1][0], domainCounts[-1][1]/float(sum(counts)))
def splitOnCondition(seq, condition):
"""
    Splits a list of tuples (<x>,<y>) in two lists, depending on a condition
    applied to each tuple. Returns the <x> elements as tuple of two lists.
"""
l1,l2 = itertools.tee((condition(item),item) for item in seq)
return ([i[0] for p, i in l1 if p], [i[0] for p, i in l2 if not p])
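# Illustrative example (added for clarity, not in the original module): the
# condition receives each whole tuple; the sample data is made up.
#
#     evens, odds = splitOnCondition([('a', 2), ('b', 3), ('c', 4)],
#                                    lambda item: item[1] % 2 == 0)
#     # evens == ['a', 'c'], odds == ['b']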
def minmax(data):
"""
Computes the minimum and maximum values in one-pass using only
1.5*len(data) comparisons
"""
it = iter(data)
try:
lo = hi = next(it)
except StopIteration:
raise ValueError('minmax() arg is an empty sequence')
for x, y in itertools.izip_longest(it, it, fillvalue=lo):
if x > y:
x, y = y, x
if x < lo:
lo = x
if y > hi:
hi = y
return lo, hi
def dnameEquality(d1, d2):
"""
    returns an array of bools of length min(domain-levels(d1),
    domain-levels(d2)). The i-th element of the array is True if i-ld(d1) ==
i-ld(d2), else it's False. d1 and d2 are aligned on the top level domain.
"""
sd1 = d1.split('.')
sd2 = d2.split('.')
if not sd1 or not sd2:
raise Exception('invalid domain names: '+d1+' '+d2)
l_d1 = len(sd1)
l_d2 = len(sd2)
if d1 == d2:
return [True]*l_d1
else:
min_l = min(l_d1, l_d2)
matchmap = [False] * min_l
for i in range(min_l):
print sd1[-1-i], sd2[-1-i]
if sd1[-1-i] == sd2[-1-i]:
matchmap[-1-i] = True
return matchmap
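# Illustrative example (added, not in the original module): domains are
# aligned on the top level domain, so only the overlapping levels are
# compared (the function also prints each compared pair).
#
#     dnameEquality('mail.example.com', 'www.example.com')
#     # -> [False, True, True]   (mail != www, example == example, com == com)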
def getAsnAndOrganisation(ip):
try:
answer = geodb_.org_by_addr(str(ip))
except GeoIP.error:
return (None, None)
else:
if answer:
if answer.startswith('AS'):
try:
first_space = answer.index(' ')
except ValueError:
asn = int(answer[2:])
return (asn, None)
else:
asn = int(answer[2:first_space])
org = answer[first_space+1:]
return (asn, org)
else:
return (None, answer)
else:
return (None, None)
class SetGrab:
"""
Return the object in a set that matches <value>.
To be used as follows:
s=set(['foobar', 'foo', 'bar'])
g=SetGrab('foobar')
if g in s:
return g.actual_value
http://python.6.n6.nabble.com/Get-item-from-set-td1530758.html
"""
def __init__(self, value):
self.search_value = value
def __hash__(self):
return hash(self.search_value)
def __eq__(self, other):
if self.search_value == other:
self.actual_value = other
return True
return False
def punyDecodeDomain(dname):
if 'xn--' in dname:
try:
return dname.decode('idna')
except UnicodeError:
"""
there's a python bug that causes the german 'scharfes s' not to be
decoded correctly
"""
logging.warn(u'IDNA decoding failed for '+unicode(dname))
return dname
else:
return dname
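# Illustrative example (added, not in the original module): an IDNA-encoded
# label is decoded to its unicode form, plain ASCII names pass through.
#
#     punyDecodeDomain('xn--mnchen-3ya.de')   # -> u'm\xfcnchen.de' (muenchen.de)
#     punyDecodeDomain('example.com')         # -> 'example.com'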
def memory_usage():
"""Memory usage of the current process in kilobytes."""
status = None
result = {'peak': 0, 'rss': 0}
try:
# This will only work on systems with a /proc file system
# (like Linux).
status = open('/proc/self/status')
for line in status:
parts = line.split()
key = parts[0][2:-1].lower()
if key in result:
result[key] = int(parts[1])
finally:
if status is not None:
status.close()
return result
def filterSingles(data):
"""
"""
from collections import defaultdict
domainToIPs = defaultdict(set)
IPToDomains = defaultdict(set)
for d in data:
domainToIPs[d[1]].add(d[2])
IPToDomains[d[2]].add(d[1])
remainingDomains=set()
for domain, IPs in domainToIPs.iteritems():
if len(IPs)==1:
ip=IPs.pop()
if len(IPToDomains[ip])==1:
continue
remainingDomains.add(domain)
numRemaining=len(set([d[1] for d in data if d[1] in remainingDomains]))
print numRemaining, '/',len(domainToIPs),' domains left after removing singles'
filteredData=[]
for d in data:
if d[1] in remainingDomains:
filteredData.append(d)
return filteredData
def filterSuspiciousData(data, minNumDomains=2, minNumIPs=2):
"""
"""
from collections import defaultdict
domainToIPs = defaultdict(set)
IPToDomains = defaultdict(set)
for d in data:
domainToIPs[d[1]].add(d[2])
IPToDomains[d[2]].add(d[1])
remainingDomains=set()
for domain, IPs in domainToIPs.iteritems():
if len(IPs)<minNumIPs:
continue
for ip in IPs:
"""
find the number of domains to which <ip> maps
"""
numDomains = len(IPToDomains[ip])
if numDomains>=minNumDomains:
"""
This is an interesting domain-IP mapping, let's keep this domain
"""
remainingDomains.add(domain)
break
numRemaining=len(set([d[1] for d in data if d[1] in remainingDomains]))
print numRemaining, '/',len(domainToIPs),' domains left'
filteredData=[]
for d in data:
if d[1] in remainingDomains:
filteredData.append(d)
return filteredData
def readSuspiciousFile(filename, lineNumStart=1, lineNumStop=0,
omitNewIPs=False, filterExp=[], removeSingles=True):
"""
expected format:
timestamp fqdn IP None score <number of IPBlocks in which this fqdn
appears> <number of fqdns in the IPBlock which contains this IP>
"""
data=[]
lineNum=lineNumStart
if filterExp:
filterHits=dict.fromkeys([regex.pattern for regex in filterExp], 0)
else:
filterHits=dict()
print 'reading',filename,'from line',lineNumStart,'to line',lineNumStop
linecache.updatecache(filename)
while True:
line=linecache.getline(filename, lineNum)
if not line:
# end of file
break
if lineNum>=lineNumStop:
break
lineNum+=1
sl=line.split()
try:
if omitNewIPs and float(sl[4])==-1:
continue
dStr=sl[1]
if dStr=='invalid_hostname':
continue
# if any(regex.match(dStr) for regex in filterExp):
# #print 'whitelisted',dStr
# filterHits+=1
# continue
for regex in filterExp:
if regex.match(dStr):
filterHits[regex.pattern]+=1
break
else:
dUnicode=unicode(dStr, 'utf-8')
if dStr==dUnicode:
data.append((int(sl[0]), dStr, str(netaddr.IPAddress(sl[2])),
sl[3], float(sl[4]), int(sl[5])))
except (IndexError, ValueError):
# may happen when reading incomplete files - ignore
pass
#print filterHits, 'filtered'
if filterHits:
print 'Filter hits:'
for pattern, hits in filterHits.iteritems():
print pattern,':',hits
if removeSingles:
cntPrevData=len(data)
uniqueFqdns=set([fqdn for _,fqdn,_,_,_,_ in data])
cntPrevUniqueFqdns=len(uniqueFqdns)
#data=filterSuspiciousData(data, 1, 2)
data=filterSingles(data)
uniqueFqdns=set([fqdn for _,fqdn,_,_,_,_ in data])
print 'removed',cntPrevData-len(data),'/',cntPrevData,'data records'
print 'removed',cntPrevUniqueFqdns-len(uniqueFqdns),'/',cntPrevUniqueFqdns,'single FQDNs'
return data
DOMAIN_COLOR='red'
IP_COLOR='blue'
CLIENT_IP_COLOR='green'
AS_COLOR='yellow'
def buildMappingGraph(data):
import networkx as nx
g=nx.Graph()
if not data:
return g
_,domains,ips,clientIPs,_,_=zip(*data)
for domain in domains:
g.add_node(domain, color=DOMAIN_COLOR)
for ip in ips:
g.add_node(ip, color=IP_COLOR)
for cip in clientIPs:
if cip!='None':
g.add_node(cip, color=CLIENT_IP_COLOR)
for d in data:
g.add_edge(d[1], d[2], {'score':d[4]})
if d[3]!='None':
g.add_edge(d[1], d[3])
return g
#def compressGraph(g, clusteringThreshold=0.2, maxClustersPerComponent=1):
# import networkx as nx
# import DomainCluster as domclust
# subgraphs = nx.connected_component_subgraphs(g)
# numCompressed=0
# dispersions=[]
#
# for sg in subgraphs:
# domains=[]
# verbose=False
# for node in sg.nodes_iter(data=True):
# if node[1]['color']==DOMAIN_COLOR:
# domains.append(domclust.DomainStr(node[0]))
#
# #cl=domclust.domainCluster(domains, clusteringThreshold)
# cl=domclust.domainClusterDBSCAN(domains, clusteringThreshold)
# if verbose:
# print cl
# if len(cl)<=maxClustersPerComponent:
# for ck, cv in cl.iteritems():
# g.remove_nodes_from(cv.domains)
# g.add_node(ck, color=DOMAIN_COLOR)
# #FIXME: add external edges
# numCompressed+=1
# dispersions.append(domclust.clusterDispersion(domains))
#
# print 'compressed',numCompressed,'out of',len(subgraphs),'subgraphs'
# return dispersions
nodeDefaultSize_=10.0
def createASHierarchy(g, minIPsPerAS=2):
"""
    inserts hierarchical AS information in the graph. For each subgraph, the
    number of ASes is evaluated. For each AS with at least <minIPsPerAS> IPs,
    an AS supernode is created that contains these IPs.
Modifies <g>, does not return anything!
"""
from collections import defaultdict
import numpy as np
import networkx as nx
subgraphs = nx.connected_component_subgraphs(g)
for sgIndex, sg in enumerate(subgraphs):
ASesPerSubgraph=defaultdict(list)
for node in sg.nodes_iter(data=True):
if 'color' in node[1] and node[1]['color']==IP_COLOR:
ip=node[0]
asNum, asOrg =getAsnAndOrganisation(ip)
if asOrg:
try:
ASesPerSubgraph[unicode(asOrg, 'utf-8')].append(ip)
except UnicodeDecodeError:
"""
this happens for some strange AS names, and causes
networkx's GEXF exporter to crash. fall back to using
the AS number.
"""
ASesPerSubgraph[str(asNum)].append(ip)
else:
ASesPerSubgraph['Unknown AS'].append(ip)
for ASIndex, (asOrg,ips) in enumerate(ASesPerSubgraph.iteritems()):
if len(ips)<minIPsPerAS:
"""
Only one IP from this AS, don't collapse
"""
continue
else:
newNodeId = 'SG'+str(sgIndex)+'_AS'+str(ASIndex)
g.add_node(newNodeId, label=asOrg, color=AS_COLOR)
"""
encode the color and size again in 'viz' format, else gephi
cannot visualize it when exporting to GEXF
"""
g.add_node(newNodeId,
{'viz':{
'color':{'r':'255','g':'255','b':'0'},
'size':str(nodeDefaultSize_+2*np.log(len(ips)))
}})
for ip in ips:
g.add_node(ip, pid=newNodeId)
def getTimeRangeInFile(fname):
"""
returns the time range in the suspicious file as a tuple (firstTimestamp,
lastTimestamp)
"""
from os.path import getsize
def _getTimestamp(line):
spl=line.split()
return int(spl[0])
linecache.updatecache(fname)
with open(fname, 'r') as fh:
firstLine = next(fh).decode()
try:
first=_getTimestamp(firstLine)
except IndexError:
return (None, None)
last=None
numBytesInFile = getsize(fname)
seekTo=numBytesInFile
while not last:
# seek back 1024 bytes from the end of the file, hoping that we
# would arrive somewhere before the start of the last line
seekTo-=1024
if seekTo < 0:
# cannot seek over the start of the file
seekTo = 0
# seek relative to start of file
fh.seek(seekTo)
lines = fh.readlines()
lastLine = lines[-1].decode()
try:
last=_getTimestamp(lastLine)
except IndexError:
if seekTo==0:
#nothing else we could do, give up
return (None, None)
return (first, last)
return (None, None)
def seekToTimestamp(fn, timestamp, matchFirstOccurrence=True):
def _getTimestamp(line):
spl=line.split()
return int(spl[0])
    def _fileLen(fname):
        """
        find number of lines in file
        """
        i = -1
        with open(fname) as f:
            for i, l in enumerate(f):
                pass
        return i + 1
fLen=_fileLen(fn)
def _slowSeek(fPos, fn, searchedTimestamp, matchFirstOccurrence):
searchForward=False
searchBackward=False
while True:
if fPos<0:
fPos=0
break
if fPos>fLen:
fPos=fLen+1
break
line = linecache.getline(fn, fPos)
t=_getTimestamp(line)
if t<searchedTimestamp:
if searchBackward:
fPos-=1
break
searchForward=True
fPos+=1
elif t>searchedTimestamp:
if searchForward:
fPos+=1
break
searchBackward=True
fPos-=1
else:
break
while True:
# this assumes that we are already in a block of identical timestamps
if matchFirstOccurrence:
# search backward
if fPos==1:
return fPos
lastfPos=fPos
fPos-=1
else:
# search forward
if fPos==fLen:
return fPos
lastfPos=fPos
fPos+=1
line = linecache.getline(fn, fPos)
t=_getTimestamp(line)
if t!=searchedTimestamp:
return lastfPos
fPos=1
delta=fLen/2
while True:
if fPos<0: return 1
#if fPos>fLen: return fLen+1
if fPos>fLen: fPos=fLen+1
line = linecache.getline(fn, fPos)
try:
t=_getTimestamp(line)
except IndexError:
if fPos==1:
"""
seems that even the first line is not complete
"""
return fPos
else:
"""
seems we encountered an incomplete line, let's try the previous
one
"""
fPos-=1
continue
else:
if t==timestamp or delta==1:
break
else:
if t<timestamp:
fPos+=delta
elif t>timestamp:
fPos-=delta
delta/=2
print 'FOOO',fPos,matchFirstOccurrence,timestamp
if matchFirstOccurrence and fPos==1:
return fPos
elif not matchFirstOccurrence and fPos==(fLen+1):
return fPos
else:
return _slowSeek(fPos, fn, timestamp, matchFirstOccurrence)
|
anderasberger/pydnsmap
|
pydnsmap/util.py
|
Python
|
bsd-3-clause
| 18,175 | 0.010344 |
#!/usr/bin/env python
"""
Extract pronunciations from the ELP items.
Outputs a CSV with the orthographic and phonological form on each
line. The phonological form is stripped of syllabification and stress
markers.
"""
# Copyright 2013 Constantine Lignos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from lingtools.corpus.elp import ELP, NULL
# " is primary stress, % is secondary, . is syllable boundary
DELETION_CHARS = '"%.'
# These represent a reasonable attempt to map the phonemes to
# one-character versions. The distinction between @` and 3` is
# removed; it is not present in most standard phone sets. Flap (4) is
# left alone as it cannot be mapped back to its underlying form.
PHON_REPLACEMENTS = (
# R-colored schwa
("@`", "R"),
("3`", "R"),
# In the ELP it is always `, but some hand output uses '
("3'", "R"),
("@'", "R"),
# Syllabic l
("l=", "L"),
# Move engma to G to leave N for syllabic n.
("N", "G"),
# Syllabic n. Note that N is engma in the original.
("n=", "N"),
# Syllabic m
("m=", "M"),
# dZ to J (like JH in Arpabet)
("dZ", "J"),
# tS to C (like CH in Arpabet)
("tS", "C"),
# aI to Y (like AY in Arpabet)
("aI", "Y"),
# aU to W (like AW in Arpabet)
("aU", "W"),
# OI to 8 (cannot use O like OY in Arpabet, as O is in use)
("OI", "8"),
)
def replace_phons(pron):
"""Replace phonemes using the PHON_REPLACEMENTS table."""
for replacement in PHON_REPLACEMENTS:
pron = pron.replace(*replacement)
return pron
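# Illustrative example (added for clarity, not part of the original script):
# multi-character ELP phonemes are collapsed to single characters before the
# stress/syllable markers are stripped; the pron string below is made up.
#
#     replace_phons('"dZVdZ')   # -> '"JVJ' (dZ -> J; the stress marker is
#                               #    removed later via DELETION_CHARS)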
def extract(input_path, output_path, mono_only, cmudict_format, target_sylls):
"""Extract words from the input path and write them to the output."""
with open(output_path, 'wb') as output_file:
elp = ELP(input_path)
# Sort by lowercase version of entry
words = sorted(elp.keys(), key=lambda s: s.lower())
count = 0
for word in words:
entry = elp[word]
# Extract orthography and pron
pron = entry.pron
nsyll = entry.nsyll
# Match syllable numbers if specified
if target_sylls is not None and nsyll != target_sylls:
continue
# Skip non-monomorphs if specified
if mono_only and not entry.monomorph:
continue
# Skip NULL prons, get the length if there is a pron.
if pron == NULL:
continue
else:
n_phon = entry.nphon
# Perform phoneme replacement on the pron
pron = replace_phons(pron)
# Remove stress/syllable markers
pron = pron.translate(None, DELETION_CHARS)
# Check that length matches
if len(pron) != n_phon:
print "Bad pronunciation for {!r}:".format(word)
print "Pron. {!r} of length {}, expected {}.".format(
pron, len(pron), n_phon)
continue
out_line = ("{},{}".format(word, pron) if not cmudict_format else
"{} {}".format(word.upper(), " ".join(pron)))
print >> output_file, out_line
count += 1
print "{} pronunciations written to {}".format(count, output_path)
def main():
"""Parse arguments and call the extractor."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('input', help='input CSV file')
parser.add_argument('output', help='output CSV file')
parser.add_argument('-m', '--mono', action='store_true',
help='output only monomorphemic items')
parser.add_argument('-s', '--sylls', nargs='?', type=int, metavar='n',
help='output only items with n syllables')
parser.add_argument('-c', '--cmudict', action='store_true',
help='output in CMUDict format')
args = parser.parse_args()
extract(args.input, args.output, args.mono, args.cmudict, args.sylls)
if __name__ == "__main__":
main()
|
lingtools/lingtools
|
extract_elp_prons.py
|
Python
|
apache-2.0
| 4,566 | 0 |
from bs4 import BeautifulSoup
import os
PATH = '/home/arj/arj_projects/imp_dump/kv_advanced/course_material/Advanced_Python_Course_(KV_October_2019)'
VID_DIR = 'advanced_videos'
LESSON_VID_MAP = {
# 'AD_-_Decorators_2': '29B_Decorators.webm',
# 'AD_-_Introduction': '01_Introduction.webm',
# 'AD_-_Exercise1': '04_Exercise_1.webm',
# 'AD_-_Exercise5': '08_Exercise_5.webm',
}
def dir_walk(path):
""" Use to walk through all objects in a directory."""
for f in os.listdir(path):
yield os.path.join(path, f)
def walker(path):
flist = []
reslist = _walker(path, flist)
return reslist
def _walker(dirpath, flist):
for f in os.listdir(dirpath):
fpath = os.path.join(dirpath, f)
if os.path.isfile(fpath):
flist.append(fpath)
else:
_walker(fpath, flist)
return flist
def get_soup(filepath):
with open(filepath, 'r') as f:
html = f.read()
return BeautifulSoup(html, 'html.parser')
def write_soup(filepath, soup):
with open(filepath, 'w') as f:
f.write(str(soup))
def check_iframe(filepath):
soup = get_soup(filepath)
return soup.iframe
def replace(filepath, video_dir, video_name):
soup = get_soup(filepath)
new_vid_tag = soup.new_tag('video')
new_vid_tag['width'] = "560"
new_vid_tag['height'] = "315"
new_vid_tag['controls'] = None
src_tag = soup.new_tag('source')
src_tag['src'] = "../../../{0}/{1}".format(video_dir, video_name)
src_tag['type'] = "video/mp4"
new_vid_tag.append(src_tag)
soup.iframe.replace_with(new_vid_tag)
write_soup(filepath, soup)
if __name__ == '__main__':
reslist = walker(PATH)
for f in reslist:
fpath, fname = os.path.split(f)
if 'html' in fname:
fn, fext = fname.split('.')
else:
print("NON HTML File: ", fname)
continue
if fn in LESSON_VID_MAP and check_iframe(f):
vid_name = LESSON_VID_MAP.get(fn)
if vid_name:
replace(f, VID_DIR, vid_name)
print("REPLACED: Video: ", vid_name)
else:
print("NO VIDEO FOUND: File: ", fname)
else:
print("Unknown FILE or NO IFRAME")
|
ankitjavalkar/algosutra
|
yaksh_video_replacer/replacer.py
|
Python
|
gpl-2.0
| 2,250 | 0.005333 |
# BlenderBIM Add-on - OpenBIM Blender Add-on
# Copyright (C) 2020, 2021 Dion Moult <dion@thinkmoult.com>
#
# This file is part of BlenderBIM Add-on.
#
# BlenderBIM Add-on is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BlenderBIM Add-on is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BlenderBIM Add-on. If not, see <http://www.gnu.org/licenses/>.
import bpy
from blenderbim.bim.ifc import IfcStore
from blenderbim.bim.prop import StrProperty, Attribute
from bpy.types import PropertyGroup
from bpy.props import (
PointerProperty,
StringProperty,
EnumProperty,
BoolProperty,
IntProperty,
FloatProperty,
FloatVectorProperty,
CollectionProperty,
)
class BIMStyleProperties(PropertyGroup):
attributes: CollectionProperty(name="Attributes", type=Attribute)
is_editing: BoolProperty(name="Is Editing")
|
IfcOpenShell/IfcOpenShell
|
src/blenderbim/blenderbim/bim/module/style/prop.py
|
Python
|
lgpl-3.0
| 1,292 | 0 |
# encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'Template', fields ['name']
db.delete_unique('django_template', ['name'])
def backwards(self, orm):
# Adding unique constraint on 'Template', fields ['name']
db.create_unique('django_template', ['name'])
models = {
'dbtemplates.template': {
'Meta': {'ordering': "('name',)", 'object_name': 'Template', 'db_table': "'django_template'"},
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['dbtemplates']
|
tbtimes/checkup-survey
|
editorial_board/dbtemplates/south_migrations/0002_auto__del_unique_template_name.py
|
Python
|
mit
| 1,636 | 0.006724 |
#!/usr/bin/env python
"""
wsonoma.py
This module defines a RecipeParser subclass that provides an implementation
for parsing recipes from the williams-sonoma.com site.
"""
from urllib.parse import urlsplit
from parser import RecipeParser
class WilliamsSonoma(RecipeParser):
def getTitle(self):
"""The title format is:
<title>Recipe | Williams Sonoma</title>
we want just 'Recipe'
"""
return self.tree.xpath('//title')[0].text.split('|')[0].strip()
def getImage(self):
"""The image format is:
<meta property="og:image" content="IMG_URL">
we want just 'IMG_URL'
"""
return self.tree.xpath('//meta[@property="og:image"]')[0].get('content')
def getIngredients(self):
"""Return a list or a map of the recipe ingredients"""
data = []
for node in self.tree.xpath('//li[@itemprop="ingredient"]'):
data.append(''.join(node.xpath('descendant-or-self::text()')).strip())
return data
def getDirections(self):
"""Return a list or a map of the preparation instructions"""
data = []
for node in self.tree.xpath('//div[@class="directions"]'):
data.append(node.xpath('descendant-or-self::text()'))
return [_f for _f in [x.strip() for x in data[0]] if _f]
def getTags(self):
"""Return a list of tags for this recipe"""
return []
def getOtherRecipeLinks(self):
"""Return a list of other recipes found in the page: while single recipe
pages do not have links, the various categories at
http://www.williams-sonoma.com/recipe/ do.
For example,
http://www.williams-sonoma.com/search/results.html?activeTab=recipes&words=winter_weeknight_dinners
has a collection of individual recipe links, and this method will find them.
"""
data = []
for link in self.tree.xpath('//ul[@class="recipe-list"]/li/a'):
if 'href' in list(link.keys()):
href = urlsplit(link.get('href'))
if 'cm_src=RECIPESEARCH' == href.query:
data.append(href.scheme + '://' + href.netloc + href.path)
return data
|
dpapathanasiou/recipebook
|
sites/wsonoma.py
|
Python
|
mit
| 2,219 | 0.001803 |
import numpy as np
def hessian(im_input, sigma):
"""
Calculates hessian of image I convolved with a gaussian kernel with
covariance C = [Sigma^2 0; 0 Sigma^2].
Parameters
----------
im_input : array_like
M x N grayscale image.
sigma : double
standard deviation of gaussian kernel.
Returns
-------
im_hess : array_like
M x N x 4 hessian matrix - im_hess[:,:,0] = dxx,
im_hess[:,:,1] = im_hess[:,:,2] = dxy, im_hess[:,:,3] = dyy.
"""
from scipy.ndimage.filters import convolve
# generate kernel domain
h, k = round(3 * sigma), round(3 * sigma + 1)
x, y = np.mgrid[-h:k, -h:k]
# generate kernels
gxx = 1./(2 * np.pi * sigma ** 4) * ((x / sigma) ** 2 - 1) * \
np.exp(-(x**2+y**2) / (2 * sigma ** 2))
gxy = 1./(2 * np.pi * sigma ** 6) * np.multiply(x, y) * \
np.exp(-(x**2+y**2) / (2 * sigma ** 2))
gyy = np.transpose(gxx)
# convolve
dxx = convolve(im_input, gxx, mode='constant')
dxy = convolve(im_input, gxy, mode='constant')
dyy = convolve(im_input, gyy, mode='constant')
# format output
im_hess = np.concatenate(
(dxx[:, :, None], dxy[:, :, None], dxy[:, :, None], dyy[:, :, None]),
axis=2
)
return im_hess
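# Illustrative usage (added for clarity, not part of the original module):
# compute the smoothed hessian of a random grayscale image; the image size
# and sigma are arbitrary example values.
if __name__ == '__main__':
    im = np.random.rand(64, 64)
    im_hess = hessian(im, sigma=2.0)
    # one 2x2 hessian per pixel, flattened into 4 channels
    print(im_hess.shape)  # (64, 64, 4)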
|
DigitalSlideArchive/HistomicsTK
|
histomicstk/utils/hessian.py
|
Python
|
apache-2.0
| 1,287 | 0 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
"""
Unit tests for ``octoprint.settings``.
"""
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2016 The OctoPrint Project - Released under terms of the AGPLv3 License"
|
foosel/OctoPrint
|
tests/settings/__init__.py
|
Python
|
agpl-3.0
| 394 | 0.007653 |
import os
import re
import json
import urllib.parse
import urllib.request
import contextlib
import hashlib
from lxml import etree
from django.conf import settings
from django.http import HttpResponse, JsonResponse, Http404
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.vary import vary_on_headers
from django.core.exceptions import PermissionDenied
from webgis.map.wfsfilter import webgisfilter
from webgis.libs.utils import set_query_parameters
from webgis.mapcache import get_tile_response, get_legendgraphic_response, \
WmsLayer, TileNotFoundException
from webgis.map.project import clean_project_name, get_project, filter_user_roles, \
get_project_info, get_last_project_version, InvalidProjectException
from webgis.auth import basic_auth
from webgis.auth.decorators import login_required
def abs_project_path(project):
return os.path.join(settings.GISQUICK_PROJECT_ROOT, project)
def check_project_access(request, project, project_auth):
if project_auth == "all":
return True
elif project_auth == "authenticated":
return request.user.is_authenticated
elif project_auth == "owner":
project_owner = project.split('/', 1)[0]
return request.user.is_authenticated and (project_owner == request.user.username or request.user.is_superuser)
return False
def check_layer_access(user_roles, layer_name, permission):
for role in user_roles:
perms = role['permissions']['layers']
if perms[layer_name][permission]:
return True
return False
def map_project(request):
try:
project_data = get_project(request)
return JsonResponse(project_data, status=project_data['status'])
except InvalidProjectException:
raise Http404
project_name_pattern = re.compile(r'(.+)_(\d{10})')
def parse_project_name(name):
match = project_name_pattern.match(name)
if match:
return match.group(1), int(match.group(2))
return name, None
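# Illustrative examples (added for clarity): project names may carry a
# 10-digit publish timestamp suffix; the names below are made up.
#
#     parse_project_name('user/city_1500000000')  # -> ('user/city', 1500000000)
#     parse_project_name('user/city')             # -> ('user/city', None)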
@csrf_exempt
@vary_on_headers('Authorization')
def ows(request):
params = {key.upper(): request.GET[key] for key in request.GET.keys()}
ows_project = clean_project_name(params.get('MAP'))
project, timestamp = parse_project_name(ows_project)
project_hash = hashlib.md5(project.encode('utf-8')).hexdigest()
pi = get_project_info(project_hash, timestamp, project=ows_project)
if not request.user.is_authenticated:
basic_auth.is_authenticated(request)
if not check_project_access(request, project, pi['authentication']):
if not request.user.is_authenticated:
response = HttpResponse('Authentication required', status=401)
response['WWW-Authenticate'] = 'Basic realm=OWS API'
return response
raise PermissionDenied
if params.get('SERVICE') == 'WFS' and params.get('REQUEST') != 'GetFeature':
access_control = pi.get('access_control')
if access_control and access_control['enabled']:
root = etree.fromstring(request.body.decode())
user_roles = filter_user_roles(request.user, access_control['roles'])
for elem in root.findall('.//{*}Insert'):
for child in elem.getchildren():
layer_name = etree.QName(child).localname
if not check_layer_access(user_roles, layer_name, 'insert'):
raise PermissionDenied
checks = [
('.//{*}Update', 'update'),
('.//{*}Delete', 'delete')
]
for query_path, permission in checks:
for elem in root.findall(query_path):
layer_name = elem.get('typeName').split(':')[-1]
if not check_layer_access(user_roles, layer_name, permission):
raise PermissionDenied
url = "{0}?{1}".format(
settings.GISQUICK_MAPSERVER_URL.rstrip("/"),
request.environ['QUERY_STRING']
)
abs_project = abs_project_path(params.get('MAP'))
url = set_query_parameters(url, {'MAP': abs_project})
if request.method == 'POST':
owsrequest = urllib.request.Request(url, request.body)
else:
owsrequest = urllib.request.Request(url)
owsrequest.add_header("User-Agent", "Gisquick")
resp_content = b""
try:
with contextlib.closing(urllib.request.urlopen(owsrequest)) as resp:
while True:
data = resp.read()
if not data:
break
resp_content += data
if params.get('REQUEST', '') == 'GetCapabilities':
resp_content = resp_content.replace(
settings.GISQUICK_MAPSERVER_URL.encode(),
request.build_absolute_uri(request.path).encode()
)
content_type = resp.getheader('Content-Type')
status = resp.getcode()
return HttpResponse(resp_content, content_type=content_type, status=status)
except urllib.error.HTTPError as e:
# reason = e.read().decode("utf8")
return HttpResponse(e.read(), content_type=e.headers.get_content_type(), status=e.code)
def tile(request, project_hash, publish, layers_hash=None, z=None, x=None, y=None, format=None):
params = {key.upper(): request.GET[key] for key in request.GET.keys()}
project = params['PROJECT']+'.qgs'
mapserver_url = set_query_parameters(
settings.GISQUICK_MAPSERVER_URL,
{'MAP': abs_project_path(project)}
)
project_info = get_project_info(project_hash, publish, project=project)
if not project_info:
raise Http404
if not check_project_access(request, params['PROJECT'], project_info['authentication']):
raise PermissionDenied
try:
layer = WmsLayer(
project=project_hash,
publish=publish,
name=layers_hash,
provider_layers=params['LAYERS'].encode("utf-8"),
provider_url=mapserver_url,
image_format=format,
tile_size=256,
metasize=5,
extent=project_info['extent'],
resolutions=project_info['tile_resolutions'],
projection=project_info['projection']['code']
)
return get_tile_response(layer, z=z, x=x, y=y)
except TileNotFoundException as e:
raise Http404
def legend(request, project_hash, publish, layer_hash=None, zoom=None, format=None):
params = {key.upper(): request.GET[key] for key in request.GET.keys()}
project = params['PROJECT']+'.qgs'
mapserver_url = set_query_parameters(
settings.GISQUICK_MAPSERVER_URL,
{'MAP': abs_project_path(project)}
)
project_info = get_project_info(project_hash, publish, project=project)
if not project_info:
raise Http404
if not check_project_access(request, params['PROJECT'], project_info['authentication']):
raise PermissionDenied
try:
layer = WmsLayer(
project=project_hash,
publish=publish,
name=layer_hash,
provider_layers=params['LAYER'].encode('utf-8'),
provider_url=mapserver_url,
image_format=format,
)
params.pop('PROJECT')
params.pop('LAYER')
return get_legendgraphic_response(layer, zoom, **params)
except:
raise Http404
@csrf_exempt
def filterdata(request):
"""Handle filter requrest - using OGC WFS service
The request body should look like:
{
'layer': 'Places',
'maxfeatures': 1000,
'startindex': 0,
'bbox': [0, 1, 2, 3],
'filters': [{
'attribute': 'NAME',
'value': 'Prague',
'operator': '='
}]
}
sent as HTTP POST request
"""
# TODO: use check_project_access
if request.method == 'POST':
project = request.GET['PROJECT']
project = get_last_project_version(project) + '.qgs'
url = settings.GISQUICK_MAPSERVER_URL
params = {
'MAP': abs_project_path(project)
}
mapserv = '{}?{}'.format(url, urllib.parse.urlencode(params))
filter_request = json.loads(request.body.decode('utf-8'))
layer_name = filter_request['layer']
maxfeatures = startindex = bbox = filters = None
if 'maxfeatures' in filter_request:
maxfeatures = filter_request['maxfeatures']
if 'startindex' in filter_request:
startindex = filter_request['startindex']
if 'bbox' in filter_request:
bbox = filter_request['bbox']
if 'filters' in filter_request:
filters = filter_request['filters']
result = webgisfilter(mapserv, layer_name, maxfeatures=maxfeatures,
startindex=startindex, bbox=bbox, filters=filters)
return HttpResponse(json.dumps(result), content_type="application/json")
else:
raise Exception('No inputs specified, use POST method')
|
gislab-npo/gislab-web
|
server/webgis/map/views.py
|
Python
|
gpl-2.0
| 9,021 | 0.002439 |
__author__ = 'BeyondSky'
from collections import defaultdict
class Solution(object):
def getHint(self, secret, guess):
"""
:type secret: str
:type guess: str
:rtype: str
"""
bulls = cows = 0
digits = defaultdict(int)
# first pass: count bulls and non-matching digits
for index in range(len(secret)):
if secret[index] == guess[index]:
# matches, count the number of bulls
bulls += 1
else:
# not match, increase number of non-matching digits
digits[secret[index]] += 1
# second pass: count number of cows
for index in range(len(secret)):
if secret[index] != guess[index]:
                # guessed digit still available among non-matching secret digits: count a cow and consume one occurrence
if digits[guess[index]] > 0:
cows += 1
digits[guess[index]] -= 1
return str(bulls) + 'A' + str(cows) + 'B'
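# Illustrative usage (added for clarity, not part of the original file): the
# two classic examples from the problem statement.
if __name__ == '__main__':
    print(Solution().getHint("1807", "7810"))  # 1A3B
    print(Solution().getHint("1123", "0111"))  # 1A1B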
|
BeyondSkyCoder/BeyondCoder
|
leetcode/python/bulls_and_cows.py
|
Python
|
apache-2.0
| 1,021 | 0.002938 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2019 khalim19
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module defines a custom widget holding an array of GUI elements. The widget
is used as the default GUI for `setting.ArraySetting` instances.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *
import collections
import contextlib
import pygtk
pygtk.require("2.0")
import gtk
import gobject
from .. import utils as pgutils
from . import draganddropcontext as draganddropcontext_
__all__ = [
"ItemBox",
"ArrayBox",
"ItemBoxItem",
]
class ItemBox(gtk.ScrolledWindow):
"""
This base class defines a scrollable box holding a vertical list of items.
Each item is an instance of `_ItemBoxItem` class or one of its subclasses.
"""
ITEM_SPACING = 4
VBOX_SPACING = 4
def __init__(self, item_spacing=ITEM_SPACING, *args, **kwargs):
super().__init__(*args, **kwargs)
self._item_spacing = item_spacing
self._drag_and_drop_context = draganddropcontext_.DragAndDropContext()
self._items = []
self._vbox_items = gtk.VBox(homogeneous=False)
self._vbox_items.set_spacing(self._item_spacing)
self._vbox = gtk.VBox(homogeneous=False)
self._vbox.set_spacing(self.VBOX_SPACING)
self._vbox.pack_start(self._vbox_items, expand=False, fill=False)
self.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.add_with_viewport(self._vbox)
self.get_child().set_shadow_type(gtk.SHADOW_NONE)
def add_item(self, item):
self._vbox_items.pack_start(item.widget, expand=False, fill=False)
item.button_remove.connect("clicked", self._on_item_button_remove_clicked, item)
item.widget.connect("key-press-event", self._on_item_widget_key_press_event, item)
self._setup_drag(item)
self._items.append(item)
return item
def reorder_item(self, item, position):
new_position = min(max(position, 0), len(self._items) - 1)
self._items.pop(self._get_item_position(item))
self._items.insert(new_position, item)
self._vbox_items.reorder_child(item.widget, new_position)
return new_position
def remove_item(self, item):
item_position = self._get_item_position(item)
if item_position < len(self._items) - 1:
next_item_position = item_position + 1
self._items[next_item_position].item_widget.grab_focus()
self._vbox_items.remove(item.widget)
item.remove_item_widget()
self._items.remove(item)
def clear(self):
for unused_ in range(len(self._items)):
self.remove_item(self._items[0])
def _setup_drag(self, item):
self._drag_and_drop_context.setup_drag(
item.item_widget,
self._get_drag_data,
self._on_drag_data_received,
[item],
[item],
self)
def _get_drag_data(self, dragged_item):
return str(self._items.index(dragged_item))
def _on_drag_data_received(self, dragged_item_index_str, destination_item):
dragged_item = self._items[int(dragged_item_index_str)]
self.reorder_item(dragged_item, self._get_item_position(destination_item))
def _on_item_widget_key_press_event(self, widget, event, item):
if event.state & gtk.gdk.MOD1_MASK: # Alt key
key_name = gtk.gdk.keyval_name(event.keyval)
if key_name in ["Up", "KP_Up"]:
self.reorder_item(
item, self._get_item_position(item) - 1)
elif key_name in ["Down", "KP_Down"]:
self.reorder_item(
item, self._get_item_position(item) + 1)
def _on_item_button_remove_clicked(self, button, item):
self.remove_item(item)
def _get_item_position(self, item):
return self._items.index(item)
class ItemBoxItem(object):
_HBOX_BUTTONS_SPACING = 3
_HBOX_SPACING = 3
def __init__(self, item_widget):
self._item_widget = item_widget
self._hbox = gtk.HBox(homogeneous=False)
self._hbox.set_spacing(self._HBOX_SPACING)
self._hbox_buttons = gtk.HBox(homogeneous=False)
self._hbox_buttons.set_spacing(self._HBOX_BUTTONS_SPACING)
self._event_box_buttons = gtk.EventBox()
self._event_box_buttons.add(self._hbox_buttons)
self._hbox.pack_start(self._item_widget, expand=True, fill=True)
self._hbox.pack_start(self._event_box_buttons, expand=False, fill=False)
self._event_box = gtk.EventBox()
self._event_box.add(self._hbox)
self._has_hbox_buttons_focus = False
self._button_remove = gtk.Button()
self._setup_item_button(self._button_remove, gtk.STOCK_CLOSE)
self._event_box.connect("enter-notify-event", self._on_event_box_enter_notify_event)
self._event_box.connect("leave-notify-event", self._on_event_box_leave_notify_event)
self._is_event_box_allocated_size = False
self._buttons_allocation = None
self._event_box.connect("size-allocate", self._on_event_box_size_allocate)
self._event_box_buttons.connect(
"size-allocate", self._on_event_box_buttons_size_allocate)
self._event_box.show_all()
self._hbox_buttons.set_no_show_all(True)
@property
def widget(self):
return self._event_box
@property
def item_widget(self):
return self._item_widget
@property
def button_remove(self):
return self._button_remove
def remove_item_widget(self):
self._hbox.remove(self._item_widget)
def _setup_item_button(self, item_button, icon, position=None):
item_button.set_relief(gtk.RELIEF_NONE)
button_icon = gtk.image_new_from_pixbuf(
item_button.render_icon(icon, gtk.ICON_SIZE_MENU))
item_button.add(button_icon)
self._hbox_buttons.pack_start(item_button, expand=False, fill=False)
if position is not None:
self._hbox_buttons.reorder_child(item_button, position)
item_button.show_all()
def _on_event_box_enter_notify_event(self, event_box, event):
if event.detail != gtk.gdk.NOTIFY_INFERIOR:
self._hbox_buttons.show()
def _on_event_box_leave_notify_event(self, event_box, event):
if event.detail != gtk.gdk.NOTIFY_INFERIOR:
self._hbox_buttons.hide()
def _on_event_box_size_allocate(self, event_box, allocation):
if self._is_event_box_allocated_size:
return
self._is_event_box_allocated_size = True
# Assign enough height to the HBox to make sure it does not resize when
# showing buttons.
if self._buttons_allocation.height >= allocation.height:
self._hbox.set_property("height-request", allocation.height)
def _on_event_box_buttons_size_allocate(self, event_box, allocation):
if self._buttons_allocation is not None:
return
self._buttons_allocation = allocation
# Make sure the width allocated to the buttons remains the same even if
# buttons are hidden. This avoids a problem with unreachable buttons when
# the horizontal scrollbar is displayed.
self._event_box_buttons.set_property(
"width-request", self._buttons_allocation.width)
self._hbox_buttons.hide()
class ArrayBox(ItemBox):
"""
This class can be used to edit `setting.ArraySetting` instances interactively.
Signals:
* `"array-box-changed"` - An item was added, reordered or removed by the user.
* `"array-box-item-changed"` - The contents of an item was modified by the
user. Currently, this signal is not invoked in this widget and can only be
invoked explicitly by calling `ArrayBox.emit("array-box-item-changed")`.
"""
__gsignals__ = {
b"array-box-changed": (gobject.SIGNAL_RUN_FIRST, None, ()),
b"array-box-item-changed": (gobject.SIGNAL_RUN_FIRST, None, ())}
_SIZE_HBOX_SPACING = 6
def __init__(
self,
new_item_default_value,
min_size=0,
max_size=None,
item_spacing=ItemBox.ITEM_SPACING,
max_width=None,
max_height=None,
*args,
**kwargs):
"""
Parameters:
* `new_item_default_value` - default value for new items.
* `min_size` - minimum number of elements.
* `max_size` - maximum number of elements. If `None`, the number of elements
is unlimited.
* `item_spacing` - vertical spacing in pixels between items.
* `max_width` - maximum width of the array box before the horizontal
scrollbar is displayed. The array box will resize automatically until the
maximum width is reached. If `max_width` is `None`, the width is fixed
to whatever width is provided by `gtk.ScrolledWindow`. If `max_width` is
zero or negative, the width is unlimited.
* `max_height` - maximum height of the array box before the vertical
scrollbar is displayed. For more information, see `max_width`.
"""
super().__init__(item_spacing=item_spacing, *args, **kwargs)
self._new_item_default_value = new_item_default_value
self._min_size = min_size if min_size >= 0 else 0
if max_size is None:
self._max_size = 2**32
else:
self._max_size = max_size if max_size >= min_size else min_size
self.max_width = max_width
self.max_height = max_height
self.on_add_item = pgutils.empty_func
self.on_reorder_item = pgutils.empty_func
self.on_remove_item = pgutils.empty_func
self._items_total_width = None
self._items_total_height = None
self._items_allocations = {}
self._locker = _ActionLocker()
self._init_gui()
def _init_gui(self):
self._size_spin_button = gtk.SpinButton(
gtk.Adjustment(
value=0,
lower=self._min_size,
upper=self._max_size,
step_incr=1,
page_incr=10,
),
digits=0)
self._size_spin_button.set_numeric(True)
self._size_spin_button.set_value(0)
self._size_spin_button_label = gtk.Label(_("Size"))
self._size_hbox = gtk.HBox()
self._size_hbox.set_spacing(self._SIZE_HBOX_SPACING)
self._size_hbox.pack_start(self._size_spin_button_label, expand=False, fill=False)
self._size_hbox.pack_start(self._size_spin_button, expand=False, fill=False)
self._vbox.pack_start(self._size_hbox, expand=False, fill=False)
self._vbox.reorder_child(self._size_hbox, 0)
self._size_spin_button.connect(
"value-changed", self._on_size_spin_button_value_changed)
def add_item(self, item_value=None, index=None):
if item_value is None:
item_value = self._new_item_default_value
item_widget = self.on_add_item(item_value, index)
item = _ArrayBoxItem(item_widget)
super().add_item(item)
item.widget.connect("size-allocate", self._on_item_widget_size_allocate, item)
if index is None:
item.label.set_label(self._get_item_name(len(self._items)))
if index is not None:
with self._locker.lock_temp("emit_array_box_changed_on_reorder"):
self.reorder_item(item, index)
if self._locker.is_unlocked("update_spin_button"):
with self._locker.lock_temp("emit_size_spin_button_value_changed"):
self._size_spin_button.spin(gtk.SPIN_STEP_FORWARD, increment=1)
return item
def reorder_item(self, item, new_position):
orig_position = self._get_item_position(item)
processed_new_position = super().reorder_item(item, new_position)
self.on_reorder_item(orig_position, processed_new_position)
self._rename_item_names(min(orig_position, processed_new_position))
if self._locker.is_unlocked("emit_array_box_changed_on_reorder"):
self.emit("array-box-changed")
def remove_item(self, item):
if (self._locker.is_unlocked("prevent_removal_below_min_size")
and len(self._items) == self._min_size):
return
if self._locker.is_unlocked("update_spin_button"):
with self._locker.lock_temp("emit_size_spin_button_value_changed"):
self._size_spin_button.spin(gtk.SPIN_STEP_BACKWARD, increment=1)
item_position = self._get_item_position(item)
super().remove_item(item)
if item in self._items_allocations:
self._update_height(-(self._items_allocations[item].height + self._item_spacing))
del self._items_allocations[item]
self.on_remove_item(item_position)
self._rename_item_names(item_position)
def set_values(self, values):
self._locker.lock("emit_size_spin_button_value_changed")
self._locker.lock("prevent_removal_below_min_size")
orig_on_remove_item = self.on_remove_item
self.on_remove_item = pgutils.empty_func
self.clear()
# This fixes an issue of items being allocated height of 1 when the array
# size was previously 0.
self.set_property("height-request", -1)
for index, value in enumerate(values):
self.add_item(value, index)
self.on_remove_item = orig_on_remove_item
self._size_spin_button.set_value(len(values))
self._locker.unlock("prevent_removal_below_min_size")
self._locker.unlock("emit_size_spin_button_value_changed")
def _setup_drag(self, item):
self._drag_and_drop_context.setup_drag(
# Using the entire item allows dragging only by the label rather than the
# widget itself. This avoids problems with widgets such as spin buttons
# that do not behave correctly when reordering and also avoids accidental
# clicking and modifying the widget by the user.
item.widget,
self._get_drag_data,
self._on_drag_data_received,
[item],
[item],
self)
def _on_size_spin_button_value_changed(self, size_spin_button):
if self._locker.is_unlocked("emit_size_spin_button_value_changed"):
self._locker.lock("update_spin_button")
new_size = size_spin_button.get_value_as_int()
if new_size > len(self._items):
num_elements_to_add = new_size - len(self._items)
for unused_ in range(num_elements_to_add):
self.add_item()
elif new_size < len(self._items):
num_elements_to_remove = len(self._items) - new_size
for unused_ in range(num_elements_to_remove):
self.remove_item(self._items[-1])
self.emit("array-box-changed")
self._locker.unlock("update_spin_button")
def _on_item_button_remove_clicked(self, button, item):
self._locker.lock("emit_size_spin_button_value_changed")
should_emit_signal = (
len(self._items) > self._min_size
or self._locker.is_locked("prevent_removal_below_min_size"))
super()._on_item_button_remove_clicked(button, item)
if should_emit_signal:
self.emit("array-box-changed")
self._locker.unlock("emit_size_spin_button_value_changed")
def _on_item_widget_size_allocate(self, item_widget, allocation, item):
if item in self._items_allocations:
self._update_width(allocation.width - self._items_allocations[item].width)
self._update_height(allocation.height - self._items_allocations[item].height)
else:
self._update_width(allocation.width)
self._update_height(allocation.height + self._item_spacing)
self._items_allocations[item] = allocation
def _update_width(self, width_diff):
if self._items_total_width is None:
self._items_total_width = self.get_allocation().width
if width_diff != 0:
self._update_dimension(
width_diff,
self._items_total_width,
self.max_width,
"width-request")
self._items_total_width = self._items_total_width + width_diff
def _update_height(self, height_diff):
if self._items_total_height is None:
self._items_total_height = self.get_allocation().height
if height_diff != 0:
self._update_dimension(
height_diff,
self._items_total_height,
self.max_height,
"height-request")
self._items_total_height = self._items_total_height + height_diff
def _update_dimension(
self,
size_diff,
total_size,
max_visible_size,
dimension_request_property):
if max_visible_size is None:
is_max_visible_size_unlimited = True
else:
is_max_visible_size_unlimited = max_visible_size <= 0
if not is_max_visible_size_unlimited:
visible_size = min(total_size, max_visible_size)
else:
visible_size = total_size
if (is_max_visible_size_unlimited
or (visible_size + size_diff <= max_visible_size
and total_size < max_visible_size)):
new_size = visible_size + size_diff
elif total_size >= max_visible_size and size_diff < 0:
if total_size + size_diff < max_visible_size:
new_size = total_size + size_diff
else:
new_size = max_visible_size
else:
new_size = max_visible_size
if max_visible_size is not None:
self.set_property(dimension_request_property, new_size)
def _rename_item_names(self, start_index):
for index, item in enumerate(self._items[start_index:]):
item.label.set_label(self._get_item_name(index + 1 + start_index))
@staticmethod
def _get_item_name(index):
return _("Element") + " " + str(index)
class _ArrayBoxItem(ItemBoxItem):
def __init__(self, item_widget):
super().__init__(item_widget)
self._label = gtk.Label()
self._label.show()
self._hbox.pack_start(self._label, expand=False, fill=False)
self._hbox.reorder_child(self._label, 0)
@property
def label(self):
return self._label
class _ActionLocker(object):
def __init__(self):
self._tokens = collections.defaultdict(int)
@contextlib.contextmanager
def lock_temp(self, key):
self.lock(key)
try:
yield
finally:
self.unlock(key)
def lock(self, key):
self._tokens[key] += 1
def unlock(self, key):
if self._tokens[key] > 0:
self._tokens[key] -= 1
def is_locked(self, key):
return self._tokens[key] > 0
def is_unlocked(self, key):
return self._tokens[key] == 0
gobject.type_register(ArrayBox)
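# Illustrative usage sketch (added, not part of the original module): an
# ArrayBox holding integers; the spin-button factory and callback below are
# assumptions made for the example (the real default GUI is wired up by
# setting.ArraySetting).
#
#     array_box = ArrayBox(new_item_default_value=0, min_size=1, max_size=10)
#
#     def add_item(value, index):
#         spin_button = gtk.SpinButton(gtk.Adjustment(value, 0, 100, 1, 10), digits=0)
#         spin_button.show()
#         return spin_button
#
#     def on_changed(box):
#         pass  # react to items being added, reordered or removed
#
#     array_box.on_add_item = add_item
#     array_box.connect("array-box-changed", on_changed)
#     array_box.set_values([1, 2, 3])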
|
khalim19/gimp-plugin-export-layers
|
export_layers/pygimplib/gui/itembox.py
|
Python
|
gpl-3.0
| 18,637 | 0.014863 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-02-11 15:40
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reports', '0010_auto_20170211_0306'),
]
operations = [
migrations.RemoveField(
model_name='listreport',
name='board',
),
migrations.RemoveField(
model_name='listreport',
name='list',
),
migrations.DeleteModel(
name='ListReport',
),
]
|
diegojromerolopez/djanban
|
src/djanban/apps/reports/migrations/0011_auto_20170211_1640.py
|
Python
|
mit
| 573 | 0 |
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: test.py
Description :
Author : JHao
date: 2017/3/7
-------------------------------------------------
Change Activity:
2017/3/7:
-------------------------------------------------
"""
__author__ = 'JHao'
from test import testProxyValidator
from test import testConfigHandler
from test import testLogHandler
from test import testDbClient
if __name__ == '__main__':
print("ConfigHandler:")
testConfigHandler.testConfig()
print("LogHandler:")
testLogHandler.testLogHandler()
print("DbClient:")
testDbClient.testDbClient()
print("ProxyValidator:")
testProxyValidator.testProxyValidator()
|
jhao104/proxy_pool
|
test.py
|
Python
|
mit
| 770 | 0.003916 |
from ._compat import unittest
from ._adapt import DEFAULT_URI
from pydal import DAL
class DALtest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(DALtest, self).__init__(*args, **kwargs)
self._connections = []
def connect(self, *args, **kwargs):
if not args:
kwargs.setdefault('uri', DEFAULT_URI)
kwargs.setdefault('check_reserved', ['all'])
ret = DAL(*args, **kwargs)
self._connections.append(ret)
return ret
def tearDown(self):
for db in self._connections:
db.commit()
tablist = list(db.tables)
for table in reversed(tablist):
db[table].drop()
db.close()
self._connections = []
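# Illustrative usage sketch (added, not part of the original helper): a test
# case built on DALtest gets its connections committed, tables dropped and
# connections closed automatically in tearDown(); the table below is made up.
#
#     from pydal import Field
#
#     class TestDefine(DALtest):
#         def testDefineTable(self):
#             db = self.connect()
#             db.define_table('thing', Field('name'))
#             db.thing.insert(name='widget')
#             self.assertEqual(db(db.thing).count(), 1)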
|
niphlod/pydal
|
tests/_helpers.py
|
Python
|
bsd-3-clause
| 758 | 0.001319 |
"""
Support for Satel Integra devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/satel_integra/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME, STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED, EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send
REQUIREMENTS = ['satel_integra==0.2.0']
DEFAULT_ALARM_NAME = 'satel_integra'
DEFAULT_PORT = 7094
DEFAULT_CONF_ARM_HOME_MODE = 1
DEFAULT_DEVICE_PARTITION = 1
DEFAULT_ZONE_TYPE = 'motion'
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'satel_integra'
DATA_SATEL = 'satel_integra'
CONF_DEVICE_HOST = 'host'
CONF_DEVICE_PORT = 'port'
CONF_DEVICE_PARTITION = 'partition'
CONF_ARM_HOME_MODE = 'arm_home_mode'
CONF_ZONE_NAME = 'name'
CONF_ZONE_TYPE = 'type'
CONF_ZONES = 'zones'
CONF_OUTPUTS = 'outputs'
ZONES = 'zones'
SIGNAL_PANEL_MESSAGE = 'satel_integra.panel_message'
SIGNAL_PANEL_ARM_AWAY = 'satel_integra.panel_arm_away'
SIGNAL_PANEL_ARM_HOME = 'satel_integra.panel_arm_home'
SIGNAL_PANEL_DISARM = 'satel_integra.panel_disarm'
SIGNAL_ZONES_UPDATED = 'satel_integra.zones_updated'
SIGNAL_OUTPUTS_UPDATED = 'satel_integra.outputs_updated'
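# Validation schema for a single zone/output entry: a required name and an optional type.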
ZONE_SCHEMA = vol.Schema({
vol.Required(CONF_ZONE_NAME): cv.string,
vol.Optional(CONF_ZONE_TYPE, default=DEFAULT_ZONE_TYPE): cv.string})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_DEVICE_HOST): cv.string,
vol.Optional(CONF_DEVICE_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_DEVICE_PARTITION,
default=DEFAULT_DEVICE_PARTITION): cv.positive_int,
vol.Optional(CONF_ARM_HOME_MODE,
default=DEFAULT_CONF_ARM_HOME_MODE): vol.In([1, 2, 3]),
vol.Optional(CONF_ZONES,
default={}): {vol.Coerce(int): ZONE_SCHEMA},
vol.Optional(CONF_OUTPUTS,
default={}): {vol.Coerce(int): ZONE_SCHEMA},
}),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
"""Set up the Satel Integra component."""
conf = config.get(DOMAIN)
zones = conf.get(CONF_ZONES)
outputs = conf.get(CONF_OUTPUTS)
host = conf.get(CONF_DEVICE_HOST)
port = conf.get(CONF_DEVICE_PORT)
partition = conf.get(CONF_DEVICE_PARTITION)
from satel_integra.satel_integra import AsyncSatel, AlarmState
controller = AsyncSatel(host, port, hass.loop, zones, outputs, partition)
hass.data[DATA_SATEL] = controller
result = await controller.connect()
if not result:
return False
async def _close():
controller.close()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _close())
_LOGGER.debug("Arm home config: %s, mode: %s ",
conf,
conf.get(CONF_ARM_HOME_MODE))
task_control_panel = hass.async_create_task(
async_load_platform(hass, 'alarm_control_panel', DOMAIN, conf, config))
task_zones = hass.async_create_task(
async_load_platform(hass, 'binary_sensor', DOMAIN,
{CONF_ZONES: zones, CONF_OUTPUTS: outputs}, config)
)
await asyncio.wait([task_control_panel, task_zones], loop=hass.loop)
@callback
def alarm_status_update_callback(status):
"""Send status update received from alarm to home assistant."""
_LOGGER.debug("Alarm status callback, status: %s", status)
hass_alarm_status = STATE_ALARM_DISARMED
if status == AlarmState.ARMED_MODE0:
hass_alarm_status = STATE_ALARM_ARMED_AWAY
elif status in [
AlarmState.ARMED_MODE0,
AlarmState.ARMED_MODE1,
AlarmState.ARMED_MODE2,
AlarmState.ARMED_MODE3
]:
hass_alarm_status = STATE_ALARM_ARMED_HOME
elif status in [AlarmState.TRIGGERED, AlarmState.TRIGGERED_FIRE]:
hass_alarm_status = STATE_ALARM_TRIGGERED
elif status == AlarmState.DISARMED:
hass_alarm_status = STATE_ALARM_DISARMED
_LOGGER.debug("Sending hass_alarm_status: %s...", hass_alarm_status)
async_dispatcher_send(hass, SIGNAL_PANEL_MESSAGE, hass_alarm_status)
@callback
def zones_update_callback(status):
"""Update zone objects as per notification from the alarm."""
_LOGGER.debug("Zones callback, status: %s", status)
async_dispatcher_send(hass, SIGNAL_ZONES_UPDATED, status[ZONES])
@callback
def outputs_update_callback(status):
"""Update zone objects as per notification from the alarm."""
_LOGGER.debug("Outputs updated callback , status: %s", status)
async_dispatcher_send(hass, SIGNAL_OUTPUTS_UPDATED, status["outputs"])
# Create a task instead of adding a tracking job, since this task will
# run until the connection to satel_integra is closed.
hass.loop.create_task(controller.keep_alive())
hass.loop.create_task(
controller.monitor_status(
alarm_status_update_callback,
zones_update_callback,
outputs_update_callback)
)
return True
|
PetePriority/home-assistant
|
homeassistant/components/satel_integra/__init__.py
|
Python
|
apache-2.0
| 5,379 | 0 |
# coding=utf-8
from __future__ import unicode_literals
"""
Utility library for interacting with unimate
"""
import socket
import types
class Client(object):
"""
Unimate client
"""
def __init__(self, server, port):
if not isinstance(server, types.StringTypes):
raise TypeError("server must be a string")
if not isinstance(port, (int, long)):
raise TypeError("port must be an integer")
self._server = server
self._port = port
def send(self, message, room=None):
"""Broadcast a message through unimate"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self._server, self._port))
if isinstance(message, str):
message = message.decode('utf-8')
if room is None:
msg = "broadcast %s\r\n" % message
else:
if isinstance(room, str):
room = room.decode('utf-8')
msg = "broadcast %s %s\r\n" % (room, message)
assert isinstance(msg, unicode)
msg = msg.encode('utf-8')
sock.send(msg)
sock.close()
class DummyUnimate(object):
def send(self, message, room = None):
pass
if __name__ == '__main__':
Client("unimate.corp.smarkets.com", 12344).send(u"Tëßt")
|
smarkets/pyunimate
|
unimate.py
|
Python
|
mit
| 1,311 | 0.00382 |
# -*- coding: utf-8 -*-
# Use this file to easily define all of your cron jobs.
#
# It's helpful to understand cron before proceeding.
# http://en.wikipedia.org/wiki/Cron
#
# Learn more: http://github.com/fengsp/plan
from plan import Plan
cron = Plan()
# register one command, script or module
# cron.command('command', every='1.day')
# cron.script('script.py', path='/web/yourproject/scripts', every='1.month')
# cron.module('calendar', every='feburary', at='day.3')
if __name__ == "__main__":
cron.run()
|
supergis/git_notebook
|
other/schedule.py
|
Python
|
gpl-3.0
| 515 | 0 |
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Fourcot Florent
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.browser import BaseBrowser, BrowserIncorrectPassword, BrowserBanned
from .pages import HomePage, LoginPage, HistoryPage, BillsPage, ErrorPage
__all__ = ['PoivyBrowser']
class PoivyBrowser(BaseBrowser):
DOMAIN = 'www.poivy.com'
PROTOCOL = 'https'
ENCODING = None # refer to the HTML encoding
PAGES = {'.*login': LoginPage,
'.*buy_credit.*': HomePage,
'.*/recent_calls': HistoryPage,
'.*purchases': BillsPage,
'.*warning.*': ErrorPage
}
def __init__(self, *args, **kwargs):
BaseBrowser.__init__(self, *args, **kwargs)
def home(self):
self.location('/login')
def is_logged(self):
return not self.is_on_page(LoginPage)
def login(self):
assert isinstance(self.username, basestring)
assert isinstance(self.password, basestring)
if not self.is_on_page(LoginPage):
self.location('/login')
if not self.page.login(self.username, self.password):
            raise BrowserBanned('Too many connections from your IP address: captcha enabled')
if self.is_on_page(LoginPage) or self.is_on_page(ErrorPage):
raise BrowserIncorrectPassword()
def get_subscription_list(self):
if not self.is_on_page(HomePage):
self.location('/buy_credit')
return self.page.get_list()
def get_subscription(self, id):
assert isinstance(id, basestring)
l = self.get_subscription_list()
for a in l:
if a.id == id:
return a
return None
def get_history(self):
if not self.is_on_page(HistoryPage):
self.location('/recent_calls')
return self.page.get_calls()
def iter_bills(self, parentid):
if not self.is_on_page(BillsPage):
self.location('/purchases')
return self.page.date_bills()
def get_bill(self, id):
assert isinstance(id, basestring)
l = self.iter_bills(id)
for a in l:
if a.id == id:
return a
|
blckshrk/Weboob
|
modules/poivy/browser.py
|
Python
|
agpl-3.0
| 2,888 | 0.001385 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import logging
import os
import re
import subprocess
import sys
import time
from collections import defaultdict
from html import unescape
from urllib.error import URLError
from urllib.parse import quote, urlparse, urlsplit, urlunsplit
from urllib.request import urlretrieve
# because logging.setLoggerClass has to be called before logging.getLogger
from pelican.log import init
from pelican.settings import read_settings
from pelican.utils import SafeDatetime, slugify
logger = logging.getLogger(__name__)
def decode_wp_content(content, br=True):
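    """Wrap text blocks in <p> tags and optionally turn remaining newlines into
    <br />, protecting <pre> blocks with placeholder tokens (mirrors WordPress'
    wpautop behaviour)."""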
pre_tags = {}
if content.strip() == "":
return ""
content += "\n"
if "<pre" in content:
pre_parts = content.split("</pre>")
last_pre = pre_parts.pop()
content = ""
pre_index = 0
for pre_part in pre_parts:
start = pre_part.find("<pre")
if start == -1:
content = content + pre_part
continue
name = "<pre wp-pre-tag-{0}></pre>".format(pre_index)
pre_tags[name] = pre_part[start:] + "</pre>"
content = content + pre_part[0:start] + name
pre_index += 1
content = content + last_pre
content = re.sub(r'<br />\s*<br />', "\n\n", content)
allblocks = ('(?:table|thead|tfoot|caption|col|colgroup|tbody|tr|'
'td|th|div|dl|dd|dt|ul|ol|li|pre|select|option|form|'
'map|area|blockquote|address|math|style|p|h[1-6]|hr|'
'fieldset|noscript|samp|legend|section|article|aside|'
'hgroup|header|footer|nav|figure|figcaption|details|'
'menu|summary)')
content = re.sub(r'(<' + allblocks + r'[^>]*>)', "\n\\1", content)
content = re.sub(r'(</' + allblocks + r'>)', "\\1\n\n", content)
# content = content.replace("\r\n", "\n")
if "<object" in content:
# no <p> inside object/embed
content = re.sub(r'\s*<param([^>]*)>\s*', "<param\\1>", content)
content = re.sub(r'\s*</embed>\s*', '</embed>', content)
# content = re.sub(r'/\n\n+/', '\n\n', content)
pgraphs = filter(lambda s: s != "", re.split(r'\n\s*\n', content))
content = ""
for p in pgraphs:
content = content + "<p>" + p.strip() + "</p>\n"
# under certain strange conditions it could create
# a P of entirely whitespace
content = re.sub(r'<p>\s*</p>', '', content)
content = re.sub(
r'<p>([^<]+)</(div|address|form)>',
"<p>\\1</p></\\2>",
content)
# don't wrap tags
content = re.sub(
r'<p>\s*(</?' + allblocks + r'[^>]*>)\s*</p>',
"\\1",
content)
# problem with nested lists
content = re.sub(r'<p>(<li.*)</p>', "\\1", content)
content = re.sub(r'<p><blockquote([^>]*)>', "<blockquote\\1><p>", content)
content = content.replace('</blockquote></p>', '</p></blockquote>')
content = re.sub(r'<p>\s*(</?' + allblocks + '[^>]*>)', "\\1", content)
content = re.sub(r'(</?' + allblocks + r'[^>]*>)\s*</p>', "\\1", content)
if br:
def _preserve_newline(match):
return match.group(0).replace("\n", "<WPPreserveNewline />")
        content = re.sub(
            r'<(script|style).*?</\1>',
            _preserve_newline,
            content,
            flags=re.DOTALL)
# optionally make line breaks
content = re.sub(r'(?<!<br />)\s*\n', "<br />\n", content)
content = content.replace("<WPPreserveNewline />", "\n")
content = re.sub(
r'(</?' + allblocks + r'[^>]*>)\s*<br />', "\\1",
content)
content = re.sub(
r'<br />(\s*</?(?:p|li|div|dl|dd|dt|th|pre|td|ul|ol)[^>]*>)',
'\\1',
content)
content = re.sub(r'\n</p>', "</p>", content)
if pre_tags:
def _multi_replace(dic, string):
pattern = r'|'.join(map(re.escape, dic.keys()))
return re.sub(pattern, lambda m: dic[m.group()], string)
content = _multi_replace(pre_tags, content)
return content
def xml_to_soup(xml):
"""Opens an xml file"""
try:
from bs4 import BeautifulSoup
except ImportError:
error = ('Missing dependency "BeautifulSoup4" and "lxml" required to '
'import XML files.')
sys.exit(error)
with open(xml, encoding='utf-8') as infile:
xmlfile = infile.read()
soup = BeautifulSoup(xmlfile, "xml")
return soup
def get_filename(post_name, post_id):
if post_name is None or post_name.isspace():
return post_id
else:
return post_name
def wp2fields(xml, wp_custpost=False):
"""Opens a wordpress XML file, and yield Pelican fields"""
soup = xml_to_soup(xml)
items = soup.rss.channel.findAll('item')
for item in items:
if item.find('status').string in ["publish", "draft"]:
try:
# Use HTMLParser due to issues with BeautifulSoup 3
title = unescape(item.title.contents[0])
except IndexError:
title = 'No title [%s]' % item.find('post_name').string
logger.warning('Post "%s" is lacking a proper title', title)
post_name = item.find('post_name').string
post_id = item.find('post_id').string
filename = get_filename(post_name, post_id)
content = item.find('encoded').string
raw_date = item.find('post_date').string
if raw_date == u'0000-00-00 00:00:00':
date = None
else:
date_object = SafeDatetime.strptime(
raw_date, '%Y-%m-%d %H:%M:%S')
date = date_object.strftime('%Y-%m-%d %H:%M')
author = item.find('creator').string
categories = [cat.string for cat
in item.findAll('category', {'domain': 'category'})]
tags = [tag.string for tag
in item.findAll('category', {'domain': 'post_tag'})]
# To publish a post the status should be 'published'
status = 'published' if item.find('status').string == "publish" \
else item.find('status').string
kind = 'article'
post_type = item.find('post_type').string
if post_type == 'page':
kind = 'page'
elif wp_custpost:
if post_type == 'post':
pass
# Old behaviour was to name everything not a page as an
                # article. Theoretically all attachments have status == inherit
# so no attachments should be here. But this statement is to
# maintain existing behaviour in case that doesn't hold true.
elif post_type == 'attachment':
pass
else:
kind = post_type
yield (title, content, filename, date, author, categories,
tags, status, kind, 'wp-html')
def blogger2fields(xml):
"""Opens a blogger XML file, and yield Pelican fields"""
soup = xml_to_soup(xml)
entries = soup.feed.findAll('entry')
for entry in entries:
raw_kind = entry.find(
'category', {'scheme': 'http://schemas.google.com/g/2005#kind'}
).get('term')
if raw_kind == 'http://schemas.google.com/blogger/2008/kind#post':
kind = 'article'
elif raw_kind == 'http://schemas.google.com/blogger/2008/kind#comment':
kind = 'comment'
elif raw_kind == 'http://schemas.google.com/blogger/2008/kind#page':
kind = 'page'
else:
continue
try:
assert kind != 'comment'
filename = entry.find('link', {'rel': 'alternate'})['href']
filename = os.path.splitext(os.path.basename(filename))[0]
except (AssertionError, TypeError, KeyError):
filename = entry.find('id').string.split('.')[-1]
title = entry.find('title').string or ''
content = entry.find('content').string
raw_date = entry.find('published').string
if hasattr(SafeDatetime, 'fromisoformat'):
date_object = SafeDatetime.fromisoformat(raw_date)
else:
date_object = SafeDatetime.strptime(
raw_date[:23], '%Y-%m-%dT%H:%M:%S.%f')
date = date_object.strftime('%Y-%m-%d %H:%M')
author = entry.find('author').find('name').string
# blogger posts only have tags, no category
tags = [tag.get('term') for tag in entry.findAll(
'category', {'scheme': 'http://www.blogger.com/atom/ns#'})]
# Drafts have <app:control><app:draft>yes</app:draft></app:control>
status = 'published'
try:
if entry.find('control').find('draft').string == 'yes':
status = 'draft'
except AttributeError:
pass
yield (title, content, filename, date, author, None, tags, status,
kind, 'html')
def dc2fields(file):
"""Opens a Dotclear export file, and yield pelican fields"""
try:
from bs4 import BeautifulSoup
except ImportError:
error = ('Missing dependency '
'"BeautifulSoup4" and "lxml" required '
'to import Dotclear files.')
sys.exit(error)
in_cat = False
in_post = False
category_list = {}
posts = []
with open(file, 'r', encoding='utf-8') as f:
for line in f:
# remove final \n
line = line[:-1]
if line.startswith('[category'):
in_cat = True
elif line.startswith('[post'):
in_post = True
elif in_cat:
fields = line.split('","')
if not line:
in_cat = False
else:
# remove 1st and last ""
fields[0] = fields[0][1:]
# fields[-1] = fields[-1][:-1]
category_list[fields[0]] = fields[2]
elif in_post:
if not line:
in_post = False
break
else:
posts.append(line)
print("%i posts read." % len(posts))
settings = read_settings()
subs = settings['SLUG_REGEX_SUBSTITUTIONS']
for post in posts:
fields = post.split('","')
# post_id = fields[0][1:]
# blog_id = fields[1]
# user_id = fields[2]
cat_id = fields[3]
# post_dt = fields[4]
# post_tz = fields[5]
post_creadt = fields[6]
# post_upddt = fields[7]
# post_password = fields[8]
# post_type = fields[9]
post_format = fields[10]
# post_url = fields[11]
# post_lang = fields[12]
post_title = fields[13]
post_excerpt = fields[14]
post_excerpt_xhtml = fields[15]
post_content = fields[16]
post_content_xhtml = fields[17]
# post_notes = fields[18]
# post_words = fields[19]
# post_status = fields[20]
# post_selected = fields[21]
# post_position = fields[22]
# post_open_comment = fields[23]
# post_open_tb = fields[24]
# nb_comment = fields[25]
# nb_trackback = fields[26]
post_meta = fields[27]
# redirect_url = fields[28][:-1]
# remove seconds
post_creadt = ':'.join(post_creadt.split(':')[0:2])
author = ''
categories = []
tags = []
if cat_id:
categories = [category_list[id].strip() for id
in cat_id.split(',')]
# Get tags related to a post
tag = (post_meta.replace('{', '')
.replace('}', '')
.replace('a:1:s:3:\\"tag\\";a:', '')
.replace('a:0:', ''))
if len(tag) > 1:
if int(len(tag[:1])) == 1:
newtag = tag.split('"')[1]
tags.append(
BeautifulSoup(
newtag,
'xml'
)
# bs4 always outputs UTF-8
.decode('utf-8')
)
else:
i = 1
j = 1
while(i <= int(tag[:1])):
newtag = tag.split('"')[j].replace('\\', '')
tags.append(
BeautifulSoup(
newtag,
'xml'
)
# bs4 always outputs UTF-8
.decode('utf-8')
)
i = i + 1
if j < int(tag[:1]) * 2:
j = j + 2
"""
dotclear2 does not use markdown by default unless
you use the markdown plugin
Ref: http://plugins.dotaddict.org/dc2/details/formatting-markdown
"""
if post_format == "markdown":
content = post_excerpt + post_content
else:
content = post_excerpt_xhtml + post_content_xhtml
content = content.replace('\\n', '')
post_format = "html"
kind = 'article' # TODO: Recognise pages
status = 'published' # TODO: Find a way for draft posts
yield (post_title, content, slugify(post_title, regex_subs=subs),
post_creadt, author, categories, tags, status, kind,
post_format)
def posterous2fields(api_token, email, password):
"""Imports posterous posts"""
import base64
from datetime import timedelta
import json
import urllib.request as urllib_request
def get_posterous_posts(api_token, email, password, page=1):
        base64string = base64.b64encode(
            ("%s:%s" % (email, password)).encode('utf-8')).replace(b'\n', b'')
url = ("http://posterous.com/api/v2/users/me/sites/primary/"
"posts?api_token=%s&page=%d") % (api_token, page)
request = urllib_request.Request(url)
request.add_header('Authorization', 'Basic %s' % base64string.decode())
handle = urllib_request.urlopen(request)
posts = json.loads(handle.read().decode('utf-8'))
return posts
page = 1
posts = get_posterous_posts(api_token, email, password, page)
settings = read_settings()
subs = settings['SLUG_REGEX_SUBSTITUTIONS']
while len(posts) > 0:
posts = get_posterous_posts(api_token, email, password, page)
page += 1
for post in posts:
slug = post.get('slug')
if not slug:
slug = slugify(post.get('title'), regex_subs=subs)
tags = [tag.get('name') for tag in post.get('tags')]
raw_date = post.get('display_date')
date_object = SafeDatetime.strptime(
raw_date[:-6], '%Y/%m/%d %H:%M:%S')
offset = int(raw_date[-5:])
delta = timedelta(hours=(offset / 100))
date_object -= delta
date = date_object.strftime('%Y-%m-%d %H:%M')
kind = 'article' # TODO: Recognise pages
status = 'published' # TODO: Find a way for draft posts
yield (post.get('title'), post.get('body_cleaned'),
slug, date, post.get('user').get('display_name'),
[], tags, status, kind, 'html')
def tumblr2fields(api_key, blogname):
""" Imports Tumblr posts (API v2)"""
import json
import urllib.request as urllib_request
def get_tumblr_posts(api_key, blogname, offset=0):
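        # Fetch one page of posts from the Tumblr v2 API, requesting raw (unrendered) content.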
url = ("http://api.tumblr.com/v2/blog/%s.tumblr.com/"
"posts?api_key=%s&offset=%d&filter=raw") % (
blogname, api_key, offset)
request = urllib_request.Request(url)
handle = urllib_request.urlopen(request)
posts = json.loads(handle.read().decode('utf-8'))
return posts.get('response').get('posts')
offset = 0
posts = get_tumblr_posts(api_key, blogname, offset)
settings = read_settings()
subs = settings['SLUG_REGEX_SUBSTITUTIONS']
while len(posts) > 0:
for post in posts:
title = \
post.get('title') or \
post.get('source_title') or \
post.get('type').capitalize()
slug = post.get('slug') or slugify(title, regex_subs=subs)
tags = post.get('tags')
timestamp = post.get('timestamp')
date = SafeDatetime.fromtimestamp(int(timestamp)).strftime(
"%Y-%m-%d %H:%M:%S")
slug = SafeDatetime.fromtimestamp(int(timestamp)).strftime(
"%Y-%m-%d-") + slug
format = post.get('format')
content = post.get('body')
type = post.get('type')
if type == 'photo':
if format == 'markdown':
fmtstr = ''
else:
fmtstr = '<img alt="%s" src="%s" />'
content = ''
for photo in post.get('photos'):
content += '\n'.join(
fmtstr % (photo.get('caption'),
photo.get('original_size').get('url')))
content += '\n\n' + post.get('caption')
elif type == 'quote':
if format == 'markdown':
fmtstr = '\n\n— %s'
else:
fmtstr = '<p>— %s</p>'
content = post.get('text') + fmtstr % post.get('source')
elif type == 'link':
if format == 'markdown':
fmtstr = '[via](%s)\n\n'
else:
fmtstr = '<p><a href="%s">via</a></p>\n'
content = fmtstr % post.get('url') + post.get('description')
elif type == 'audio':
if format == 'markdown':
fmtstr = '[via](%s)\n\n'
else:
fmtstr = '<p><a href="%s">via</a></p>\n'
content = fmtstr % post.get('source_url') + \
post.get('caption') + \
post.get('player')
elif type == 'video':
if format == 'markdown':
fmtstr = '[via](%s)\n\n'
else:
fmtstr = '<p><a href="%s">via</a></p>\n'
source = fmtstr % post.get('source_url')
caption = post.get('caption')
players = '\n'.join(player.get('embed_code')
for player in post.get('player'))
content = source + caption + players
elif type == 'answer':
title = post.get('question')
content = ('<p>'
'<a href="%s" rel="external nofollow">%s</a>'
': %s'
'</p>\n'
' %s' % (post.get('asking_name'),
post.get('asking_url'),
post.get('question'),
post.get('answer')))
content = content.rstrip() + '\n'
kind = 'article'
status = 'published' # TODO: Find a way for draft posts
yield (title, content, slug, date, post.get('blog_name'), [type],
tags, status, kind, format)
offset += len(posts)
posts = get_tumblr_posts(api_key, blogname, offset)
def feed2fields(file):
"""Read a feed and yield pelican fields"""
import feedparser
d = feedparser.parse(file)
settings = read_settings()
subs = settings['SLUG_REGEX_SUBSTITUTIONS']
for entry in d.entries:
date = (time.strftime('%Y-%m-%d %H:%M', entry.updated_parsed)
if hasattr(entry, 'updated_parsed') else None)
author = entry.author if hasattr(entry, 'author') else None
tags = ([e['term'] for e in entry.tags]
if hasattr(entry, 'tags') else None)
slug = slugify(entry.title, regex_subs=subs)
kind = 'article'
yield (entry.title, entry.description, slug, date,
author, [], tags, None, kind, 'html')
def build_header(title, date, author, categories, tags, slug,
status=None, attachments=None):
"""Build a header from a list of fields"""
from docutils.utils import column_width
header = '%s\n%s\n' % (title, '#' * column_width(title))
if date:
header += ':date: %s\n' % date
if author:
header += ':author: %s\n' % author
if categories:
header += ':category: %s\n' % ', '.join(categories)
if tags:
header += ':tags: %s\n' % ', '.join(tags)
if slug:
header += ':slug: %s\n' % slug
if status:
header += ':status: %s\n' % status
if attachments:
header += ':attachments: %s\n' % ', '.join(attachments)
header += '\n'
return header
def build_markdown_header(title, date, author, categories, tags,
slug, status=None, attachments=None):
"""Build a header from a list of fields"""
header = 'Title: %s\n' % title
if date:
header += 'Date: %s\n' % date
if author:
header += 'Author: %s\n' % author
if categories:
header += 'Category: %s\n' % ', '.join(categories)
if tags:
header += 'Tags: %s\n' % ', '.join(tags)
if slug:
header += 'Slug: %s\n' % slug
if status:
header += 'Status: %s\n' % status
if attachments:
header += 'Attachments: %s\n' % ', '.join(attachments)
header += '\n'
return header
def get_ext(out_markup, in_markup='html'):
if in_markup == 'markdown' or out_markup == 'markdown':
ext = '.md'
else:
ext = '.rst'
return ext
def get_out_filename(output_path, filename, ext, kind,
dirpage, dircat, categories, wp_custpost, slug_subs):
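    """Build the output path for a post, honouring the pages/ subdirectory,
    category-directory and custom-post-type directory options."""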
filename = os.path.basename(filename)
# Enforce filename restrictions for various filesystems at once; see
# http://en.wikipedia.org/wiki/Filename#Reserved_characters_and_words
# we do not need to filter words because an extension will be appended
filename = re.sub(r'[<>:"/\\|?*^% ]', '-', filename) # invalid chars
filename = filename.lstrip('.') # should not start with a dot
if not filename:
filename = '_'
filename = filename[:249] # allow for 5 extra characters
out_filename = os.path.join(output_path, filename + ext)
# option to put page posts in pages/ subdirectory
if dirpage and kind == 'page':
pages_dir = os.path.join(output_path, 'pages')
if not os.path.isdir(pages_dir):
os.mkdir(pages_dir)
out_filename = os.path.join(pages_dir, filename + ext)
elif not dirpage and kind == 'page':
pass
# option to put wp custom post types in directories with post type
# names. Custom post types can also have categories so option to
# create subdirectories with category names
elif kind != 'article':
if wp_custpost:
typename = slugify(kind, regex_subs=slug_subs)
else:
typename = ''
kind = 'article'
if dircat and (len(categories) > 0):
catname = slugify(categories[0], regex_subs=slug_subs)
else:
catname = ''
out_filename = os.path.join(output_path, typename,
catname, filename + ext)
if not os.path.isdir(os.path.join(output_path, typename, catname)):
os.makedirs(os.path.join(output_path, typename, catname))
# option to put files in directories with categories names
elif dircat and (len(categories) > 0):
catname = slugify(categories[0], regex_subs=slug_subs)
out_filename = os.path.join(output_path, catname, filename + ext)
if not os.path.isdir(os.path.join(output_path, catname)):
os.mkdir(os.path.join(output_path, catname))
return out_filename
def get_attachments(xml):
"""returns a dictionary of posts that have attachments with a list
of the attachment_urls
"""
soup = xml_to_soup(xml)
items = soup.rss.channel.findAll('item')
names = {}
attachments = []
for item in items:
kind = item.find('post_type').string
post_name = item.find('post_name').string
post_id = item.find('post_id').string
if kind == 'attachment':
attachments.append((item.find('post_parent').string,
item.find('attachment_url').string))
else:
filename = get_filename(post_name, post_id)
names[post_id] = filename
attachedposts = defaultdict(set)
for parent, url in attachments:
try:
parent_name = names[parent]
except KeyError:
# attachment's parent is not a valid post
parent_name = None
attachedposts[parent_name].add(url)
return attachedposts
def download_attachments(output_path, urls):
"""Downloads WordPress attachments and returns a list of paths to
attachments that can be associated with a post (relative path to output
directory). Files that fail to download, will not be added to posts"""
locations = {}
for url in urls:
path = urlparse(url).path
# teardown path and rebuild to negate any errors with
# os.path.join and leading /'s
path = path.split('/')
filename = path.pop(-1)
localpath = ''
for item in path:
if sys.platform != 'win32' or ':' not in item:
localpath = os.path.join(localpath, item)
full_path = os.path.join(output_path, localpath)
# Generate percent-encoded URL
scheme, netloc, path, query, fragment = urlsplit(url)
path = quote(path)
url = urlunsplit((scheme, netloc, path, query, fragment))
if not os.path.exists(full_path):
os.makedirs(full_path)
print('downloading {}'.format(filename))
try:
urlretrieve(url, os.path.join(full_path, filename))
locations[url] = os.path.join(localpath, filename)
except (URLError, IOError) as e:
# Python 2.7 throws an IOError rather Than URLError
logger.warning("No file could be downloaded from %s\n%s", url, e)
return locations
def is_pandoc_needed(in_markup):
return in_markup in ('html', 'wp-html')
def get_pandoc_version():
cmd = ['pandoc', '--version']
try:
output = subprocess.check_output(cmd, universal_newlines=True)
except (subprocess.CalledProcessError, OSError) as e:
logger.warning("Pandoc version unknown: %s", e)
return ()
return tuple(int(i) for i in output.split()[1].split('.'))
def update_links_to_attached_files(content, attachments):
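    """Replace each attachment URL in the content with its local {static} path,
    matching both the http:// and https:// form of the URL."""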
for old_url, new_path in attachments.items():
# url may occur both with http:// and https://
http_url = old_url.replace('https://', 'http://')
https_url = old_url.replace('http://', 'https://')
for url in [http_url, https_url]:
content = content.replace(url, '{static}' + new_path)
return content
def fields2pelican(
fields, out_markup, output_path,
dircat=False, strip_raw=False, disable_slugs=False,
dirpage=False, filename_template=None, filter_author=None,
wp_custpost=False, wp_attach=False, attachments=None):
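    """Write the imported fields out as Pelican source files, converting
    HTML content to the requested markup with pandoc when needed."""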
pandoc_version = get_pandoc_version()
posts_require_pandoc = []
settings = read_settings()
slug_subs = settings['SLUG_REGEX_SUBSTITUTIONS']
for (title, content, filename, date, author, categories, tags, status,
kind, in_markup) in fields:
if filter_author and filter_author != author:
continue
if is_pandoc_needed(in_markup) and not pandoc_version:
posts_require_pandoc.append(filename)
slug = not disable_slugs and filename or None
if wp_attach and attachments:
try:
urls = attachments[filename]
links = download_attachments(output_path, urls)
except KeyError:
links = None
else:
links = None
ext = get_ext(out_markup, in_markup)
if ext == '.md':
header = build_markdown_header(
title, date, author, categories, tags, slug,
status, links.values() if links else None)
else:
out_markup = 'rst'
header = build_header(title, date, author, categories,
tags, slug, status, links.values()
if links else None)
out_filename = get_out_filename(
output_path, filename, ext, kind, dirpage, dircat,
categories, wp_custpost, slug_subs)
print(out_filename)
if in_markup in ('html', 'wp-html'):
html_filename = os.path.join(output_path, filename + '.html')
with open(html_filename, 'w', encoding='utf-8') as fp:
# Replace newlines with paragraphs wrapped with <p> so
# HTML is valid before conversion
if in_markup == 'wp-html':
new_content = decode_wp_content(content)
else:
paragraphs = content.splitlines()
paragraphs = ['<p>{0}</p>'.format(p) for p in paragraphs]
new_content = ''.join(paragraphs)
fp.write(new_content)
if pandoc_version < (2,):
parse_raw = '--parse-raw' if not strip_raw else ''
wrap_none = '--wrap=none' \
if pandoc_version >= (1, 16) else '--no-wrap'
cmd = ('pandoc --normalize {0} --from=html'
' --to={1} {2} -o "{3}" "{4}"')
cmd = cmd.format(parse_raw, out_markup, wrap_none,
out_filename, html_filename)
else:
from_arg = '-f html+raw_html' if not strip_raw else '-f html'
cmd = ('pandoc {0} --to={1}-smart --wrap=none -o "{2}" "{3}"')
cmd = cmd.format(from_arg, out_markup,
out_filename, html_filename)
try:
rc = subprocess.call(cmd, shell=True)
if rc < 0:
error = 'Child was terminated by signal %d' % -rc
exit(error)
elif rc > 0:
error = 'Please, check your Pandoc installation.'
exit(error)
except OSError as e:
error = 'Pandoc execution failed: %s' % e
exit(error)
os.remove(html_filename)
with open(out_filename, 'r', encoding='utf-8') as fs:
content = fs.read()
if out_markup == 'markdown':
# In markdown, to insert a <br />, end a line with two
                # or more spaces & then an end-of-line
content = content.replace('\\\n ', ' \n')
content = content.replace('\\\n', ' \n')
if wp_attach and links:
content = update_links_to_attached_files(content, links)
with open(out_filename, 'w', encoding='utf-8') as fs:
fs.write(header + content)
if posts_require_pandoc:
logger.error("Pandoc must be installed to import the following posts:"
"\n {}".format("\n ".join(posts_require_pandoc)))
if wp_attach and attachments and None in attachments:
print("downloading attachments that don't have a parent post")
urls = attachments[None]
download_attachments(output_path, urls)
def main():
parser = argparse.ArgumentParser(
description="Transform feed, Blogger, Dotclear, Posterous, Tumblr, or"
"WordPress files into reST (rst) or Markdown (md) files. "
"Be sure to have pandoc installed.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
dest='input', help='The input file to read')
parser.add_argument(
'--blogger', action='store_true', dest='blogger',
help='Blogger XML export')
parser.add_argument(
'--dotclear', action='store_true', dest='dotclear',
help='Dotclear export')
parser.add_argument(
'--posterous', action='store_true', dest='posterous',
help='Posterous export')
parser.add_argument(
'--tumblr', action='store_true', dest='tumblr',
help='Tumblr export')
parser.add_argument(
'--wpfile', action='store_true', dest='wpfile',
help='Wordpress XML export')
parser.add_argument(
'--feed', action='store_true', dest='feed',
help='Feed to parse')
parser.add_argument(
'-o', '--output', dest='output', default='content',
help='Output path')
parser.add_argument(
'-m', '--markup', dest='markup', default='rst',
help='Output markup format (supports rst & markdown)')
parser.add_argument(
'--dir-cat', action='store_true', dest='dircat',
help='Put files in directories with categories name')
parser.add_argument(
'--dir-page', action='store_true', dest='dirpage',
help=('Put files recognised as pages in "pages/" sub-directory'
' (blogger and wordpress import only)'))
parser.add_argument(
'--filter-author', dest='author',
help='Import only post from the specified author')
parser.add_argument(
'--strip-raw', action='store_true', dest='strip_raw',
help="Strip raw HTML code that can't be converted to "
"markup such as flash embeds or iframes (wordpress import only)")
parser.add_argument(
'--wp-custpost', action='store_true',
dest='wp_custpost',
help='Put wordpress custom post types in directories. If used with '
'--dir-cat option directories will be created as '
'/post_type/category/ (wordpress import only)')
parser.add_argument(
'--wp-attach', action='store_true', dest='wp_attach',
help='(wordpress import only) Download files uploaded to wordpress as '
'attachments. Files will be added to posts as a list in the post '
'header. All files will be downloaded, even if '
"they aren't associated with a post. Files will be downloaded "
'with their original path inside the output directory. '
'e.g. output/wp-uploads/date/postname/file.jpg '
'-- Requires an internet connection --')
parser.add_argument(
'--disable-slugs', action='store_true',
dest='disable_slugs',
help='Disable storing slugs from imported posts within output. '
'With this disabled, your Pelican URLs may not be consistent '
'with your original posts.')
parser.add_argument(
'-e', '--email', dest='email',
help="Email address (posterous import only)")
parser.add_argument(
'-p', '--password', dest='password',
help="Password (posterous import only)")
parser.add_argument(
'-b', '--blogname', dest='blogname',
help="Blog name (Tumblr import only)")
args = parser.parse_args()
input_type = None
if args.blogger:
input_type = 'blogger'
elif args.dotclear:
input_type = 'dotclear'
elif args.posterous:
input_type = 'posterous'
elif args.tumblr:
input_type = 'tumblr'
elif args.wpfile:
input_type = 'wordpress'
elif args.feed:
input_type = 'feed'
else:
error = ('You must provide either --blogger, --dotclear, '
'--posterous, --tumblr, --wpfile or --feed options')
exit(error)
if not os.path.exists(args.output):
try:
os.mkdir(args.output)
except OSError:
error = 'Unable to create the output folder: ' + args.output
exit(error)
if args.wp_attach and input_type != 'wordpress':
error = ('You must be importing a wordpress xml '
'to use the --wp-attach option')
exit(error)
if input_type == 'blogger':
fields = blogger2fields(args.input)
elif input_type == 'dotclear':
fields = dc2fields(args.input)
elif input_type == 'posterous':
fields = posterous2fields(args.input, args.email, args.password)
elif input_type == 'tumblr':
fields = tumblr2fields(args.input, args.blogname)
elif input_type == 'wordpress':
fields = wp2fields(args.input, args.wp_custpost or False)
elif input_type == 'feed':
fields = feed2fields(args.input)
if args.wp_attach:
attachments = get_attachments(args.input)
else:
attachments = None
# init logging
init()
fields2pelican(fields, args.markup, args.output,
dircat=args.dircat or False,
dirpage=args.dirpage or False,
strip_raw=args.strip_raw or False,
disable_slugs=args.disable_slugs or False,
filter_author=args.author,
wp_custpost=args.wp_custpost or False,
wp_attach=args.wp_attach or False,
attachments=attachments or None)
|
talha131/pelican
|
pelican/tools/pelican_import.py
|
Python
|
agpl-3.0
| 37,631 | 0 |
# coding=utf-8
# Author: Dustyn Gibson <miigotu@gmail.com>
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import posixpath # Must use posixpath
from urllib import urlencode
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.bs4_parser import BS4Parser
from sickrage.helper.common import try_int, convert_size
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class ThePirateBayProvider(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
TorrentProvider.__init__(self, "ThePirateBay")
self.public = True
self.ratio = None
self.confirmed = True
self.minseed = None
self.minleech = None
self.cache = ThePirateBayCache(self)
self.url = 'https://thepiratebay.se/'
self.urls = {
'search': self.url + 's/',
'rss': self.url + 'tv/latest'
}
self.custom_url = None
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals, too-many-branches
results = []
"""
205 = SD, 208 = HD, 200 = All Videos
https://pirateproxy.pl/s/?q=Game of Thrones&type=search&orderby=7&page=0&category=200
"""
search_params = {
'q': '',
'type': 'search',
'orderby': 7,
'page': 0,
'category': 200
}
for mode in search_strings:
items = []
logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
logger.log(u"Search string: %s " % search_string, logger.DEBUG)
search_params['q'] = search_string.strip()
search_url = self.urls[('search', 'rss')[mode == 'RSS']] + '?' + urlencode(search_params)
if self.custom_url:
search_url = posixpath.join(self.custom_url, search_url.split(self.url)[1].lstrip('/')) # Must use posixpath
logger.log(u"Search URL: %s" % search_url, logger.DEBUG)
data = self.get_url(search_url)
if not data:
logger.log(u'URL did not return data, maybe try a custom url, or a different one', logger.DEBUG)
continue
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('table', id='searchResult')
torrent_rows = torrent_table.find_all('tr') if torrent_table else []
                    # Continue only if at least one release is found
if len(torrent_rows) < 2:
logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
continue
def process_column_header(th):
result = ''
if th.a:
result = th.a.get_text(strip=True)
if not result:
result = th.get_text(strip=True)
return result
labels = [process_column_header(label) for label in torrent_rows[0].find_all('th')]
for result in torrent_rows[1:]:
try:
cells = result.find_all('td')
title = result.find(class_='detName').get_text(strip=True)
download_url = result.find(title="Download this torrent using magnet")['href']
if not all([title, download_url]):
continue
seeders = try_int(cells[labels.index('SE')].get_text(strip=True))
leechers = try_int(cells[labels.index('LE')].get_text(strip=True))
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
# Accept Torrent only from Good People for every Episode Search
if self.confirmed and result.find(alt=re.compile(r'(VIP|Trusted|Helper|Moderator)')):
if mode != 'RSS':
logger.log(u"Found result %s but that doesn't seem like a trusted result so I'm ignoring it" % title, logger.DEBUG)
continue
# Convert size after all possible skip scenarios
torrent_size = cells[labels.index('Name')].find(class_='detDesc').get_text(strip=True).split(', ')[1]
torrent_size = re.sub(r'Size ([\d.]+).+([KMGT]iB)', r'\1 \2', torrent_size)
size = convert_size(torrent_size) or -1
item = title, download_url, size, seeders, leechers
if mode != 'RSS':
logger.log(u"Found result: %s " % title, logger.DEBUG)
items.append(item)
except StandardError:
continue
# For each search mode sort all the items by seeders if available
items.sort(key=lambda tup: tup[3], reverse=True)
results += items
return results
def seed_ratio(self):
return self.ratio
class ThePirateBayCache(tvcache.TVCache):
def __init__(self, provider_obj):
tvcache.TVCache.__init__(self, provider_obj)
# only poll ThePirateBay every 30 minutes max
self.minTime = 30
def _getRSSData(self):
search_strings = {'RSS': ['']}
return {'entries': self.provider.search(search_strings)}
provider = ThePirateBayProvider()
|
adaur/SickRage
|
sickbeard/providers/thepiratebay.py
|
Python
|
gpl-3.0
| 6,703 | 0.003431 |
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Tests for grr.lib.flows.general.services."""
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.lib import test_lib
class ServicesTest(test_lib.FlowTestsBaseclass):
def testEnumerateRunningServices(self):
class ClientMock(object):
def EnumerateRunningServices(self, _):
service = rdfvalue.Service(label="org.openbsd.ssh-agent",
args="/usr/bin/ssh-agent -l")
service.osx_launchd.sessiontype = "Aqua"
service.osx_launchd.lastexitstatus = 0
service.osx_launchd.timeout = 30
service.osx_launchd.ondemand = 1
return [service]
# Run the flow in the emulated way.
for _ in test_lib.TestFlowHelper(
"EnumerateRunningServices", ClientMock(), client_id=self.client_id,
token=self.token):
pass
# Check the output file is created
fd = aff4.FACTORY.Open(rdfvalue.RDFURN(self.client_id)
.Add("analysis/Services"),
token=self.token)
self.assertEqual(fd.__class__.__name__, "RDFValueCollection")
jobs = list(fd)
self.assertEqual(len(fd), 1)
self.assertEqual(jobs[0].label, "org.openbsd.ssh-agent")
self.assertEqual(jobs[0].args, "/usr/bin/ssh-agent -l")
self.assertIsInstance(jobs[0], rdfvalue.Service)
|
MiniSEC/GRR_clone
|
lib/flows/general/services_test.py
|
Python
|
apache-2.0
| 1,398 | 0.003577 |
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Jocelyn Jaubert
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
# python2.5 compatibility
from __future__ import with_statement
from weboob.capabilities.bank import ICapBank, AccountNotFound
from weboob.tools.backend import BaseBackend, BackendConfig
from weboob.tools.value import ValueBackendPassword
from .browser import SocieteGenerale
__all__ = ['SocieteGeneraleBackend']
class SocieteGeneraleBackend(BaseBackend, ICapBank):
NAME = 'societegenerale'
MAINTAINER = u'Jocelyn Jaubert'
EMAIL = 'jocelyn.jaubert@gmail.com'
VERSION = '0.f'
LICENSE = 'AGPLv3+'
DESCRIPTION = u'Société Générale French bank website'
CONFIG = BackendConfig(ValueBackendPassword('login', label='Account ID', masked=False),
ValueBackendPassword('password', label='Password'))
BROWSER = SocieteGenerale
def create_default_browser(self):
return self.create_browser(self.config['login'].get(),
self.config['password'].get())
def iter_accounts(self):
for account in self.browser.get_accounts_list():
yield account
def get_account(self, _id):
with self.browser:
account = self.browser.get_account(_id)
if account:
return account
else:
raise AccountNotFound()
def iter_history(self, account):
with self.browser:
for tr in self.browser.iter_history(account):
if not tr._coming:
yield tr
def iter_coming(self, account):
with self.browser:
for tr in self.browser.iter_history(account):
if tr._coming:
yield tr
|
franek/weboob
|
modules/societegenerale/backend.py
|
Python
|
agpl-3.0
| 2,388 | 0.000839 |
import volmdlr
import volmdlr.edges
import volmdlr.wires
import volmdlr.faces
p1 = volmdlr.Point3D(0.15, 0.48, 0.5)
p2 = volmdlr.Point3D(0.15, 0.1, 0.5)
p1s = volmdlr.Point2D(0, 0)
p2s = volmdlr.Point2D(0.1, 0)
p3s = volmdlr.Point2D(0.2, 0.1)
p4s = volmdlr.Point2D(-0.01, 0.05)
surface2d = volmdlr.faces.Surface2D(volmdlr.wires.ClosedPolygon2D([p1s, p2s, p3s, p4s]), [])
u = volmdlr.Vector3D(0.1, 0.7, -0.5)
u.normalize()
v = u.deterministic_unit_normal_vector()
w = u.cross(v)
plane = volmdlr.faces.Plane3D(frame=volmdlr.Frame3D(0.1*volmdlr.X3D, u, v, w))
face = volmdlr.faces.PlaneFace3D(plane, surface2d)
ax = face.plot()
p1.plot(ax=ax, color='b')
p2.plot(ax=ax, color='g')
l1 = volmdlr.edges.LineSegment3D(p1, p1+w)
l2 = volmdlr.edges.LineSegment3D(p2, p2+w)
l1.plot(ax=ax, color='b')
l2.plot(ax=ax, color='g')
i1 = face.linesegment_intersections(l1)
if i1:
i1[0].plot(ax=ax, color='r')
i2 = face.linesegment_intersections(l2)
if i2:
i2[0].plot(ax=ax, color='r')
plane_inter_1 = plane.linesegment_intersections(l1)
if plane_inter_1:
plane_inter_1[0].plot(ax=ax, color='b')
plane_inter_2 = plane.linesegment_intersections(l2)
if plane_inter_2:
plane_inter_2[0].plot(ax=ax, color='g')
plane_inter_1_2d = plane.point3d_to_2d(plane_inter_1[0])
plane_inter_2_2d = plane.point3d_to_2d(plane_inter_2[0])
ax2 = face.surface2d.plot()
plane_inter_1_2d.plot(ax=ax2, color='b')
plane_inter_2_2d.plot(ax=ax2, color='g')
assert surface2d.point_belongs(plane_inter_1_2d) == True
assert surface2d.point_belongs(plane_inter_2_2d) == False
p1_2dto3d = plane.point2d_to_3d(plane_inter_1_2d)
p1_2dto3d.plot(ax=ax, color='b')
assert p1_2dto3d == plane_inter_1[0]
face.babylonjs()
|
masfaraud/volmdlr
|
scripts/faces/planar.py
|
Python
|
gpl-3.0
| 1,693 | 0.002363 |
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Nicolas Duhamel
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from decimal import Decimal
from weboob.capabilities.bank import Account, AccountNotFound
from weboob.tools.browser import BasePage
from weboob.tools.misc import to_unicode
from weboob.tools.capabilities.bank.transactions import FrenchTransaction
from weboob.tools.ordereddict import OrderedDict
__all__ = ['AccountList']
class AccountList(BasePage):
def on_loaded(self):
self.accounts = OrderedDict()
self.parse_table('comptes', Account.TYPE_CHECKING)
self.parse_table('comptesEpargne', Account.TYPE_SAVINGS)
self.parse_table('comptesTitres', Account.TYPE_MARKET)
self.parse_table('comptesVie', Account.TYPE_DEPOSIT)
self.parse_table('comptesRetraireEuros')
def get_accounts_list(self):
return self.accounts.itervalues()
def parse_table(self, what, actype=Account.TYPE_UNKNOWN):
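        """Parse one account table of the given type; entries sharing an id are
        treated as card accounts and their balance is added to the existing
        account's coming amount."""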
tables = self.document.xpath("//table[@id='%s']" % what, smart_strings=False)
if len(tables) < 1:
return
lines = tables[0].xpath(".//tbody/tr")
for line in lines:
account = Account()
tmp = line.xpath("./td//a")[0]
account.label = to_unicode(tmp.text)
account.type = actype
account._link_id = tmp.get("href")
if 'BourseEnLigne' in account._link_id:
account.type = Account.TYPE_MARKET
tmp = line.xpath("./td/span/strong")
if len(tmp) >= 2:
tmp_id = tmp[0].text
tmp_balance = tmp[1].text
else:
tmp_id = line.xpath("./td//span")[1].text
tmp_balance = tmp[0].text
account.id = tmp_id
account.currency = account.get_currency(tmp_balance)
account.balance = Decimal(FrenchTransaction.clean_amount(tmp_balance))
if account.id in self.accounts:
a = self.accounts[account.id]
a._card_links.append(account._link_id)
if not a.coming:
a.coming = Decimal('0.0')
a.coming += account.balance
else:
account._card_links = []
self.accounts[account.id] = account
def get_account(self, id):
try:
return self.accounts[id]
except KeyError:
raise AccountNotFound('Unable to find account: %s' % id)
|
yannrouillard/weboob
|
modules/bp/pages/accountlist.py
|
Python
|
agpl-3.0
| 3,140 | 0.000637 |
# -*- coding: utf-8 -*-
# © 2014-2015 Avanzosc
# © 2014-2015 Pedro M. Baeza
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import models
|
Endika/manufacture
|
mrp_production_real_cost/__init__.py
|
Python
|
agpl-3.0
| 165 | 0 |
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2011, Timothy Legge <timlegge@gmail.com> and Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
from contextlib import closing
class Bookmark(): # {{{
'''
A simple class fetching bookmark data
kobo-specific
'''
def __init__(self, db_path, contentid, path, id, book_format, bookmark_extension):
self.book_format = book_format
self.bookmark_extension = bookmark_extension
self.book_length = 0 # Not Used
self.id = id
self.last_read = 0
self.last_read_location = 0 # Not Used
self.path = path
self.timestamp = 0
self.user_notes = None
self.db_path = db_path
self.contentid = contentid
self.percent_read = 0
self.get_bookmark_data()
self.get_book_length() # Not Used
def get_bookmark_data(self):
''' Return the timestamp and last_read_location '''
import sqlite3 as sqlite
user_notes = {}
self.timestamp = os.path.getmtime(self.path)
with closing(sqlite.connect(self.db_path)) as connection:
# return bytestrings if the content cannot the decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor = connection.cursor()
t = (self.contentid,)
cursor.execute('select bm.bookmarkid, bm.contentid, bm.volumeid, '
'bm.text, bm.annotation, bm.ChapterProgress, '
'bm.StartContainerChildIndex, bm.StartOffset, c.BookTitle, '
'c.TITLE, c.volumeIndex, c.___NumPages '
'from Bookmark bm inner join Content c on '
'bm.contentid = c.contentid and '
'bm.volumeid = ? order by bm.volumeid, bm.chapterprogress', t)
previous_chapter = 0
bm_count = 0
for row in cursor:
current_chapter = row[10]
if previous_chapter == current_chapter:
bm_count = bm_count + 1
else:
bm_count = 0
text = row[3]
annotation = row[4]
# A dog ear (bent upper right corner) is a bookmark
if row[6] == row[7] == 0: # StartContainerChildIndex = StartOffset = 0
e_type = 'Bookmark'
text = row[9]
# highlight is text with no annotation
elif text is not None and (annotation is None or annotation == ""):
e_type = 'Highlight'
elif text and annotation:
e_type = 'Annotation'
else:
e_type = 'Unknown annotation type'
note_id = row[10] + bm_count
chapter_title = row[9]
# book_title = row[8]
chapter_progress = min(round(float(100*row[5]),2),100)
user_notes[note_id] = dict(id=self.id,
displayed_location=note_id,
type=e_type,
text=text,
annotation=annotation,
chapter=row[10],
chapter_title=chapter_title,
chapter_progress=chapter_progress)
previous_chapter = row[10]
# debug_print("e_type:" , e_type, '\t', 'loc: ', note_id, 'text: ', text,
# 'annotation: ', annotation, 'chapter_title: ', chapter_title,
# 'chapter_progress: ', chapter_progress, 'date: ')
cursor.execute('select datelastread, ___PercentRead from content '
'where bookid is Null and '
'contentid = ?', t)
for row in cursor:
self.last_read = row[0]
self.percent_read = row[1]
# print row[1]
cursor.close()
# self.last_read_location = self.last_read - self.pdf_page_offset
self.user_notes = user_notes
def get_book_length(self):
#TL self.book_length = 0
#TL self.book_length = int(unpack('>I', record0[0x04:0x08])[0])
pass
# }}}
|
insomnia-lab/calibre
|
src/calibre/devices/kobo/bookmark.py
|
Python
|
gpl-3.0
| 4,557 | 0.007022 |
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
from djangosige.apps.estoque.models import LocalEstoque
class LocalEstoqueForm(forms.ModelForm):
class Meta:
model = LocalEstoque
fields = ('descricao',)
widgets = {
'descricao': forms.TextInput(attrs={'class': 'form-control'}),
}
labels = {
'descricao': _('Descrição'),
}
|
thiagopena/djangoSIGE
|
djangosige/apps/estoque/forms/local.py
|
Python
|
mit
| 461 | 0 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
from pyspark import pandas as ps
from pyspark.pandas.exceptions import PandasNotImplementedError
from pyspark.pandas.missing.window import (
MissingPandasLikeExpanding,
MissingPandasLikeRolling,
MissingPandasLikeExpandingGroupby,
MissingPandasLikeRollingGroupby,
)
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class ExpandingRollingTest(PandasOnSparkTestCase, TestUtils):
def test_missing(self):
psdf = ps.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
# Expanding functions
missing_functions = inspect.getmembers(MissingPandasLikeExpanding, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.expanding(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.expanding(1), name)() # Series
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.expanding(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.a.expanding(1), name)() # Series
# Rolling functions
missing_functions = inspect.getmembers(MissingPandasLikeRolling, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.rolling(1), name)() # Series
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.a.rolling(1), name)() # Series
# Expanding properties
missing_properties = inspect.getmembers(
MissingPandasLikeExpanding, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.expanding(1), name) # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.expanding(1), name) # Series
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.expanding(1), name) # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.a.expanding(1), name) # Series
# Rolling properties
missing_properties = inspect.getmembers(
MissingPandasLikeRolling, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.rolling(1), name)() # Series
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.a.rolling(1), name)() # Series
def test_missing_groupby(self):
psdf = ps.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
# Expanding functions
missing_functions = inspect.getmembers(
MissingPandasLikeExpandingGroupby, inspect.isfunction
)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.groupby("a").expanding(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.groupby(psdf.a).expanding(1), name)() # Series
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.groupby("a").expanding(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.a.groupby(psdf.a).expanding(1), name)() # Series
# Rolling functions
missing_functions = inspect.getmembers(MissingPandasLikeRollingGroupby, inspect.isfunction)
unsupported_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "unsupported_function"
]
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.groupby("a").rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.groupby(psdf.a).rolling(1), name)() # Series
deprecated_functions = [
name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function"
]
for name in deprecated_functions:
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "method.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.a.rolling(1), name)() # Series
# Expanding properties
missing_properties = inspect.getmembers(
MissingPandasLikeExpandingGroupby, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.groupby("a").expanding(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Expanding.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.groupby(psdf.a).expanding(1), name)() # Series
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.expanding(1), name) # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Expanding.*{}.*is deprecated".format(name)
):
getattr(psdf.a.expanding(1), name) # Series
# Rolling properties
missing_properties = inspect.getmembers(
MissingPandasLikeRollingGroupby, lambda o: isinstance(o, property)
)
unsupported_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "unsupported_property"
]
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.groupby("a").rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Rolling.*{}.*not implemented( yet\\.|\\. .+)".format(name),
):
getattr(psdf.a.groupby(psdf.a).rolling(1), name)() # Series
deprecated_properties = [
name
for (name, type_) in missing_properties
if type_.fget.__name__ == "deprecated_property"
]
for name in deprecated_properties:
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.rolling(1), name)() # Frame
with self.assertRaisesRegex(
PandasNotImplementedError, "property.*Rolling.*{}.*is deprecated".format(name)
):
getattr(psdf.a.rolling(1), name)() # Series
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_window import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
ueshin/apache-spark
|
python/pyspark/pandas/tests/test_window.py
|
Python
|
apache-2.0
| 13,671 | 0.003292 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class ScheduleConfig(AppConfig):
name = 'schedule'
|
CongBao/mrsys.online
|
sub_mrsys/schedule/apps.py
|
Python
|
apache-2.0
| 156 | 0 |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used for testing. The original is at:
# http://code.google.com/p/pymox/
class StubOutForTesting:
"""Sample Usage:
You want os.path.exists() to always return true during testing.
stubs = StubOutForTesting()
stubs.Set(os.path, 'exists', lambda x: 1)
...
stubs.UnsetAll()
The above changes os.path.exists into a lambda that returns 1. Once
the ... part of the code finishes, the UnsetAll() looks up the old value
of os.path.exists and restores it.
"""
def __init__(self):
self.cache = []
self.stubs = []
def __del__(self):
self.SmartUnsetAll()
self.UnsetAll()
def SmartSet(self, obj, attr_name, new_attr):
"""Replace obj.attr_name with new_attr. This method is smart and works
at the module, class, and instance level while preserving proper
inheritance. It will not stub out C types however unless that has been
explicitly allowed by the type.
This method supports the case where attr_name is a staticmethod or a
classmethod of obj.
Notes:
- If obj is an instance, then it is its class that will actually be
stubbed. Note that the method Set() does not do that: if obj is
an instance, it (and not its class) will be stubbed.
- The stubbing is using the builtin getattr and setattr. So, the __get__
and __set__ will be called when stubbing (TODO: A better idea would
probably be to manipulate obj.__dict__ instead of getattr() and
setattr()).
Raises AttributeError if the attribute cannot be found.
"""
if (inspect.ismodule(obj) or
(not inspect.isclass(obj) and attr_name in obj.__dict__)):
orig_obj = obj
orig_attr = getattr(obj, attr_name)
else:
if not inspect.isclass(obj):
mro = list(inspect.getmro(obj.__class__))
else:
mro = list(inspect.getmro(obj))
mro.reverse()
orig_attr = None
for cls in mro:
try:
orig_obj = cls
orig_attr = getattr(obj, attr_name)
except AttributeError:
continue
if orig_attr is None:
raise AttributeError("Attribute not found.")
# Calling getattr() on a staticmethod transforms it to a 'normal' function.
# We need to ensure that we put it back as a staticmethod.
old_attribute = obj.__dict__.get(attr_name)
if old_attribute is not None and isinstance(old_attribute, staticmethod):
orig_attr = staticmethod(orig_attr)
self.stubs.append((orig_obj, attr_name, orig_attr))
setattr(orig_obj, attr_name, new_attr)
def SmartUnsetAll(self):
"""Reverses all the SmartSet() calls, restoring things to their original
    definition. It's okay to call SmartUnsetAll() repeatedly, as later calls
have no effect if no SmartSet() calls have been made.
"""
self.stubs.reverse()
for args in self.stubs:
setattr(*args)
self.stubs = []
def Set(self, parent, child_name, new_child):
"""Replace child_name's old definition with new_child, in the context
of the given parent. The parent could be a module when the child is a
function at module scope. Or the parent could be a class when a class'
method is being replaced. The named child is set to new_child, while
the prior definition is saved away for later, when UnsetAll() is called.
This method supports the case where child_name is a staticmethod or a
classmethod of parent.
"""
old_child = getattr(parent, child_name)
old_attribute = parent.__dict__.get(child_name)
if old_attribute is not None and isinstance(old_attribute, staticmethod):
old_child = staticmethod(old_child)
self.cache.append((parent, old_child, child_name))
setattr(parent, child_name, new_child)
def UnsetAll(self):
"""Reverses all the Set() calls, restoring things to their original
    definition. It's okay to call UnsetAll() repeatedly, as later calls have
no effect if no Set() calls have been made.
"""
# Undo calls to Set() in reverse order, in case Set() was called on the
# same arguments repeatedly (want the original call to be last one undone)
self.cache.reverse()
for (parent, old_child, child_name) in self.cache:
setattr(parent, child_name, old_child)
self.cache = []
|
rickiepark/openbidder
|
protobuf/protobuf-2.6.1/python/stubout.py
|
Python
|
mit
| 4,934 | 0.004662 |
"""
Django settings for ecommerce project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
#root of project
#BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'csqwlmc8s55o($rt6ozh7u+ui9zb-et00w$d90j8$^!nvj41_r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'stroms38@gmail.com'
EMAIL_HOST_PASSWORD = 'yourpassword'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
'''
If using gmail, you will need to
unlock Captcha to enable Django
to send for you:
https://accounts.google.com/displayunlockcaptcha
'''
# Application definition
INSTALLED_APPS = (
#django app
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
#third party apps
'crispy_forms',
'registration',
#my apps
'answers',
'newsletter',
"products",
"carts",
"billing",
"django_filters",
"storages",
'gunicorn',
"djstripe",
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'ecommerce.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ecommerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'EST'
USE_I18N = True
USE_L10N = True
USE_TZ = True
'''Image storage Amazon S3'''
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = 'examplefy'
S3_URL = 'http://%s.s3.amazonaws.com/' % AWS_STORAGE_BUCKET_NAME
STATIC_URL = S3_URL
AWS_QUERYSTRING_AUTH = False
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
'''Static storage'''
# # Static files (CSS, JavaScript, Images)
# # https://docs.djangoproject.com/en/1.8/howto/static-files/
# STATIC_ROOT = 'staticfiles'
# STATIC_URL = '/static/'
# STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_in_env", "static_root")
# MEDIA_URL = '/media/'
# MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_in_env", "media_root")
# PROTECTED_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static_in_env", "protected_root")
# STATICFILES_DIRS = (
# os.path.join(BASE_DIR, "static", "static_root"),
# #os.path.join(BASE_DIR, "static_in_env"),
# #'/var/www/static/',
# )
#Production Code
#Parse database configuration from $DATABASE_URL
#import dj_database_url
#DATABASES['default'] = dj_database_url.config()
# #BOTO S3 Storage for Production ONLY
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static', "static_root"),
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
# STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static", "static_root")
MEDIA_URL = S3_URL
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static", "media_root")
PROTECTED_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static", "protected_root")
# TEMPLATE_DIRS = (
# os.path.join(BASE_DIR, "templates"),
# )
# here() gives us file paths from the root of the system to the directory
# holding the current file.
here = lambda * x: os.path.join(os.path.abspath(os.path.dirname(__file__)), *x)
PROJECT_ROOT = here("..")
# root() gives us file paths from the root of the system to whatever
# folder(s) we pass it starting at the parent directory of the current file.
root = lambda * x: os.path.join(os.path.abspath(PROJECT_ROOT), *x)
TEMPLATE_DIRS = (
root('templates'),
)
#Crispy FORM TAGs SETTINGS
CRISPY_TEMPLATE_PACK = 'bootstrap3'
#DJANGO REGISTRATION REDUX SETTINGS
ACCOUNT_ACTIVATION_DAYS = 7
REGISTRATION_AUTO_LOGIN = True
SITE_ID = 1
LOGIN_REDIRECT_URL = '/'
#Braintree
BRAINTREE_PUBLIC = "hsjhmqhy73rvpqbv"
BRAINTREE_PRIVATE = "37b06da7e2cdb493bf0e0ddb1c47cbcd"
BRAINTREE_MERCHANT = "bgd7scxjbcrz6dd2"
BRAINTREE_ENVIRONMENT = "Sandbox"
#Stripe
STRIPE_PUBLIC_KEY = os.environ.get("STRIPE_PUBLIC_KEY", "pk_test_lLFAbBOc7bHtpxq5QnIp94xh")
STRIPE_SECRET_KEY = os.environ.get("STRIPE_SECRET_KEY", "sk_test_hWkIxMrsvR3IGJIRKLRy1Rts")
# Note: this module *is* the settings file, so `settings` is not importable here;
# define the currency choices directly instead of getattr(settings, ...).
CURRENCIES = (
    ('usd', 'U.S. Dollars',),
    ('gbp', 'Pounds (GBP)',),
    ('eur', 'Euros',),
)
DJSTRIPE_PLANS = {
"one-time": {
"stripe_plan_id": "one-time",
"name": "Examplefy ($0.99)",
"description": "A one-time buy to Examplefy",
"price": 99, # $0.99
"currency": "usd",
"interval": "day"
},
"monthly": {
"stripe_plan_id": "pro-monthly",
"name": "Examplefy Pro ($4.99/month)",
"description": "The monthly subscription plan to Examplefy",
"price": 499, # $4.99
"currency": "usd",
"interval": "month",
"interval_count": 1
},
"yearly": {
"stripe_plan_id": "pro-yearly",
"name": "Examplefy Prime ($49/year)",
"description": "The annual subscription plan to Examplefy",
"price": 4900, # $49.00
"currency": "usd",
"interval": "year",
"interval_count": 1
}
}
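# Editor's note (hedged sketch, not part of the original settings): Stripe expects
# plan prices as integer cents, which is why 99 above means $0.99 and 4900 means
# $49.00. A small helper keeps that conversion explicit.
def _dollars_to_cents(dollars):
    # round first to avoid float artifacts (e.g. 4.99 * 100 == 499.00000000000006)
    return int(round(dollars * 100))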
|
Maelstroms38/ecommerce
|
src/ecommerce/settings/local.py
|
Python
|
mit
| 7,411 | 0.003778 |
# version 0.5
import socket
import threading
import hashlib
import base64
import json
class BadWSRequest(Exception):
pass
class BadWSFrame(Exception):
pass
class BadCmdCall(Exception):
pass
class BadCmdParam(Exception):
pass
class Client(threading.Thread):
_MAGIC_STRING = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
_OPCODE_TEXT = 0x1
_OPCODE_CLOSE = 0x8
def __init__(self, Manager, socket, address):
super().__init__()
self.Manager = Manager
self.socket = socket
self.ip, self.port = address
self.invokedPath = None
self.sessionStarted = False
def _parseHeader(self):
self.socket.settimeout(2.0)
rcvBuffer = ''
toRead = True
while toRead:
rcvBuffer += self.socket.recv(128).decode('utf-8')
#Check for the termination sequence
if rcvBuffer[-4:] == '\r\n\r\n': toRead = False
        # consider using splitlines here
headerLines = rcvBuffer.split('\r\n')
requestLineElements = headerLines[0].split(' ')
if requestLineElements[0] == 'GET' and requestLineElements[-1] == 'HTTP/1.1':
self.invokedPath = requestLineElements[2]
else:
raise BadWSRequest
self.headerDict = {}
#Cut off rubbish (first line and termination sequence)
for header in headerLines[1:-2]:
headerKey, headerVal = header.split(':', 1)
self.headerDict.update({ headerKey: headerVal.strip() })
if (
'upgrade' not in self.headerDict['Connection'].lower().split(', ') or
self.headerDict['Upgrade'].lower() != 'websocket' or
'Sec-WebSocket-Key' not in self.headerDict
#Very weak part
):
raise BadWSRequest
#Operative mode needs more time
self.socket.settimeout(3600.0)
def _initComunication(self):
payload = 'HTTP/1.1 101 Web Socket Protocol Handshake\r\n'
payload += 'Upgrade: WebSocket\r\n'
payload += 'Connection: Upgrade\r\n'
#Generate the security key
acceptKey = self.headerDict['Sec-WebSocket-Key'] + self._MAGIC_STRING
acceptKey = hashlib.sha1( acceptKey.encode('ascii') ).digest()
acceptKey = base64.b64encode(acceptKey)
payload += 'Sec-WebSocket-Accept: ' + acceptKey.decode('utf-8') + '\r\n\r\n'
self.socket.send( payload.encode('utf-8') )
def _rcvRequest(self):
#1st byte: FIN, RUBBISH1, RUBBISH2, RUBBISH3, OPCODE (4 bit)
#2nd byte: MASKED, PAYLOAD_LENGTH (7 bit)
rcvBuffer = self.socket.recv(2)
print('FIN: ' + str( rcvBuffer[0] >> 7 ))
#0x0f is 00001111 binary sequence
opcode = rcvBuffer[0] & 0x0f
print('opcode: ' + hex( opcode ))
maskBit = rcvBuffer[1] >> 7
print('mask: ' + str( maskBit ))
if maskBit != 1:
raise BadWSFrame('Unmasked data')
#0x7f is 01111111 binary sequence
length = rcvBuffer[1] & 0x7f
if length == 126:
#A long length is stored in more space
rcvBuffer = self.socket.recv(2)
length = int.from_bytes(rcvBuffer, 'big')
elif length == 127:
            # a payload larger than 65 kB per thread makes everything collapse..
            # besides, why would a user need to upload that much data? :O
raise BadWSFrame('Too big payload')
print('length: ' + str(length))
#Read the mask applied to data
maskKey = self.socket.recv(4)
        # consider buffering this read to make the thread lighter on memory
rcvBuffer = self.socket.recv(length)
message = b''
for i in range(length):
#Unmask the original message
message += bytes([ rcvBuffer[i] ^ maskKey[i % 4] ])
print(message)
if opcode == self._OPCODE_TEXT:
return json.loads( message.decode('utf-8') )
elif opcode == self._OPCODE_CLOSE:
return None
else:
raise BadWSFrame('Unknown OpCode')
def _sndResponse(self, data):
data = json.dumps(data).encode('utf-8')
length = len(data)
#FIN bit and opcode 0x1 (0x81 is 10000001 binary sequence)
payload = b'\x81'
if length >= 65535:
#Over the maximum length allowed by 16bit addressing
raise BadWSFrame('Too big payload')
elif length <= 125:
payload += bytes([length])
else:
payload += bytes([126])
payload += length.to_bytes(2, 'big')
        # the send could be buffered
self.socket.send(payload + data)
    # TODO: close by sending an error code and using the global close opcode
def _sndClose(self):
#FIN bit and opcode 0x8 (0x88 is 10001000 binary sequence)
#Mask and length bits are zero
self.socket.send(b'\x88\x00')
#Empty the remote buffer
self.socket.recv(100)
def run(self):
print('[+] Connection established with ' + self.ip + ':' + str(self.port), "[%s]" % str(len(self.Manager)))
try:
self._parseHeader()
self._initComunication()
self.sessionStarted = True
            # non-blocking sockets might help drain the incoming data sooner
while True:
request = self._rcvRequest()
if not request: break
response = self.Manager.executeAction(self, request)
                if response is None:
                    # handlers must return a JSON-serializable value;
                    # the original raised an undefined UnknownCommand here
                    raise BadCmdCall(request)
self._sndResponse(response)
except BadWSRequest:
print('[!] Bad-formed request from ' + self.ip + ':' + str(self.port))
except BadWSFrame as err:
print('[!] Bad-formed frame from ' + self.ip + ':' + str(self.port), str(err))
        # decide whether to keep this log message or not
except BadCmdCall as err:
print('[!] Unknown command received from ' + self.ip + ':' + str(self.port), str(err))
except BadCmdParam as err:
print('[!] Invalid parameters from ' + self.ip + ':' + str(self.port), str(err))
except socket.timeout:
print('[!] Timeout occurred for ' + self.ip + ':' + str(self.port))
finally:
if self.sessionStarted:
self._sndClose()
self.socket.close()
self.Manager.rmvClient(self)
print('[-] Connection closed with ' + self.ip + ':' + str(self.port), "[%s]" % str(len(self.Manager)))
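# Editor's note (hedged addition, not in the original module): a minimal sketch of
# the RFC 6455 client-frame unmasking performed inside Client._rcvRequest() above.
# Clients XOR every payload byte with a 4-byte mask key; applying the same XOR
# again recovers the original bytes.
def _unmask_demo(masked_payload, mask_key):
    # XOR each byte with the mask byte at position i % 4 (same loop as in _rcvRequest)
    return bytes(b ^ mask_key[i % 4] for i, b in enumerate(masked_payload))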
class ClientManager:
def __init__(self):
self.clientList = []
self.actionDict = {}
def __len__(self):
return len(self.clientList)
def addClient(self, clientSocket, address):
newClient = Client(self, clientSocket, address)
newClient.start()
self.clientList.append(newClient)
def rmvClient(self, clientInstance):
self.clientList.remove(clientInstance)
def registerAction(self, functionName, function):
self.actionDict.update({ functionName: function })
def executeAction(self, clientInstance, request):
#Array of two element is expected
function, parameters = request
if function in self.actionDict:
try:
return self.actionDict[function](*parameters)
except TypeError:
raise BadCmdParam(request)
else:
raise BadCmdCall(function)
def shutdown(self):
for client in self.clientList:
client.join()
class WebSocketServer:
def __init__(self, ip = '0.0.0.0', port = 8888, conns = 9999):
self.ip = ip
self.port = port
self.CM = ClientManager()
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.bind( (self.ip, self.port) )
self.socket.listen(conns)
print('[#] Waiting for connections on ' + self.ip + ':' + str(self.port) + '...')
except socket.error as err:
print('[!] Error opening the socket: ' + str(err))
def register(self, functionName, function):
self.CM.registerAction(functionName, function)
def start(self):
try:
while True:
clientSocket, address = self.socket.accept()
self.CM.addClient(clientSocket, address)
except:
print('[#] Shutting down the server...')
self.stop()
def stop(self):
self.CM.shutdown()
self.socket.close()
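# Editor's note (hedged usage sketch, not in the original file): how the server is
# presumably wired up. The 'echo' command name and handler are illustrative only;
# register() maps a command name to any callable, and clients are expected to send
# JSON frames of the form ["command", [arg1, arg2, ...]].
if __name__ == '__main__':
    def echo(text):
        # the return value is JSON-encoded and sent back by _sndResponse()
        return ['echo', text]

    server = WebSocketServer(ip='0.0.0.0', port=8888)
    server.register('echo', echo)
    server.start()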
|
ferdas/ws-rpc
|
websocket.py
|
Python
|
gpl-3.0
| 8,711 | 0.011365 |
import theano.tensor as T
import numpy as np
from cuboid.bricks import Flattener, FilterPool, Dropout, BatchNormalization
from cuboid.bricks import Convolutional, LeakyRectifier, BrickSequence
from blocks.bricks.conv import MaxPooling
from blocks.bricks import Linear, Softmax
from blocks.initialization import IsotropicGaussian, Constant
from blocks.bricks.cost import CategoricalCrossEntropy, MisclassificationRate
def conv3(num_filters):
return [Convolutional(filter_size=(3, 3),
num_filters=num_filters,
weights_init=IsotropicGaussian(std=0.05),
biases_init=Constant(0),
use_bias=True,
border_mode="same",
step=(1,1)),
LeakyRectifier(0.01)]
def max_pool():
return MaxPooling(pooling_size=(2, 2),
step=(2, 2))
def linear(n):
return Linear(output_dim=n,
weights_init=IsotropicGaussian(std=0.01),
biases_init=Constant(0),
use_bias=True)
class ModelHelper():
def __init__(self, config):
self.X = T.tensor4("features")
c = config
seq = BrickSequence(input_dim = (3, 32, 32), bricks=[
conv3(c['n_l1']),
conv3(c['n_l2']),
max_pool(),
conv3(c['n_l3']),
conv3(c['n_l4']),
max_pool(),
#conv3(10),
#conv3(10),
Flattener(),
linear(c['n_l5']),
Softmax()
])
seq.initialize()
self.pred = seq.apply(self.X)
self.Y = T.imatrix("targets")
self.cost = CategoricalCrossEntropy().apply(self.Y.flatten(), self.pred)
self.cost.name = "cost"
self.accur = 1.0 - MisclassificationRate().apply(self.Y.flatten(), self.pred)
self.accur.name = "accur"
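# Editor's note (hedged usage sketch, not in the original file): the layer sizes
# below are illustrative only and are not taken from the experiment's actual
# configuration dictionary.
if __name__ == "__main__":
    example_config = {'n_l1': 64, 'n_l2': 64, 'n_l3': 128, 'n_l4': 128, 'n_l5': 10}
    helper = ModelHelper(example_config)
    # symbolic Theano variables describing the model's cost and accuracy
    print(helper.cost.name, helper.accur.name)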
|
lukemetz/MLFun
|
DistCifar10/model.py
|
Python
|
mit
| 1,811 | 0.008283 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2015 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
optDict = {
# Format:
# Family: { "parameter name": "parameter datatype" },
# Or:
# Family: { "parameter name": ("parameter datatype", "category name used for common outputs feature") },
"Target": {
"direct": "string",
"url": "string",
"logFile": "string",
"bulkFile": "string",
"requestFile": "string",
"sessionFile": "string",
"googleDork": "string",
"configFile": "string",
"sitemapUrl": "string",
},
"Request": {
"method": "string",
"data": "string",
"paramDel": "string",
"cookie": "string",
"cookieDel": "string",
"loadCookies": "string",
"dropSetCookie": "boolean",
"agent": "string",
"randomAgent": "boolean",
"host": "string",
"referer": "string",
"headers": "string",
"authType": "string",
"authCred": "string",
"authFile": "string",
"proxy": "string",
"proxyCred": "string",
"proxyFile": "string",
"ignoreProxy": "boolean",
"tor": "boolean",
"torPort": "integer",
"torType": "string",
"checkTor": "boolean",
"delay": "float",
"timeout": "float",
"retries": "integer",
"rParam": "string",
"safeUrl": "string",
"safePost": "string",
"safeReqFile": "string",
"safeFreq": "integer",
"skipUrlEncode": "boolean",
"csrfToken": "string",
"csrfUrl": "string",
"forceSSL": "boolean",
"hpp": "boolean",
"evalCode": "string",
},
"Optimization": {
"optimize": "boolean",
"predictOutput": "boolean",
"keepAlive": "boolean",
"nullConnection": "boolean",
"threads": "integer",
},
"Injection": {
"testParameter": "string",
"skip": "string",
"skipStatic": "boolean",
"dbms": "string",
"dbmsCred": "string",
"os": "string",
"invalidBignum": "boolean",
"invalidLogical": "boolean",
"invalidString": "boolean",
"noCast": "boolean",
"noEscape": "boolean",
"prefix": "string",
"suffix": "string",
"tamper": "string",
},
"Detection": {
"level": "integer",
"risk": "integer",
"string": "string",
"notString": "string",
"regexp": "string",
"code": "integer",
"textOnly": "boolean",
"titles": "boolean",
},
"Techniques": {
"tech": "string",
"timeSec": "integer",
"uCols": "string",
"uChar": "string",
"uFrom": "string",
"dnsName": "string",
"secondOrder": "string",
},
"Fingerprint": {
"extensiveFp": "boolean",
},
"Enumeration": {
"getAll": "boolean",
"getBanner": ("boolean", "Banners"),
"getCurrentUser": ("boolean", "Users"),
"getCurrentDb": ("boolean", "Databases"),
"getHostname": "boolean",
"isDba": "boolean",
"getUsers": ("boolean", "Users"),
"getPasswordHashes": ("boolean", "Passwords"),
"getPrivileges": ("boolean", "Privileges"),
"getRoles": ("boolean", "Roles"),
"getDbs": ("boolean", "Databases"),
"getTables": ("boolean", "Tables"),
"getColumns": ("boolean", "Columns"),
"getSchema": "boolean",
"getCount": "boolean",
"dumpTable": "boolean",
"dumpAll": "boolean",
"search": "boolean",
"getComments": "boolean",
"db": "string",
"tbl": "string",
"col": "string",
"excludeCol": "string",
"dumpWhere": "string",
"user": "string",
"excludeSysDbs": "boolean",
"limitStart": "integer",
"limitStop": "integer",
"firstChar": "integer",
"lastChar": "integer",
"query": "string",
"sqlShell": "boolean",
"sqlFile": "string",
},
"Brute": {
"commonTables": "boolean",
"commonColumns": "boolean",
},
"User-defined function": {
"udfInject": "boolean",
"shLib": "string",
},
"File system": {
"rFile": "string",
"wFile": "string",
"dFile": "string",
},
"Takeover": {
"osCmd": "string",
"osShell": "boolean",
"osPwn": "boolean",
"osSmb": "boolean",
"osBof": "boolean",
"privEsc": "boolean",
"msfPath": "string",
"tmpPath": "string",
},
"Windows": {
"regRead": "boolean",
"regAdd": "boolean",
"regDel": "boolean",
"regKey": "string",
"regVal": "string",
"regData": "string",
"regType": "string",
},
"General": {
#"xmlFile": "string",
"trafficFile": "string",
"batch": "boolean",
"charset": "string",
"crawlDepth": "integer",
"crawlExclude": "string",
"csvDel": "string",
"dumpFormat": "string",
"eta": "boolean",
"flushSession": "boolean",
"forms": "boolean",
"freshQueries": "boolean",
"hexConvert": "boolean",
"outputDir": "string",
"parseErrors": "boolean",
"pivotColumn": "string",
"saveConfig": "string",
"scope": "string",
"testFilter": "string",
"testSkip": "string",
"updateAll": "boolean",
},
"Miscellaneous": {
"alert": "string",
"answers": "string",
"beep": "boolean",
"cleanup": "boolean",
"dependencies": "boolean",
"disableColoring": "boolean",
"googlePage": "integer",
"mobile": "boolean",
"offline": "boolean",
"pageRank": "boolean",
"purgeOutput": "boolean",
"smart": "boolean",
"wizard": "boolean",
"verbose": "integer",
},
"Hidden": {
"dummy": "boolean",
"binaryFields": "string",
"profile": "boolean",
"cpuThrottle": "integer",
"forceDns": "boolean",
"identifyWaf": "boolean",
"skipWaf": "boolean",
"ignore401": "boolean",
"smokeTest": "boolean",
"liveTest": "boolean",
"stopFail": "boolean",
"runCase": "string",
}
}
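# Editor's note (hedged helper sketch, not part of sqlmap): values in optDict are
# either a plain datatype string or a (datatype, category) tuple used by the
# common-outputs feature; this shows how such an entry can be normalized.
def _entry_datatype(entry):
    # return the datatype regardless of which of the two forms the entry uses
    return entry[0] if isinstance(entry, tuple) else entry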
|
krintoxi/NoobSec-Toolkit
|
NoobSecToolkit /tools/inject/lib/core/optiondict.py
|
Python
|
gpl-2.0
| 13,002 | 0.000154 |
from trollius import subprocess
from trollius import test_utils
import trollius as asyncio
import os
import signal
import sys
import warnings
from trollius import BrokenPipeError, ConnectionResetError, ProcessLookupError
from trollius import From, Return
from trollius import base_subprocess
from trollius import test_support as support
from trollius.test_utils import mock
from trollius.test_utils import unittest
if sys.platform != 'win32':
from trollius import unix_events
# Program blocking
PROGRAM_BLOCKED = [sys.executable, '-c', 'import time; time.sleep(3600)']
# Program copying input to output
if sys.version_info >= (3,):
PROGRAM_CAT = ';'.join(('import sys',
'data = sys.stdin.buffer.read()',
'sys.stdout.buffer.write(data)'))
else:
PROGRAM_CAT = ';'.join(('import sys',
'data = sys.stdin.read()',
'sys.stdout.write(data)'))
PROGRAM_CAT = [sys.executable, '-c', PROGRAM_CAT]
class TestSubprocessTransport(base_subprocess.BaseSubprocessTransport):
def _start(self, *args, **kwargs):
self._proc = mock.Mock()
self._proc.stdin = None
self._proc.stdout = None
self._proc.stderr = None
class SubprocessTransportTests(test_utils.TestCase):
def setUp(self):
self.loop = self.new_test_loop()
self.set_event_loop(self.loop)
def create_transport(self, waiter=None):
protocol = mock.Mock()
protocol.connection_made._is_coroutine = False
protocol.process_exited._is_coroutine = False
transport = TestSubprocessTransport(
self.loop, protocol, ['test'], False,
None, None, None, 0, waiter=waiter)
return (transport, protocol)
def test_proc_exited(self):
waiter = asyncio.Future(loop=self.loop)
transport, protocol = self.create_transport(waiter)
transport._process_exited(6)
self.loop.run_until_complete(waiter)
self.assertEqual(transport.get_returncode(), 6)
self.assertTrue(protocol.connection_made.called)
self.assertTrue(protocol.process_exited.called)
self.assertTrue(protocol.connection_lost.called)
self.assertEqual(protocol.connection_lost.call_args[0], (None,))
self.assertFalse(transport._closed)
self.assertIsNone(transport._loop)
self.assertIsNone(transport._proc)
self.assertIsNone(transport._protocol)
# methods must raise ProcessLookupError if the process exited
self.assertRaises(ProcessLookupError,
transport.send_signal, signal.SIGTERM)
self.assertRaises(ProcessLookupError, transport.terminate)
self.assertRaises(ProcessLookupError, transport.kill)
transport.close()
class SubprocessMixin:
def test_stdin_stdout(self):
args = PROGRAM_CAT
@asyncio.coroutine
def run(data):
proc = yield From(asyncio.create_subprocess_exec(
*args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
loop=self.loop))
# feed data
proc.stdin.write(data)
yield From(proc.stdin.drain())
proc.stdin.close()
# get output and exitcode
data = yield From(proc.stdout.read())
exitcode = yield From(proc.wait())
raise Return(exitcode, data)
task = run(b'some data')
task = asyncio.wait_for(task, 60.0, loop=self.loop)
exitcode, stdout = self.loop.run_until_complete(task)
self.assertEqual(exitcode, 0)
self.assertEqual(stdout, b'some data')
def test_communicate(self):
args = PROGRAM_CAT
@asyncio.coroutine
def run(data):
proc = yield From(asyncio.create_subprocess_exec(
*args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
loop=self.loop))
stdout, stderr = yield From(proc.communicate(data))
raise Return(proc.returncode, stdout)
task = run(b'some data')
task = asyncio.wait_for(task, 60.0, loop=self.loop)
exitcode, stdout = self.loop.run_until_complete(task)
self.assertEqual(exitcode, 0)
self.assertEqual(stdout, b'some data')
def test_shell(self):
create = asyncio.create_subprocess_shell('exit 7',
loop=self.loop)
proc = self.loop.run_until_complete(create)
exitcode = self.loop.run_until_complete(proc.wait())
self.assertEqual(exitcode, 7)
@unittest.skipUnless(hasattr(os, 'setsid'), "need os.setsid()")
def test_start_new_session(self):
def start_new_session():
os.setsid()
# start the new process in a new session
create = asyncio.create_subprocess_shell('exit 8',
preexec_fn=start_new_session,
loop=self.loop)
proc = self.loop.run_until_complete(create)
exitcode = self.loop.run_until_complete(proc.wait())
self.assertEqual(exitcode, 8)
def test_kill(self):
args = PROGRAM_BLOCKED
create = asyncio.create_subprocess_exec(*args, loop=self.loop)
proc = self.loop.run_until_complete(create)
proc.kill()
returncode = self.loop.run_until_complete(proc.wait())
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGKILL, returncode)
def test_terminate(self):
args = PROGRAM_BLOCKED
create = asyncio.create_subprocess_exec(*args, loop=self.loop)
proc = self.loop.run_until_complete(create)
proc.terminate()
returncode = self.loop.run_until_complete(proc.wait())
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGTERM, returncode)
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_send_signal(self):
code = '; '.join((
'import sys, time',
'print("sleeping")',
'sys.stdout.flush()',
'time.sleep(3600)'))
args = [sys.executable, '-c', code]
create = asyncio.create_subprocess_exec(*args,
stdout=subprocess.PIPE,
loop=self.loop)
proc = self.loop.run_until_complete(create)
@asyncio.coroutine
def send_signal(proc):
# basic synchronization to wait until the program is sleeping
line = yield From(proc.stdout.readline())
self.assertEqual(line, b'sleeping\n')
proc.send_signal(signal.SIGHUP)
returncode = yield From(proc.wait())
raise Return(returncode)
returncode = self.loop.run_until_complete(send_signal(proc))
self.assertEqual(-signal.SIGHUP, returncode)
def prepare_broken_pipe_test(self):
# buffer large enough to feed the whole pipe buffer
large_data = b'x' * support.PIPE_MAX_SIZE
        # the program ends before its stdin can be fed
create = asyncio.create_subprocess_exec(
sys.executable, '-c', 'pass',
stdin=subprocess.PIPE,
loop=self.loop)
proc = self.loop.run_until_complete(create)
return (proc, large_data)
def test_stdin_broken_pipe(self):
proc, large_data = self.prepare_broken_pipe_test()
@asyncio.coroutine
def write_stdin(proc, data):
proc.stdin.write(data)
yield From(proc.stdin.drain())
coro = write_stdin(proc, large_data)
# drain() must raise BrokenPipeError or ConnectionResetError
with test_utils.disable_logger():
self.assertRaises((BrokenPipeError, ConnectionResetError),
self.loop.run_until_complete, coro)
self.loop.run_until_complete(proc.wait())
def test_communicate_ignore_broken_pipe(self):
proc, large_data = self.prepare_broken_pipe_test()
# communicate() must ignore BrokenPipeError when feeding stdin
with test_utils.disable_logger():
self.loop.run_until_complete(proc.communicate(large_data))
self.loop.run_until_complete(proc.wait())
def test_pause_reading(self):
limit = 10
size = (limit * 2 + 1)
@asyncio.coroutine
def test_pause_reading():
code = '\n'.join((
'import sys',
'sys.stdout.write("x" * %s)' % size,
'sys.stdout.flush()',
))
connect_read_pipe = self.loop.connect_read_pipe
@asyncio.coroutine
def connect_read_pipe_mock(*args, **kw):
connect = connect_read_pipe(*args, **kw)
transport, protocol = yield From(connect)
transport.pause_reading = mock.Mock()
transport.resume_reading = mock.Mock()
raise Return(transport, protocol)
self.loop.connect_read_pipe = connect_read_pipe_mock
proc = yield From(asyncio.create_subprocess_exec(
sys.executable, '-c', code,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
limit=limit,
loop=self.loop))
stdout_transport = proc._transport.get_pipe_transport(1)
stdout, stderr = yield From(proc.communicate())
# The child process produced more than limit bytes of output,
# the stream reader transport should pause the protocol to not
# allocate too much memory.
raise Return(stdout, stdout_transport)
# Issue #22685: Ensure that the stream reader pauses the protocol
# when the child process produces too much data
stdout, transport = self.loop.run_until_complete(test_pause_reading())
self.assertEqual(stdout, b'x' * size)
self.assertTrue(transport.pause_reading.called)
self.assertTrue(transport.resume_reading.called)
def test_stdin_not_inheritable(self):
# asyncio issue #209: stdin must not be inheritable, otherwise
# the Process.communicate() hangs
@asyncio.coroutine
def len_message(message):
code = 'import sys; data = sys.stdin.read(); print(len(data))'
proc = yield From(asyncio.create_subprocess_exec(
sys.executable, '-c', code,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
close_fds=False,
loop=self.loop))
stdout, stderr = yield From(proc.communicate(message))
exitcode = yield From(proc.wait())
raise Return(stdout, exitcode)
output, exitcode = self.loop.run_until_complete(len_message(b'abc'))
self.assertEqual(output.rstrip(), b'3')
self.assertEqual(exitcode, 0)
def test_cancel_process_wait(self):
# Issue #23140: cancel Process.wait()
@asyncio.coroutine
def cancel_wait():
proc = yield From(asyncio.create_subprocess_exec(
*PROGRAM_BLOCKED,
loop=self.loop))
# Create an internal future waiting on the process exit
task = self.loop.create_task(proc.wait())
self.loop.call_soon(task.cancel)
try:
yield From(task)
except asyncio.CancelledError:
pass
# Cancel the future
task.cancel()
# Kill the process and wait until it is done
proc.kill()
yield From(proc.wait())
self.loop.run_until_complete(cancel_wait())
def test_cancel_make_subprocess_transport_exec(self):
@asyncio.coroutine
def cancel_make_transport():
coro = asyncio.create_subprocess_exec(*PROGRAM_BLOCKED,
loop=self.loop)
task = self.loop.create_task(coro)
self.loop.call_soon(task.cancel)
try:
yield From(task)
except asyncio.CancelledError:
pass
# ignore the log:
# "Exception during subprocess creation, kill the subprocess"
with test_utils.disable_logger():
self.loop.run_until_complete(cancel_make_transport())
def test_cancel_post_init(self):
@asyncio.coroutine
def cancel_make_transport():
coro = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
*PROGRAM_BLOCKED)
task = self.loop.create_task(coro)
self.loop.call_soon(task.cancel)
try:
yield From(task)
except asyncio.CancelledError:
pass
# ignore the log:
# "Exception during subprocess creation, kill the subprocess"
with test_utils.disable_logger():
self.loop.run_until_complete(cancel_make_transport())
test_utils.run_briefly(self.loop)
def test_close_kill_running(self):
@asyncio.coroutine
def kill_running():
create = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
*PROGRAM_BLOCKED)
transport, protocol = yield From(create)
non_local = {'kill_called': False}
def kill():
non_local['kill_called'] = True
orig_kill()
proc = transport.get_extra_info('subprocess')
orig_kill = proc.kill
proc.kill = kill
returncode = transport.get_returncode()
transport.close()
yield From(transport._wait())
raise Return(returncode, non_local['kill_called'])
# Ignore "Close running child process: kill ..." log
with test_utils.disable_logger():
returncode, killed = self.loop.run_until_complete(kill_running())
self.assertIsNone(returncode)
# transport.close() must kill the process if it is still running
self.assertTrue(killed)
test_utils.run_briefly(self.loop)
def test_close_dont_kill_finished(self):
@asyncio.coroutine
def kill_running():
create = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
*PROGRAM_BLOCKED)
transport, protocol = yield From(create)
proc = transport.get_extra_info('subprocess')
            # kill the process (but asyncio is not notified immediately)
proc.kill()
proc.wait()
proc.kill = mock.Mock()
proc_returncode = proc.poll()
transport_returncode = transport.get_returncode()
transport.close()
raise Return(proc_returncode, transport_returncode,
proc.kill.called)
# Ignore "Unknown child process pid ..." log of SafeChildWatcher,
# emitted because the test already consumes the exit status:
# proc.wait()
with test_utils.disable_logger():
result = self.loop.run_until_complete(kill_running())
test_utils.run_briefly(self.loop)
proc_returncode, transport_return_code, killed = result
self.assertIsNotNone(proc_returncode)
self.assertIsNone(transport_return_code)
# transport.close() must not kill the process if it finished, even if
# the transport was not notified yet
self.assertFalse(killed)
def test_popen_error(self):
# Issue #24763: check that the subprocess transport is closed
# when BaseSubprocessTransport fails
if sys.platform == 'win32':
target = 'trollius.windows_utils.Popen'
else:
target = 'subprocess.Popen'
with mock.patch(target) as popen:
exc = ZeroDivisionError
popen.side_effect = exc
create = asyncio.create_subprocess_exec(sys.executable, '-c',
'pass', loop=self.loop)
with warnings.catch_warnings(record=True) as warns:
with self.assertRaises(exc):
self.loop.run_until_complete(create)
self.assertEqual(warns, [])
if sys.platform != 'win32':
# Unix
class SubprocessWatcherMixin(SubprocessMixin):
Watcher = None
def setUp(self):
policy = asyncio.get_event_loop_policy()
self.loop = policy.new_event_loop()
self.set_event_loop(self.loop)
watcher = self.Watcher()
watcher.attach_loop(self.loop)
policy.set_child_watcher(watcher)
self.addCleanup(policy.set_child_watcher, None)
class SubprocessSafeWatcherTests(SubprocessWatcherMixin,
test_utils.TestCase):
Watcher = unix_events.SafeChildWatcher
class SubprocessFastWatcherTests(SubprocessWatcherMixin,
test_utils.TestCase):
Watcher = unix_events.FastChildWatcher
else:
# Windows
class SubprocessProactorTests(SubprocessMixin, test_utils.TestCase):
def setUp(self):
self.loop = asyncio.ProactorEventLoop()
self.set_event_loop(self.loop)
if __name__ == '__main__':
unittest.main()
|
haypo/trollius
|
tests/test_subprocess.py
|
Python
|
apache-2.0
| 18,416 | 0.000163 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for multi_worker_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.eager import test
from tensorflow.python.training import server_lib
class NormalizeClusterSpecTest(test.TestCase):
def assert_same_cluster(self, lhs, rhs):
self.assertEqual(
server_lib.ClusterSpec(lhs).as_dict(),
server_lib.ClusterSpec(rhs).as_dict())
def testDictAsInput(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assert_same_cluster(
cluster_spec, multi_worker_util.normalize_cluster_spec(cluster_spec))
def testClusterDefAsInput(self):
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = "chief"
job.tasks[0] = "127.0.0.1:1234"
job = cluster_def.job.add()
job.name = "worker"
job.tasks[0] = "127.0.0.1:8964"
job.tasks[1] = "127.0.0.1:2333"
job = cluster_def.job.add()
job.name = "ps"
job.tasks[0] = "127.0.0.1:1926"
job.tasks[1] = "127.0.0.1:3141"
self.assert_same_cluster(
cluster_def, multi_worker_util.normalize_cluster_spec(cluster_def))
def testClusterSpecAsInput(self):
cluster_spec = server_lib.ClusterSpec({
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
})
self.assert_same_cluster(
cluster_spec, multi_worker_util.normalize_cluster_spec(cluster_spec))
def testUnexpectedInput(self):
cluster_spec = ["127.0.0.1:8964", "127.0.0.1:2333"]
with self.assertRaisesRegexp(
ValueError,
"`cluster_spec' should be dict or a `tf.train.ClusterSpec` or a "
"`tf.train.ClusterDef` object"):
multi_worker_util.normalize_cluster_spec(cluster_spec)
class IsChiefTest(test.TestCase):
def testClusterWithChief(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertTrue(multi_worker_util.is_chief(cluster_spec, "chief", 0))
self.assertFalse(multi_worker_util.is_chief(cluster_spec, "worker", 0))
def testClusterWithoutChief(self):
cluster_spec = {"worker": ["127.0.0.1:8964", "127.0.0.1:2333"]}
self.assertTrue(multi_worker_util.is_chief(cluster_spec, "worker", 0))
self.assertFalse(multi_worker_util.is_chief(cluster_spec, "worker", 1))
with self.assertRaisesRegexp(
ValueError, "`task_type` 'chief' not found in cluster_spec."):
multi_worker_util.is_chief(cluster_spec, "chief", 0)
with self.assertRaisesRegexp(
ValueError, "The `task_id` 2 exceeds the maximum id of worker."):
multi_worker_util.is_chief(cluster_spec, "worker", 2)
def testEvaluatorIsChief(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"evaluator": ["127.0.0.1:2019"]
}
self.assertTrue(multi_worker_util.is_chief(cluster_spec, "evaluator", 0))
class NumWorkersTest(test.TestCase):
def testCountWorker(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.worker_count(cluster_spec, task_type="chief"), 3)
self.assertEqual(
multi_worker_util.worker_count(cluster_spec, task_type="worker"), 3)
def testCountEvaluator(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"evaluator": ["127.0.0.1:7566"]
}
self.assertEqual(
multi_worker_util.worker_count(cluster_spec, task_type="evaluator"), 1)
def testTaskTypeNotFound(self):
cluster_spec = {}
with self.assertRaisesRegexp(
ValueError, "`task_type` 'worker' not found in cluster_spec."):
multi_worker_util.worker_count(cluster_spec, task_type="worker")
def testCountPs(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
# A "ps" job shouldn't call this method.
with self.assertRaisesRegexp(ValueError, "Unexpected `task_type` 'ps'"):
multi_worker_util.worker_count(cluster_spec, task_type="ps")
class IdInClusterTest(test.TestCase):
def testChiefId(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "chief", 0), 0)
def testWorkerId(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "worker", 1), 2)
cluster_spec = {
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "worker", 1), 1)
def testEvaluatorId(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"evaluator": ["127.0.0.1:7566"]
}
self.assertEqual(
multi_worker_util.id_in_cluster(cluster_spec, "evaluator", 0), 0)
def testPsId(self):
cluster_spec = {"chief": ["127.0.0.1:1234"], "ps": ["127.0.0.1:7566"]}
with self.assertRaisesRegexp(ValueError,
"There is no id for task_type 'ps'"):
multi_worker_util.id_in_cluster(cluster_spec, "ps", 0)
def testMultipleChiefs(self):
cluster_spec = {
"chief": ["127.0.0.1:8258", "127.0.0.1:7566"],
}
with self.assertRaisesRegexp(ValueError,
"There must be at most one 'chief' job."):
multi_worker_util.id_in_cluster(cluster_spec, "chief", 0)
class CollectiveLeaderTest(test.TestCase):
def testChiefAsLeader(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.collective_leader(cluster_spec, "worker", 0),
"/job:chief/replica:0/task:0")
def testWorkerAsLeader(self):
cluster_spec = {
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
}
self.assertEqual(
multi_worker_util.collective_leader(cluster_spec, "worker", 1),
"/job:worker/replica:0/task:0")
def testLeaderForEvaluator(self):
cluster_spec = {
"chief": ["127.0.0.1:1234"],
"worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
"ps": ["127.0.0.1:1926", "127.0.0.1:3141"],
"evaluator": ["127.0.0.1:2019"]
}
self.assertEqual(
multi_worker_util.collective_leader(cluster_spec, "evaluator", 0), "")
def testLocalLeader(self):
cluster_spec = {}
self.assertEqual(
multi_worker_util.collective_leader(cluster_spec, None, 0), "")
if __name__ == "__main__":
test.main()
|
ghchinoy/tensorflow
|
tensorflow/python/distribute/multi_worker_util_test.py
|
Python
|
apache-2.0
| 8,226 | 0.004012 |
""" Oriented graph aka ERD painter """
# Copyright (C) Petr Vanek <petr@yarpen.cz> , 2005
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
__author__ = 'Petr Vanek <petr@yarpen.cz>'
import types
import os
import sys
# local file subprocess24 is imported only for <2.4
if sys.version_info[:3] < (2, 4, 2):
import subprocess24 as subprocess
else:
import subprocess
class Dot:
"""! \brief Oriented graph aka ERD painter.
This class requires GraphViz installed because it calls 'dot'
    externally. If it does not find that program, no images are included
in html docs.
Format for parent - children: parent and [child1, child2, ..., childN]
Format for all - {
parent1: [child1, child2, ..., childN],
parent2: [child1, child2, ..., childN],
...
parentN: [child1, child2, ..., childN]
}
"""
def __init__(self, outPath):
## Path to write temp files and final images
self.outPath = outPath
## A flag for 'dot' availability
self.haveDot = self.haveDot()
## A text template for DOT source files.
self.graphTemplate = """
/* This is a DOT file created by Oraschemadoc (OSD).
When you see this file in your filesystem and OSD
               is not running, there is probably a bug in this file.
Visit http://www.yarpen.cz/oraschemadoc and send me
this file to fix the bug, please. */
digraph G
{
label="%s";fontname="Helvetica";labelfontsize="12";
labelloc="t";labeljust="l";labeldistance="5.0";
edge [fontname="Helvetica",fontsize=10,labelfontname="Helvetica",labelfontsize=10];
node [fontname="Helvetica",fontsize=10,shape=record];
rankdir=LR;
%s
}
"""
def uniq(self, aList):
"""! \brief Create a list with unique values.
        It's used for dummy lists that are reset during diagram source
code creation."""
set = {}
map(set.__setitem__, aList, [])
return set.keys()
def makeKeyNode(self, node, highlighNode=None):
"""! \brief Make base node.
Base node definiton for DOT source."""
bgcolor = 'white'
if highlighNode == node:
bgcolor = 'gray88'
s = '"%s" [label="%s" height=0.2,width=0.4,color="black",fillcolor="%s",style="filled",fontcolor="black",href="table-%s.html#t-fk"];\n' % (node, node, bgcolor, node)
return s
def graphList(self, mainName, children=[], inverseChildren=[]):
"""! \brief Make relations between the nodes.
Link base nodes (makeKeyNode()) together.
\param children leafs pointing to mainName
\param inverseChildren mainName is pointing to these leafs"""
s = []
for i in children:
s.append('''"%s" -> "%s" [color="black",fontsize=10,style="solid",arrowhead="crow"];\n''' % (i, mainName))
for i in inverseChildren:
s.append('''"%s" -> "%s" [color="black",fontsize=10,style="solid",arrowhead="crow"];\n''' % (mainName, i))
return ''.join(s)
def haveDot(self):
"""! \brief Check if there is a dot installed in PATH """
try:
"""
if os.spawnlp(os.P_WAIT, 'dot', 'dot', '-V') == 0:
return True
"""
print '\nChecking for dot binary...'
if self.runDot(['-V']) == 0:
return True
except OSError, e:
print '\nUnknown error in Dot.haveDot() method. ERD disabled.'
print '%s\n' % e
print ' Error'
return False
def runDot(self, params=[]):
"""! \brief Call the 'dot' binary. Searchnig in PATH variable"""
#return subprocess.call(["dot"] + params, env={"PATH": os.environ['PATH']}, stdout=None)
return subprocess.call(['dot'] + params)
def callDot(self, fname):
"""! \brief Create the PNGs and image maps from DOT files """
f = fname + '.dot'
retval = 1
self.runDot(params=['-Tcmap', '-o', fname + '.map', f])
retval = self.runDot(params=['-Tpng', '-o', fname + '.png', f])
if retval == 0:
try:
os.remove(f)
except IOError:
print 'cannot delete %s' % f
return retval
def fileGraphList(self, mainName, children=[], inverseChildren=[]):
"""! \brief Make a graph of the mainName's children """
allNodes = self.uniq(children + [mainName] + inverseChildren)
s = ''
for i in allNodes:
s += self.makeKeyNode(i, mainName)
s += self.graphList(mainName, children, inverseChildren)
s = self.graphTemplate % ('ERD related to the table', s)
fname = os.path.join(self.outPath, mainName)
f = file(fname+'.dot', 'w')
f.write(s)
f.close()
if self.callDot(fname) == 0:
return mainName+'.png'
return None
def fileGraphDict(self, all={}):
"""! \brief Make wide graph for the whole schema.
It's used at the index page."""
allNodes = all.keys()
        # The children lists live in the dictionary values, not the keys.
        for i in all.values():
if type(i) != types.ListType:
continue
for j in i:
allNodes.append(j)
allNodes = self.uniq(allNodes)
s = ''
for i in allNodes:
s += self.makeKeyNode(i)
for i in all.keys():
s += self.graphList(i, all[i])
s = self.graphTemplate % ('ERD of the schema', s)
fname = os.path.join(self.outPath, 'main')
f = file(fname + '.dot', 'w')
f.write(s)
f.close()
if self.callDot(fname) == 0:
return 'main.png'
return None
if __name__ == '__main__':
    d = Dot('.')  # Dot() requires an output path; use the current directory here.
d.fileGraphList('rodic', ['ch1', 'ch2', 'ch3'])
d.fileGraphDict({'rodic1': ['ch1', 'ch2', 'ch3', 'rodic2'], 'rodic2': ['x1', 'rodic1']})
|
tocisz/oraschemadoc
|
oraschemadoc/dot.py
|
Python
|
gpl-2.0
| 6,714 | 0.003724 |
#!/usr/bin/env python3
# This file is part of OpenSoccerManager.
#
# OpenSoccerManager is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# OpenSoccerManager is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# OpenSoccerManager. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk
from gi.repository import GObject
import data
class ContinueDialog(Gtk.Dialog):
'''
Dialog displayed when moving between dates in the game.
'''
def __init__(self):
Gtk.Dialog.__init__(self)
self.set_transient_for(data.window)
self.set_modal(True)
self.set_title("Continue Game")
self.set_default_size(200, -1)
self.set_resizable(False)
self.vbox.set_border_width(5)
self.progressbar = Gtk.ProgressBar()
self.progressbar.set_text("")
self.vbox.add(self.progressbar)
self.count = 0
def on_timeout_event(self, *args):
if self.count < 10:
self.count += 1
self.progressbar.set_fraction(self.count * 0.1)
state = True
else:
self.destroy()
data.window.mainscreen.information.update_date()
state = False
return state
def show(self):
self.show_all()
GObject.timeout_add(10, self.on_timeout_event)
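# Illustrative usage (a sketch, not part of the original module): the dialog is
# driven by its own timeout once shown, assuming data.window and the main screen
# have already been initialised elsewhere in the application.
#     dialog = ContinueDialog()
#     dialog.show()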
|
OpenSoccerManager/opensoccermanager
|
uigtk/continuedialog.py
|
Python
|
gpl-3.0
| 1,783 | 0 |
import pytest
@pytest.fixture(params=["EUROPE", "ASIA", "AMERICAS"])
def region(request):
return request.param
@pytest.fixture(params=["pseudonym", "Tuxedo"])
def game_name(request):
return request.param
@pytest.fixture(params=["sudo", "riot"])
def tag_line(request):
return request.param
@pytest.fixture(params=["val", "lor"])
def game(request):
return request.param
@pytest.mark.riot
@pytest.mark.integration
class TestAccountApi:
def test_by_puuid(self, riot_context, region, puuid):
actual_response = riot_context.watcher.account.by_puuid(region, puuid)
riot_context.verify_api_call(
region, f"/riot/account/v1/accounts/by-puuid/{puuid}", {}, actual_response
)
def test_by_riot_id(self, riot_context, region, game_name, tag_line):
actual_response = riot_context.watcher.account.by_riot_id(
region, game_name, tag_line
)
riot_context.verify_api_call(
region,
f"/riot/account/v1/accounts/by-riot-id/{game_name}/{tag_line}",
{},
actual_response,
)
def test_active_shard(self, riot_context, region, game, puuid):
actual_response = riot_context.watcher.account.active_shard(region, game, puuid)
riot_context.verify_api_call(
region,
f"/riot/account/v1/active-shards/by-game/{game}/by-puuid/{puuid}",
{},
actual_response,
)
|
pseudonym117/Riot-Watcher
|
tests/integration/riot/test_AccountApi.py
|
Python
|
mit
| 1,465 | 0.001365 |
import socket
s = socket.socket()
host = socket.gethostname()
port = 1234
s.bind((host, port))
s.listen(5)
while True:
c, addr = s.accept()
print "Get coonect from", addr
c.send('Thanks your coonecting')
c.close()
|
cheenwe/cheenwe.github.io
|
study/python/8_get_web_page.py
|
Python
|
mit
| 220 | 0.022727 |
#!/usr/bin/python
# Copyright (c) 2014 SwiftStack, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
version = "__VERSION__"
setup(
name="swift_undelete",
version=version,
description='Undelete middleware for OpenStack Swift',
license='Apache License (2.0)',
author='Samuel N. Merritt',
author_email='sam@swiftstack.com',
url='https://github.com/swiftstack/swift_undelete',
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Environment :: No Input/Output (Daemon)'],
# Ubuntu packaging incorrectly detects this as a dependency on the
# "python-swift" package, which SwiftStack doesn't use. So commenting this
# out so SwiftStack can still use ${python:Depends}
#install_requires=["swift"],
test_suite='nose.collector',
tests_require=["nose"],
scripts=[],
entry_points={
'paste.filter_factory': ['undelete=swift_undelete:filter_factory']})
|
caiobrentano/swift_undelete
|
setup.py
|
Python
|
apache-2.0
| 1,663 | 0.000601 |
from distutils.core import Command
from setuptools import setup
class TestCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
from django.conf import settings
settings.configure(
DATABASES={'default': {'NAME': ':memory:', 'ENGINE': 'django.db.backends.sqlite3'}},
INSTALLED_APPS=('jsonfield',)
)
from django.core.management import call_command
import django
if django.VERSION[:2] >= (1, 7):
django.setup()
call_command('test', 'jsonfield')
setup(
name='jsonfield',
version='1.0.3',
packages=['jsonfield'],
license='MIT',
author='Brad Jasper',
author_email='bjasper@gmail.com',
url='https://github.com/bradjasper/django-jsonfield/',
description='A reusable Django field that allows you to store validated JSON in your model.',
long_description=open("README.rst").read(),
install_requires=['Django >= 1.4.3'],
tests_require=['Django >= 1.4.3'],
cmdclass={'test': TestCommand},
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Framework :: Django',
],
)
|
hanuprateek/django-jsonfield
|
setup.py
|
Python
|
mit
| 1,596 | 0.001253 |
# -*- coding: utf-8 -*-
"""Tree utilities
"""
class TranslationKind(object):
in_ = 'in'
out_ = 'out'
def _normalize_branche(branche, node_mapping):
new_branches = []
if node_mapping:
node_id = node_mapping.get('node_id')
parts = node_id.split('/')
aliases = node_mapping.get('aliases')
for aliase in aliases:
new_branche = parts[:-1]
new_branche.append(aliase)
new_branche = '/'.join(new_branche)
new_branche = branche.replace(node_id, new_branche)
new_branches.append(new_branche)
return new_branches
def _get_mapping_for_branche(branche, mapping):
nor_branche = branche + '/'
nodes = [node for node in mapping
if nor_branche.find(node.get('node_id') + '/') >= 0]
ordered_nodes = sorted(
nodes, key=lambda e: len(e.get('node_id').split('/')),
reverse=True)
return ordered_nodes
def normalize_branche(branche, mapping):
map_ = _get_mapping_for_branche(branche, mapping)
branches = []
for node in map_:
branches.extend(_normalize_branche(branche, node))
return list(set(branches))
def normalize_branches_in(branches, mapping):
result = list(branches)
for branche in branches:
branch_result = normalize_branche(branche, mapping)
if branch_result:
result.extend(branch_result)
result.extend(normalize_branches_out(branch_result, mapping))
return list(set(result))
def normalize_branches_out(branches, mapping):
new_branches = list(branches)
for node_mapping in mapping:
node_id = node_mapping.get('node_id')
parts = node_id.split('/')
aliases = node_mapping.get('aliases')
for aliase in aliases:
branche_to_replace = parts[:-1]
branche_to_replace.append(aliase)
branche_to_replace = '/'.join(branche_to_replace)
_new_branches = []
for branche in new_branches:
if (branche + '/').find(branche_to_replace + '/') >= 0:
_new_branches.append(
branche.replace(branche_to_replace, node_id))
else:
_new_branches.append(branche)
new_branches = _new_branches
return list(set(new_branches))
def normalize_tree(tree, mapping, type_=TranslationKind.out_):
branches = get_branches(tree)
if type_ == TranslationKind.out_:
branches = normalize_branches_out(branches, mapping)
else:
branches = normalize_branches_in(branches, mapping)
return branches_to_tree(branches)
def normalize_keywords_in(keywords, mapping):
new_keywords = []
mapping_nodes = {
node_mapping.get('node_id').split('/')[-1].lower(): node_mapping
for node_mapping in mapping}
for keyword in list(keywords):
new_keyword = [keyword]
if keyword.lower() in mapping_nodes:
node_mapping = mapping_nodes[keyword.lower()]
new_keyword = list(node_mapping.get('aliases'))
new_keywords.extend(new_keyword)
return new_keywords
def normalize_keywords_out(keywords, mapping):
new_keywords = []
mapping_nodes = {}
for node_mapping in mapping:
for alias in node_mapping.get('aliases'):
mapping_nodes[alias] = node_mapping
for keyword in list(keywords):
new_keyword = [keyword]
if keyword.lower() in mapping_nodes:
node_mapping = mapping_nodes[keyword.lower()]
new_keyword = [node_mapping.get('node_id').split('/')[-1]]
new_keywords.extend(new_keyword)
return new_keywords
def normalize_keywords(keywords, mapping, type_=TranslationKind.out_):
if type_ == TranslationKind.in_:
return normalize_keywords_in(keywords, mapping)
else:
return normalize_keywords_out(keywords, mapping)
def node_to_keywords(node_name, children, source):
new_source = node_name
if source is not None:
new_source = source + '/' + node_name
result = [node_name, new_source]
path = []
for child in children:
child_keywords, child_path = node_to_keywords(
child, children[child], new_source)
result.extend(child_keywords)
result.extend(child_path)
path.extend([node_name + '/' + k for k in child_path])
if not children:
path = [node_name]
return result, path
def tree_to_keywords(tree, include_path=True):
result = []
for node in tree:
node_keywords, node_path = node_to_keywords(node, tree[node], None)
result.extend(node_keywords)
result.extend(node_path)
if not include_path:
flattened_result = []
for node in result:
flattened_result.extend(node.split('/'))
result = flattened_result
return list(set(result))
def get_keywords_by_level(tree, root, iskeywords=False):
keywords = []
if iskeywords:
keywords = tree
else:
keywords = tree_to_keywords(tree)
branches = sorted([k.split('/') for k in keywords
if k.startswith(root.lower()) or
k.startswith(root)],
key=lambda e: len(e), reverse=True)
len_tree = len(branches[0])
result = {}
for index in range(len_tree):
result[index] = []
for branche in branches:
if len(branche) > index:
result[index].append(branche[index])
result[index] = list(set(result[index]))
return list(result.values())
def get_tree_nodes_by_level(tree):
all_nodes = []
nodes = [(n[0], list(n[1].keys())) for n in tree.items()]
all_nodes.append(nodes)
nodes_values = [[(key, value) for value in n.items()]
for key, n in list(tree.items())]
sub_nodes = [item for sublist in nodes_values for item in sublist]
while sub_nodes:
nodes = list([(n[0]+'-'+n[1][0], list(n[1][1].keys()))
for n in sub_nodes])
all_nodes.append(nodes)
nodes_values = [[(origine+'-'+key, value) for value in n.items()]
for origine, (key, n) in list([n for n in sub_nodes])]
sub_nodes = [item for sublist in nodes_values for item in sublist]
return all_nodes
def merge_nodes(node1_name, children1, node2_name, children2):
if node1_name != node2_name:
return {node1_name: children1.copy(),
node2_name: children2.copy()}
node = {node1_name: merge_tree(children1, children2)}
return node
def merge_tree(tree1, tree2, mapping=[]):
if tree2 and mapping:
tree2 = normalize_tree(tree2, mapping)
if not tree1:
return tree2
if not tree2:
return tree1
result_tree = {}
merged_nodes = []
for node in tree1:
nodes_to_merge = [n for n in tree2
if node == n]
if not nodes_to_merge:
result_tree.update({node: tree1[node].copy()})
else:
node_to_merge = nodes_to_merge[0]
result_tree.update(merge_nodes(node, tree1[node],
node_to_merge, tree2[node_to_merge]))
merged_nodes.append(node_to_merge)
nodes_to_merge = {n: tree2[n] for n in tree2 if n not in merged_nodes}
result_tree.update(nodes_to_merge)
return result_tree
def get_branches_node(node_name, children):
result = []
for child in children:
result.extend([node_name + '/' + k for k
in get_branches_node(child, children[child])])
if not children:
result = [node_name]
return result
def get_branches(tree):
result = []
for node in tree:
result.extend(get_branches_node(node, tree[node]))
return result
def get_all_branches(tree):
branches = get_branches(tree)
result = []
for branche in branches:
result.append(branche)
parts = branche.split('/')
while parts:
parts.pop()
result.append('/'.join(parts))
return list(set(result))
def branch_to_tree(branch):
nodes = branch.split('/')
nodes.reverse()
current_node = None
for node_name in nodes:
node = {node_name: {}}
if current_node:
node[node_name].update(current_node)
current_node = node
return current_node
def branches_to_tree(branches):
branches_nodes = [branch_to_tree(b) for b in branches]
if not branches_nodes:
return {}
current_branch = branches_nodes[0]
if len(branches_nodes) > 1:
for branch in branches_nodes[1:]:
current_branch = merge_tree(current_branch, branch)
return current_branch
def replace_branche(branch, new_branch, branches):
return [old_branch.replace(branch, new_branch)
for old_branch in branches]
def tree_diff(tree1, tree2, mark_diff=''):
branches1 = get_all_branches(tree1)
branches2 = get_all_branches(tree2)
result = []
for branch in branches2:
if branch not in branches1:
result.append(branch)
if mark_diff:
diff_result = list(result)
for branch in result:
diff_result = replace_branche(
branch, branch+mark_diff, list(diff_result))
result = diff_result
return branches_to_tree(result)
def intersect_nodes(node1_name, children1, node2_name, children2):
if node1_name != node2_name:
return {node1_name: children1.copy()}
node = {node1_name: intersect_tree(children1, children2)}
return node
def intersect_tree(tree1, tree2):
if not tree1:
return tree2
if not tree2:
return tree1
result_tree = {}
for node in tree1:
nodes_to_merge = [n for n in tree2
if node == n]
if not nodes_to_merge:
result_tree.update({node: tree1[node].copy()})
else:
node_to_merge = nodes_to_merge[0]
result_tree.update(intersect_nodes(
node, tree1[node], node_to_merge, tree2[node_to_merge]))
return result_tree
def edit_keywords(keywords, newkeyword, tree):
branches = get_branches(tree)
new_branches = []
edited = False
for branch in branches:
split = branch.split('/')
newbranch = branch
for keyword in keywords:
if keyword in split:
result = []
for item in split:
if keyword == item:
result.append(newkeyword)
else:
result.append(item)
newbranch = '/'.join(result)
edited = True
new_branches.append(newbranch)
if not edited:
return False
return branches_to_tree(new_branches)
def tree_len(tree):
if tree:
return 1 + max([tree_len(node) for node in tree.values()])
else:
return 0
def tree_min_len(tree):
if tree:
return 1 + min([tree_min_len(node) for node in tree.values()])
else:
return 0
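# Illustrative usage (a minimal sketch added for clarity, not part of the
# original module), assuming plain string node names:
if __name__ == '__main__':
    tree = branches_to_tree(['a/b/c', 'a/b/d', 'a/e'])
    print(tree)                # {'a': {'b': {'c': {}, 'd': {}}, 'e': {}}}
    print(get_branches(tree))  # ['a/b/c', 'a/b/d', 'a/e'] (order may vary)
    print(tree_len(tree))      # 3: depth of the deepest branch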
|
ecreall/deform_treepy
|
deform_treepy/utilities/tree_utility.py
|
Python
|
agpl-3.0
| 11,176 | 0.000358 |
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_multiclass.dat')
parameter_list = [[traindat,testdat,label_traindat,3],[traindat,testdat,label_traindat,3]]
def classifier_knn_modular(fm_train_real=traindat,fm_test_real=testdat,label_train_multiclass=label_traindat, k=3 ):
from shogun.Features import RealFeatures, MulticlassLabels
from shogun.Classifier import KNN
from shogun.Distance import EuclidianDistance
feats_train=RealFeatures(fm_train_real)
feats_test=RealFeatures(fm_test_real)
distance=EuclidianDistance(feats_train, feats_train)
labels=MulticlassLabels(label_train_multiclass)
knn=KNN(k, distance, labels)
knn_train = knn.train()
output=knn.apply(feats_test).get_labels()
multiple_k=knn.classify_for_multiple_k()
return knn,knn_train,output,multiple_k
if __name__=='__main__':
print('KNN')
classifier_knn_modular(*parameter_list[0])
|
ratschlab/ASP
|
examples/undocumented/python_modular/classifier_knn_modular.py
|
Python
|
gpl-2.0
| 1,033 | 0.03969 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
getname
~~~~~~~
Get popular cat/dog/superhero/supervillain names.
:copyright: (c) 2015 by lord63.
:license: MIT, see LICENSE for more details.
"""
from getname.main import random_name
__title__ = "getname"
__version__ = '0.1.1'
__author__ = "lord63"
__license__ = "MIT"
__copyright__ = "Copyright 2015 lord63"
|
lord63/getname
|
getname/__init__.py
|
Python
|
mit
| 385 | 0 |
"""
Parses input files of the Cross-section tool, and generates valid input files
from (modified) settings in Python.
"""
from collections import OrderedDict
from ruamel import yaml
from cslib import (
units)
from cslib.settings import (
Type, Model, ModelType, Settings, each_value_conforms,
check_settings, generate_settings, parse_to_model)
from cslib.predicates import (
predicate,
is_string, is_integer, file_exists, has_units, is_none, is_)
from .phonon_loss import phonon_loss
from .elf import ELF
def pprint_settings(model, settings):
dumper = yaml.RoundTripDumper
dumper.add_representer(ELF, lambda dumper, data : dumper.represent_data(data.filename))
return yaml.dump(
generate_settings(settings),
indent=4, allow_unicode=True, Dumper=dumper)
def quantity(description, unit_str, default=None):
return Type(description, default=default,
check=has_units(unit_str),
generator=lambda v: '{:~P}'.format(v),
parser=units.parse_expression)
def maybe_quantity(description, unit_str, default=None):
return Type(description, default=default,
check=is_none | has_units(unit_str),
generator=lambda v: v if v is None else '{:~P}'.format(v),
parser=lambda s: s if s is None else units.parse_expression(s))
element_model = Model([
('count', Type("Integer abundance", default=None,
check=is_integer)),
('Z', Type("Atomic number", default=None,
check=is_integer)),
('M', quantity("Molar mass", 'g/mol'))
])
phonon_branch_model = Model([
('alpha', maybe_quantity(
"Bending in dispersion relation. (TV Eq. 3.112)",
'm²/s', default=units('0 m²/s'))),
('eps_ac', quantity("Accoustic deformation potential", 'eV')),
('c_s', quantity("Speed of sound", 'km/s'))])
phonon_model = Model([
('model', Type(
"Whether the model is the `single` or `dual` mode.",
check=is_('single') | is_('dual'),
default="single")),
('m_eff', maybe_quantity(
"Effective mass.", 'g', default=units('1 m_e'))),
('m_dos', maybe_quantity(
"Density of state mass.", 'g', default=units('1 m_e'))),
('lattice', quantity("Lattice spacing", 'Å')),
('single', ModelType(
phonon_branch_model, "branch",
"Only given for single mode, parameters of model.")),
('longitudinal', ModelType(
phonon_branch_model, "branch",
"Only given for dual mode, parameters of model.")),
('transversal', ModelType(
phonon_branch_model, "branch",
"Only given for dual mode, parameters of model.")),
('energy_loss', maybe_quantity(
"Phonon loss.", 'eV',
default=phonon_loss)),
('E_BZ', maybe_quantity(
"Brioullon zone energy.", 'eV',
default=lambda s: (units.h**2 / (2*units.m_e * s.lattice**2))
.to('eV')))])
@predicate("Consistent branch model")
def phonon_check(s: Settings):
if s.model == 'single' and 'single' in s:
return True
if s.model == 'dual' and 'longitudinal' in s and 'transversal' in s:
return True
return False
@predicate("Consistent energy diagram")
def energy_check(s: Settings):
if s.model == 'insulator' or s.model == 'semiconductor':
if 'band_gap' in s and 'affinity' in s and 'work_func' not in s:
return True
if s.model == 'metal':
if 'band_gap' not in s and 'affinity' not in s and 'work_func' in s:
return True
return False
def get_barrier(s: Settings):
if s.model == 'insulator' or s.model == 'semiconductor':
if s.fermi > 0*units.eV:
return s.fermi + s.band_gap/2 + s.affinity
else:
return s.band_gap + s.affinity
if s.model == 'metal':
return s.fermi + s.work_func
# It should be impossible to get here, s.model is checked to be insul/semic/metal
return 0*units.eV
band_structure_model = Model([
('model', Type(
"Whether the material is of `insulator`, `semiconductor` or `metal` type."
" Insulators and semiconductors are treated in the same manner",
check=is_('insulator') | is_('semiconductor') | is_('metal'))),
('fermi', quantity("Fermi energy", 'eV')),
('barrier', quantity("Barrier energy", 'eV', default=get_barrier)),
# Metals
('work_func', maybe_quantity("Work function", 'eV')),
# Insulators / semiconductors
('affinity', maybe_quantity("Electron affinity", 'eV')),
('band_gap', maybe_quantity("Band gap", 'eV'))
])
cstool_model = Model([
('name', Type("Name of material", default=None,
check=is_string)),
('rho_m', quantity("Specific density", 'g/cm³')),
('band_structure', ModelType(
band_structure_model, "band_structure",
"Band structure of the material. There are two models: metals"
" and insulators (or semiconductors). Metals need a Fermi energy"
" and work function, insulators need a Fermi energy, band gap"
" and affinity. The barrier energy is calculated as Fermi +"
" work_func in the case of metals, or as Fermi + affinity +"
" band_gap/2 for insulators.",
check=energy_check, obligatory=True)),
('phonon', ModelType(
phonon_model, "phonon",
"We have two choices for modeling phonon scattering: single and"
" dual branch. The second option is important for crystaline"
" materials; we then split the scattering in transverse and"
" longitudinal modes.",
check=phonon_check, obligatory=True)),
('elf_file', Type(
"Filename of ELF data (Energy Loss Function). Data can be harvested"
" from http://henke.lbl.gov/optical_constants/getdb2.html.",
check=lambda s : True,
parser=lambda fname : ELF(fname))),
('elements', Type(
"Dictionary of elements contained in the substance.",
check=each_value_conforms(element_model, "element"),
parser=lambda d: OrderedDict((k, parse_to_model(element_model, v))
for k, v in d.items()),
generator=lambda d: yaml.comments.CommentedMap(
(k, generate_settings(v))
for k, v in d.items()))),
('M_tot', maybe_quantity(
"Total molar mass; this is computed from the `elements` entry.",
'g/mol',
default=lambda s: sum(e.M * e.count for e in s.elements.values()))),
('rho_n', maybe_quantity(
"Number density of atoms or molecules in compound. For instance "
"in the case of silicon dioxide this is the number density of "
"groups of two oxygen and one silicon atom, even if SiO2 is not "
"a molecule per se.", 'cm⁻³',
default=lambda s: (units.N_A / s.M_tot * s.rho_m).to('cm⁻³')))
])
cstool_model_type = ModelType(
cstool_model, "cstool",
"""The settings given to cstool should follow a certain hierarchy,
and each setting is required to have a particular dimensionality.""")
def read_input(filename):
raw_data = yaml.load(open(filename, 'r', encoding='utf-8'), Loader=yaml.RoundTripLoader)
settings = parse_to_model(cstool_model, raw_data)
if not check_settings(settings, cstool_model):
raise ValueError("Parsed settings do not conform the model.")
return settings
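# Illustrative usage (a sketch, not part of the original module): 'silicon.yaml'
# is a hypothetical input file laid out according to cstool_model above.
#     settings = read_input('silicon.yaml')
#     print(pprint_settings(cstool_model, settings))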
|
eScatter/cstool
|
cstool/parse_input.py
|
Python
|
apache-2.0
| 7,511 | 0.001333 |
from ._costs import *
|
csxeba/ReSkiv
|
brainforge/costs/__init__.py
|
Python
|
gpl-3.0
| 22 | 0 |
from ll1_symbols import *
YAML_OUTPUT = """terminals: %s
non-terminals: %s
eof-marker: %s
error-marker: %s
start-symbol: %s
productions: %s
table: %s"""
YAML_OUTPUT_NO_TABLE = """terminals: %s
non-terminals: %s
eof-marker: %s
error-marker: %s
start-symbol: %s
productions: %s"""
class YamlGenerator(object):
"""docstring for yaml_generator"""
def __init__(self, grammar):
self.grammar = grammar
def print_yaml(self, ll1_table = None):
def convert_list_str(a_list):
return "[%s]" % (", ".join(a_list))
def convert_dict_str(a_dict):
return "{%s}" % ", ".join(["%s: %s" % (key, value)
for key, value in a_dict.items()])
def convert_dict_dict_str(a_dict):
return "\n %s" % ("\n ".join(["%s: %s" % (key, convert_dict_str(value))
for key, value in a_dict.items()]))
def convert_dict_list_str(a_dict):
return "{%s}" % (", \n ".join(["%s: %s" % (key, convert_list_str(value))
for key, value in a_dict.items()]))
def convert_dict_dict_list_str(a_dict):
return "\n %s" % ("\n ".join(["%s: %s" % (key, convert_dict_list_str(value))
for key, value in a_dict.items()]))
if ll1_table:
return YAML_OUTPUT % (convert_list_str(list(self.grammar.term)),
convert_list_str(list(self.grammar.non_term)),
EOF,
ERROR_MARKER,
self.grammar.goal,
convert_dict_dict_list_str(self.convert_production()),
convert_dict_dict_str(ll1_table))
else:
return YAML_OUTPUT_NO_TABLE % (convert_list_str(list(self.grammar.term)),
convert_list_str(list(self.grammar.non_term)),
EOF,
ERROR_MARKER,
self.grammar.goal,
convert_dict_dict_list_str(self.convert_production()))
def convert_production(self):
return {idx : {production.left_hand.lexeme : [item.lexeme for item in production.right_hand if item.lexeme is not EPSILON]} for idx, production in enumerate(self.grammar.production)}
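# Illustrative usage (a sketch, not part of the original module): assumes a
# grammar object exposing .term, .non_term, .goal and .production as used above.
#     generator = YamlGenerator(grammar)
#     print(generator.print_yaml(ll1_table))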
|
liuxue1990/python-ll1-parser-generator
|
yaml_generator.py
|
Python
|
gpl-3.0
| 1,932 | 0.036232 |
# Generated by Django 2.1.5 on 2019-01-14 14:14
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [("donations", "0006_document_deleted")]
operations = [
migrations.AlterField(
model_name="spendingrequest",
name="created",
field=models.DateTimeField(
default=django.utils.timezone.now, verbose_name="created"
),
),
migrations.AlterField(
model_name="spendingrequest",
name="modified",
field=models.DateTimeField(auto_now=True, verbose_name="modified"),
),
]
|
lafranceinsoumise/api-django
|
agir/donations/migrations/0007_auto_20190114_1514.py
|
Python
|
agpl-3.0
| 681 | 0 |
import inspect
from functools import total_ordering
def yield_once(iterator):
"""
Decorator to make an iterator yield each result only once.
:param iterator: Any iterator
:return: An iterator that yields every result only once at most.
"""
def yield_once_generator(*args, **kwargs):
yielded = []
for item in iterator(*args, **kwargs):
            if item not in yielded:
                yielded.append(item)
                yield item
return yield_once_generator
def _to_list(var):
"""
Make variable to list.
:param var: variable of any type
:return: list
"""
if isinstance(var, list):
return var
elif var is None:
return []
elif isinstance(var, str) or isinstance(var, dict):
        # We don't want to make a list out of those via the default constructor
return [var]
else:
try:
return list(var)
except TypeError:
return [var]
def arguments_to_lists(function):
"""
Decorator for a function that converts all arguments to lists.
:param function: target function
:return: target function with only lists as parameters
"""
def l_function(*args, **kwargs):
l_args = [_to_list(arg) for arg in args]
l_kwargs = {}
for key, value in kwargs.items():
l_kwargs[key] = _to_list(value)
return function(*l_args, **l_kwargs)
return l_function
def _get_member(obj, member):
# If not found, pass AttributeError to invoking function.
attribute = getattr(obj, member)
if callable(attribute) and hasattr(attribute, "__self__"):
# If the value is a bound method, invoke it like a getter and return
# its value.
try:
return attribute()
except TypeError:
# Don't use repr() to display the member more accurately, because
# invoking repr() on a bound method prints in this format:
# <bound method CLASS.METHOD of **repr(instance)**>
# This invokes repr() recursively.
raise TypeError("Given bound method '" + member + "' must be "
"callable like a getter, taking no arguments.")
else:
# Otherwise it's a member variable or property (or any other attribute
# that holds a value).
return attribute
def _construct_repr_string(obj, members):
# The passed entries have format (member-name, repr-function).
values = ", ".join(member + "=" + func(_get_member(obj, member))
for member, func in members)
return ("<" + type(obj).__name__ + " object(" + values + ") at "
+ hex(id(obj)) + ">")
def get_public_members(obj):
"""
    Retrieves a dictionary of member-like attributes (variables or properties)
    that are publicly exposed.
    :param obj: The object to probe.
    :return: A dict mapping member names to their values.
"""
return {attr: getattr(obj, attr) for attr in dir(obj)
if not attr.startswith("_")
and not hasattr(getattr(obj, attr), '__call__')}
def generate_repr(*members):
"""
Decorator that binds an auto-generated ``__repr__()`` function to a class.
The generated ``__repr__()`` function prints in following format:
<ClassName object(field1=1, field2='A string', field3=[1, 2, 3]) at 0xAAAA>
Note that this decorator modifies the given class in place!
:param members: An iterable of member names to include into the
representation-string. Providing no members yields
to inclusion of all member variables and properties
in alphabetical order (except if they start with an
underscore).
To control the representation of each member, you
can also pass a tuple where the first element
contains the member to print and the second one the
representation function (which defaults to the
built-in ``repr()``). Using None as representation
function is the same as using ``repr()``.
Supported members are fields/variables, properties
and getter-like functions (functions that accept no
arguments).
:raises ValueError: Raised when the passed
(member, repr-function)-tuples have not a length of
2.
:raises AttributeError: Raised when a given member/attribute was not found
in class.
:raises TypeError: Raised when a provided member is a bound method
that is not a getter-like function (means it must
accept no parameters).
:return: The class armed with an auto-generated __repr__
function.
"""
def decorator(cls):
cls.__repr__ = __repr__
return cls
if members:
# Prepare members list.
members_to_print = list(members)
for i, member in enumerate(members_to_print):
if isinstance(member, tuple):
# Check tuple dimensions.
length = len(member)
if length == 2:
members_to_print[i] = (member[0],
member[1] if member[1] else repr)
else:
raise ValueError("Passed tuple " + repr(member) +
" needs to be 2-dimensional, but has " +
str(length) + " dimensions.")
else:
members_to_print[i] = (member, repr)
def __repr__(self):
return _construct_repr_string(self, members_to_print)
else:
def __repr__(self):
# Need to fetch member variables every time since they are unknown
            # until class instantiation.
members_to_print = get_public_members(self)
member_repr_list = ((member, repr) for member in
sorted(members_to_print, key=str.lower))
return _construct_repr_string(self, member_repr_list)
return decorator
def generate_eq(*members):
"""
Decorator that generates equality and inequality operators for the
decorated class. The given members as well as the type of self and other
will be taken into account.
Note that this decorator modifies the given class in place!
:param members: A list of members to compare for equality.
"""
def decorator(cls):
def eq(self, other):
if type(other) is not type(self):
return False
return all(getattr(self, member) == getattr(other, member)
for member in members)
def ne(self, other):
return not eq(self, other)
cls.__eq__ = eq
cls.__ne__ = ne
return cls
return decorator
def generate_ordering(*members):
"""
Decorator that generates ordering operators for the decorated class based
on the given member names. All ordering except equality functions will
raise a TypeError when a comparison with an unrelated class is attempted.
(Comparisons with child classes will thus work fine with the capabilities
    of the base class, as Python will choose the base class's comparison
operator in that case.)
Note that this decorator modifies the given class in place!
:param members: A list of members to compare, ordered from high priority to
low. I.e. if the first member is equal the second will be
taken for comparison and so on. If a member is None it is
considered smaller than any other value except None.
"""
def decorator(cls):
def lt(self, other):
if not isinstance(other, cls):
raise TypeError("Comparison with unrelated classes is "
"unsupported.")
for member in members:
if getattr(self, member) == getattr(other, member):
continue
if (
getattr(self, member) is None or
getattr(other, member) is None):
return getattr(self, member) is None
return getattr(self, member) < getattr(other, member)
return False
cls.__lt__ = lt
return total_ordering(generate_eq(*members)(cls))
return decorator
def assert_right_type(value, types, argname):
if isinstance(types, type) or types is None:
types = (types,)
for typ in types:
if value == typ or (isinstance(typ, type) and isinstance(value, typ)):
return
raise TypeError("{} must be an instance of one of {} (provided value: "
"{})".format(argname, types, repr(value)))
def enforce_signature(function):
"""
Enforces the signature of the function by throwing TypeError's if invalid
arguments are provided. The return value is not checked.
You can annotate any parameter of your function with the desired type or a
tuple of allowed types. If you annotate the function with a value, this
value only will be allowed (useful especially for None). Example:
>>> @enforce_signature
... def test(arg: bool, another: (int, None)):
... pass
...
>>> test(True, 5)
>>> test(True, None)
    A string value for either parameter, for example, would then trigger a TypeError.
:param function: The function to check.
"""
argspec = inspect.getfullargspec(function)
annotations = argspec.annotations
argnames = argspec.args
unnamed_annotations = {}
for i, arg in enumerate(argnames):
if arg in annotations:
unnamed_annotations[i] = (annotations[arg], arg)
def decorated(*args, **kwargs):
for i, annotation in unnamed_annotations.items():
if i < len(args):
assert_right_type(args[i], annotation[0], annotation[1])
for argname, argval in kwargs.items():
if argname in annotations:
assert_right_type(argval, annotations[argname], argname)
return function(*args, **kwargs)
return decorated
class classproperty(property):
"""
Decorator to set a class function to a class property.
Given a class like:
>>> class test:
... @classproperty
... def func(self):
... return 1
We can now access the class property using the class name:
>>> test.func
1
And we can still have the same behaviour with an instance:
>>> test().func
1
"""
def __get__(self, obj, type_):
return self.fget.__get__(None, type_)(type_)
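# Illustrative usage (a small sketch added for clarity, not part of the original
# module): applies generate_repr and generate_ordering to a toy class.
if __name__ == '__main__':
    @generate_ordering('name', 'size')
    @generate_repr('name', 'size')
    class Item:
        def __init__(self, name, size):
            self.name = name
            self.size = size
    small, large = Item('box', 1), Item('box', 2)
    print(repr(small))     # e.g. <Item object(name='box', size=1) at 0x...>
    print(small < large)   # True: names are equal, so sizes decide
    print(small == large)  # False: sizes differ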
|
Adrianzatreanu/coala-decorators
|
coala_decorators/decorators.py
|
Python
|
mit
| 11,053 | 0.00009 |
# problem 48
# Project Euler
__author__ = 'Libao Jin'
__date__ = 'July 18, 2015'
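# Problem 48 ("Self powers"): find the last ten digits of the series
# 1^1 + 2^2 + 3^3 + ... + 1000^1000.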
def lastTenDigits(number):
string = str(number)
lastTen = int(string[-10:])
return lastTen
def amazingSum(n):
s = 0
while n >= 1:
s += n ** n
n -= 1
return s
def selfPowers(n):
s = amazingSum(n)
l = lastTenDigits(s)
return (l, s)
def solution():
ls = selfPowers(1000)
print(ls)
solution()
|
imthomasking/MATLAB-files
|
Python/Project.Euler/Answers.Python/48.py
|
Python
|
mit
| 391 | 0.048593 |
#!/usr/bin/python3
"""datahelpers.py: Provides functions for handling data."""
__author__ = 'Andrei Muntean'
__license__ = 'MIT License'
import numpy as np
def shuffle_data(x, y):
random_indexes = np.random.permutation(x.shape[0])
shuffled_x = np.empty_like(x)
shuffled_y = np.empty_like(y)
for index in range(0, shuffled_x.shape[0]):
random_index = random_indexes[index]
shuffled_x[index] = x[random_index]
shuffled_y[index] = y[random_index]
    return shuffled_x, shuffled_y
def split_data(x, y, threshold = 0.7, shuffle = True):
"""Generates training and tests sets from the specified data."""
if shuffle:
x, y = shuffle_data(x, y)
pivot_index = round(threshold * x.shape[0])
training_data = {
'x': x[0 : pivot_index],
'y': y[0 : pivot_index]
}
test_data = {
'x': x[pivot_index:],
'y': y[pivot_index:]
}
return training_data, test_data
def read_data(path):
"""Reads csv-formatted data from the specified path."""
data = np.loadtxt(path, delimiter = ',')
# Gets the dependent variables. They're stored in the first column.
y = data[:, 0]
# Gets the independent variables.
x = data[:, 1:]
return x, y
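# Illustrative usage (a sketch, not part of the original module): 'data.csv' is
# a hypothetical file with the dependent variable in the first column.
#     x, y = read_data('data.csv')
#     training_data, test_data = split_data(x, y, threshold=0.7)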
|
andreimuntean/LinearRegression
|
LinearRegression/datahelpers.py
|
Python
|
mit
| 1,252 | 0.0088 |
import random
import re
from datetime import datetime, timedelta
from string import letters
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ValidationError
from django.db.models import Q, Count
from django.utils.encoding import force_text
from django.utils.http import int_to_base36
from django.views.decorators.http import require_GET
import waffle
from django_statsd.clients import statsd
import pytz
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets, serializers, mixins, filters, permissions, status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes, action
from rest_framework.authtoken.models import Token
from kitsune.access.decorators import login_required
from kitsune.questions.models import Answer
from kitsune.questions.utils import num_answers, num_solutions, num_questions
from kitsune.sumo import email_utils
from kitsune.sumo.api_utils import DateTimeUTCField, GenericAPIException, PermissionMod
from kitsune.sumo.decorators import json_view
from kitsune.users.templatetags.jinja_helpers import profile_avatar
from kitsune.users.models import Profile, RegistrationProfile, Setting
def display_name_or_none(user):
try:
return user.profile.name
except (Profile.DoesNotExist, AttributeError):
return None
class TimezoneField(serializers.Field):
def to_representation(self, obj):
return force_text(obj)
def to_internal_value(self, data):
try:
return pytz.timezone(str(data))
except pytz.exceptions.UnknownTimeZoneError:
raise ValidationError('Unknown timezone')
@login_required
@require_GET
@json_view
def usernames(request):
"""An API to provide auto-complete data for user names."""
term = request.GET.get('term', '')
query = request.GET.get('query', '')
pre = term or query
if not pre:
return []
if not request.user.is_authenticated():
return []
with statsd.timer('users.api.usernames.search'):
profiles = (
Profile.objects.filter(Q(name__istartswith=pre))
.values_list('user_id', flat=True))
users = (
User.objects.filter(
Q(username__istartswith=pre) | Q(id__in=profiles))
.extra(select={'length': 'Length(username)'})
.order_by('length').select_related('profile'))
if not waffle.switch_is_active('users-dont-limit-by-login'):
last_login = datetime.now() - timedelta(weeks=12)
users = users.filter(last_login__gte=last_login)
return [{'username': u.username,
'display_name': display_name_or_none(u),
'avatar': profile_avatar(u, 24)}
for u in users[:10]]
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def test_auth(request):
return Response({
'username': request.user.username,
'authorized': True,
})
class OnlySelf(permissions.BasePermission):
"""
Only allows operations when the current user is the object in question.
Intended for use with PermissionsFields.
TODO: This should be tied to user and object permissions better, but
for now this is a bandaid.
"""
def has_object_permission(self, request, view, obj):
request_user = getattr(request, 'user', None)
user = getattr(obj, 'user', None)
return request_user == user
class OnlySelfEdits(OnlySelf):
"""
Only allow users/profiles to be edited and deleted by themselves.
TODO: This should be tied to user and object permissions better, but
for now this is a bandaid.
"""
def has_object_permission(self, request, view, obj):
# SAFE_METHODS is a list containing all the read-only methods.
if request.method in permissions.SAFE_METHODS:
return True
else:
return super(OnlySelfEdits, self).has_object_permission(request, view, obj)
class UserSettingSerializer(serializers.ModelSerializer):
user = serializers.PrimaryKeyRelatedField(
required=False,
write_only=True,
queryset=User.objects.all())
class Meta:
model = Setting
fields = ('name', 'value', 'user')
def get_identity(self, obj):
return obj['name']
def create(self, data):
user = data['user'] or self.context['view'].object
obj, created = self.Meta.model.objects.get_or_create(
user=user, name=data['name'], defaults={'value': data['value']})
if not created:
obj.value = data['value']
obj.save()
return obj
def update(self, instance, data):
for key in self.Meta.fields:
setattr(instance, key, data.get(key, getattr(instance, key)))
instance.save()
return instance
class ProfileSerializer(serializers.ModelSerializer):
id = serializers.IntegerField(source='user.id', read_only=True)
username = serializers.CharField(source='user.username')
display_name = serializers.CharField(source='name', required=False)
date_joined = DateTimeUTCField(source='user.date_joined', read_only=True)
avatar = serializers.SerializerMethodField('get_avatar_url')
email = (PermissionMod(serializers.EmailField, permissions=[OnlySelf])
(source='user.email', required=True))
settings = (PermissionMod(UserSettingSerializer, permissions=[OnlySelf])
(many=True, read_only=True))
helpfulness = serializers.ReadOnlyField(source='answer_helpfulness')
answer_count = serializers.SerializerMethodField()
question_count = serializers.SerializerMethodField()
solution_count = serializers.SerializerMethodField()
last_answer_date = serializers.SerializerMethodField()
is_active = serializers.BooleanField(source='user.is_active', read_only=True)
# This is a write only field. It is very important it stays that way!
password = serializers.CharField(source='user.password', write_only=True)
timezone = TimezoneField(required=False)
class Meta:
model = Profile
fields = [
'answer_count',
'avatar',
'bio',
'city',
'country',
'date_joined',
'display_name',
'email',
'facebook',
'helpfulness',
'id',
'irc_handle',
'is_active',
'last_answer_date',
'locale',
'mozillians',
# Password is here so it can be involved in write operations. It is
# marked as write-only above, so will not be visible.
'password',
'question_count',
'settings',
'solution_count',
'timezone',
'twitter',
'username',
'website',
]
def get_avatar_url(self, profile):
request = self.context.get('request')
size = request.GET.get('avatar_size', 48) if request else 48
return profile_avatar(profile.user, size=size)
def get_question_count(self, profile):
return num_questions(profile.user)
def get_answer_count(self, profile):
return num_answers(profile.user)
def get_solution_count(self, profile):
return num_solutions(profile.user)
def get_last_answer_date(self, profile):
last_answer = profile.user.answers.order_by('-created').first()
return last_answer.created if last_answer else None
def validate(self, data):
if data.get('name') is None:
username = data.get('user', {}).get('username')
data['name'] = username
return data
def create(self, validated_data):
user_data = validated_data.pop('user')
u = RegistrationProfile.objects.create_inactive_user(
user_data['username'],
user_data['password'],
user_data['email'])
p = u.profile
for key, val in validated_data.items():
setattr(p, key, val)
p.save()
return p
def update(self, instance, validated_data):
if 'user' in validated_data:
user_data = validated_data.pop('user')
for key, val in user_data.items():
setattr(instance.user, key, val)
instance.user.save()
return super(ProfileSerializer, self).update(instance, validated_data)
def validate_username(self, username):
if re.match(r'^[\w.-]{4,30}$', username) is None:
raise ValidationError('Usernames may only be letters, numbers, "." and "-".')
if self.instance:
# update
if username != self.instance.user.username:
raise ValidationError("Can't change this field.")
else:
# create
if User.objects.filter(username=username).exists():
raise ValidationError('A user with that username exists')
return username
def validate_email(self, email):
if not self.instance:
# create
if User.objects.filter(email=email).exists():
raise ValidationError('A user with that email address already exists.')
return email
class ProfileFKSerializer(ProfileSerializer):
class Meta(ProfileSerializer.Meta):
fields = [
'username',
'display_name',
'avatar',
]
class ProfileViewSet(mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.ListModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet):
queryset = Profile.objects.all()
serializer_class = ProfileSerializer
# User usernames instead of ids in urls.
lookup_field = 'user__username'
# Usernames sometimes contain periods so we want to change the regex from the default '[^/.]+'
lookup_value_regex = '[^/]+'
permission_classes = [
OnlySelfEdits,
]
filter_backends = [
DjangoFilterBackend,
filters.OrderingFilter,
]
filter_fields = []
ordering_fields = []
# Default, if not overwritten
ordering = ('-user__date_joined',)
number_blacklist = [666, 69]
# This is routed to /api/2/user/generate/
def generate(self, request, **kwargs):
"""
Generate a user with a random username and password.
"""
# The loop counter isn't used. This is an escape hatch.
for _ in range(10):
# Generate a user of the form "buddy#"
digits = random.randint(100, 10000)
if digits in self.number_blacklist:
continue
username = 'buddy{}'.format(digits)
# Check if it is taken yet.
if not User.objects.filter(username=username).exists():
break
else:
# At this point, we just have too many users.
return Response({"error": 'Unable to generate username.'},
status=500)
password = ''.join(random.choice(letters) for _ in range(10))
# Capitalize the 'b' in 'buddy'
display_name = 'B' + username[1:]
u = User.objects.create(username=username)
u.set_password(password)
u.settings.create(name='autogenerated', value='true')
u.save()
p = Profile.objects.create(user=u, name=display_name)
# This simulates the user being logged in, for purposes of exposing
# fields in the serializer below.
request.user = u
token, _ = Token.objects.get_or_create(user=u)
serializer = ProfileSerializer(instance=p, context={'request': request})
return Response({
'user': serializer.data,
'password': password,
'token': token.key,
})
# This is routed to /api/2/user/weekly-solutions/
def weekly_solutions(self, request, **kwargs):
"""
Return the most helpful users in the past week.
"""
start = datetime.now() - timedelta(days=7)
# Get a list of top 10 users and the number of solutions they have in the last week.
# It looks like [{'creator__username': 'bob', 'creator__count': 12}, ...]
# This uses ``username`` instead of ``id``, because ``username`` appears
# in the output of ``ProfileFKSerializer``, whereas ``id`` does not.
        # It also reverse-orders the results by solution count so that we can
        # get the top contributors.
raw_counts = (
Answer.objects
.exclude(solution_for=None)
.filter(created__gt=start)
.values('creator__username')
.annotate(Count('creator'))
.order_by('creator__count')
.reverse()[:10]
)
# Turn that list into a dictionary from username -> count.
username_to_count = {u['creator__username']: u['creator__count'] for u in raw_counts}
# Get all the profiles mentioned in the above.
profiles = Profile.objects.filter(user__username__in=username_to_count.keys())
result = ProfileFKSerializer(instance=profiles, many=True).data
# Pair up the profiles and the solution counts.
for u in result:
u['weekly_solutions'] = username_to_count[u['username']]
result.sort(key=lambda u: u['weekly_solutions'], reverse=True)
return Response(result)
@action(detail=True, methods=['post'])
def set_setting(self, request, user__username=None):
user = self.get_object().user
request.data['user'] = user.pk
try:
setting = Setting.objects.get(user=user, name=request.data['name'])
except Setting.DoesNotExist:
setting = None
serializer = UserSettingSerializer(instance=setting, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
else:
raise GenericAPIException(400, serializer.errors)
@action(detail=True, methods=['post', 'delete'])
def delete_setting(self, request, user__username=None):
profile = self.get_object()
if 'name' not in request.data:
raise GenericAPIException(400, {'name': 'This field is required'})
try:
meta = (Setting.objects
.get(user=profile.user, name=request.data['name']))
meta.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except Setting.DoesNotExist:
raise GenericAPIException(404, {'detail': 'No matching user setting found.'})
@action(detail=True, methods=['get'])
def request_password_reset(self, request, user__username=None):
profile = self.get_object()
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
c = {
'email': profile.user.email,
'domain': domain,
'site_name': site_name,
'uid': int_to_base36(profile.user.id),
'user': profile.user,
'token': default_token_generator.make_token(profile.user),
'protocol': 'https' if request.is_secure() else 'http',
}
subject = email_utils.render_email('users/email/pw_reset_subject.ltxt', c)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
@email_utils.safe_translation
def _make_mail(locale):
mail = email_utils.make_mail(
subject=subject,
text_template='users/email/pw_reset.ltxt',
html_template='users/email/pw_reset.html',
context_vars=c,
from_email=None,
to_email=profile.user.email)
return mail
email_utils.send_messages([_make_mail(profile.locale)])
return Response('', status=204)
|
anushbmx/kitsune
|
kitsune/users/api.py
|
Python
|
bsd-3-clause
| 16,254 | 0.000984 |
from pushyou import db
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(255), unique=True)
status = db.Column(db.SmallInteger)
account_type = db.Column(db.SmallInteger) # Staff,Client,
email = db.Column(db.String(255), unique=True)
password_hash = db.Column(db.String(255))
password_salt = db.Column(db.String(255))
business_name = db.Column(db.String(255))
business_abn = db.Column(db.String(255))
contact_name = db.Column(db.String(255))
contact_phone = db.Column(db.String(255))
address_line1 = db.Column(db.String(255))
address_line2 = db.Column(db.String(255))
address_suburb = db.Column(db.String(255))
address_state = db.Column(db.String(255))
address_postcode = db.Column(db.String(255))
plan = db.Column(db.SmallInteger) # Basic? Gold?
max_sites = db.Column(db.SmallInteger)
max_active_promo = db.Column(db.SmallInteger)
max_promo_per_site = db.Column(db.SmallInteger)
create_date = db.Column(db.Date)
update_date = db.Column(db.Date)
last_login_date = db.Column(db.Date)
    last_login_ip = db.Column(db.String(255))  # an IP address is text, not a date
locations = db.relationship('Location', backref='user', lazy='dynamic')
promotions = db.relationship('Promotion', backref='user', lazy='dynamic')
def __repr__(self):
return '<User %r>' % (self.username)
|
rbeardow/boki
|
boki/user.py
|
Python
|
mit
| 1,427 | 0.004905 |
# defivelo-intranet -- Outil métier pour la gestion du Défi Vélo
# Copyright (C) 2020 Didier 'OdyX' Raboud <didier.raboud@liip.ch>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.messages.views import SuccessMessageMixin
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views.generic import ListView
from django.views.generic.edit import CreateView, UpdateView
from rolepermissions.mixins import HasPermissionsMixin
from apps.common import DV_STATES
from defivelo.roles import has_permission
from defivelo.views import MenuView
from ..forms import AnnualStateSettingForm
from ..models import AnnualStateSetting
class SettingsMixin(HasPermissionsMixin):
model = AnnualStateSetting
required_permission = "settings_crud"
def dispatch(self, request, *args, **kwargs):
self.year = kwargs.pop("year")
self.cantons = (
DV_STATES
if has_permission(request.user, "cantons_all")
else self.request.user.managedstates.all().values_list("canton", flat=True)
)
return super().dispatch(request, *args, **kwargs)
def get_queryset(self):
return super().get_queryset().filter(year=self.year, canton__in=self.cantons)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Add our menu_category context
context["menu_category"] = "settings"
context["year"] = self.year
return context
def get_form_kwargs(self):
form_kwargs = super().get_form_kwargs()
form_kwargs["year"] = self.year
form_kwargs["cantons"] = self.cantons
return form_kwargs
def get_success_url(self):
return reverse_lazy(
"annualstatesettings-list", kwargs={"year": self.object.year}
)
class AnnualStateSettingsListView(SettingsMixin, MenuView, ListView):
context_object_name = "settings"
ordering = ["canton"]
class AnnualStateSettingMixin(SettingsMixin, SuccessMessageMixin, MenuView):
context_object_name = "setting"
form_class = AnnualStateSettingForm
class AnnualStateSettingCreateView(AnnualStateSettingMixin, CreateView):
success_message = _("Configuration cantonale par année créée")
class AnnualStateSettingUpdateView(AnnualStateSettingMixin, UpdateView):
success_message = _("Configuration cantonale par année mise à jour")
|
defivelo/db
|
apps/challenge/views/settings.py
|
Python
|
agpl-3.0
| 3,051 | 0.000657 |
""" Tests for new channel bot """
import datetime
import time
import unittest
import mock
import new_channel_bot
def _make_fake_api(channels, posts):
"""
    Construct a fake Slack API.
    Args: channels (dict): Channel listing returned by the mocked 'channels.list' call.
    posts (list): Collects every call made to the 'chat.postMessage' API.
"""
def api(method, **kwargs):
"""
Simple fake API for the methods we care about
Args:
method (string) Slack API method.
**kwargs: Arbitrary keyword arguments.
"""
if method == 'channels.list':
return channels
elif method == 'chat.postMessage':
posts.append(kwargs)
return
else:
raise Exception('Unexpected method: {}'.format(method))
return api
class NewChannelBotTests(unittest.TestCase):
""" Tests for new channel bot """
@mock.patch.object(new_channel_bot.slackclient.SlackClient, 'api_call')
def test_skips_old_channels(self, api):
""" Verify we only post new channels """
posts = []
old_channel_time = (
time.time() - datetime.timedelta(days=2).total_seconds()
)
new_channel_time = (
time.time() - datetime.timedelta(hours=23).total_seconds()
)
channels = {
'channels': [
{
'name': 'old-channel',
'purpose': {'value': 'not recently made!'},
'id': '1',
'created': old_channel_time
},
{
'name': 'new-channel',
'purpose': {'value': 'recently made!'},
'id': '2',
'created': new_channel_time
}
]
}
api.side_effect = _make_fake_api(channels, posts)
new_channel_bot.post_new_channels(channels, '#__TEST__')
self.assertEqual(len(posts), 1)
self.assertEqual('#__TEST__', posts[0].get('channel'))
self.assertIn('new-channel', posts[0].get('text'))
@mock.patch.object(new_channel_bot.slackclient.SlackClient, 'api_call')
def test_message_formatting(self, api):
""" Verify that we properly format messages """
posts = []
channels = {
'channels': [
{
'name': 'really-purposeless',
'id': '1',
'created': time.time()
},
{
'name': 'purposeless',
'purpose': {'value': ''},
'id': '2',
'created': time.time()
},
{
'name': 'purposeful',
'purpose': {'value': 'recently made!'},
'id': '3',
'created': time.time()
}
]
}
api.side_effect = _make_fake_api(channels, posts)
new_channel_bot.post_new_channels(channels, '#__TEST__')
self.assertEqual(len(posts), 3)
self.assertEqual(
'New channel <#1|really-purposeless>',
posts[0].get('text')
)
self.assertEqual(
'New channel <#2|purposeless>',
posts[1].get('text')
)
self.assertEqual(
"New channel <#3|purposeful>. Purpose: 'recently made!'",
posts[2].get('text')
)
@mock.patch.object(new_channel_bot.slackclient.SlackClient, 'api_call')
def test_unicode(self, api):
""" Tests that we can handle unicode names """
posts = []
channels = {
'channels': [
{
'name': u'\U0001f604',
'id': '1',
'created': time.time(),
'purpose': {'value': u'something\U0001f604'},
}
]
}
api.side_effect = _make_fake_api(channels, posts)
new_channel_bot.post_new_channels(channels, '#__TEST__')
self.assertEqual(len(posts), 1)
self.assertEqual(
u"New channel <#1|\U0001f604>. Purpose: 'something\U0001f604'",
posts[0].get('text')
)
|
democrats/new-channel-bot
|
tests/test_new_channel_bot.py
|
Python
|
mit
| 4,261 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._sync_groups_operations import build_cancel_sync_request, build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_by_database_request, build_list_hub_schemas_request, build_list_logs_request, build_list_sync_database_ids_request, build_refresh_hub_schema_request_initial, build_trigger_sync_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SyncGroupsOperations:
"""SyncGroupsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.sql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_sync_database_ids(
self,
location_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SyncDatabaseIdListResult"]:
"""Gets a collection of sync database ids.
:param location_name: The name of the region where the resource is located.
:type location_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SyncDatabaseIdListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.sql.models.SyncDatabaseIdListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SyncDatabaseIdListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_sync_database_ids_request(
location_name=location_name,
subscription_id=self._config.subscription_id,
template_url=self.list_sync_database_ids.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_sync_database_ids_request(
location_name=location_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SyncDatabaseIdListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_sync_database_ids.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Sql/locations/{locationName}/syncDatabaseIds'} # type: ignore
async def _refresh_hub_schema_initial(
self,
resource_group_name: str,
server_name: str,
database_name: str,
sync_group_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_refresh_hub_schema_request_initial(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
sync_group_name=sync_group_name,
subscription_id=self._config.subscription_id,
template_url=self._refresh_hub_schema_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_refresh_hub_schema_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}/refreshHubSchema'} # type: ignore
@distributed_trace_async
async def begin_refresh_hub_schema(
self,
resource_group_name: str,
server_name: str,
database_name: str,
sync_group_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Refreshes a hub database schema.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database on which the sync group is hosted.
:type database_name: str
:param sync_group_name: The name of the sync group.
:type sync_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._refresh_hub_schema_initial(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
sync_group_name=sync_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_refresh_hub_schema.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}/refreshHubSchema'} # type: ignore
@distributed_trace
def list_hub_schemas(
self,
resource_group_name: str,
server_name: str,
database_name: str,
sync_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SyncFullSchemaPropertiesListResult"]:
"""Gets a collection of hub database schemas.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database on which the sync group is hosted.
:type database_name: str
:param sync_group_name: The name of the sync group.
:type sync_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SyncFullSchemaPropertiesListResult or the result
of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.sql.models.SyncFullSchemaPropertiesListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SyncFullSchemaPropertiesListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_hub_schemas_request(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
sync_group_name=sync_group_name,
subscription_id=self._config.subscription_id,
template_url=self.list_hub_schemas.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_hub_schemas_request(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
sync_group_name=sync_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SyncFullSchemaPropertiesListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_hub_schemas.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}/hubSchemas'} # type: ignore
@distributed_trace
def list_logs(
self,
resource_group_name: str,
server_name: str,
database_name: str,
sync_group_name: str,
start_time: str,
end_time: str,
type: Union[str, "_models.SyncGroupsType"],
continuation_token_parameter: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.SyncGroupLogListResult"]:
"""Gets a collection of sync group logs.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database on which the sync group is hosted.
:type database_name: str
:param sync_group_name: The name of the sync group.
:type sync_group_name: str
:param start_time: Get logs generated after this time.
:type start_time: str
:param end_time: Get logs generated before this time.
:type end_time: str
:param type: The types of logs to retrieve.
:type type: str or ~azure.mgmt.sql.models.SyncGroupsType
:param continuation_token_parameter: The continuation token for this operation.
:type continuation_token_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SyncGroupLogListResult or the result of
cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.sql.models.SyncGroupLogListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SyncGroupLogListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_logs_request(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
sync_group_name=sync_group_name,
subscription_id=self._config.subscription_id,
start_time=start_time,
end_time=end_time,
type=type,
continuation_token_parameter=continuation_token_parameter,
template_url=self.list_logs.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_logs_request(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
sync_group_name=sync_group_name,
subscription_id=self._config.subscription_id,
start_time=start_time,
end_time=end_time,
type=type,
continuation_token_parameter=continuation_token_parameter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SyncGroupLogListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_logs.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}/logs'} # type: ignore
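    # Sketch of consuming the paged logs (all argument values are placeholders;
    # type must be one of the SyncGroupsType values):
    #
    #   async for entry in client.sync_groups.list_logs(
    #           "<resource-group>", "<server>", "<database>", "<sync-group>",
    #           start_time="2021-01-01T00:00:00Z", end_time="2021-01-02T00:00:00Z",
    #           type="All"):
    #       print(entry)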
@distributed_trace_async
async def cancel_sync(
self,
resource_group_name: str,
server_name: str,
database_name: str,
sync_group_name: str,
**kwargs: Any
) -> None:
"""Cancels a sync group synchronization.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database on which the sync group is hosted.
:type database_name: str
:param sync_group_name: The name of the sync group.
:type sync_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_cancel_sync_request(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
sync_group_name=sync_group_name,
subscription_id=self._config.subscription_id,
template_url=self.cancel_sync.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel_sync.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}/cancelSync'} # type: ignore
@distributed_trace_async
async def trigger_sync(
self,
resource_group_name: str,
server_name: str,
database_name: str,
sync_group_name: str,
**kwargs: Any
) -> None:
"""Triggers a sync group synchronization.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database on which the sync group is hosted.
:type database_name: str
:param sync_group_name: The name of the sync group.
:type sync_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_trigger_sync_request(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
sync_group_name=sync_group_name,
subscription_id=self._config.subscription_id,
template_url=self.trigger_sync.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
trigger_sync.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}/triggerSync'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
server_name: str,
database_name: str,
sync_group_name: str,
**kwargs: Any
) -> "_models.SyncGroup":
"""Gets a sync group.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database on which the sync group is hosted.
:type database_name: str
:param sync_group_name: The name of the sync group.
:type sync_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SyncGroup, or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.SyncGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SyncGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
sync_group_name=sync_group_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SyncGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
server_name: str,
database_name: str,
sync_group_name: str,
parameters: "_models.SyncGroup",
**kwargs: Any
) -> Optional["_models.SyncGroup"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SyncGroup"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'SyncGroup')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
sync_group_name=sync_group_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SyncGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('SyncGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
server_name: str,
database_name: str,
sync_group_name: str,
parameters: "_models.SyncGroup",
**kwargs: Any
) -> AsyncLROPoller["_models.SyncGroup"]:
"""Creates or updates a sync group.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database on which the sync group is hosted.
:type database_name: str
:param sync_group_name: The name of the sync group.
:type sync_group_name: str
:param parameters: The requested sync group resource state.
:type parameters: ~azure.mgmt.sql.models.SyncGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SyncGroup or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.sql.models.SyncGroup]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SyncGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
sync_group_name=sync_group_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('SyncGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}'} # type: ignore
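    # Sketch of driving the long-running operation above (argument values are
    # placeholders): the returned AsyncLROPoller is awaited for the final SyncGroup.
    #
    #   poller = await client.sync_groups.begin_create_or_update(
    #       "<resource-group>", "<server>", "<database>", "<sync-group>", parameters)
    #   sync_group = await poller.result()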
async def _delete_initial(
self,
resource_group_name: str,
server_name: str,
database_name: str,
sync_group_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
sync_group_name=sync_group_name,
subscription_id=self._config.subscription_id,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
server_name: str,
database_name: str,
sync_group_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a sync group.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database on which the sync group is hosted.
:type database_name: str
:param sync_group_name: The name of the sync group.
:type sync_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
sync_group_name=sync_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
server_name: str,
database_name: str,
sync_group_name: str,
parameters: "_models.SyncGroup",
**kwargs: Any
) -> Optional["_models.SyncGroup"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SyncGroup"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'SyncGroup')
request = build_update_request_initial(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
sync_group_name=sync_group_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SyncGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}'} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
server_name: str,
database_name: str,
sync_group_name: str,
parameters: "_models.SyncGroup",
**kwargs: Any
) -> AsyncLROPoller["_models.SyncGroup"]:
"""Updates a sync group.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database on which the sync group is hosted.
:type database_name: str
:param sync_group_name: The name of the sync group.
:type sync_group_name: str
:param parameters: The requested sync group resource state.
:type parameters: ~azure.mgmt.sql.models.SyncGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either SyncGroup or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.sql.models.SyncGroup]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.SyncGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
sync_group_name=sync_group_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('SyncGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups/{syncGroupName}'} # type: ignore
@distributed_trace
def list_by_database(
self,
resource_group_name: str,
server_name: str,
database_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SyncGroupListResult"]:
"""Lists sync groups under a hub database.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database on which the sync group is hosted.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SyncGroupListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.sql.models.SyncGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SyncGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_database_request(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_database.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_database_request(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SyncGroupListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_database.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/syncGroups'} # type: ignore
|
Azure/azure-sdk-for-python
|
sdk/sql/azure-mgmt-sql/azure/mgmt/sql/aio/operations/_sync_groups_operations.py
|
Python
|
mit
| 47,878 | 0.004511 |
__author__ = 'bptripp'
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
"""
Initialization of CNNs via clustering of inputs and convex optimization
of outputs.
"""
def sigmoid(x, centre, gain):
y = 1 / (1 + np.exp(-gain*(x-centre)))
return y
def gaussian(x, mu, sigma):
return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sigma, 2.)))
def get_sigmoid_params(false_samples, true_samples, do_plot=False):
"""
    Find the centre and gain of a sigmoid that approximates the probability
    of class membership. The probability comes from Bayes' rule applied to a
    Gaussian model of the samples in each class (equal priors assumed).
"""
false_mu = np.mean(false_samples)
false_sigma = np.std(false_samples)
true_mu = np.mean(true_samples)
true_sigma = np.std(true_samples)
lowest = np.minimum(np.min(false_samples), np.min(true_samples))
highest = np.maximum(np.max(false_samples), np.max(true_samples))
a = np.arange(lowest, highest, (highest-lowest)/25)
p_x_false = gaussian(a, false_mu, false_sigma)
p_x_true = gaussian(a, true_mu, true_sigma)
p_x = p_x_true + p_x_false
p_true = p_x_true / p_x
popt, _ = curve_fit(sigmoid, a, p_true)
centre, gain = popt[0], popt[1]
if do_plot:
plt.hist(false_samples, a)
plt.hist(true_samples, a)
plt.plot(a, 100*sigmoid(a, centre, gain))
plt.plot(a, 100*p_true)
plt.title('centre: ' + str(centre) + ' gain: ' + str(gain))
plt.show()
return centre, gain
def check_sigmoid():
n = 1000
false_samples = 1 + .3*np.random.randn(n)
true_samples = -1 + 1*np.random.randn(n)
centre, gain = get_sigmoid_params(false_samples, true_samples, do_plot=True)
def get_convolutional_prototypes(samples, shape, patches_per_sample=5):
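    """
    Draw random patches from the input feature maps, cluster them with k-means,
    and return the unit-norm cluster centres reshaped as convolution kernels.
    """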
assert len(samples.shape) == 4
assert len(shape) == 4
wiggle = (samples.shape[2]-shape[2], samples.shape[3]-shape[3])
patches = []
for sample in samples:
for i in range(patches_per_sample):
corner = (np.random.randint(0, wiggle[0]), np.random.randint(0, wiggle[1]))
patches.append(sample[:,corner[0]:corner[0]+shape[2],corner[1]:corner[1]+shape[3]])
patches = np.array(patches)
flat = np.reshape(patches, (patches.shape[0], -1))
km = KMeans(shape[0])
km.fit(flat)
kernels = km.cluster_centers_
    # normalize each cluster centre to unit length before using it as a kernel
for i in range(kernels.shape[0]):
kernels[i,:] = kernels[i,:] / np.linalg.norm(kernels[i,:])
return np.reshape(kernels, shape)
def get_dense_prototypes(samples, n):
km = KMeans(n)
km.fit(samples)
return km.cluster_centers_
def check_get_prototypes():
samples = np.random.rand(1000, 2, 28, 28)
prototypes = get_convolutional_prototypes(samples, (20,2,5,5))
print(prototypes.shape)
samples = np.random.rand(900, 2592)
prototypes = get_dense_prototypes(samples, 64)
print(prototypes.shape)
def get_discriminant(samples, labels):
lda = LinearDiscriminantAnalysis(solver='eigen', shrinkage='auto')
lda.fit(samples, labels)
return lda.coef_[0]
def check_discriminant():
n = 1000
labels = np.random.rand(n) < 0.5
samples = np.zeros((n,2))
for i in range(len(labels)):
if labels[i] > 0.5:
samples[i,:] = np.array([0,1]) + 1*np.random.randn(1,2)
else:
samples[i,:] = np.array([-2,-1]) + .5*np.random.randn(1,2)
coeff = get_discriminant(samples, labels)
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.scatter(samples[labels>.5,0], samples[labels>.5,1], color='g')
plt.scatter(samples[labels<.5,0], samples[labels<.5,1], color='r')
plt.plot([-coeff[0], coeff[0]], [-coeff[1], coeff[1]], color='k')
plt.subplot(1,2,2)
get_sigmoid_params(np.dot(samples[labels<.5], coeff),
np.dot(samples[labels>.5], coeff),
do_plot=True)
plt.show()
def init_model(model, X_train, Y_train):
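    """
    Initialise a model whose final layers are Dense(1) + sigmoid: hidden
    convolutional and dense kernels are set to k-means prototypes of their
    inputs, and the output layer is set from a linear discriminant whose
    projection is passed through a sigmoid fitted to the class probabilities.
    """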
if not (isinstance(model.layers[-1], Activation) \
and model.layers[-1].activation.__name__ == 'sigmoid'\
and isinstance(model.layers[-2], Dense)):
raise Exception('This does not look like an LDA-compatible network, which is all we support')
for i in range(len(model.layers)-2):
if isinstance(model.layers[i], Convolution2D):
inputs = get_inputs(model, X_train, i)
w, b = model.layers[i].get_weights()
w = get_convolutional_prototypes(inputs, w.shape)
b = .1 * np.ones_like(b)
model.layers[i].set_weights([w,b])
if isinstance(model.layers[i], Dense):
inputs = get_inputs(model, X_train, i)
w, b = model.layers[i].get_weights()
w = get_dense_prototypes(inputs, w.shape[1]).T
b = .1 * np.ones_like(b)
model.layers[i].set_weights([w,b])
    # Fit the discriminant on the inputs the final Dense layer actually sees
    # (the output of the last hidden activation).
    inputs = get_inputs(model, X_train, len(model.layers)-2)
coeff = get_discriminant(inputs, Y_train)
centre, gain = get_sigmoid_params(np.dot(inputs[Y_train<.5], coeff),
np.dot(inputs[Y_train>.5], coeff))
w = coeff*gain
w = w[:,np.newaxis]
    # The bias must also carry the gain so that w.x + b == gain*(coeff.x - centre).
    b = np.array([-centre*gain])
model.layers[-2].set_weights([w,b])
sigmoid_inputs = get_inputs(model, X_train, len(model.layers)-1)
plt.figure()
plt.subplot(2,1,1)
    # Share one set of bins (over the sigmoid inputs, not the labels) so the two
    # class histograms are directly comparable.
    bins = np.linspace(np.min(sigmoid_inputs), np.max(sigmoid_inputs), 26)
    plt.hist(sigmoid_inputs[Y_train<.5], bins)
    plt.subplot(2,1,2)
    plt.hist(sigmoid_inputs[Y_train>.5], bins)
plt.show()
def get_inputs(model, X_train, layer):
if layer == 0:
return X_train
else:
partial_model = Sequential(layers=model.layers[:layer])
partial_model.compile('sgd', 'mse')
return partial_model.predict(X_train)
if __name__ == '__main__':
# check_sigmoid()
# check_get_prototypes()
# check_discriminant()
import cPickle
f = file('../data/bowl-test.pkl', 'rb')
# f = file('../data/depths/24_bowl-29-Feb-2016-15-01-53.pkl', 'rb')
d, bd, l = cPickle.load(f)
f.close()
d = d - np.mean(d.flatten())
d = d / np.std(d.flatten())
# n = 900
n = 90
X_train = np.zeros((n,1,80,80))
X_train[:,0,:,:] = d[:n,:,:]
Y_train = l[:n]
model = Sequential()
model.add(Convolution2D(64,9,9,input_shape=(1,80,80)))
model.add(Activation('relu'))
model.add(MaxPooling2D())
# model.add(Convolution2D(64,3,3))
# model.add(Activation('relu'))
# model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
init_model(model, X_train, Y_train)
# from visualize import plot_kernels
# plot_kernels(model.layers[0].get_weights()[0])
|
bptripp/grasp-convnet
|
py/cninit.py
|
Python
|
mit
| 7,083 | 0.009318 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import imp
import logging
import modulefinder
import optparse
import os
import sys
import zipfile
from telemetry import benchmark
from telemetry.core import command_line
from telemetry.core import discover
from telemetry.util import bootstrap
from telemetry.util import cloud_storage
from telemetry.util import path
from telemetry.util import path_set
DEPS_FILE = 'bootstrap_deps'
def FindBootstrapDependencies(base_dir):
deps_file = os.path.join(base_dir, DEPS_FILE)
if not os.path.exists(deps_file):
return []
deps_paths = bootstrap.ListAllDepsPaths(deps_file)
return set(os.path.realpath(os.path.join(
path.GetChromiumSrcDir(), os.pardir, deps_path))
for deps_path in deps_paths)
def FindPythonDependencies(module_path):
logging.info('Finding Python dependencies of %s' % module_path)
# Load the module to inherit its sys.path modifications.
imp.load_source(
os.path.splitext(os.path.basename(module_path))[0], module_path)
# Analyze the module for its imports.
finder = modulefinder.ModuleFinder()
finder.run_script(module_path)
# Filter for only imports in Chromium.
for module in finder.modules.itervalues():
# If it's an __init__.py, module.__path__ gives the package's folder.
module_path = module.__path__[0] if module.__path__ else module.__file__
if not module_path:
continue
module_path = os.path.realpath(module_path)
if not path.IsSubpath(module_path, path.GetChromiumSrcDir()):
continue
yield module_path
def FindPageSetDependencies(base_dir):
logging.info('Finding page sets in %s' % base_dir)
# Add base_dir to path so our imports relative to base_dir will work.
sys.path.append(base_dir)
tests = discover.DiscoverClasses(base_dir, base_dir, benchmark.Benchmark,
index_by_class_name=True)
for test_class in tests.itervalues():
test_obj = test_class()
# Ensure the test's default options are set if needed.
parser = optparse.OptionParser()
test_obj.AddCommandLineArgs(parser, None)
options = optparse.Values()
for k, v in parser.get_default_values().__dict__.iteritems():
options.ensure_value(k, v)
# Page set paths are relative to their runner script, not relative to us.
path.GetBaseDir = lambda: base_dir
# TODO: Loading the page set will automatically download its Cloud Storage
# deps. This is really expensive, and we don't want to do this by default.
page_set = test_obj.CreatePageSet(options)
# Add all of its serving_dirs as dependencies.
for serving_dir in page_set.serving_dirs:
yield serving_dir
def FindExcludedFiles(files, options):
# Define some filters for files.
def IsHidden(path_string):
for pathname_component in path_string.split(os.sep):
if pathname_component.startswith('.'):
return True
return False
def IsPyc(path_string):
return os.path.splitext(path_string)[1] == '.pyc'
def IsInCloudStorage(path_string):
return os.path.exists(path_string + '.sha1')
def MatchesExcludeOptions(path_string):
for pattern in options.exclude:
if (fnmatch.fnmatch(path_string, pattern) or
fnmatch.fnmatch(os.path.basename(path_string), pattern)):
return True
return False
# Collect filters we're going to use to exclude files.
exclude_conditions = [
IsHidden,
IsPyc,
IsInCloudStorage,
MatchesExcludeOptions,
]
# Check all the files against the filters.
for file_path in files:
if any(condition(file_path) for condition in exclude_conditions):
yield file_path
def FindDependencies(target_paths, options):
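  """Returns the set of files the given target scripts depend on, including
  Telemetry's own dependencies and minus the excluded files."""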
# Verify arguments.
for target_path in target_paths:
if not os.path.exists(target_path):
raise ValueError('Path does not exist: %s' % target_path)
dependencies = path_set.PathSet()
# Including Telemetry's major entry points will (hopefully) include Telemetry
# and all its dependencies. If the user doesn't pass any arguments, we just
# have Telemetry.
dependencies |= FindPythonDependencies(os.path.realpath(
os.path.join(path.GetTelemetryDir(), 'telemetry', 'benchmark_runner.py')))
dependencies |= FindPythonDependencies(os.path.realpath(
os.path.join(path.GetTelemetryDir(),
'telemetry', 'unittest_util', 'run_tests.py')))
dependencies |= FindBootstrapDependencies(path.GetTelemetryDir())
# Add dependencies.
for target_path in target_paths:
base_dir = os.path.dirname(os.path.realpath(target_path))
dependencies.add(base_dir)
dependencies |= FindBootstrapDependencies(base_dir)
dependencies |= FindPythonDependencies(target_path)
if options.include_page_set_data:
dependencies |= FindPageSetDependencies(base_dir)
# Remove excluded files.
dependencies -= FindExcludedFiles(set(dependencies), options)
return dependencies
def ZipDependencies(target_paths, dependencies, options):
base_dir = os.path.dirname(os.path.realpath(path.GetChromiumSrcDir()))
with zipfile.ZipFile(options.zip, 'w', zipfile.ZIP_DEFLATED) as zip_file:
# Add dependencies to archive.
for dependency_path in dependencies:
path_in_archive = os.path.join(
'telemetry', os.path.relpath(dependency_path, base_dir))
zip_file.write(dependency_path, path_in_archive)
# Add symlinks to executable paths, for ease of use.
for target_path in target_paths:
link_info = zipfile.ZipInfo(
os.path.join('telemetry', os.path.basename(target_path)))
link_info.create_system = 3 # Unix attributes.
      # 0100000 (octal) marks a regular file; 0777 is the permission bits rwxrwxrwx.
link_info.external_attr = 0100777 << 16 # Octal.
relative_path = os.path.relpath(target_path, base_dir)
link_script = (
'#!/usr/bin/env python\n\n'
'import os\n'
'import sys\n\n\n'
'script = os.path.join(os.path.dirname(__file__), \'%s\')\n'
'os.execv(sys.executable, [sys.executable, script] + sys.argv[1:])'
% relative_path)
zip_file.writestr(link_info, link_script)
# Add gsutil to the archive, if it's available. The gsutil in
# depot_tools is modified to allow authentication using prodaccess.
# TODO: If there's a gsutil in telemetry/third_party/, bootstrap_deps
# will include it. Then there will be two copies of gsutil at the same
# location in the archive. This can be confusing for users.
gsutil_path = os.path.realpath(cloud_storage.FindGsutil())
if cloud_storage.SupportsProdaccess(gsutil_path):
gsutil_base_dir = os.path.join(os.path.dirname(gsutil_path), os.pardir)
gsutil_dependencies = path_set.PathSet()
gsutil_dependencies.add(os.path.dirname(gsutil_path))
# Also add modules from depot_tools that are needed by gsutil.
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'boto'))
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'fancy_urllib'))
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'retry_decorator'))
gsutil_dependencies -= FindExcludedFiles(
set(gsutil_dependencies), options)
# Also add upload.py to the archive from depot_tools, if it is available.
# This allows us to post patches without requiring a full depot_tools
# install. There's no real point in including upload.py if we do not
# also have gsutil, which is why this is inside the gsutil block.
gsutil_dependencies.add(os.path.join(gsutil_base_dir, 'upload.py'))
for dependency_path in gsutil_dependencies:
path_in_archive = os.path.join(
'telemetry', os.path.relpath(path.GetTelemetryDir(), base_dir),
'third_party', os.path.relpath(dependency_path, gsutil_base_dir))
zip_file.write(dependency_path, path_in_archive)
class FindDependenciesCommand(command_line.OptparseCommand):
"""Prints all dependencies"""
@classmethod
def AddCommandLineArgs(cls, parser, _):
parser.add_option(
'-v', '--verbose', action='count', dest='verbosity',
help='Increase verbosity level (repeat as needed).')
parser.add_option(
'-p', '--include-page-set-data', action='store_true', default=False,
help='Scan tests for page set data and include them.')
parser.add_option(
'-e', '--exclude', action='append', default=[],
help='Exclude paths matching EXCLUDE. Can be used multiple times.')
parser.add_option(
'-z', '--zip',
help='Store files in a zip archive at ZIP.')
@classmethod
def ProcessCommandLineArgs(cls, parser, args, _):
if args.verbosity >= 2:
logging.getLogger().setLevel(logging.DEBUG)
elif args.verbosity:
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.WARNING)
def Run(self, args):
target_paths = args.positional_args
dependencies = FindDependencies(target_paths, args)
if args.zip:
ZipDependencies(target_paths, dependencies, args)
print 'Zip archive written to %s.' % args.zip
else:
print '\n'.join(sorted(dependencies))
return 0
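# Example invocation (a sketch; the wrapper command name is a guess, but the flags
# are the ones defined in AddCommandLineArgs above):
#   find_dependencies --include-page-set-data --exclude '*.wpr' \
#       --zip telemetry_deps.zip path/to/benchmark_runner_script.py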
|
guorendong/iridium-browser-ubuntu
|
tools/telemetry/telemetry/util/find_dependencies.py
|
Python
|
bsd-3-clause
| 9,310 | 0.009774 |
from django.conf import settings
from django.core.exceptions import ValidationError
from django.forms import ModelForm
from django.forms.widgets import HiddenInput, TextInput, Select, CheckboxInput
from alert.userHandling.models import Alert
class CreateAlertForm(ModelForm):
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(CreateAlertForm, self).__init__(*args, **kwargs)
def clean_rate(self):
rate = self.cleaned_data['rate']
not_donated_enough = self.user.profile.total_donated_last_year < \
settings.MIN_DONATION['rt_alerts']
if rate == 'rt' and not_donated_enough:
# Somebody is trying to hack past the JS/HTML block on the front
# end. Don't let them create the alert until they've donated.
raise ValidationError(
u'You must donate more than $10 per year to create Real Time '
u'alerts.'
)
else:
return rate
class Meta:
model = Alert
fields = (
'name',
'query',
'rate',
'always_send_email',
)
widgets = {
'query': HiddenInput(
attrs={
'tabindex': '250'
}
),
'name': TextInput(
attrs={
'class': 'form-control',
'tabindex': '251'
}
),
'rate': Select(
attrs={
'class': 'form-control',
'tabindex': '252',
}
),
'always_send_email': CheckboxInput(
attrs={
'tabindex': '253',
}
),
}
|
shashi792/courtlistener
|
alert/alerts/forms.py
|
Python
|
agpl-3.0
| 1,815 | 0 |
# coding=utf-8
#
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import datetime
from requests.compat import urlencode, urljoin
from sickbeard import classes, logger, tvcache
from sickrage.helper.exceptions import AuthException
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
try:
import json
except ImportError:
import simplejson as json
class HDBitsProvider(TorrentProvider):
def __init__(self):
TorrentProvider.__init__(self, "HDBits")
self.username = None
self.passkey = None
self.cache = HDBitsCache(self, min_time=15) # only poll HDBits every 15 minutes max
self.url = 'https://hdbits.org'
self.urls = {
'search': urljoin(self.url, '/api/torrents'),
'rss': urljoin(self.url, '/api/torrents'),
'download': urljoin(self.url, '/download.php')
}
def _check_auth(self):
if not self.username or not self.passkey:
raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
return True
def _checkAuthFromData(self, parsedJSON):
if 'status' in parsedJSON and 'message' in parsedJSON and parsedJSON.get('status') == 5:
logger.log("Invalid username or password. Check your settings", logger.WARNING)
return True
def _get_season_search_strings(self, ep_obj):
season_search_string = [self._make_post_data_JSON(show=ep_obj.show, season=ep_obj)]
return season_search_string
def _get_episode_search_strings(self, ep_obj, add_string=''):
episode_search_string = [self._make_post_data_JSON(show=ep_obj.show, episode=ep_obj)]
return episode_search_string
def _get_title_and_url(self, item):
title = item.get('name', '').replace(' ', '.')
url = self.urls['download'] + '?' + urlencode({'id': item['id'], 'passkey': self.passkey})
return title, url
def search(self, search_params, age=0, ep_obj=None):
# FIXME
results = []
logger.log("Search string: {0}".format
(search_params.decode('utf-8')), logger.DEBUG)
self._check_auth()
parsedJSON = self.get_url(self.urls['search'], post_data=search_params, returns='json')
if not parsedJSON:
return []
if self._checkAuthFromData(parsedJSON):
if parsedJSON and 'data' in parsedJSON:
items = parsedJSON['data']
else:
logger.log("Resulting JSON from provider isn't correct, not parsing it", logger.ERROR)
items = []
for item in items:
results.append(item)
# FIXME SORTING
return results
def find_propers(self, search_date=None):
results = []
search_terms = [' proper ', ' repack ']
for term in search_terms:
for item in self.search(self._make_post_data_JSON(search_term=term)):
if item['utadded']:
try:
result_date = datetime.datetime.fromtimestamp(int(item['utadded']))
except Exception:
result_date = None
if result_date and (not search_date or result_date > search_date):
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, result_date, self.show))
return results
def _make_post_data_JSON(self, show=None, episode=None, season=None, search_term=None):
post_data = {
'username': self.username,
'passkey': self.passkey,
'category': [2],
# TV Category
}
if episode:
if show.air_by_date:
post_data['tvdb'] = {
'id': show.indexerid,
'episode': str(episode.airdate).replace('-', '|')
}
elif show.sports:
post_data['tvdb'] = {
'id': show.indexerid,
'episode': episode.airdate.strftime('%b')
}
elif show.anime:
post_data['tvdb'] = {
'id': show.indexerid,
'episode': "{0:d}".format(int(episode.scene_absolute_number))
}
else:
post_data['tvdb'] = {
'id': show.indexerid,
'season': episode.scene_season,
'episode': episode.scene_episode
}
if season:
if show.air_by_date or show.sports:
post_data['tvdb'] = {
'id': show.indexerid,
'season': str(season.airdate)[:7],
}
elif show.anime:
post_data['tvdb'] = {
'id': show.indexerid,
'season': "{0:d}".format(season.scene_absolute_number),
}
else:
post_data['tvdb'] = {
'id': show.indexerid,
'season': season.scene_season,
}
if search_term:
post_data['search'] = search_term
return json.dumps(post_data)
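    # Illustrative payload shape only (the values below are made up, not taken
    # from HDBits documentation): for a regular episode this returns JSON like
    #   {"username": "user", "passkey": "key", "category": [2],
    #    "tvdb": {"id": 12345, "season": 1, "episode": 2}}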
class HDBitsCache(tvcache.TVCache):
def _get_rss_data(self):
self.search_params = None # HDBits cache does not use search_params so set it to None
results = []
try:
parsedJSON = self.provider.get_url(self.provider.urls['rss'], post_data=self.provider._make_post_data_JSON(), returns='json')
if self.provider._checkAuthFromData(parsedJSON):
results = parsedJSON['data']
except Exception:
pass
return {'entries': results}
provider = HDBitsProvider()
|
ukanga/SickRage
|
sickbeard/providers/hdbits.py
|
Python
|
gpl-3.0
| 6,545 | 0.00275 |
# Copyright (c) 2015 FUJITSU LIMITED
# Copyright (c) 2012 EMC Corporation.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
FibreChannel Cinder Volume driver for Fujitsu ETERNUS DX S3 series.
"""
from oslo_log import log as logging
import six
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.fujitsu import eternus_dx_common
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
@interface.volumedriver
class FJDXFCDriver(driver.FibreChannelDriver):
"""FC Cinder Volume Driver for Fujitsu ETERNUS DX S3 series."""
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Fujitsu_ETERNUS_CI"
VERSION = eternus_dx_common.FJDXCommon.VERSION
def __init__(self, *args, **kwargs):
super(FJDXFCDriver, self).__init__(*args, **kwargs)
self.common = eternus_dx_common.FJDXCommon(
'fc',
configuration=self.configuration)
self.VERSION = self.common.VERSION
def check_for_setup_error(self):
pass
def create_volume(self, volume):
"""Create volume."""
LOG.debug('create_volume, '
'volume id: %s, enter method.', volume['id'])
location, metadata = self.common.create_volume(volume)
v_metadata = self._get_metadata(volume)
metadata.update(v_metadata)
LOG.debug('create_volume, info: %s, exit method.', metadata)
return {'provider_location': six.text_type(location),
'metadata': metadata}
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
LOG.debug('create_volume_from_snapshot, '
'volume id: %(vid)s, snap id: %(sid)s, enter method.',
{'vid': volume['id'], 'sid': snapshot['id']})
location, metadata = (
self.common.create_volume_from_snapshot(volume, snapshot))
v_metadata = self._get_metadata(volume)
metadata.update(v_metadata)
LOG.debug('create_volume_from_snapshot, '
'info: %s, exit method.', metadata)
return {'provider_location': six.text_type(location),
'metadata': metadata}
def create_cloned_volume(self, volume, src_vref):
"""Create cloned volume."""
LOG.debug('create_cloned_volume, '
'target volume id: %(tid)s, '
'source volume id: %(sid)s, enter method.',
{'tid': volume['id'], 'sid': src_vref['id']})
location, metadata = (
self.common.create_cloned_volume(volume, src_vref))
v_metadata = self._get_metadata(volume)
metadata.update(v_metadata)
LOG.debug('create_cloned_volume, '
'info: %s, exit method.', metadata)
return {'provider_location': six.text_type(location),
'metadata': metadata}
def delete_volume(self, volume):
"""Delete volume on ETERNUS."""
LOG.debug('delete_volume, '
'volume id: %s, enter method.', volume['id'])
vol_exist = self.common.delete_volume(volume)
LOG.debug('delete_volume, '
'delete: %s, exit method.', vol_exist)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
LOG.debug('create_snapshot, '
'snap id: %(sid)s, volume id: %(vid)s, enter method.',
{'sid': snapshot['id'], 'vid': snapshot['volume_id']})
location, metadata = self.common.create_snapshot(snapshot)
LOG.debug('create_snapshot, info: %s, exit method.', metadata)
return {'provider_location': six.text_type(location)}
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
LOG.debug('delete_snapshot, '
'snap id: %(sid)s, volume id: %(vid)s, enter method.',
{'sid': snapshot['id'], 'vid': snapshot['volume_id']})
vol_exist = self.common.delete_snapshot(snapshot)
LOG.debug('delete_snapshot, '
'delete: %s, exit method.', vol_exist)
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
return
def create_export(self, context, volume, connector):
"""Driver entry point to get the export info for a new volume."""
return
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume."""
return
@fczm_utils.AddFCZone
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info."""
LOG.debug('initialize_connection, volume id: %(vid)s, '
'wwpns: %(wwpns)s, enter method.',
{'vid': volume['id'], 'wwpns': connector['wwpns']})
info = self.common.initialize_connection(volume, connector)
data = info['data']
init_tgt_map = (
self.common.build_fc_init_tgt_map(connector, data['target_wwn']))
data['initiator_target_map'] = init_tgt_map
info['data'] = data
LOG.debug('initialize_connection, '
'info: %s, exit method.', info)
return info
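    # Note (an assumption based on the usual Cinder FC zone-manager convention,
    # not on this driver's own docs): initiator_target_map is a dict keyed by
    # initiator WWPN mapping to a list of target WWPNs, e.g.
    #   {'10000090fa534cd0': ['500000e0d4a3b200', '500000e0d4a3b210']}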
@fczm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
LOG.debug('terminate_connection, volume id: %(vid)s, '
'wwpns: %(wwpns)s, enter method.',
{'vid': volume['id'], 'wwpns': connector['wwpns']})
map_exist = self.common.terminate_connection(volume, connector)
attached = self.common.check_attached_volume_in_zone(connector)
info = {'driver_volume_type': 'fibre_channel',
'data': {}}
if not attached:
# No more volumes attached to the host
init_tgt_map = self.common.build_fc_init_tgt_map(connector)
info['data'] = {'initiator_target_map': init_tgt_map}
LOG.debug('terminate_connection, unmap: %(unmap)s, '
'connection info: %(info)s, exit method',
{'unmap': map_exist, 'info': info})
return info
def get_volume_stats(self, refresh=False):
"""Get volume stats."""
LOG.debug('get_volume_stats, refresh: %s, enter method.', refresh)
pool_name = None
if refresh is True:
data, pool_name = self.common.update_volume_stats()
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or 'FJDXFCDriver'
data['storage_protocol'] = 'FC'
self._stats = data
LOG.debug('get_volume_stats, '
'pool name: %s, exit method.', pool_name)
return self._stats
def extend_volume(self, volume, new_size):
"""Extend volume."""
LOG.debug('extend_volume, '
'volume id: %s, enter method.', volume['id'])
used_pool_name = self.common.extend_volume(volume, new_size)
LOG.debug('extend_volume, '
'used pool name: %s, exit method.', used_pool_name)
def _get_metadata(self, volume):
v_metadata = volume.get('volume_metadata')
if v_metadata:
ret = {data['key']: data['value'] for data in v_metadata}
else:
ret = volume.get('metadata', {})
return ret
|
Hybrid-Cloud/cinder
|
cinder/volume/drivers/fujitsu/eternus_dx_fc.py
|
Python
|
apache-2.0
| 8,064 | 0 |
"""
4
2 belas
seratus 4 puluh 0
9 ribu seratus 2 puluh 1
2 puluh 1 ribu 3 puluh 0
9 ratus 5 ribu 0
8 puluh 2 juta 8 ratus 8 belas ribu seratus 8 puluh 8
3 ratus 1 juta 4 puluh 8 ribu 5 ratus 8 puluh 8
"""
def kata(n):
angka = range(11)
temp = ""
if n < 12:
temp += str(angka[n])
elif n < 20:
temp += str(n-10)+" belas"
elif n < 100:
temp += str(kata(n/10)) + " puluh "+ str(kata(n%10))
elif n < 200:
temp += "seratus "+ str(kata(n-100))
elif n < 1000:
temp += str(kata(n/100))+ " ratus " + str(kata(n%100))
elif n < 2000:
temp += "seribu "+str(kata(n-1000))
elif n < 1000000:
temp += str(kata(n/1000))+ " ribu "+ str(kata(n%1000))
elif n < 1000000000:
temp += str(kata(n/1000000)) +" juta " + str(kata(n%1000000))
return temp
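# Worked example of the recursion (matches the expected output listed above):
#   kata(9121) -> kata(9) + " ribu " + kata(121)
#              -> "9 ribu " + ("seratus " + kata(21))
#              -> "9 ribu seratus 2 puluh 1"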
print kata(4)
print kata(12)
print kata(140)
print kata(9121)
print kata(21030)
print kata(905000)
print kata(82818188)
print kata(301048588)
|
agusmakmun/Some-Examples-of-Simple-Python-Script
|
list/katakan.py
|
Python
|
agpl-3.0
| 984 | 0.01626 |
# -*- mode: python; coding: utf-8; -*-
#
# This file is part of Invenio.
# Copyright (C) 2010, 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Usage: bibsort [options]
BibSort tool
Options:
-h, --help show this help message and exit
-l, --load-config Loads the configuration from bibsort.cfg into the
database
-d, --dump-config Outputs a database dump in form of a config file
-p, --print-sorting-methods
Prints the available sorting methods
-R, --rebalance Runs the sorting methods given in '--metods'and
rebalances all the buckets.
If no method is specified, the rebalance will be done
for all the methods in the config file.
-S, --update-sorting Runs the sorting methods given in '--methods' for the
recids given in '--id'.
If no method is specified, the update will be done for
all the methods in the config file.
If no recids are specified, the update will be done
for all the records that have been
modified/inserted from the last run of the sorting.
If you want to run the sorting for all records, you
should use the '-R' option
-M, --methods=METHODS Specify the sorting methods for which the
update_sorting or rebalancing will run
(ex: --methods=method1,method2,method3).
-i, --id=RECIDS Specify the records for which the update_sorting will
run (ex: --id=1,2-56,72)
"""
__revision__ = "$Id$"
import sys
import optparse
import time
import ConfigParser
from invenio.dateutils import strftime
from invenio.dbquery import run_sql, Error
from invenio.config import CFG_ETCDIR
from invenio.bibsort_engine import run_bibsort_update, \
run_bibsort_rebalance
from invenio.bibtask import task_init, write_message, \
task_set_option, task_get_option
def load_configuration():
"""Loads the configuration for the bibsort.cfg file into the database"""
config_file = CFG_ETCDIR + "/bibsort/bibsort.cfg"
write_message('Reading config data from: %s' %config_file)
config = ConfigParser.ConfigParser()
try:
config.readfp(open(config_file))
except StandardError, err:
write_message("Cannot find configuration file: %s" \
%config_file, stream=sys.stderr)
return False
to_insert = []
for section in config.sections():
try:
name = config.get(section, "name")
definition = config.get(section, "definition")
washer = config.get(section, "washer")
except (ConfigParser.NoOptionError, StandardError), err:
write_message("For each sort_field you need to define at least \
the name, the washer and the definition. \
[error: %s]" %err, stream=sys.stderr)
return False
to_insert.append((name, definition, washer))
# all the values were correctly read from the config file
run_sql("TRUNCATE TABLE bsrMETHOD")
write_message('Old data has been deleted from bsrMETHOD table', verbose=5)
for row in to_insert:
run_sql("INSERT INTO bsrMETHOD(name, definition, washer) \
VALUES (%s, %s, %s)", (row[0], row[1], row[2]))
write_message('Method %s has been inserted into bsrMETHOD table' \
%row[0], verbose=5)
return True
def dump_configuration():
"""Creates a dump of the data existing in the bibsort tables"""
try:
results = run_sql("SELECT id, name, definition, washer FROM bsrMETHOD")
except Error, err:
write_message("The error: [%s] occured while trying to get \
the bibsort data from the database." %err, sys.stderr)
return False
write_message('The bibsort data has been read from the database.', verbose=5)
if results:
config = ConfigParser.ConfigParser()
for item in results:
section = "sort_field_%s" % item[0]
config.add_section(section)
config.set(section, "name", item[1])
config.set(section, "definition", item[2])
config.set(section, "washer", item[3])
output_file_name = CFG_ETCDIR + '/bibsort/bibsort_db_dump_%s.cfg' % \
strftime("%d%m%Y%H%M%S", time.localtime())
write_message('Opening the output file %s' %output_file_name)
try:
output_file = open(output_file_name, 'w')
config.write(output_file)
output_file.close()
except Error, err:
write_message('Can not operate on the configuration file %s [%s].' \
%(output_file_name, err), stream=sys.stderr)
return False
write_message('Configuration data dumped to file.')
else:
write_message("The bsrMETHOD table does not contain any data.")
return True
def update_sorting(methods, recids):
"""Runs the updating of the sorting tables for methods and recids
Recids is a list of integer numbers(record ids)
but can also contain intervals"""
method_list = []
if methods:
method_list = methods.strip().split(',')
recid_list = []
if recids:
cli_recid_list = recids.strip().split(',')
for recid in cli_recid_list:
if recid.find('-') > 0:
rec_range = recid.split('-')
try:
recid_min = int(rec_range[0])
recid_max = int(rec_range[1])
for rec in range(recid_min, recid_max + 1):
recid_list.append(rec)
except Error, err:
write_message("Error: [%s] occured while trying \
to parse the recids argument." %err, sys.stderr)
return False
else:
recid_list.append(int(recid))
return run_bibsort_update(recid_list, method_list)
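# Illustration (hypothetical arguments): update_sorting('title,author', '1,3-5,9')
# expands the recids to [1, 3, 4, 5, 9] and calls
# run_bibsort_update([1, 3, 4, 5, 9], ['title', 'author']).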
def rebalance(methods):
"""Runs the complete sorting and rebalancing of buckets for
the methods specified in 'methods' argument"""
method_list = []
if methods:
method_list = methods.strip().split(',')
return run_bibsort_rebalance(method_list)
def print_sorting_methods():
"""Outputs the available sorting methods from the DB"""
try:
results = run_sql("SELECT name FROM bsrMETHOD")
except Error, err:
write_message("The error: [%s] occured while trying to \
get the bibsort data from the database." %err)
return False
if results:
methods = []
for result in results:
methods.append(result[0])
if len(methods) > 0:
write_message('Methods: %s' %methods)
else:
write_message("There are no sorting methods configured.")
return True
# main with option parser
# to be used in case the connection with bibsched is not wanted
def main_op():
"""Runs program and handles command line options"""
option_parser = optparse.OptionParser(description="""BibSort tool""")
option_parser.add_option('-l', '--load-config', action='store_true', \
help='Loads the configuration from bibsort.conf into the database')
option_parser.add_option('-d', '--dump-config', action='store_true', \
help='Outputs a database dump in form of a config file')
option_parser.add_option('-p', '--print-sorting-methods',
action='store_true', \
help="Prints the available sorting methods")
option_parser.add_option('-R', '--rebalance', action='store_true', \
help="Runs the sorting methods given in '--methods and rebalances all the buckets. If no method is specified, the rebalance will be done for all the methods in the config file.")
option_parser.add_option('-S', '--update-sorting', action='store_true', \
help="Runs the sorting methods given in '--methods' for the recids given in '--id'. If no method is specified, the update will be done for all the methods in the config file. If no recids are specified, the update will be done for all the records that have been modified/inserted from the last run of the sorting. If you want to run the sorting for all records, you should use the '-R' option")
option_parser.add_option('--methods', action='store', dest='methods', \
metavar='METHODS', \
help="Specify the sorting methods for which the update_sorting or rebalancing will run (ex: --methods=method1,method2,method3).")
option_parser.add_option('--id', action='store', dest='recids', \
metavar='RECIDS', \
help="Specify the records for which the update_sorting will run (ex: --id=1,2-56,72) ")
options, dummy = option_parser.parse_args()
if options.load_config and options.dump_config:
option_parser.error('.. conflicting options, please add only one')
elif options.rebalance and options.update_sorting:
option_parser.error('..conflicting options, please add only one')
elif (options.load_config or options.dump_config) and \
(options.rebalance or options.update_sorting):
option_parser.error('..conflicting options, please add only one')
if options.load_config:
load_configuration()
elif options.dump_config:
dump_configuration()
elif options.update_sorting:
update_sorting(options.methods, options.recids)
elif options.rebalance:
rebalance(options.methods)
elif options.print_sorting_methods:
print_sorting_methods()
else:
option_parser.print_help()
def main():
"""Main function that constructs the bibtask"""
task_init(authorization_action='runbibsort',
authorization_msg="BibSort Task Submission",
description = "",
help_specific_usage="""
Specific options:
-l, --load-config Loads the configuration from bibsort.conf into the
database
-d, --dump-config Outputs a database dump in form of a config file
-p, --print-sorting-methods
Prints the available sorting methods
-R, --rebalance Runs the sorting methods given in '--methods'and
rebalances all the buckets. If no method is
specified, the rebalance will be done for all
the methods in the config file.
-S, --update-sorting Runs the sorting methods given in '--methods' for the
recids given in '--id'. If no method is
specified, the update will be done for all the
methods in the config file. If no recids are
specified, the update will be done for all the records
that have been modified/inserted from the last
run of the sorting. If you want to run the
                              sorting for all records, you should use the '-R'
option
-M, --methods=METHODS Specify the sorting methods for which the
update_sorting or rebalancing will run (ex:
--methods=method1,method2,method3).
-i, --id=RECIDS Specify the records for which the update_sorting will
run (ex: --id=1,2-56,72)
""",
version=__revision__,
specific_params=("ldpRSM:i:",
["load-config",
"dump-config",
"print-sorting-methods",
"rebalance",
"update-sorting",
"methods=",
"id="]),
task_submit_elaborate_specific_parameter_fnc=task_submit_elaborate_specific_parameter,
task_run_fnc=task_run_core)
def task_submit_elaborate_specific_parameter(key, value, opts, dummy_args):
"""Given the string key it checks it's meaning, eventually using the
value. Usually it fills some key in the options dict.
It must return True if it has elaborated the key, False, if it doesn't
know that key."""
#Load configuration
if key in ('-l', '--load-config'):
task_set_option('cmd', 'load')
        if ('-d', '') in opts or ('--dump-config', '') in opts:
raise StandardError(".. conflicting options, please add only one")
#Dump configuration
    elif key in ('-d', '--dump-config'):
task_set_option('cmd', 'dump')
#Print sorting methods
elif key in ('-p', '--print-sorting-methods'):
task_set_option('cmd', 'print')
#Rebalance
elif key in ('-R', '--rebalance'):
task_set_option('cmd', 'rebalance')
if ('-S', '') in opts or ('--update-sorting', '') in opts:
raise StandardError(".. conflicting options, please add only one")
#Update sorting
elif key in ('-S', '--update-sorting'):
task_set_option('cmd', 'sort')
#Define methods
elif key in ('-M', '--methods'):
task_set_option('methods', value)
#Define records
elif key in ('-i', '--id'):
task_set_option('recids', value)
else:
return False
return True
def task_run_core():
"""Reimplement to add the body of the task"""
write_message("bibsort starting..")
cmd = task_get_option('cmd')
methods = task_get_option('methods')
recids = task_get_option('recids')
write_message("Task parameters: command=%s ; methods=%s ; recids=%s" \
% (cmd, methods, recids), verbose=2)
executed_correctly = False
# if no command is defined, run sorting
if not cmd:
cmd = 'sort'
if cmd == 'load':
write_message('Starting loading the configuration \
from the cfg file to the db.', verbose=5)
executed_correctly = load_configuration()
if executed_correctly:
write_message('Loading completed.', verbose=5)
elif cmd == 'dump':
write_message('Starting dumping the configuration \
from the db into the cfg file.', verbose=5)
executed_correctly = dump_configuration()
if executed_correctly:
write_message('Dumping completed.', verbose=5)
elif cmd == 'print':
executed_correctly = print_sorting_methods()
elif cmd == 'sort':
write_message('Starting sorting.', verbose=5)
executed_correctly = update_sorting(methods, recids)
if executed_correctly:
write_message('Sorting completed.', verbose=5)
elif cmd == 'rebalance':
write_message('Starting rebalancing the sorting buckets.', verbose=5)
executed_correctly = rebalance(methods)
if executed_correctly:
write_message('Rebalancing completed.', verbose=5)
else:
write_message("This action is not possible. \
See the --help for available actions.", sys.stderr)
write_message('bibsort exiting..')
return executed_correctly
if __name__ == '__main__':
main()
|
CERNDocumentServer/invenio
|
modules/bibsort/lib/bibsort_daemon.py
|
Python
|
gpl-2.0
| 15,964 | 0.003257 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-31 17:48
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('catalog', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('checkout', '0002_auto_20160724_1533'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.IntegerField(blank=True, choices=[(0, 'Aguardando Pagamento'), (1, 'Concluída'), (2, 'Cancelada')], default=0, verbose_name='Situação')),
('payment_option', models.CharField(choices=[('pagseguro', 'PagSeguro'), ('paypal', 'Paypal')], max_length=20, verbose_name='Opção de Pagamento')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Criado em')),
('modified', models.DateTimeField(auto_now=True, verbose_name='Modificado em')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Usuário')),
],
options={
'verbose_name_plural': 'Pedidos',
'verbose_name': 'Pedido',
},
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.PositiveIntegerField(default=1, verbose_name='Quantidade')),
('price', models.DecimalField(decimal_places=2, max_digits=8, verbose_name='Preço')),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='checkout.Order', verbose_name='Pedido')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='catalog.Product', verbose_name='Produto')),
],
options={
'verbose_name_plural': 'Itens dos pedidos',
'verbose_name': 'Item do pedido',
},
),
]
|
gileno/djangoecommerce
|
checkout/migrations/0003_order_orderitem.py
|
Python
|
cc0-1.0
| 2,362 | 0.004671 |
#
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import picosat
import pysmt.logics
from pysmt import typing as types
from pysmt.solvers.solver import Solver
from pysmt.solvers.eager import EagerModel
from pysmt.rewritings import CNFizer
from pysmt.decorators import clear_pending_pop, catch_conversion_error
from six.moves import xrange
from six import iteritems
class PicosatSolver(Solver):
"""PicoSAT solver"""
LOGICS = [ pysmt.logics.QF_BOOL ]
def __init__(self, environment, logic, user_options):
Solver.__init__(self,
environment=environment,
logic=logic,
user_options=user_options)
self.mgr = environment.formula_manager
self.pico = picosat.picosat_init()
self.converter = None
self.cnfizer = CNFizer(environment=environment)
self.latest_model = None
self._var_ids = {}
def _get_var_id(self, symbol):
if not symbol.is_symbol(types.BOOL):
raise NotImplementedError("No theory terms are supported in PicoSAT")
if symbol in self._var_ids:
return self._var_ids[symbol]
else:
vid = picosat.picosat_inc_max_var(self.pico)
self._var_ids[symbol] = vid
return vid
@clear_pending_pop
def reset_assertions(self):
picosat.picosat_reset(self.pico)
self.pico = picosat.picosat_init()
@clear_pending_pop
def declare_variable(self, var):
# no need to declare variables
pass
def _get_pico_lit(self, lit):
mult = 1
var = lit
if lit.is_not():
mult = -1
var = lit.arg(0)
vid = self._get_var_id(var)
return vid * mult
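    # Example (illustrative): if Symbol('x') was assigned picosat id 3, then
    # _get_pico_lit(x) == 3 and _get_pico_lit(Not(x)) == -3, i.e. the signed
    # integer literal form that picosat_add/picosat_assume expect.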
@clear_pending_pop
@catch_conversion_error
def add_assertion(self, formula, named=None):
# First, we get rid of True/False constants
formula = formula.simplify()
if formula.is_false():
picosat.picosat_add(self.pico, 0)
elif not formula.is_true():
cnf = self.cnfizer.convert(formula)
self._add_cnf_assertion(cnf)
def _add_cnf_assertion(self, cnf):
for clause in cnf:
for lit in clause:
v = self._get_pico_lit(lit)
picosat.picosat_add(self.pico, v)
picosat.picosat_add(self.pico, 0)
@clear_pending_pop
@catch_conversion_error
def solve(self, assumptions=None):
if assumptions is not None:
cnf = []
for a in assumptions:
cnf += self.cnfizer.convert(a)
missing = []
for clause in cnf:
if len(clause) == 1:
v = self._get_pico_lit(next(iter(clause)))
picosat.picosat_assume(self.pico, v)
else:
missing.append(clause)
if len(missing) > 0:
self.push()
self._add_cnf_assertion(missing)
self.pending_pop = True
res = picosat.picosat_sat(self.pico, -1)
if res == picosat.PICOSAT_SATISFIABLE:
self.latest_model = self.get_model()
return True
else:
self.latest_model = None
return False
def get_value(self, item):
if self.latest_model is None:
self.get_model()
return self.latest_model.get_value(item)
def get_model(self):
assignment = {}
for var, vid in iteritems(self._var_ids):
v = picosat.picosat_deref(self.pico, vid)
if v == 0:
assert False
value = self.mgr.Bool(v == 1)
assignment[var] = value
return EagerModel(assignment=assignment,
environment=self.environment)
@clear_pending_pop
def push(self, levels=1):
for _ in xrange(levels):
picosat.picosat_push(self.pico)
@clear_pending_pop
def pop(self, levels=1):
for _ in xrange(levels):
picosat.picosat_pop(self.pico)
def exit(self):
if not self._destroyed:
self._destroyed = True
picosat.picosat_reset(self.pico)
|
idkwim/pysmt
|
pysmt/solvers/pico.py
|
Python
|
apache-2.0
| 4,835 | 0.001861 |
# Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import glob
import os.path
import re
import signal
import sys
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.utils.fileutil as fileutil
from azurelinuxagent.common import version
from azurelinuxagent.common.exception import ProtocolError
from azurelinuxagent.common.osutil import get_osutil
from azurelinuxagent.common.persist_firewall_rules import PersistFirewallRulesHandler
from azurelinuxagent.common.protocol.util import get_protocol_util
from azurelinuxagent.ga.exthandlers import HANDLER_COMPLETE_NAME_PATTERN
def read_input(message):
if sys.version_info[0] >= 3:
return input(message)
else:
# This is not defined in python3, and the linter will thus
# throw an undefined-variable<E0602> error on this line.
# Suppress it here.
return raw_input(message) # pylint: disable=E0602
class DeprovisionAction(object):
def __init__(self, func, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
self.func = func
self.args = args
self.kwargs = kwargs
def invoke(self):
self.func(*self.args, **self.kwargs)
class DeprovisionHandler(object):
def __init__(self):
self.osutil = get_osutil()
self.protocol_util = get_protocol_util()
self.actions_running = False
signal.signal(signal.SIGINT, self.handle_interrupt_signal)
def del_root_password(self, warnings, actions):
warnings.append("WARNING! root password will be disabled. "
"You will not be able to login as root.")
actions.append(DeprovisionAction(self.osutil.del_root_password))
def del_user(self, warnings, actions):
try:
ovfenv = self.protocol_util.get_ovf_env()
except ProtocolError:
warnings.append("WARNING! ovf-env.xml is not found.")
warnings.append("WARNING! Skip delete user.")
return
username = ovfenv.username
warnings.append(("WARNING! {0} account and entire home directory "
"will be deleted.").format(username))
actions.append(DeprovisionAction(self.osutil.del_account,
[username]))
def regen_ssh_host_key(self, warnings, actions):
warnings.append("WARNING! All SSH host key pairs will be deleted.")
actions.append(DeprovisionAction(fileutil.rm_files,
[conf.get_ssh_key_glob()]))
def stop_agent_service(self, warnings, actions):
warnings.append("WARNING! The waagent service will be stopped.")
actions.append(DeprovisionAction(self.osutil.stop_agent_service))
def del_dirs(self, warnings, actions): # pylint: disable=W0613
dirs = [conf.get_lib_dir(), conf.get_ext_log_dir()]
actions.append(DeprovisionAction(fileutil.rm_dirs, dirs))
def del_files(self, warnings, actions): # pylint: disable=W0613
files = ['/root/.bash_history', conf.get_agent_log_file()]
actions.append(DeprovisionAction(fileutil.rm_files, files))
# For OpenBSD
actions.append(DeprovisionAction(fileutil.rm_files,
["/etc/random.seed",
"/var/db/host.random",
"/etc/isakmpd/local.pub",
"/etc/isakmpd/private/local.key",
"/etc/iked/private/local.key",
"/etc/iked/local.pub"]))
def del_resolv(self, warnings, actions):
warnings.append("WARNING! /etc/resolv.conf will be deleted.")
files_to_del = ["/etc/resolv.conf"]
actions.append(DeprovisionAction(fileutil.rm_files, files_to_del))
def del_dhcp_lease(self, warnings, actions):
warnings.append("WARNING! Cached DHCP leases will be deleted.")
dirs_to_del = ["/var/lib/dhclient", "/var/lib/dhcpcd", "/var/lib/dhcp"]
actions.append(DeprovisionAction(fileutil.rm_dirs, dirs_to_del))
# For FreeBSD and OpenBSD
actions.append(DeprovisionAction(fileutil.rm_files,
["/var/db/dhclient.leases.*"]))
# For FreeBSD, NM controlled
actions.append(DeprovisionAction(fileutil.rm_files,
["/var/lib/NetworkManager/dhclient-*.lease"]))
def del_ext_handler_files(self, warnings, actions): # pylint: disable=W0613
ext_dirs = [d for d in os.listdir(conf.get_lib_dir())
if os.path.isdir(os.path.join(conf.get_lib_dir(), d))
and re.match(HANDLER_COMPLETE_NAME_PATTERN, d) is not None
and not version.is_agent_path(d)]
for ext_dir in ext_dirs:
ext_base = os.path.join(conf.get_lib_dir(), ext_dir)
files = glob.glob(os.path.join(ext_base, 'status', '*.status'))
files += glob.glob(os.path.join(ext_base, 'config', '*.settings'))
files += glob.glob(os.path.join(ext_base, 'config', 'HandlerStatus'))
files += glob.glob(os.path.join(ext_base, 'mrseq'))
if len(files) > 0:
actions.append(DeprovisionAction(fileutil.rm_files, files))
def del_lib_dir_files(self, warnings, actions): # pylint: disable=W0613
known_files = [
'HostingEnvironmentConfig.xml',
'Incarnation',
'partition',
'Protocol',
'SharedConfig.xml',
'WireServerEndpoint'
]
known_files_glob = [
'Extensions.*.xml',
'ExtensionsConfig.*.xml',
'GoalState.*.xml'
]
lib_dir = conf.get_lib_dir()
files = [f for f in \
[os.path.join(lib_dir, kf) for kf in known_files] \
if os.path.isfile(f)]
for p in known_files_glob:
files += glob.glob(os.path.join(lib_dir, p))
if len(files) > 0:
actions.append(DeprovisionAction(fileutil.rm_files, files))
def reset_hostname(self, warnings, actions): # pylint: disable=W0613
localhost = ["localhost.localdomain"]
actions.append(DeprovisionAction(self.osutil.set_hostname,
localhost))
actions.append(DeprovisionAction(self.osutil.set_dhcp_hostname,
localhost))
def setup(self, deluser):
warnings = []
actions = []
self.stop_agent_service(warnings, actions)
if conf.get_regenerate_ssh_host_key():
self.regen_ssh_host_key(warnings, actions)
self.del_dhcp_lease(warnings, actions)
self.reset_hostname(warnings, actions)
if conf.get_delete_root_password():
self.del_root_password(warnings, actions)
self.del_dirs(warnings, actions)
self.del_files(warnings, actions)
self.del_resolv(warnings, actions)
if deluser:
self.del_user(warnings, actions)
self.del_persist_firewall_rules(actions)
return warnings, actions
def setup_changed_unique_id(self):
warnings = []
actions = []
self.del_dhcp_lease(warnings, actions)
self.del_lib_dir_files(warnings, actions)
self.del_ext_handler_files(warnings, actions)
self.del_persist_firewall_rules(actions)
return warnings, actions
def run(self, force=False, deluser=False):
warnings, actions = self.setup(deluser)
self.do_warnings(warnings)
if self.do_confirmation(force=force):
self.do_actions(actions)
def run_changed_unique_id(self):
'''
Clean-up files and directories that may interfere when the VM unique
identifier has changed.
While users *should* manually deprovision a VM, the files removed by
this routine will help keep the agent from getting confused
(since incarnation and extension settings, among other items, will
no longer be monotonically increasing).
'''
warnings, actions = self.setup_changed_unique_id()
self.do_warnings(warnings)
self.do_actions(actions)
def do_actions(self, actions):
self.actions_running = True
for action in actions:
action.invoke()
self.actions_running = False
def do_confirmation(self, force=False):
if force:
return True
confirm = read_input("Do you want to proceed (y/n)")
return True if confirm.lower().startswith('y') else False
def do_warnings(self, warnings):
for warning in warnings:
print(warning)
def handle_interrupt_signal(self, signum, frame): # pylint: disable=W0613
if not self.actions_running:
print("Deprovision is interrupted.")
sys.exit(0)
print ('Deprovisioning may not be interrupted.')
return
@staticmethod
def del_persist_firewall_rules(actions):
agent_network_service_path = PersistFirewallRulesHandler.get_service_file_path()
actions.append(DeprovisionAction(fileutil.rm_files,
[agent_network_service_path, os.path.join(conf.get_lib_dir(),
PersistFirewallRulesHandler.BINARY_FILE_NAME)]))
|
Azure/WALinuxAgent
|
azurelinuxagent/pa/deprovision/default.py
|
Python
|
apache-2.0
| 10,146 | 0.001774 |
# author: Adrian Rosebrock
# website: http://www.pyimagesearch.com
# USAGE
# BE SURE TO INSTALL 'imutils' PRIOR TO EXECUTING THIS COMMAND
# python sorting_contours.py
# import the necessary packages
from imutils import contours
import imutils
import cv2
# load the shapes image clone it, convert it to grayscale, and
# detect edges in the image
image = cv2.imread("../demo_images/shapes.png")
orig = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edged = imutils.auto_canny(gray)
# find contours in the edge map
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# loop over the (unsorted) contours and label them
for (i, c) in enumerate(cnts):
orig = contours.label_contour(orig, c, i, color=(240, 0, 159))
# show the original image
cv2.imshow("Original", orig)
# loop over the sorting methods
for method in ("left-to-right", "right-to-left", "top-to-bottom", "bottom-to-top"):
# sort the contours
(cnts, boundingBoxes) = contours.sort_contours(cnts, method=method)
clone = image.copy()
# loop over the sorted contours and label them
for (i, c) in enumerate(cnts):
sortedImage = contours.label_contour(clone, c, i, color=(240, 0, 159))
# show the sorted contour image
cv2.imshow(method, sortedImage)
# wait for a keypress
cv2.waitKey(0)
|
jrosebr1/imutils
|
demos/sorting_contours.py
|
Python
|
mit
| 1,343 | 0.008935 |
'''
'''
import re
import http
import logging
import urllib
import urllib.request
from itertools import *
import collections.abc
from versa.driver import memory
from versa import I, VERSA_BASEIRI, ORIGIN, RELATIONSHIP, TARGET, ATTRIBUTES
from versa.reader import rdfalite
from versa.reader.rdfalite import RDF_NS, SCHEMAORG_NS
from versa import util as versautil
from bibframe import BFZ, BL
from bibframe.zextra import LL
from rdflib import URIRef, Literal
from rdflib import BNode
from amara3 import iri
from amara3.uxml import tree
from amara3.uxml import xmliter
from amara3.uxml.treeutil import *
from amara3.uxml import html5
RDFTYPE = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'
SCHEMAORG = 'http://schema.org/'
def load_rdfa_page(site, max_retries=1):
'''
Helper to load RDFa page as text, plus load a Versa model with the metadata
Returns a versa memory model and the raw site text, except in eror case where it returns None and the error
'''
retry_count = 0
while True:
model = memory.connection()
try:
with urllib.request.urlopen(site) as resourcefp:
sitetext = resourcefp.read()
rdfalite.toversa(sitetext, model, site)
break #Success, so break out of retry loop
except (urllib.error.HTTPError, urllib.error.URLError, http.client.RemoteDisconnected) as e:
retry_count += 1
if retry_count >= max_retries:
return None, e
return model, sitetext
async def rdfa_from_page(url, session=None, max_retries=1):
'''
Async helper to load RDFa page as text, plus load a Versa model with the metadata
Yields a versa memory model, the raw site text and HTTP response info, except in error case where it returns None and the exception
>>> from amara3.asynctools import go_async
>>> from librarylink.util import rdfa_from_page
>>> from versa import util as versautil
>>> url = "http://link.crlibrary.org/portal/Estamos-en-un-libro-por-Mo-Willems--traducido/ZAxkTVTDCxE/"
>>> model, sitetext, response = go_async(rdfa_from_page(url))
>>> next(versautil.lookup(model, 'http://link.crlibrary.org/resource/zXft1yv0T9k/', 'http://schema.org/name'))
'Libros y lectura -- Novela juvenil'
'''
retry_count = 0
while True:
model = memory.connection()
try:
if session == None:
import aiohttp
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
body = await response.read()
rdfalite.toversa(body, model, url)
return model, body, response
else:
async with session.get(url) as response:
body = await response.read()
rdfalite.toversa(body, model, url)
return model, body, response
except Exception as e:
#print(url, f'[EXCEPTION {e}], context: {context}')
retry_count += 1
if retry_count >= max_retries:
return None, e, None
#Legacy name
prep_site_model = load_rdfa_page
def rdf_from_site(site, rules=None):
'''
>>> from librarylink.util import rdf_from_site
>>> g = rdf_from_site('http://link.denverlibrary.org')
>>> s = g.serialize(format='json-ld', indent=2)
>>> with open('denverlibrary.ld.json', 'wb') as fp: fp.write(s)
>>> rules = {'ignore-predicates': ['http://bibfra.me/', 'http://library.link/'], 'rename-predicates': {'http://library.link/vocab/branchOf': 'http://schema.org/branch'}}
>>> g = rdf_from_site('http://link.denverlibrary.org', rules=rules)
>>> s = g.serialize(format='json-ld', indent=2)
>>> with open('denverlibrary.ld.json', 'wb') as fp: fp.write(s)
'''
from rdflib import ConjunctiveGraph, URIRef, Literal, RDF, RDFS
from versa.writer.rdf import mock_bnode, prep, RDF_TYPE
#Also requires: pip install rdflib-jsonld
rules = rules or {}
ignore_pred = rules.get('ignore-predicates', set())
rename_pred = rules.get('rename-predicates', {})
model, sitetext = load_rdfa_page(site)
if not model:
return None
g = ConjunctiveGraph()
#Hoover up everything with a type
for o, r, t, a in model.match():
for oldp, newp in rename_pred.items():
if r == oldp: r = newp
for igp in ignore_pred:
if r.startswith(igp):
break
else:
g.add(prep(o, r, t))
return g
def jsonize_site(site, rules=None):
'''
>>> from librarylink.util import jsonize_site
>>> obj = jsonize_site('http://link.denverlibrary.org')
>>> with open('denverlibrary.ld.json', 'w') as fp: json.dump(obj, fp, indent=2)
>>> rules = {'ignore-predicates': ['http://bibfra.me/', 'http://library.link/'], 'rename-predicates': {'http://library.link/vocab/branchOf': 'http://schema.org/branch'}}
>>> obj = jsonize_site('http://link.denverlibrary.org', rules=rules)
>>> with open('denverlibrary.ld.json', 'w') as fp: json.dump(obj, fp, indent=2)
'''
from versa.util import uniquify
from versa.writer import jsonld
rules = rules or {}
ignore_pred = rules.get('ignore-predicates', set())
rename_pred = rules.get('rename-predicates', {})
ignore_oftypes = rules.get('ignore-oftypes', [])
invert = rules.get('invert', {})
context = rules.get('context', {})
pre_model, _ = load_rdfa_page(site)
if not pre_model:
return None
uniquify(pre_model)
post_model = memory.connection()
for o, r, t, a in pre_model.match():
#print(o, r, t)
for oldp, newp in rename_pred:
if r == oldp: r = newp
for rpre, rpost in invert:
if r == rpre:
assert isinstance(t, I)
o, r, t = t, rpost, o
for igp in ignore_pred:
if r.startswith(igp):
break
else:
post_model.add(o, r, t, a)
obj = jsonld.bind(post_model, context=context, ignore_oftypes=ignore_oftypes)
return obj
def get_orgname(site, reuse=None):
'''
Given a site URL return the org's name
>>> from librarylink.util import all_sites, get_orgname
>>> org = next(s for s in all_sites() if 'denverlibrary' in s.host )
>>> get_orgname(org)
'Denver Public Library'
>>> get_orgname('http://link.denverlibrary.org/')
'Denver Public Library'
'''
if reuse:
model, sitetext = reuse
else:
model, sitetext = load_rdfa_page(site)
if not model:
return None
for o, r, t, a in model.match(None, RDF_NS + 'type', SCHEMAORG_NS + 'Organization'):
name = versautil.simple_lookup(model, o, SCHEMAORG_NS + 'name')
if name is not None: return name
#schema:Organization not reliable the way it's used in LLN
#orgentity = versautil.simple_lookup_byvalue(model, RDF_NS + 'type', SCHEMAORG_NS + 'LibrarySystem')
#orgentity = versautil.simple_lookup_byvalue(model, SCHEMAORG_NS + 'url', baseurl)
#print(orgentity)
#name = versautil.simple_lookup(model, orgentity, SCHEMAORG_NS + 'name')
#name = versautil.simple_lookup(model, baseurl + '#_default', BL + 'name')
#return name
NETWORK_HINTS = {
#e.g. from http://augusta.library.link/
#<link href="/static/liblink_ebsco/css/network.css" rel="stylesheet">
b'liblink_ebsco/css/network.css': 'ebsco',
#e.g. from http://msu.library.link/
#<link href="/static/liblink_iii/css/network.css" rel="stylesheet"/>
b'liblink_iii/css/network.css': 'iii',
#e.g. from http://link.houstonlibrary.org/
#<link href="/static/liblink_bcv/css/network.css" rel="stylesheet"/>
b'liblink_bcv/css/network.css': 'bcv',
#e.g. from http://link.library.gmu.edu/
#<link href="/static/liblink_atlas/css/network.css" rel="stylesheet"/>
b'liblink_atlas/css/network.css': 'atlas',
}
PIPELINE_VERSION_PAT = re.compile(b'<dt>Transformation Pipeline</dt>\s*<dd>([^<]*)</dd>', re.MULTILINE)
TEMPLATE_VERSION_PAT = re.compile(b'<dt>Template Version</dt>\s*<dd>([^<]*)</dd>', re.MULTILINE)
def get_orgdetails(site, reuse=None):
'''
    Given an organization object as returned from librarylink.util.all_sites, or just a plain base URL string;
    return a dict of the org's details (id, name, group, network, features, logo, version info and same-as links)
>>> from librarylink.util import all_sites, get_orgdetails
>>> det = get_orgdetails('http://link.dcl.org/')
>>> det['name']
'Douglas County Libraries'
>>> org = next(s for s in all_sites() if 'denverlibrary' in s.host )
>>> det = get_orgdetails(org.base_url)
>>> det['name']
'Denver Public Library'
'''
if reuse:
model, sitetext = reuse
else:
model, sitetext = load_rdfa_page(site)
if not model:
return None
details = {'name': None, 'group': None, 'groupname': None, 'network': None, 'features': set()}
id_ = None
for o, r, t, a in model.match(None, RDF_NS + 'type', SCHEMAORG_NS + 'LibrarySystem'):
id_ = o
details['name'] = next(versautil.lookup(model, o, SCHEMAORG_NS + 'name'), '').strip()
break
details['id'] = id_
#for o, r, t, a in model.match(None, SCHEMAORG_NS + 'member'):
# group = t.split('#')[0]
for o, r, t, a in model.match(None, RDF_NS + 'type', SCHEMAORG_NS + 'Consortium'):
details['group'] = versautil.simple_lookup(model, o, SCHEMAORG_NS + 'url')
#group = o.split('#')[0]
details['groupname'] = next(versautil.lookup(model, o, SCHEMAORG_NS + 'name'), '').strip()
break
network = 'zviz'
for searchstr in NETWORK_HINTS:
if searchstr in sitetext:
details['network'] = NETWORK_HINTS[searchstr]
m = PIPELINE_VERSION_PAT.search(sitetext)
if m:
details['pipeline_ver'] = m.group(1).decode('utf-8')
else:
details['pipeline_ver'] = None
#print('Unable to get pipeline version from:', site)
m = TEMPLATE_VERSION_PAT.search(sitetext)
if m:
details['template_ver'] = m.group(1).decode('utf-8')
else:
details['template_ver'] = None
#print('Unable to get template version from:', site)
for o, r, t, a in model.match(None, LL+'feature'):
details['features'].add(t)
#Legacy, for libraries where the above isn't published
if b'<img class="img-responsive" src="/static/liblink_ea/img/nlogo.png"' in sitetext:
details['features'].add('http://library.link/ext/feature/novelist/merge')
details['same-as'] = []
for o, r, t, a in model.match(None, RDF_NS + 'type', SCHEMAORG_NS + 'LibrarySystem'):
for _, r, t, a in model.match(o, SCHEMAORG_NS + 'sameAs'):
details['same-as'].append(t)
break
for o, r, t, a in model.match(None, RDF_NS + 'type', SCHEMAORG_NS + 'LibrarySystem'):
logo = versautil.simple_lookup(model, o, SCHEMAORG_NS + 'logo')
details['logo'] = logo.strip() if logo else logo
break
return details
def get_branches(site, reuse=None):
'''
    Given an organization object as returned from librarylink.util.all_sites, or just a plain base URL string;
    return a list of branch tuples: (id, url, name, (latitude, longitude) or None, (street, locality, region, postcode, country) or None)
    >>> from librarylink.util import all_sites, get_branches
    >>> org = next(s for s in all_sites() if 'denverlibrary' in s.host )
    >>> branches = get_branches(org)
    >>> branches = get_branches('http://link.denverlibrary.org/')
'''
if reuse:
model, sitetext = reuse
else:
model, sitetext = load_rdfa_page(site)
if not model:
return None
branches = []
for o, r, t, a in model.match(None, RDF_NS + 'type', SCHEMAORG_NS + 'Library'):
id_ = o
name = next(versautil.lookup(model, o, SCHEMAORG_NS + 'name'), '').strip()
url = versautil.simple_lookup(model, o, SCHEMAORG_NS + 'url')
loc = versautil.simple_lookup(model, o, SCHEMAORG_NS + 'location')
addr = versautil.simple_lookup(model, o, SCHEMAORG_NS + 'address')
#Goes schema:Library - schema:location -> schema:Place - schema:geo -> Coordinates
if loc:
loc = versautil.simple_lookup(model, loc, SCHEMAORG_NS + 'geo')
if loc:
lat = versautil.simple_lookup(model, loc, SCHEMAORG_NS + 'latitude')
long_ = versautil.simple_lookup(model, loc, SCHEMAORG_NS + 'longitude')
if addr:
#rdf:type schema:PostalAddress
#schema:streetAddress "2111 Snow Road"@en
#schema:addressLocality "Parma"@en
#schema:addressRegion "OH"@en
#schema:postalCode "44134"@en
#schema:addressCountry "US"@en
street = versautil.simple_lookup(model, addr, SCHEMAORG_NS + 'streetAddress')
locality = versautil.simple_lookup(model, addr, SCHEMAORG_NS + 'addressLocality')
region = versautil.simple_lookup(model, addr, SCHEMAORG_NS + 'addressRegion')
postcode = versautil.simple_lookup(model, addr, SCHEMAORG_NS + 'postalCode')
country = versautil.simple_lookup(model, addr, SCHEMAORG_NS + 'addressCountry')
branches.append((
id_,
url,
name,
(lat, long_) if loc else None,
(street, locality, region, postcode, country) if addr else None
))
return branches
def llnurl_ident(url):
'''
Return the identifying pair of (site, hash) from an LLN URL
>>> from librarylink.util import llnurl_ident
>>> llnurl_ident('http://link.worthingtonlibraries.org/resource/9bz8W30aSZY/')
('link.worthingtonlibraries.org', '9bz8W30aSZY')
>>> llnurl_ident('http://link.worthingtonlibraries.org/portal/Unshakeable--your-financial-freedom-playbook/cZlfLtSpcng/')
('link.worthingtonlibraries.org', 'cZlfLtSpcng')
'''
scheme, host, path, query, fragment = iri.split_uri_ref(url)
try:
if path.startswith('/resource/'):
rhash = path.partition('/resource/')[-1].split('/')[0]
elif '/portal/' in url:
rhash = path.partition('/portal/')[-1].split('/')[1]
else:
raise ValueError('Invalid LLN URL: ' + repr(url))
except IndexError as e:
#FIXME L10N
raise ValueError('Invalid LLN URL: ' + repr(url))
return host, rhash
def simplify_link(url):
'''
Return a simplified & unique form of an LLN URL
>>> from librarylink.util import simplify_link
>>> simplify_link('http://link.worthingtonlibraries.org/resource/9bz8W30aSZY/')
'http://link.worthingtonlibraries.org/resource/9bz8W30aSZY/'
>>> simplify_link('http://link.worthingtonlibraries.org/portal/Unshakeable--your-financial-freedom-playbook/cZlfLtSpcng/')
'http://link.worthingtonlibraries.org/portal/Unshakeable--your-financial-freedom-playbook/cZlfLtSpcng/'
>>> simplify_link('http://link.worthingtonlibraries.org/portal/Unshakeable--your-financial-freedom-playbook/cZlfLtSpcng/borrow/')
'http://link.worthingtonlibraries.org/portal/Unshakeable--your-financial-freedom-playbook/cZlfLtSpcng/'
>>> simplify_link('http://link.worthingtonlibraries.org/res/9bz8W30aSZY/boo/') is None
True
>>> simplify_link('http://link.worthingtonlibraries.org/resource/9bz8W30aSZY/boo/')
'http://link.worthingtonlibraries.org/resource/9bz8W30aSZY/'
>>> simplify_link('/res/9bz8W30aSZY/boo/') is None
True
>>> simplify_link('/resource/9bz8W30aSZY/boo/')
'/resource/9bz8W30aSZY/'
>>> simplify_link('https://link.worthingtonlibraries.org/resource/9bz8W30aSZY/')
'https://link.worthingtonlibraries.org/resource/9bz8W30aSZY/'
>>> simplify_link('https://link.worthingtonlibraries.org/resource/9bz8W30aSZY/borrow/')
'https://link.worthingtonlibraries.org/resource/9bz8W30aSZY/'
'''
scheme, auth, path, query, fragment = iri.split_uri_ref(url)
try:
if path.startswith('/resource/'):
path = '/resource/' + path.partition('/resource/')[-1].split('/')[0] + '/'
return iri.unsplit_uri_ref((scheme, auth, path, None, None))
if '/portal/' in url:
path = '/portal/' + '/'.join(path.partition('/portal/')[-1].split('/')[:2]) + '/'
return iri.unsplit_uri_ref((scheme, auth, path, None, None))
else:
path = None
except IndexError as e:
#FIXME L10N
raise ValueError('Invalid LLN URL: ' + repr(url))
return path
class liblink_set(collections.abc.MutableSet):
'''
Smart collection of URLs that understands Library.Link URLs and how to dedup them for set operations
It can also manage a set of exclusions, e.g. to eliminate a URL for repeat processing
'''
def __init__(self, iterable=None):
self._rawset = set()
self._exclusions = set()
if iterable is not None:
self |= iterable
def add(self, item):
simplified = simplify_link(item) or item
if simplified not in self._exclusions:
self._rawset.add(simplified)
def exclude(self, item):
simplified = simplify_link(item) or item
self._rawset.discard(simplified)
self._exclusions.add(simplified)
def __len__(self):
return len(self._rawset)
def __contains__(self, item):
simplified = simplify_link(item) or item
return simplified in self._rawset
def discard(self, item):
simplified = simplify_link(item) or item
self._rawset.discard(simplified)
def __iter__(self):
yield from self._rawset
def __repr__(self):
s = 'RAWSET: ' + repr(self._rawset) + '\n' + 'EXCLUSIONS: ' + repr(self._exclusions)
return s
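# Minimal usage sketch (hypothetical URLs, for illustration only):
#   seen = liblink_set()
#   seen.add('http://link.example.org/resource/9bz8W30aSZY/borrow/')
#   'http://link.example.org/resource/9bz8W30aSZY/' in seen   # True: URLs are simplified on add
#   seen.exclude('http://link.example.org/resource/9bz8W30aSZY/')
#   seen.add('http://link.example.org/resource/9bz8W30aSZY/')  # ignored from now on: it is excluded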
|
uogbuji/Library.Link
|
pylib/util.py
|
Python
|
apache-2.0
| 17,711 | 0.005533 |
import os
import time
import cPickle
from Plugins.Plugin import PluginDescriptor
from Screens.Console import Console
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
from Screens.Standby import TryQuitMainloop
from Screens.Ipkg import Ipkg
from Screens.SoftwareUpdate import UpdatePlugin
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Input import Input
from Components.Ipkg import IpkgComponent
from Components.Sources.StaticText import StaticText
from Components.ScrollLabel import ScrollLabel
from Components.Pixmap import Pixmap
from Components.MenuList import MenuList
from Components.Sources.List import List
from Components.Harddisk import harddiskmanager
from Components.config import config, getConfigListEntry, ConfigSubsection, ConfigText, ConfigLocations, ConfigYesNo, ConfigSelection
from Components.ConfigList import ConfigListScreen
from Components.Console import Console
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from Components.SelectionList import SelectionList
from Components.PluginComponent import plugins
from Components.About import about
from Components.PackageInfo import PackageInfoHandler
from Components.Language import language
from Components.AVSwitch import AVSwitch
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, SCOPE_CURRENT_PLUGIN, SCOPE_CURRENT_SKIN, SCOPE_METADIR
from Tools.LoadPixmap import LoadPixmap
from Tools.NumericalTextInput import NumericalTextInput
from enigma import RT_HALIGN_LEFT, RT_VALIGN_CENTER, eListbox, gFont, getDesktop, ePicLoad, eRCInput, getPrevAsciiCode, eEnv
from twisted.web import client
from ImageWizard import ImageWizard
from BackupRestore import BackupSelection, RestoreMenu, BackupScreen, RestoreScreen, getBackupPath, getBackupFilename
from SoftwareTools import iSoftwareTools
config.plugins.configurationbackup = ConfigSubsection()
config.plugins.configurationbackup.backuplocation = ConfigText(default = '/media/hdd/', visible_width = 50, fixed_size = False)
config.plugins.configurationbackup.backupdirs = ConfigLocations(default=[eEnv.resolve('${sysconfdir}/enigma2/'), '/etc/network/interfaces', '/etc/wpa_supplicant.conf', '/etc/wpa_supplicant.ath0.conf', '/etc/wpa_supplicant.wlan0.conf', '/etc/resolv.conf', '/etc/default_gw', '/etc/hostname'])
config.plugins.softwaremanager = ConfigSubsection()
config.plugins.softwaremanager.overwriteConfigFiles = ConfigSelection(
[
("Y", _("Yes, always")),
("N", _("No, never")),
("ask", _("Always ask"))
], "Y")
config.plugins.softwaremanager.onSetupMenu = ConfigYesNo(default=False)
config.plugins.softwaremanager.onBlueButton = ConfigYesNo(default=False)
config.plugins.softwaremanager.epgcache = ConfigYesNo(default=False)
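def _example_softwaremanager_settings():
	# Illustrative only (not part of the original plugin; nothing calls this helper).
	# Shows how the settings defined above are typically read back; the helper name
	# and the returned tuple are assumptions for the example, not plugin behaviour.
	overwrite = config.plugins.softwaremanager.overwriteConfigFiles.value	# "Y", "N" or "ask"
	backupdirs = config.plugins.configurationbackup.backupdirs.value	# list of paths to back up
	return (overwrite, backupdirs)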
def write_cache(cache_file, cache_data):
try:
path = os.path.dirname(cache_file)
if not os.path.isdir(path):
os.mkdir(path)
cPickle.dump(cache_data, open(cache_file, 'w'), -1)
except Exception, ex:
print "Failed to write cache data to %s:" % cache_file, ex
def valid_cache(cache_file, cache_ttl):
#See if the cache file exists and is still living
try:
		mtime = os.stat(cache_file).st_mtime
except:
return 0
curr_time = time.time()
if (curr_time - mtime) > cache_ttl:
return 0
else:
return 1
def load_cache(cache_file):
return cPickle.load(open(cache_file))
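def _example_cache_usage():
	# Illustrative only (not part of the original plugin; nothing calls this helper).
	# A typical round trip through the cache helpers above; the file name, TTL and
	# placeholder payload are assumptions for the example.
	CACHE_FILE = "/tmp/softwaremanager_example.cache"	# hypothetical path
	CACHE_TTL = 24 * 60 * 60				# one day, in seconds
	if valid_cache(CACHE_FILE, CACHE_TTL):
		data = load_cache(CACHE_FILE)
	else:
		data = {"packages": []}				# placeholder for the expensive rebuild step
		write_cache(CACHE_FILE, data)
	return data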
class UpdatePluginMenu(Screen):
skin = """
<screen name="UpdatePluginMenu" position="center,center" size="610,410" title="Software management" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<ePixmap pixmap="skin_default/border_menu_350.png" position="5,50" zPosition="1" size="350,300" transparent="1" alphatest="on" />
<widget source="menu" render="Listbox" position="15,60" size="330,290" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (2, 2), size = (330, 24), flags = RT_HALIGN_LEFT, text = 1), # index 0 is the MenuText,
],
"fonts": [gFont("Regular", 22)],
"itemHeight": 25
}
</convert>
</widget>
<widget source="menu" render="Listbox" position="360,50" size="240,300" scrollbarMode="showNever" selectionDisabled="1">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (2, 2), size = (240, 300), flags = RT_HALIGN_CENTER|RT_VALIGN_CENTER|RT_WRAP, text = 2), # index 2 is the Description,
],
"fonts": [gFont("Regular", 22)],
"itemHeight": 300
}
</convert>
</widget>
<widget source="status" render="Label" position="5,360" zPosition="10" size="600,50" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, args = 0):
Screen.__init__(self, session)
self.skin_path = plugin_path
self.menu = args
self.list = []
self.oktext = _("\nPress OK on your remote control to continue.")
self.menutext = _("Press MENU on your remote control for additional options.")
self.infotext = _("Press INFO on your remote control for additional information.")
self.text = ""
self.backupdirs = ' '.join( config.plugins.configurationbackup.backupdirs.value )
if self.menu == 0:
print "building menu entries"
self.list.append(("install-extensions", _("Manage extensions"), _("\nManage extensions or plugins for your receiver" ) + self.oktext, None))
self.list.append(("software-update", _("Software update"), _("\nOnline update of your receiver software." ) + self.oktext, None))
self.list.append(("software-restore", _("Software restore"), _("\nRestore your receiver with a new firmware." ) + self.oktext, None))
self.list.append(("system-backup", _("Backup system settings"), _("\nBackup your receiver settings." ) + self.oktext + "\n\n" + self.infotext, None))
self.list.append(("system-restore",_("Restore system settings"), _("\nRestore your receiver settings." ) + self.oktext, None))
self.list.append(("ipkg-install", _("Install local extension"), _("\nScan for local extensions and install them." ) + self.oktext, None))
for p in plugins.getPlugins(PluginDescriptor.WHERE_SOFTWAREMANAGER):
if "SoftwareSupported" in p.__call__:
callFnc = p.__call__["SoftwareSupported"](None)
if callFnc is not None:
if "menuEntryName" in p.__call__:
menuEntryName = p.__call__["menuEntryName"](None)
else:
menuEntryName = _('Extended Software')
if "menuEntryDescription" in p.__call__:
menuEntryDescription = p.__call__["menuEntryDescription"](None)
else:
menuEntryDescription = _('Extended Software Plugin')
self.list.append(('default-plugin', menuEntryName, menuEntryDescription + self.oktext, callFnc))
if config.usage.setup_level.index >= 2: # expert+
self.list.append(("advanced", _("Advanced options"), _("\nAdvanced options and settings." ) + self.oktext, None))
elif self.menu == 1:
self.list.append(("advancedrestore", _("Advanced restore"), _("\nRestore your backups by date." ) + self.oktext, None))
self.list.append(("backuplocation", _("Select backup location"), _("\nSelect your backup device.\nCurrent device: " ) + config.plugins.configurationbackup.backuplocation.value + self.oktext, None))
self.list.append(("backupfiles", _("Select backup files"), _("Select files for backup.") + self.oktext + "\n\n" + self.infotext, None))
if config.usage.setup_level.index >= 2: # expert+
self.list.append(("ipkg-manager", _("Packet management"), _("\nView, install and remove available or installed packages." ) + self.oktext, None))
self.list.append(("ipkg-source",_("Select upgrade source"), _("\nEdit the upgrade source address." ) + self.oktext, None))
for p in plugins.getPlugins(PluginDescriptor.WHERE_SOFTWAREMANAGER):
if "AdvancedSoftwareSupported" in p.__call__:
callFnc = p.__call__["AdvancedSoftwareSupported"](None)
if callFnc is not None:
if "menuEntryName" in p.__call__:
menuEntryName = p.__call__["menuEntryName"](None)
else:
menuEntryName = _('Advanced software')
if "menuEntryDescription" in p.__call__:
menuEntryDescription = p.__call__["menuEntryDescription"](None)
else:
menuEntryDescription = _('Advanced software plugin')
self.list.append(('advanced-plugin', menuEntryName, menuEntryDescription + self.oktext, callFnc))
self["menu"] = List(self.list)
self["key_red"] = StaticText(_("Close"))
self["status"] = StaticText(self.menutext)
self["shortcuts"] = NumberActionMap(["ShortcutActions", "WizardActions", "InfobarEPGActions", "MenuActions", "NumberActions"],
{
"ok": self.go,
"back": self.close,
"red": self.close,
"menu": self.handleMenu,
"showEventInfo": self.handleInfo,
"1": self.go,
"2": self.go,
"3": self.go,
"4": self.go,
"5": self.go,
"6": self.go,
"7": self.go,
"8": self.go,
"9": self.go,
}, -1)
self.onLayoutFinish.append(self.layoutFinished)
self.backuppath = getBackupPath()
self.backupfile = getBackupFilename()
self.fullbackupfilename = self.backuppath + "/" + self.backupfile
self.onShown.append(self.setWindowTitle)
self.onChangedEntry = []
self["menu"].onSelectionChanged.append(self.selectionChanged)
def createSummary(self):
from Screens.PluginBrowser import PluginBrowserSummary
return PluginBrowserSummary
def selectionChanged(self):
item = self["menu"].getCurrent()
if item:
name = item[1]
desc = item[2]
else:
name = "-"
desc = ""
for cb in self.onChangedEntry:
cb(name, desc)
def layoutFinished(self):
idx = 0
self["menu"].index = idx
def setWindowTitle(self):
self.setTitle(_("Software management"))
def cleanup(self):
iSoftwareTools.cleanupSoftwareTools()
def getUpdateInfos(self):
if iSoftwareTools.NetworkConnectionAvailable is True:
			if iSoftwareTools.available_updates != 0:
self.text = _("There are at least %s updates available.") % (str(iSoftwareTools.available_updates))
else:
self.text = "" #_("There are no updates available.")
if iSoftwareTools.list_updating is True:
self.text += "\n" + _("A search for available updates is currently in progress.")
else:
self.text = _("No network connection available.")
self["status"].setText(self.text)
def handleMenu(self):
self.session.open(SoftwareManagerSetup)
def handleInfo(self):
current = self["menu"].getCurrent()
if current:
currentEntry = current[0]
if currentEntry in ("system-backup","backupfiles"):
self.session.open(SoftwareManagerInfo, mode = "backupinfo")
def go(self, num = None):
if num is not None:
num -= 1
if not num < self["menu"].count():
return
self["menu"].setIndex(num)
current = self["menu"].getCurrent()
if current:
currentEntry = current[0]
if self.menu == 0:
if (currentEntry == "software-update"):
self.session.open(UpdatePlugin, self.skin_path)
elif (currentEntry == "software-restore"):
self.session.open(ImageWizard)
elif (currentEntry == "install-extensions"):
self.session.open(PluginManager, self.skin_path)
elif (currentEntry == "system-backup"):
self.session.openWithCallback(self.backupDone,BackupScreen, runBackup = True)
elif (currentEntry == "system-restore"):
if os.path.exists(self.fullbackupfilename):
self.session.openWithCallback(self.startRestore, MessageBox, _("Are you sure you want to restore the backup?\nYour receiver will restart after the backup has been restored!"))
else:
self.session.open(MessageBox, _("Sorry, no backups found!"), MessageBox.TYPE_INFO, timeout = 10)
elif (currentEntry == "ipkg-install"):
try:
from Plugins.Extensions.MediaScanner.plugin import main
main(self.session)
except:
self.session.open(MessageBox, _("Sorry, %s has not been installed!") % ("MediaScanner"), MessageBox.TYPE_INFO, timeout = 10)
elif (currentEntry == "default-plugin"):
self.extended = current[3]
self.extended(self.session, None)
elif (currentEntry == "advanced"):
self.session.open(UpdatePluginMenu, 1)
elif self.menu == 1:
if (currentEntry == "ipkg-manager"):
self.session.open(PacketManager, self.skin_path)
elif (currentEntry == "backuplocation"):
parts = [ (r.description, r.mountpoint, self.session) for r in harddiskmanager.getMountedPartitions(onlyhotplug = False)]
					for x in parts[:]:	# iterate over a copy so removals do not skip entries
if not os.access(x[1], os.F_OK|os.R_OK|os.W_OK) or x[1] == '/':
parts.remove(x)
if len(parts):
self.session.openWithCallback(self.backuplocation_choosen, ChoiceBox, title = _("Please select medium to use as backup location"), list = parts)
elif (currentEntry == "backupfiles"):
self.session.openWithCallback(self.backupfiles_choosen,BackupSelection)
elif (currentEntry == "advancedrestore"):
self.session.open(RestoreMenu, self.skin_path)
elif (currentEntry == "ipkg-source"):
self.session.open(IPKGMenu, self.skin_path)
elif (currentEntry == "advanced-plugin"):
self.extended = current[3]
self.extended(self.session, None)
def backupfiles_choosen(self, ret):
self.backupdirs = ' '.join( config.plugins.configurationbackup.backupdirs.value )
config.plugins.configurationbackup.backupdirs.save()
config.plugins.configurationbackup.save()
config.save()
def backuplocation_choosen(self, option):
oldpath = config.plugins.configurationbackup.backuplocation.getValue()
if option is not None:
config.plugins.configurationbackup.backuplocation.value = str(option[1])
config.plugins.configurationbackup.backuplocation.save()
config.plugins.configurationbackup.save()
config.save()
newpath = config.plugins.configurationbackup.backuplocation.getValue()
if newpath != oldpath:
self.createBackupfolders()
def createBackupfolders(self):
print "Creating backup folder if not already there..."
self.backuppath = getBackupPath()
try:
if (os.path.exists(self.backuppath) == False):
os.makedirs(self.backuppath)
except OSError:
self.session.open(MessageBox, _("Sorry, your backup destination is not writeable.\nPlease select a different one."), MessageBox.TYPE_INFO, timeout = 10)
def backupDone(self,retval = None):
if retval is True:
self.session.open(MessageBox, _("Backup completed."), MessageBox.TYPE_INFO, timeout = 10)
else:
self.session.open(MessageBox, _("Backup failed."), MessageBox.TYPE_INFO, timeout = 10)
def startRestore(self, ret = False):
if (ret == True):
self.exe = True
self.session.open(RestoreScreen, runRestore = True)
class SoftwareManagerSetup(Screen, ConfigListScreen):
skin = """
<screen name="SoftwareManagerSetup" position="center,center" size="560,440" title="SoftwareManager setup">
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="config" position="5,50" size="550,350" scrollbarMode="showOnDemand" />
<ePixmap pixmap="skin_default/div-h.png" position="0,400" zPosition="1" size="560,2" />
<widget source="introduction" render="Label" position="5,410" size="550,30" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, skin_path = None):
Screen.__init__(self, session)
self.session = session
self.skin_path = skin_path
if self.skin_path is None:
self.skin_path = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager")
self.onChangedEntry = [ ]
self.setup_title = _("Software manager setup")
self.overwriteConfigfilesEntry = None
self.list = [ ]
ConfigListScreen.__init__(self, self.list, session = session, on_change = self.changedEntry)
self["actions"] = ActionMap(["SetupActions", "MenuActions"],
{
"cancel": self.keyCancel,
"save": self.apply,
"menu": self.closeRecursive,
}, -2)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText()
self["introduction"] = StaticText()
self.createSetup()
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(self.setup_title)
def createSetup(self):
self.list = [ ]
self.overwriteConfigfilesEntry = getConfigListEntry(_("Overwrite configuration files?"), config.plugins.softwaremanager.overwriteConfigFiles)
self.list.append(self.overwriteConfigfilesEntry)
self.list.append(getConfigListEntry(_("show softwaremanager in setup menu"), config.plugins.softwaremanager.onSetupMenu))
self.list.append(getConfigListEntry(_("show softwaremanager on blue button"), config.plugins.softwaremanager.onBlueButton))
self.list.append(getConfigListEntry(_("epg cache backup"), config.plugins.softwaremanager.epgcache))
self["config"].list = self.list
self["config"].l.setSeperation(400)
self["config"].l.setList(self.list)
if not self.selectionChanged in self["config"].onSelectionChanged:
self["config"].onSelectionChanged.append(self.selectionChanged)
self.selectionChanged()
def selectionChanged(self):
if self["config"].getCurrent() == self.overwriteConfigfilesEntry:
self["introduction"].setText(_("Overwrite configuration files during software upgrade?"))
else:
self["introduction"].setText("")
def newConfig(self):
pass
def keyLeft(self):
ConfigListScreen.keyLeft(self)
def keyRight(self):
ConfigListScreen.keyRight(self)
def confirm(self, confirmed):
if not confirmed:
print "not confirmed"
return
else:
self.keySave()
plugins.clearPluginList()
plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))
def apply(self):
self.session.openWithCallback(self.confirm, MessageBox, _("Use these settings?"), MessageBox.TYPE_YESNO, timeout = 20, default = True)
def cancelConfirm(self, result):
if not result:
return
for x in self["config"].list:
x[1].cancel()
self.close()
def keyCancel(self):
if self["config"].isChanged():
self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?"), MessageBox.TYPE_YESNO, timeout = 20, default = True)
else:
self.close()
# for summary:
def changedEntry(self):
for x in self.onChangedEntry:
x()
self.selectionChanged()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].value)
def createSummary(self):
from Screens.Setup import SetupSummary
return SetupSummary
class SoftwareManagerInfo(Screen):
skin = """
<screen name="SoftwareManagerInfo" position="center,center" size="560,440" title="SoftwareManager information">
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget source="list" render="Listbox" position="5,50" size="550,340" scrollbarMode="showOnDemand" selectionDisabled="0">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (5, 0), size = (540, 26), font=0, flags = RT_HALIGN_LEFT | RT_HALIGN_CENTER, text = 0), # index 0 is the name
],
"fonts": [gFont("Regular", 24),gFont("Regular", 22)],
"itemHeight": 26
}
</convert>
</widget>
<ePixmap pixmap="skin_default/div-h.png" position="0,400" zPosition="1" size="560,2" />
<widget source="introduction" render="Label" position="5,410" size="550,30" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, skin_path = None, mode = None):
Screen.__init__(self, session)
self.session = session
self.mode = mode
self.skin_path = skin_path
if self.skin_path is None:
self.skin_path = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager")
self["actions"] = ActionMap(["ShortcutActions", "WizardActions"],
{
"back": self.close,
"red": self.close,
}, -2)
self.list = []
self["list"] = List(self.list)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText()
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText()
self["introduction"] = StaticText()
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(_("Softwaremanager information"))
if self.mode is not None:
self.showInfos()
def showInfos(self):
if self.mode == "backupinfo":
self.list = []
backupfiles = config.plugins.configurationbackup.backupdirs.value
for entry in backupfiles:
self.list.append((entry,))
self['list'].setList(self.list)
class PluginManager(Screen, PackageInfoHandler):
skin = """
<screen name="PluginManager" position="center,center" size="560,440" title="Extensions management" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget source="list" render="Listbox" position="5,50" size="550,360" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"templates":
{"default": (51,[
MultiContentEntryText(pos = (0, 1), size = (470, 24), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name
MultiContentEntryText(pos = (0, 25), size = (470, 24), font=1, flags = RT_HALIGN_LEFT, text = 2), # index 2 is the description
MultiContentEntryPixmapAlphaTest(pos = (475, 0), size = (48, 48), png = 5), # index 5 is the status pixmap
MultiContentEntryPixmapAlphaTest(pos = (0, 49), size = (550, 2), png = 6), # index 6 is the div pixmap
]),
"category": (40,[
MultiContentEntryText(pos = (30, 0), size = (500, 22), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name
MultiContentEntryText(pos = (30, 22), size = (500, 16), font=2, flags = RT_HALIGN_LEFT, text = 1), # index 1 is the description
MultiContentEntryPixmapAlphaTest(pos = (0, 38), size = (550, 2), png = 3), # index 3 is the div pixmap
])
},
"fonts": [gFont("Regular", 22),gFont("Regular", 20),gFont("Regular", 16)],
"itemHeight": 52
}
</convert>
</widget>
<widget source="status" render="Label" position="5,410" zPosition="10" size="540,30" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, plugin_path = None, args = None):
Screen.__init__(self, session)
self.session = session
self.skin_path = plugin_path
if self.skin_path is None:
self.skin_path = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager")
self["shortcuts"] = ActionMap(["ShortcutActions", "WizardActions", "InfobarEPGActions", "HelpActions" ],
{
"ok": self.handleCurrent,
"back": self.exit,
"red": self.exit,
"green": self.handleCurrent,
"yellow": self.handleSelected,
"showEventInfo": self.handleSelected,
"displayHelp": self.handleHelp,
}, -1)
self.list = []
self.statuslist = []
self.selectedFiles = []
self.categoryList = []
self.packetlist = []
self["list"] = List(self.list)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText("")
self["key_yellow"] = StaticText("")
self["key_blue"] = StaticText("")
self["status"] = StaticText("")
self.cmdList = []
self.oktext = _("\nAfter pressing OK, please wait!")
if not self.selectionChanged in self["list"].onSelectionChanged:
self["list"].onSelectionChanged.append(self.selectionChanged)
self.currList = ""
self.currentSelectedTag = None
self.currentSelectedIndex = None
self.currentSelectedPackage = None
self.saved_currentSelectedPackage = None
self.restartRequired = False
self.onShown.append(self.setWindowTitle)
self.onLayoutFinish.append(self.getUpdateInfos)
def setWindowTitle(self):
self.setTitle(_("Extensions management"))
def exit(self):
if self.currList == "packages":
self.currList = "category"
self.currentSelectedTag = None
self["list"].style = "category"
self['list'].setList(self.categoryList)
self["list"].setIndex(self.currentSelectedIndex)
self["list"].updateList(self.categoryList)
self.selectionChanged()
else:
iSoftwareTools.cleanupSoftwareTools()
self.prepareInstall()
if len(self.cmdList):
self.session.openWithCallback(self.runExecute, PluginManagerInfo, self.skin_path, self.cmdList)
else:
self.close()
def handleHelp(self):
if self.currList != "status":
self.session.open(PluginManagerHelp, self.skin_path)
def setState(self,status = None):
if status:
self.currList = "status"
self.statuslist = []
self["key_green"].setText("")
self["key_blue"].setText("")
self["key_yellow"].setText("")
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/div-h.png"))
if status == 'update':
statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgrade.png"))
self.statuslist.append(( _("Updating software catalog"), '', _("Searching for available updates. Please wait..." ),'', '', statuspng, divpng, None, '' ))
elif status == 'sync':
statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgrade.png"))
self.statuslist.append(( _("Package list update"), '', _("Searching for new installed or removed packages. Please wait..." ),'', '', statuspng, divpng, None, '' ))
elif status == 'error':
self["key_green"].setText(_("Continue"))
statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png"))
self.statuslist.append(( _("Error"), '', _("An error occurred while downloading the packetlist. Please try again." ),'', '', statuspng, divpng, None, '' ))
self["list"].style = "default"
self['list'].setList(self.statuslist)
def getUpdateInfos(self):
if (iSoftwareTools.lastDownloadDate is not None and iSoftwareTools.NetworkConnectionAvailable is False):
self.rebuildList()
else:
self.setState('update')
iSoftwareTools.startSoftwareTools(self.getUpdateInfosCB)
def getUpdateInfosCB(self, retval = None):
if retval is not None:
if retval is True:
				if iSoftwareTools.available_updates != 0:
self["status"].setText(_("There are at least ") + str(iSoftwareTools.available_updates) + _(" updates available."))
else:
self["status"].setText(_("There are no updates available."))
self.rebuildList()
elif retval is False:
if iSoftwareTools.lastDownloadDate is None:
self.setState('error')
if iSoftwareTools.NetworkConnectionAvailable:
self["status"].setText(_("Updatefeed not available."))
else:
self["status"].setText(_("No network connection available."))
else:
iSoftwareTools.lastDownloadDate = time.time()
iSoftwareTools.list_updating = True
self.setState('update')
iSoftwareTools.getUpdates(self.getUpdateInfosCB)
def rebuildList(self, retval = None):
if self.currentSelectedTag is None:
self.buildCategoryList()
else:
self.buildPacketList(self.currentSelectedTag)
def selectionChanged(self):
current = self["list"].getCurrent()
self["status"].setText("")
if current:
if self.currList == "packages":
self["key_red"].setText(_("Back"))
if current[4] == 'installed':
self["key_green"].setText(_("Uninstall"))
elif current[4] == 'installable':
self["key_green"].setText(_("Install"))
if iSoftwareTools.NetworkConnectionAvailable is False:
self["key_green"].setText("")
elif current[4] == 'remove':
self["key_green"].setText(_("Undo uninstall"))
elif current[4] == 'install':
self["key_green"].setText(_("Undo install"))
if iSoftwareTools.NetworkConnectionAvailable is False:
self["key_green"].setText("")
self["key_yellow"].setText(_("View details"))
self["key_blue"].setText("")
				if len(self.selectedFiles) == 0 and iSoftwareTools.available_updates != 0:
self["status"].setText(_("There are at least ") + str(iSoftwareTools.available_updates) + _(" updates available."))
				elif len(self.selectedFiles) != 0:
self["status"].setText(str(len(self.selectedFiles)) + _(" packages selected."))
else:
self["status"].setText(_("There are currently no outstanding actions."))
elif self.currList == "category":
self["key_red"].setText(_("Close"))
self["key_green"].setText("")
self["key_yellow"].setText("")
self["key_blue"].setText("")
				if len(self.selectedFiles) == 0 and iSoftwareTools.available_updates != 0:
self["status"].setText(_("There are at least ") + str(iSoftwareTools.available_updates) + _(" updates available."))
self["key_yellow"].setText(_("Update"))
				elif len(self.selectedFiles) != 0:
self["status"].setText(str(len(self.selectedFiles)) + _(" packages selected."))
self["key_yellow"].setText(_("Process"))
else:
self["status"].setText(_("There are currently no outstanding actions."))
def getSelectionState(self, detailsFile):
for entry in self.selectedFiles:
if entry[0] == detailsFile:
return True
return False
def handleCurrent(self):
current = self["list"].getCurrent()
if current:
if self.currList == "category":
self.currentSelectedIndex = self["list"].index
selectedTag = current[2]
self.buildPacketList(selectedTag)
elif self.currList == "packages":
				if current[7] != '':
idx = self["list"].getIndex()
detailsFile = self.list[idx][1]
if self.list[idx][7] == True:
for entry in self.selectedFiles:
if entry[0] == detailsFile:
self.selectedFiles.remove(entry)
else:
alreadyinList = False
for entry in self.selectedFiles:
if entry[0] == detailsFile:
alreadyinList = True
if not alreadyinList:
if (iSoftwareTools.NetworkConnectionAvailable is False and current[4] in ('installable','install')):
pass
else:
self.selectedFiles.append((detailsFile,current[4],current[3]))
self.currentSelectedPackage = ((detailsFile,current[4],current[3]))
if current[4] == 'installed':
self.list[idx] = self.buildEntryComponent(current[0], current[1], current[2], current[3], 'remove', True)
elif current[4] == 'installable':
if iSoftwareTools.NetworkConnectionAvailable:
self.list[idx] = self.buildEntryComponent(current[0], current[1], current[2], current[3], 'install', True)
elif current[4] == 'remove':
self.list[idx] = self.buildEntryComponent(current[0], current[1], current[2], current[3], 'installed', False)
elif current[4] == 'install':
if iSoftwareTools.NetworkConnectionAvailable:
self.list[idx] = self.buildEntryComponent(current[0], current[1], current[2], current[3], 'installable',False)
self["list"].setList(self.list)
self["list"].setIndex(idx)
self["list"].updateList(self.list)
self.selectionChanged()
elif self.currList == "status":
iSoftwareTools.lastDownloadDate = time.time()
iSoftwareTools.list_updating = True
self.setState('update')
iSoftwareTools.getUpdates(self.getUpdateInfosCB)
def handleSelected(self):
current = self["list"].getCurrent()
if current:
if self.currList == "packages":
				if current[7] != '':
detailsfile = iSoftwareTools.directory[0] + "/" + current[1]
if (os.path.exists(detailsfile) == True):
self.saved_currentSelectedPackage = self.currentSelectedPackage
self.session.openWithCallback(self.detailsClosed, PluginDetails, self.skin_path, current)
else:
self.session.open(MessageBox, _("Sorry, no details available!"), MessageBox.TYPE_INFO, timeout = 10)
elif self.currList == "category":
self.prepareInstall()
if len(self.cmdList):
self.session.openWithCallback(self.runExecute, PluginManagerInfo, self.skin_path, self.cmdList)
def detailsClosed(self, result = None):
if result is not None:
if result is not False:
self.setState('sync')
iSoftwareTools.lastDownloadDate = time.time()
for entry in self.selectedFiles:
if entry == self.saved_currentSelectedPackage:
self.selectedFiles.remove(entry)
iSoftwareTools.startIpkgListInstalled(self.rebuildList)
def buildEntryComponent(self, name, details, description, packagename, state, selected = False):
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/div-h.png"))
installedpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installed.png"))
installablepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installable.png"))
removepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png"))
installpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/install.png"))
if state == 'installed':
return((name, details, description, packagename, state, installedpng, divpng, selected))
elif state == 'installable':
return((name, details, description, packagename, state, installablepng, divpng, selected))
elif state == 'remove':
return((name, details, description, packagename, state, removepng, divpng, selected))
elif state == 'install':
return((name, details, description, packagename, state, installpng, divpng, selected))
def buildPacketList(self, categorytag = None):
if categorytag is not None:
self.currList = "packages"
self.currentSelectedTag = categorytag
self.packetlist = []
for package in iSoftwareTools.packagesIndexlist[:]:
prerequisites = package[0]["prerequisites"]
if "tag" in prerequisites:
for foundtag in prerequisites["tag"]:
if categorytag == foundtag:
attributes = package[0]["attributes"]
if "packagetype" in attributes:
if attributes["packagetype"] == "internal":
continue
self.packetlist.append([attributes["name"], attributes["details"], attributes["shortdescription"], attributes["packagename"]])
else:
self.packetlist.append([attributes["name"], attributes["details"], attributes["shortdescription"], attributes["packagename"]])
self.list = []
for x in self.packetlist:
status = ""
name = x[0].strip()
details = x[1].strip()
description = x[2].strip()
if not description:
description = "No description available."
packagename = x[3].strip()
selectState = self.getSelectionState(details)
if packagename in iSoftwareTools.installed_packetlist:
if selectState == True:
status = "remove"
else:
status = "installed"
self.list.append(self.buildEntryComponent(name, _(details), _(description), packagename, status, selected = selectState))
else:
if selectState == True:
status = "install"
else:
status = "installable"
self.list.append(self.buildEntryComponent(name, _(details), _(description), packagename, status, selected = selectState))
if len(self.list):
self.list.sort(key=lambda x: x[0])
self["list"].style = "default"
self['list'].setList(self.list)
self["list"].updateList(self.list)
self.selectionChanged()
def buildCategoryList(self):
self.currList = "category"
self.categories = []
self.categoryList = []
for package in iSoftwareTools.packagesIndexlist[:]:
prerequisites = package[0]["prerequisites"]
if "tag" in prerequisites:
for foundtag in prerequisites["tag"]:
attributes = package[0]["attributes"]
if foundtag not in self.categories:
self.categories.append(foundtag)
self.categoryList.append(self.buildCategoryComponent(foundtag))
self.categoryList.sort(key=lambda x: x[0])
self["list"].style = "category"
self['list'].setList(self.categoryList)
self["list"].updateList(self.categoryList)
self.selectionChanged()
def buildCategoryComponent(self, tag = None):
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/div-h.png"))
if tag is not None:
if tag == 'System':
return(( _("System"), _("View list of available system extensions" ), tag, divpng ))
elif tag == 'Skin':
return(( _("Skins"), _("View list of available skins" ), tag, divpng ))
elif tag == 'Recording':
return(( _("Recordings"), _("View list of available recording extensions" ), tag, divpng ))
elif tag == 'Network':
return(( _("Network"), _("View list of available networking extensions" ), tag, divpng ))
elif tag == 'CI':
return(( _("Common Interface"), _("View list of available CommonInterface extensions" ), tag, divpng ))
elif tag == 'Default':
return(( _("Default settings"), _("View list of available default settings" ), tag, divpng ))
elif tag == 'SAT':
return(( _("Satellite equipment"), _("View list of available Satellite equipment extensions." ), tag, divpng ))
elif tag == 'Software':
return(( _("Software"), _("View list of available software extensions" ), tag, divpng ))
elif tag == 'Multimedia':
return(( _("Multimedia"), _("View list of available multimedia extensions." ), tag, divpng ))
elif tag == 'Display':
return(( _("Display and userinterface"), _("View list of available display and userinterface extensions." ), tag, divpng ))
elif tag == 'EPG':
return(( _("Electronic Program Guide"), _("View list of available EPG extensions." ), tag, divpng ))
elif tag == 'Communication':
return(( _("Communication"), _("View list of available communication extensions." ), tag, divpng ))
else: # dynamically generate non existent tags
return(( str(tag), _("View list of available ") + str(tag) + _(" extensions." ), tag, divpng ))
def prepareInstall(self):
self.cmdList = []
if iSoftwareTools.available_updates > 0:
self.cmdList.append((IpkgComponent.CMD_UPGRADE, { "test_only": False }))
if self.selectedFiles and len(self.selectedFiles):
for plugin in self.selectedFiles:
detailsfile = iSoftwareTools.directory[0] + "/" + plugin[0]
if (os.path.exists(detailsfile) == True):
iSoftwareTools.fillPackageDetails(plugin[0])
self.package = iSoftwareTools.packageDetails[0]
if "attributes" in self.package[0]:
self.attributes = self.package[0]["attributes"]
if "needsRestart" in self.attributes:
self.restartRequired = True
if "package" in self.attributes:
self.packagefiles = self.attributes["package"]
if plugin[1] == 'installed':
if self.packagefiles:
for package in self.packagefiles[:]:
self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": package["name"] }))
else:
self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": plugin[2] }))
else:
if self.packagefiles:
for package in self.packagefiles[:]:
self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": package["name"] }))
else:
self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": plugin[2] }))
else:
if plugin[1] == 'installed':
self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": plugin[2] }))
else:
self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": plugin[2] }))
def runExecute(self, result = None):
if result is not None:
if result[0] is True:
self.session.openWithCallback(self.runExecuteFinished, Ipkg, cmdList = self.cmdList)
elif result[0] is False:
self.cmdList = result[1]
self.session.openWithCallback(self.runExecuteFinished, Ipkg, cmdList = self.cmdList)
else:
self.close()
def runExecuteFinished(self):
self.reloadPluginlist()
if plugins.restartRequired or self.restartRequired:
self.session.openWithCallback(self.ExecuteReboot, MessageBox, _("Installation or removal has completed.") + "\n" +_("Do you want to reboot your receiver?"), MessageBox.TYPE_YESNO)
else:
self.selectedFiles = []
self.restartRequired = False
self.detailsClosed(True)
def ExecuteReboot(self, result):
if result:
self.session.open(TryQuitMainloop,retvalue=3)
else:
self.selectedFiles = []
self.restartRequired = False
self.detailsClosed(True)
def reloadPluginlist(self):
plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))
class PluginManagerInfo(Screen):
skin = """
<screen name="PluginManagerInfo" position="center,center" size="560,450" title="Plugin manager activity information" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="list" render="Listbox" position="5,50" size="550,350" scrollbarMode="showOnDemand" selectionDisabled="1">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (50, 0), size = (150, 26), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name
MultiContentEntryText(pos = (50, 27), size = (540, 23), font=1, flags = RT_HALIGN_LEFT, text = 1), # index 1 is the state
MultiContentEntryPixmapAlphaTest(pos = (0, 1), size = (48, 48), png = 2), # index 2 is the status pixmap
MultiContentEntryPixmapAlphaTest(pos = (0, 48), size = (550, 2), png = 3), # index 3 is the div pixmap
],
"fonts": [gFont("Regular", 24),gFont("Regular", 22)],
"itemHeight": 50
}
</convert>
</widget>
<ePixmap pixmap="skin_default/div-h.png" position="0,404" zPosition="10" size="560,2" transparent="1" alphatest="on" />
<widget source="status" render="Label" position="5,408" zPosition="10" size="550,44" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, plugin_path, cmdlist = None):
Screen.__init__(self, session)
self.session = session
self.skin_path = plugin_path
self.cmdlist = cmdlist
self["shortcuts"] = ActionMap(["ShortcutActions", "WizardActions"],
{
"ok": self.process_all,
"back": self.exit,
"red": self.exit,
"green": self.process_extensions,
}, -1)
self.list = []
self["list"] = List(self.list)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Only extensions."))
self["status"] = StaticText(_("Following tasks will be done after you press OK!"))
self.onShown.append(self.setWindowTitle)
self.onLayoutFinish.append(self.rebuildList)
def setWindowTitle(self):
self.setTitle(_("Plugin manager activity information"))
def rebuildList(self):
self.list = []
if self.cmdlist is not None:
for entry in self.cmdlist:
action = ""
info = ""
cmd = entry[0]
if cmd == 0:
action = 'install'
elif cmd == 2:
action = 'remove'
else:
action = 'upgrade'
args = entry[1]
if cmd == 0:
info = args['package']
elif cmd == 2:
info = args['package']
else:
info = _("receiver software because updates are available.")
self.list.append(self.buildEntryComponent(action,info))
self['list'].setList(self.list)
self['list'].updateList(self.list)
def buildEntryComponent(self, action,info):
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/div-h.png"))
upgradepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgrade.png"))
installpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/install.png"))
removepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png"))
if action == 'install':
return(( _('Installing'), info, installpng, divpng))
elif action == 'remove':
return(( _('Removing'), info, removepng, divpng))
else:
return(( _('Upgrading'), info, upgradepng, divpng))
def exit(self):
self.close()
def process_all(self):
self.close((True,None))
def process_extensions(self):
self.list = []
if self.cmdlist is not None:
for entry in self.cmdlist:
cmd = entry[0]
if entry[0] in (0,2):
self.list.append((entry))
self.close((False,self.list))
class PluginManagerHelp(Screen):
skin = """
<screen name="PluginManagerHelp" position="center,center" size="560,450" title="Plugin manager help" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="list" render="Listbox" position="5,50" size="550,350" scrollbarMode="showOnDemand" selectionDisabled="1">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (50, 0), size = (540, 26), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name
MultiContentEntryText(pos = (50, 27), size = (540, 23), font=1, flags = RT_HALIGN_LEFT, text = 1), # index 1 is the state
MultiContentEntryPixmapAlphaTest(pos = (0, 1), size = (48, 48), png = 2), # index 2 is the status pixmap
MultiContentEntryPixmapAlphaTest(pos = (0, 48), size = (550, 2), png = 3), # index 3 is the div pixmap
],
"fonts": [gFont("Regular", 24),gFont("Regular", 22)],
"itemHeight": 50
}
</convert>
</widget>
<ePixmap pixmap="skin_default/div-h.png" position="0,404" zPosition="10" size="560,2" transparent="1" alphatest="on" />
<widget source="status" render="Label" position="5,408" zPosition="10" size="550,44" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, plugin_path):
Screen.__init__(self, session)
self.session = session
self.skin_path = plugin_path
self["shortcuts"] = ActionMap(["ShortcutActions", "WizardActions"],
{
"back": self.exit,
"red": self.exit,
}, -1)
self.list = []
self["list"] = List(self.list)
self["key_red"] = StaticText(_("Close"))
self["status"] = StaticText(_("A small overview of the available icon states and actions."))
self.onShown.append(self.setWindowTitle)
self.onLayoutFinish.append(self.rebuildList)
def setWindowTitle(self):
self.setTitle(_("Plugin manager help"))
def rebuildList(self):
self.list = []
self.list.append(self.buildEntryComponent('install'))
self.list.append(self.buildEntryComponent('installable'))
self.list.append(self.buildEntryComponent('installed'))
self.list.append(self.buildEntryComponent('remove'))
self['list'].setList(self.list)
self['list'].updateList(self.list)
def buildEntryComponent(self, state):
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/div-h.png"))
installedpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installed.png"))
installablepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installable.png"))
removepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png"))
installpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/install.png"))
if state == 'installed':
return(( _('This plugin is installed.'), _('You can remove this plugin.'), installedpng, divpng))
elif state == 'installable':
return(( _('This plugin is not installed.'), _('You can install this plugin.'), installablepng, divpng))
elif state == 'install':
return(( _('This plugin will be installed.'), _('You can cancel the installation.'), installpng, divpng))
elif state == 'remove':
return(( _('This plugin will be removed.'), _('You can cancel the removal.'), removepng, divpng))
def exit(self):
self.close()
class PluginDetails(Screen, PackageInfoHandler):
skin = """
<screen name="PluginDetails" position="center,center" size="600,440" title="Plugin details" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="author" render="Label" position="10,50" size="500,25" zPosition="10" font="Regular;21" transparent="1" />
<widget name="statuspic" position="550,40" size="48,48" alphatest="on"/>
<widget name="divpic" position="0,80" size="600,2" alphatest="on"/>
<widget name="detailtext" position="10,90" size="270,330" zPosition="10" font="Regular;21" transparent="1" halign="left" valign="top"/>
<widget name="screenshot" position="290,90" size="300,330" alphatest="on"/>
</screen>"""
def __init__(self, session, plugin_path, packagedata = None):
Screen.__init__(self, session)
self.skin_path = plugin_path
self.language = language.getLanguage()[:2] # getLanguage returns e.g. "fi_FI" for "language_country"
self.attributes = None
PackageInfoHandler.__init__(self, self.statusCallback)
self.directory = resolveFilename(SCOPE_METADIR)
if packagedata:
self.pluginname = packagedata[0]
self.details = packagedata[1]
self.pluginstate = packagedata[4]
self.statuspicinstance = packagedata[5]
self.divpicinstance = packagedata[6]
self.fillPackageDetails(self.details)
self.thumbnail = ""
self["shortcuts"] = ActionMap(["ShortcutActions", "WizardActions"],
{
"back": self.exit,
"red": self.exit,
"green": self.go,
"up": self.pageUp,
"down": self.pageDown,
"left": self.pageUp,
"right": self.pageDown,
}, -1)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText("")
self["author"] = StaticText()
self["statuspic"] = Pixmap()
self["divpic"] = Pixmap()
self["screenshot"] = Pixmap()
self["detailtext"] = ScrollLabel()
self["statuspic"].hide()
self["screenshot"].hide()
self["divpic"].hide()
self.package = self.packageDetails[0]
if "attributes" in self.package[0]:
self.attributes = self.package[0]["attributes"]
self.restartRequired = False
self.cmdList = []
self.oktext = _("\nAfter pressing OK, please wait!")
self.picload = ePicLoad()
self.picload.PictureData.get().append(self.paintScreenshotPixmapCB)
self.onShown.append(self.setWindowTitle)
self.onLayoutFinish.append(self.setInfos)
def setWindowTitle(self):
self.setTitle(_("Details for plugin: ") + self.pluginname )
def exit(self):
self.close(False)
def pageUp(self):
self["detailtext"].pageUp()
def pageDown(self):
self["detailtext"].pageDown()
def statusCallback(self, status, progress):
pass
def setInfos(self):
if "screenshot" in self.attributes:
self.loadThumbnail(self.attributes)
if "name" in self.attributes:
self.pluginname = self.attributes["name"]
else:
self.pluginname = _("unknown")
if "author" in self.attributes:
self.author = self.attributes["author"]
else:
self.author = _("unknown")
if "description" in self.attributes:
self.description = _(self.attributes["description"].replace("\\n", "\n"))
else:
self.description = _("No description available.")
self["author"].setText(_("Author: ") + self.author)
self["detailtext"].setText(_(self.description))
if self.pluginstate in ('installable', 'install'):
if iSoftwareTools.NetworkConnectionAvailable:
self["key_green"].setText(_("Install"))
else:
self["key_green"].setText("")
else:
self["key_green"].setText(_("Remove"))
def loadThumbnail(self, entry):
thumbnailUrl = None
if "screenshot" in entry:
thumbnailUrl = entry["screenshot"]
if self.language == "de":
if thumbnailUrl[-7:] == "_en.jpg":
thumbnailUrl = thumbnailUrl[:-7] + "_de.jpg"
if thumbnailUrl is not None:
self.thumbnail = "/tmp/" + thumbnailUrl.split('/')[-1]
print "[PluginDetails] downloading screenshot " + thumbnailUrl + " to " + self.thumbnail
if iSoftwareTools.NetworkConnectionAvailable:
client.downloadPage(thumbnailUrl,self.thumbnail).addCallback(self.setThumbnail).addErrback(self.fetchFailed)
else:
self.setThumbnail(noScreenshot = True)
else:
self.setThumbnail(noScreenshot = True)
def setThumbnail(self, noScreenshot = False):
if not noScreenshot:
filename = self.thumbnail
else:
filename = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/noprev.png")
sc = AVSwitch().getFramebufferScale()
self.picload.setPara((self["screenshot"].instance.size().width(), self["screenshot"].instance.size().height(), sc[0], sc[1], False, 1, "#00000000"))
self.picload.startDecode(filename)
if self.statuspicinstance is not None:
self["statuspic"].instance.setPixmap(self.statuspicinstance.__deref__())
self["statuspic"].show()
if self.divpicinstance is not None:
self["divpic"].instance.setPixmap(self.divpicinstance.__deref__())
self["divpic"].show()
def paintScreenshotPixmapCB(self, picInfo=None):
ptr = self.picload.getData()
if ptr is not None:
self["screenshot"].instance.setPixmap(ptr.__deref__())
self["screenshot"].show()
else:
self.setThumbnail(noScreenshot = True)
def go(self):
if "package" in self.attributes:
self.packagefiles = self.attributes["package"]
if "needsRestart" in self.attributes:
self.restartRequired = True
self.cmdList = []
if self.pluginstate in ('installed', 'remove'):
if self.packagefiles:
for package in self.packagefiles[:]:
self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": package["name"] }))
if len(self.cmdList):
self.session.openWithCallback(self.runRemove, MessageBox, _("Do you want to remove the package:\n") + self.pluginname + "\n" + self.oktext)
else:
if iSoftwareTools.NetworkConnectionAvailable:
if self.packagefiles:
for package in self.packagefiles[:]:
self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": package["name"] }))
if len(self.cmdList):
self.session.openWithCallback(self.runUpgrade, MessageBox, _("Do you want to install the package:\n") + self.pluginname + "\n" + self.oktext)
def runUpgrade(self, result):
if result:
self.session.openWithCallback(self.runUpgradeFinished, Ipkg, cmdList = self.cmdList)
def runUpgradeFinished(self):
self.reloadPluginlist()
if plugins.restartRequired or self.restartRequired:
self.session.openWithCallback(self.UpgradeReboot, MessageBox, _("Installation has completed.") + "\n" + _("Do you want to reboot your receiver?"), MessageBox.TYPE_YESNO)
else:
self.close(True)
def UpgradeReboot(self, result):
if result:
self.session.open(TryQuitMainloop,retvalue=3)
self.close(True)
def runRemove(self, result):
if result:
self.session.openWithCallback(self.runRemoveFinished, Ipkg, cmdList = self.cmdList)
def runRemoveFinished(self):
self.close(True)
def reloadPluginlist(self):
plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))
def fetchFailed(self,string):
self.setThumbnail(noScreenshot = True)
print "[PluginDetails] fetch failed " + string.getErrorMessage()
class IPKGMenu(Screen):
skin = """
<screen name="IPKGMenu" position="center,center" size="560,400" title="Select upgrade source to edit." >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="filelist" position="5,50" size="550,340" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, plugin_path):
Screen.__init__(self, session)
self.skin_path = plugin_path
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("Edit"))
self.sel = []
self.val = []
self.entry = False
self.exe = False
self.path = ""
self["actions"] = NumberActionMap(["SetupActions"],
{
"ok": self.KeyOk,
"cancel": self.keyCancel
}, -1)
self["shortcuts"] = ActionMap(["ShortcutActions"],
{
"red": self.keyCancel,
"green": self.KeyOk,
})
self["filelist"] = MenuList([])
self.fill_list()
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setWindowTitle()
def setWindowTitle(self):
self.setTitle(_("Select upgrade source to edit."))
def fill_list(self):
flist = []
self.path = '/etc/opkg/'
if (os.path.exists(self.path) == False):
self.entry = False
return
for file in os.listdir(self.path):
if file.endswith(".conf"):
if file not in ('arch.conf', 'opkg.conf'):
flist.append((file))
self.entry = True
self["filelist"].l.setList(flist)
def KeyOk(self):
if (self.exe == False) and (self.entry == True):
self.sel = self["filelist"].getCurrent()
self.val = self.path + self.sel
self.session.open(IPKGSource, self.val)
def keyCancel(self):
self.close()
def Exit(self):
self.close()
class IPKGSource(Screen):
skin = """
<screen name="IPKGSource" position="center,center" size="560,80" title="Edit upgrade source url." >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="text" position="5,50" size="550,25" font="Regular;20" backgroundColor="background" foregroundColor="#cccccc" />
</screen>"""
def __init__(self, session, configfile = None):
Screen.__init__(self, session)
self.session = session
self.configfile = configfile
text = ""
if self.configfile:
try:
fp = file(configfile, 'r')
sources = fp.readlines()
if sources:
text = sources[0]
fp.close()
except IOError:
pass
desk = getDesktop(0)
x= int(desk.size().width())
y= int(desk.size().height())
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Save"))
if (y>=720):
self["text"] = Input(text, maxSize=False, type=Input.TEXT)
else:
self["text"] = Input(text, maxSize=False, visible_width = 55, type=Input.TEXT)
self["actions"] = NumberActionMap(["WizardActions", "InputActions", "TextEntryActions", "KeyboardInputActions","ShortcutActions"],
{
"ok": self.go,
"back": self.close,
"red": self.close,
"green": self.go,
"left": self.keyLeft,
"right": self.keyRight,
"home": self.keyHome,
"end": self.keyEnd,
"deleteForward": self.keyDeleteForward,
"deleteBackward": self.keyDeleteBackward,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
}, -1)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setWindowTitle()
self["text"].right()
def setWindowTitle(self):
self.setTitle(_("Edit upgrade source url."))
def go(self):
text = self["text"].getText()
if text:
fp = file(self.configfile, 'w')
fp.write(text)
fp.write("\n")
fp.close()
self.close()
def keyLeft(self):
self["text"].left()
def keyRight(self):
self["text"].right()
def keyHome(self):
self["text"].home()
def keyEnd(self):
self["text"].end()
def keyDeleteForward(self):
self["text"].delete()
def keyDeleteBackward(self):
self["text"].deleteBackward()
def keyNumberGlobal(self, number):
self["text"].number(number)
class PacketManager(Screen, NumericalTextInput):
skin = """
<screen name="PacketManager" position="center,center" size="530,420" title="Packet manager" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="list" render="Listbox" position="5,50" size="520,365" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (5, 1), size = (440, 28), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name
MultiContentEntryText(pos = (5, 26), size = (440, 20), font=1, flags = RT_HALIGN_LEFT, text = 2), # index 2 is the description
MultiContentEntryPixmapAlphaTest(pos = (445, 2), size = (48, 48), png = 4), # index 4 is the status pixmap
					MultiContentEntryPixmapAlphaTest(pos = (5, 50), size = (510, 2), png = 5), # index 5 is the div pixmap
],
"fonts": [gFont("Regular", 22),gFont("Regular", 14)],
"itemHeight": 52
}
</convert>
</widget>
</screen>"""
def __init__(self, session, plugin_path, args = None):
Screen.__init__(self, session)
NumericalTextInput.__init__(self)
self.session = session
self.skin_path = plugin_path
self.setUseableChars(u'1234567890abcdefghijklmnopqrstuvwxyz')
self["shortcuts"] = NumberActionMap(["ShortcutActions", "WizardActions", "NumberActions", "InputActions", "InputAsciiActions", "KeyboardInputActions" ],
{
"ok": self.go,
"back": self.exit,
"red": self.exit,
"green": self.reload,
"gotAsciiCode": self.keyGotAscii,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
}, -1)
self.list = []
self.statuslist = []
self["list"] = List(self.list)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("Reload"))
self.list_updating = True
self.packetlist = []
self.installed_packetlist = {}
self.upgradeable_packages = {}
self.Console = Console()
self.cmdList = []
self.cachelist = []
		self.cache_ttl = 86400 #600 is default, 0 disables; number of seconds the cache is considered valid (24h should be ok for caching ipkgs)
		self.cache_file = eEnv.resolve('${libdir}/enigma2/python/Plugins/SystemPlugins/SoftwareManager/packetmanager.cache') #Path to cache file
self.oktext = _("\nAfter pressing OK, please wait!")
self.unwanted_extensions = ('-dbg', '-dev', '-doc', '-staticdev', '-src', 'busybox')
self.ipkg = IpkgComponent()
self.ipkg.addCallback(self.ipkgCallback)
self.onShown.append(self.setWindowTitle)
self.onLayoutFinish.append(self.rebuildList)
rcinput = eRCInput.getInstance()
rcinput.setKeyboardMode(rcinput.kmAscii)
def keyNumberGlobal(self, val):
key = self.getKey(val)
if key is not None:
keyvalue = key.encode("utf-8")
if len(keyvalue) == 1:
self.setNextIdx(keyvalue[0])
def keyGotAscii(self):
keyvalue = unichr(getPrevAsciiCode()).encode("utf-8")
if len(keyvalue) == 1:
self.setNextIdx(keyvalue[0])
def setNextIdx(self,char):
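		# move the selection to the first package whose name starts with the typed character ('0', '1' and 'a' jump to the top)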
if char in ("0", "1", "a"):
self["list"].setIndex(0)
else:
idx = self.getNextIdx(char)
if idx and idx <= self["list"].count:
self["list"].setIndex(idx)
def getNextIdx(self,char):
for idx, i in enumerate(self["list"].list):
if i[0] and (i[0][0] == char):
return idx
def exit(self):
self.ipkg.stop()
if self.Console is not None:
self.Console.killAll()
rcinput = eRCInput.getInstance()
rcinput.setKeyboardMode(rcinput.kmNone)
self.close()
def reload(self):
if (os.path.exists(self.cache_file) == True):
os.unlink(self.cache_file)
self.list_updating = True
self.rebuildList()
def setWindowTitle(self):
self.setTitle(_("Packet manager"))
def setStatus(self,status = None):
if status:
self.statuslist = []
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/div-h.png"))
if status == 'update':
statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgrade.png"))
self.statuslist.append(( _("Package list update"), '', _("Trying to download a new packetlist. Please wait..." ),'',statuspng, divpng ))
self['list'].setList(self.statuslist)
elif status == 'error':
statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png"))
self.statuslist.append(( _("Error"), '', _("An error occurred while downloading the packetlist. Please try again." ),'',statuspng, divpng ))
self['list'].setList(self.statuslist)
def rebuildList(self):
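		# show the update status, then build the package list from the cache when it
		# is still valid; otherwise run an 'opkg update' and rebuild the list from scratch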
self.setStatus('update')
self.inv_cache = 0
self.vc = valid_cache(self.cache_file, self.cache_ttl)
if self.cache_ttl > 0 and self.vc != 0:
try:
self.buildPacketList()
except:
self.inv_cache = 1
if self.cache_ttl == 0 or self.inv_cache == 1 or self.vc == 0:
self.run = 0
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
def go(self, returnValue = None):
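		# ask to remove, upgrade or install the selected package depending on its current status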
cur = self["list"].getCurrent()
if cur:
status = cur[3]
package = cur[0]
self.cmdList = []
if status == 'installed':
self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": package }))
if len(self.cmdList):
self.session.openWithCallback(self.runRemove, MessageBox, _("Do you want to remove the package:\n") + package + "\n" + self.oktext)
elif status == 'upgradeable':
self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": package }))
if len(self.cmdList):
self.session.openWithCallback(self.runUpgrade, MessageBox, _("Do you want to upgrade the package:\n") + package + "\n" + self.oktext)
elif status == "installable":
self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": package }))
if len(self.cmdList):
self.session.openWithCallback(self.runUpgrade, MessageBox, _("Do you want to install the package:\n") + package + "\n" + self.oktext)
def runRemove(self, result):
if result:
self.session.openWithCallback(self.runRemoveFinished, Ipkg, cmdList = self.cmdList)
def runRemoveFinished(self):
self.session.openWithCallback(self.RemoveReboot, MessageBox, _("Removal has completed.") + "\n" + _("Do you want to reboot your receiver?"), MessageBox.TYPE_YESNO)
def RemoveReboot(self, result):
if result is None:
return
if result is False:
cur = self["list"].getCurrent()
if cur:
item = self['list'].getIndex()
self.list[item] = self.buildEntryComponent(cur[0], cur[1], cur[2], 'installable')
self.cachelist[item] = [cur[0], cur[1], cur[2], 'installable']
self['list'].setList(self.list)
write_cache(self.cache_file, self.cachelist)
self.reloadPluginlist()
if result:
self.session.open(TryQuitMainloop,retvalue=3)
def runUpgrade(self, result):
if result:
self.session.openWithCallback(self.runUpgradeFinished, Ipkg, cmdList = self.cmdList)
def runUpgradeFinished(self):
self.session.openWithCallback(self.UpgradeReboot, MessageBox, _("Update has completed.") + "\n" +_("Do you want to reboot your receiver?"), MessageBox.TYPE_YESNO)
def UpgradeReboot(self, result):
if result is None:
return
if result is False:
cur = self["list"].getCurrent()
if cur:
item = self['list'].getIndex()
self.list[item] = self.buildEntryComponent(cur[0], cur[1], cur[2], 'installed')
self.cachelist[item] = [cur[0], cur[1], cur[2], 'installed']
self['list'].setList(self.list)
write_cache(self.cache_file, self.cachelist)
self.reloadPluginlist()
if result:
self.session.open(TryQuitMainloop,retvalue=3)
def ipkgCallback(self, event, param):
if event == IpkgComponent.EVENT_ERROR:
self.list_updating = False
self.setStatus('error')
elif event == IpkgComponent.EVENT_DONE:
if self.list_updating:
self.list_updating = False
if not self.Console:
self.Console = Console()
cmd = self.ipkg.ipkg + " list"
self.Console.ePopen(cmd, self.IpkgList_Finished)
pass
def IpkgList_Finished(self, result, retval, extra_args = None):
if result:
self.packetlist = []
last_name = ""
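			# 'opkg list' prints one package per line as "name - version - description";
			# lines without ' - ' are wrapped description text belonging to the previous entry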
for x in result.splitlines():
if ' - ' in x:
tokens = x.split(' - ')
name = tokens[0].strip()
if name and not any(name.endswith(x) for x in self.unwanted_extensions):
l = len(tokens)
version = l > 1 and tokens[1].strip() or ""
descr = l > 2 and tokens[2].strip() or ""
if name == last_name:
continue
last_name = name
self.packetlist.append([name, version, descr])
elif len(self.packetlist) > 0:
# no ' - ' in the text, assume that this is the description
# therefore add this text to the last packet description
last_packet = self.packetlist[-1]
last_packet[2] = last_packet[2] + x
if not self.Console:
self.Console = Console()
cmd = self.ipkg.ipkg + " list_installed"
self.Console.ePopen(cmd, self.IpkgListInstalled_Finished)
def IpkgListInstalled_Finished(self, result, retval, extra_args = None):
if result:
self.installed_packetlist = {}
for x in result.splitlines():
tokens = x.split(' - ')
name = tokens[0].strip()
if not any(name.endswith(x) for x in self.unwanted_extensions):
l = len(tokens)
version = l > 1 and tokens[1].strip() or ""
self.installed_packetlist[name] = version
if not self.Console:
self.Console = Console()
cmd = "opkg list-upgradable"
self.Console.ePopen(cmd, self.OpkgListUpgradeable_Finished)
def OpkgListUpgradeable_Finished(self, result, retval, extra_args = None):
if result:
self.upgradeable_packages = {}
for x in result.splitlines():
tokens = x.split(' - ')
name = tokens[0].strip()
if not any(name.endswith(x) for x in self.unwanted_extensions):
l = len(tokens)
version = l > 2 and tokens[2].strip() or ""
self.upgradeable_packages[name] = version
self.buildPacketList()
def buildEntryComponent(self, name, version, description, state):
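		# build one list row (name, version, description, state, status icon, divider)
		# matching the TemplatedMultiContent layout defined in the skin above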
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/div-h.png"))
if not description:
description = "No description available."
if state == 'installed':
installedpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installed.png"))
return((name, version, _(description), state, installedpng, divpng))
elif state == 'upgradeable':
upgradeablepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgradeable.png"))
return((name, version, _(description), state, upgradeablepng, divpng))
else:
installablepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installable.png"))
return((name, version, _(description), state, installablepng, divpng))
def buildPacketList(self):
self.list = []
self.cachelist = []
if self.cache_ttl > 0 and self.vc != 0:
print 'Loading packagelist cache from ',self.cache_file
try:
self.cachelist = load_cache(self.cache_file)
if len(self.cachelist) > 0:
for x in self.cachelist:
self.list.append(self.buildEntryComponent(x[0], x[1], x[2], x[3]))
self['list'].setList(self.list)
except:
self.inv_cache = 1
if self.cache_ttl == 0 or self.inv_cache == 1 or self.vc == 0:
print 'rebuilding fresh package list'
for x in self.packetlist:
status = ""
if x[0] in self.installed_packetlist:
if x[0] in self.upgradeable_packages:
status = "upgradeable"
else:
status = "installed"
else:
status = "installable"
self.list.append(self.buildEntryComponent(x[0], x[1], x[2], status))
self.cachelist.append([x[0], x[1], x[2], status])
write_cache(self.cache_file, self.cachelist)
self['list'].setList(self.list)
def reloadPluginlist(self):
plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))
class IpkgInstaller(Screen):
skin = """
<screen name="IpkgInstaller" position="center,center" size="550,450" title="Install extensions" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="list" position="5,50" size="540,360" />
<ePixmap pixmap="skin_default/div-h.png" position="0,410" zPosition="10" size="560,2" transparent="1" alphatest="on" />
<widget source="introduction" render="Label" position="5,420" zPosition="10" size="550,30" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, list):
Screen.__init__(self, session)
self.list = SelectionList()
self["list"] = self.list
p = 0
if len(list):
p = list[0].rfind("/")
title = list[0][:p]
self.title = ("%s %s %s") % (_("Install extensions"), _("from"), title)
for listindex in range(len(list)):
self.list.addSelection(list[listindex][p+1:], list[listindex], listindex, False)
self.list.sort()
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("Install"))
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText(_("Invert"))
self["introduction"] = StaticText(_("Press OK to toggle the selection."))
self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
{
"ok": self.list.toggleSelection,
"cancel": self.close,
"red": self.close,
"green": self.install,
"blue": self.list.toggleAllSelection
}, -1)
def install(self):
list = self.list.getSelectionsList()
cmdList = []
for item in list:
cmdList.append((IpkgComponent.CMD_INSTALL, { "package": item[1] }))
self.session.open(Ipkg, cmdList = cmdList)
def filescan_open(list, session, **kwargs):
filelist = [x.path for x in list]
session.open(IpkgInstaller, filelist) # list
def filescan(**kwargs):
from Components.Scanner import Scanner, ScanPath
return \
Scanner(mimetypes = ["application/x-debian-package"],
paths_to_scan =
[
ScanPath(path = "ipk", with_subdirs = True),
ScanPath(path = "", with_subdirs = False),
],
name = "Ipkg",
description = _("Install extensions."),
openfnc = filescan_open, )
def UpgradeMain(session, **kwargs):
session.open(UpdatePluginMenu)
def startSetup(menuid):
if menuid == "setup" and config.plugins.softwaremanager.onSetupMenu.value:
return [(_("Software management"), UpgradeMain, "software_manager", 50)]
return [ ]
def Plugins(path, **kwargs):
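	# expose "Software management" in the setup menu, plugin menu or extensions menu
	# depending on the softwaremanager configuration, and register the ipk file scanner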
global plugin_path
plugin_path = path
list = [
PluginDescriptor(name=_("Software management"), description=_("Manage your receiver's software"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc=startSetup),
PluginDescriptor(name=_("Ipkg"), where = PluginDescriptor.WHERE_FILESCAN, needsRestart = False, fnc = filescan)
]
if not config.plugins.softwaremanager.onSetupMenu.value and not config.plugins.softwaremanager.onBlueButton.value:
list.append(PluginDescriptor(name=_("Software management"), description=_("Manage your receiver's software"), where = PluginDescriptor.WHERE_PLUGINMENU, needsRestart = False, fnc=UpgradeMain))
if config.plugins.softwaremanager.onBlueButton.value:
list.append(PluginDescriptor(name=_("Software management"), description=_("Manage your receiver's software"), where = PluginDescriptor.WHERE_EXTENSIONSMENU, needsRestart = False, fnc=UpgradeMain))
return list
|
Taapat/enigma2-openpli-fulan
|
lib/python/Plugins/SystemPlugins/SoftwareManager/plugin.py
|
Python
|
gpl-2.0
| 81,511 | 0.028143 |
def extractWwwTccedwardsCom(item):
'''
Parser for 'www.tccedwards.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractWwwTccedwardsCom.py
|
Python
|
bsd-3-clause
| 548 | 0.034672 |
# coding=utf-8
from django.apps import AppConfig
from watson import search as watson
class CatalogConfig(AppConfig):
name = 'catalog'
verbose_name = 'Catálogo'
def ready(self):
Product = self.get_model('Product')
watson.register(Product)
|
gileno/djangoecommerce
|
catalog/apps.py
|
Python
|
cc0-1.0
| 272 | 0 |
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'scrapers'))
sys.path.append("..")
from datetime import datetime
from pymongo import MongoClient
from database import *
import error
import sync
import group_members as groupMembersApi
def importGroupMembers ( school_id, branch_id, team_element_id, session = False, username = False, password = False ):
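	"""
	Fetch the members of a Lectio team element and upsert them as student or
	teacher person documents in MongoDB, then store the member ids on the
	team element. Returns True on success and False on any error.
	"""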
try:
objectList = groupMembersApi.group_members({
"school_id" : school_id,
"branch_id" : branch_id,
"team_element_id" : team_element_id,
"username" : username,
"password" : password
}, session)
if objectList is None:
error.log(__file__, False, "Unknown Object")
return False
if not "status" in objectList:
error.log(__file__, False, "Unknown Object")
return False
if objectList["status"] == "ok":
members = []
for row in objectList["objects"]:
if row["type"] == "student":
unique = {
"student_id" : row["person_id"]
}
contextCards = []
contextCards.append(row["context_card_id"])
					existing = db.persons.find(unique).limit(1)
					if existing.count() > 0:
						existing = existing[0]
						if "context_cards" in existing:
							for card in existing["context_cards"]:
								if card not in contextCards:
									contextCards.append(card)
element = {
"type" : "student",
"student_id" : row["person_id"],
"name" : unicode(str(row["full_name"]).decode("utf8")),
"class_student_id" : unicode(str(row["person_text_id"]).decode("utf8")),
"last_name" : unicode(str(row["last_name"]).decode("utf8")),
"first_name" : unicode(str(row["first_name"]).decode("utf8")),
"context_cards" : contextCards,
"school_id" : str(school_id),
"branch_id" : str(branch_id)
}
if "field_of_study" in row:
						# Add field of study sync
element["field_of_study"] = {
"name" : row["field_of_study"]["name"],
"field_of_study_id" : row["field_of_study"]["field_of_study_id"]
}
if "picture_id" in row:
# Launch Fetch Picture Task
element["picture_id"] = row["picture_id"]
else:
unique = {
"teacher_id" : row["person_id"]
}
contextCards = []
contextCards.append(row["context_card_id"])
					existing = db.persons.find(unique).limit(1)
					if existing.count() > 0:
						existing = existing[0]
						if "context_cards" in existing:
							for card in existing["context_cards"]:
								if card not in contextCards:
									contextCards.append(card)
element = {
"teacher_id" : str(row["person_id"]),
"last_name" : unicode(str(row["last_name"]).decode("utf8")),
"first_name" : unicode(str(row["first_name"]).decode("utf8")),
"type" : "teacher",
"name" : unicode(str(row["full_name"]).decode("utf8")),
"abbrevation" : unicode(str(row["person_text_id"]).decode("utf8")),
"context_cards" : contextCards,
"school_id" : str(school_id),
"branch_id" : str(branch_id)
}
# Add Team to teacher
if "picture_id" in row:
# Launch Fetch Picture Task
element["picture_id"] = row["picture_id"]
status = sync.sync(db.persons, unique, element)
members.append(status["_id"])
unique = {
"school_id" : str(school_id),
"branch_id" : str(branch_id),
"team_element_id" : str(team_element_id)
}
element = {
"school_id" : str(school_id),
"branch_id" : str(branch_id),
"team_element_id" : str(team_element_id),
"members" : members
}
status = sync.sync(db.team_elements, unique, element)
return True
else:
if "error" in objectList:
error.log(__file__, False, objectList["error"])
return False
else:
error.log(__file__, False, "Unknown error")
return False
except Exception, e:
error.log(__file__, False, str(e))
return False
|
boh1996/LectioAPI
|
importers/importGroupMembers.py
|
Python
|
mit
| 3,867 | 0.041376 |
from datetime import datetime
from src import yamlsettings as settings
from models.models import Workday
from calendar import monthrange
import time
from math import ceil,floor
class Utils:
WEEKDAYS=["monday","tuesday","wednesday","thursday", "friday", "saturday", "sunday"]
MONTHS=["january","february","march","april","may","june","july","august","september","november","december"]
def __init__(self):
pass
@staticmethod
def getDatesFromRange(dS,dE):
'''
Returns an array with all dates within the given range
(includes start & end)
like:
[
{"date": <Date>, "timestamp":<utc_seconds>}...
]
'''
iterts = datetime.strptime(str(dS.day)+"."+str(dS.month)+"."+str(dS.year), "%d.%m.%Y").timestamp()
iterdate=datetime.fromtimestamp(iterts).date()
dates=[{"date":iterdate,"timestamp":iterts}]
while True:
iterts=iterts+86400
iterdate=datetime.fromtimestamp(iterts).date()
dates.append({"date":iterdate, "timestamp":iterts})
if(iterdate == dE):
break
return dates
@staticmethod
def getWeekdates(ts):
'''
Returns an array with all dates within the week of the given timestamp
like:
[
{"date": <Date>, "timestamp":<utc_seconds>}...
]
'''
daystr = datetime.fromtimestamp(ts).strftime("%A").lower()
weekdates=[]
weekday_index = Utils.WEEKDAYS.index(daystr)
weekstartts = ts - (weekday_index*86400)
weekendts = ts + ((len(Utils.WEEKDAYS)-(weekday_index+1))*86400)
weekend = datetime.fromtimestamp(weekendts).strftime("%d.%m.%Y")
iterts=weekstartts
while True:
#add date to array
iterdate=datetime.fromtimestamp(iterts).strftime("%d.%m.%Y")
weekdates.append({"date":datetime.fromtimestamp(iterts).date(),"timestamp":iterts})
#break when the end of the week is reached
if(iterdate==weekend):
break
#Add a day
iterts+=86400
return weekdates
@staticmethod
def getMonthdates(ts):
'''
Returns an array with all dates within the month of the given timestamp
like:
[
{"date": <Date>, "timestamp":<utc_seconds>}...
]
'''
monthdates=[]
dateobj=datetime.fromtimestamp(ts).date()
monthdays = monthrange(dateobj.year,dateobj.month)[1]#get the days of the month
for day in range(1,monthdays+1):
datets=datetime.strptime(str(day)+"."+str(dateobj.month)+"."+str(dateobj.year), "%d.%m.%Y").timestamp()
monthdates.append({"date":datetime.fromtimestamp(datets).date(),"timestamp":datets})
return monthdates
@staticmethod
def getYearDates(ts):
'''
Returns an array with all dates within the year of the given timestamp
based on the year_swap setting
like:
[
{"date": <Date>, "timestamp":<utc_seconds>}...
]
'''
yeardates=[]
dateobj=datetime.fromtimestamp(ts).date()
swap=datetime.strptime(settings.get("year_swap")+"."+str(dateobj.year),"%d.%m.%Y").timestamp()-86400
if(ts<swap):
            ts=swap=datetime.strptime(settings.get("year_swap")+"."+str(dateobj.year-1),"%d.%m.%Y").timestamp()-86400
dateobj=datetime.fromtimestamp(ts).date()
dStart=datetime.strptime(settings.get("year_swap")+"."+str(dateobj.year),"%d.%m.%Y").date()
lDayTs=datetime.strptime(settings.get("year_swap")+"."+str(dateobj.year),"%d.%m.%Y").timestamp()-86400
lDayDt=datetime.fromtimestamp(lDayTs).date()
dEnd=datetime.strptime(str(lDayDt.day)+"."+str(lDayDt.month)+"."+str(dateobj.year+1),"%d.%m.%Y").date()
return Utils.getDatesFromRange(dStart,dEnd)
@staticmethod
def getDoneWork(historydir, ts):
'''
Returns a dict with the work done in the current:
{
"now":<utc_seconds>
"day":<utc_seconds>
"week":<utc_seconds>
"month":<utc_seconds>
"year":<utc_seconds>
}
'''
day=Utils.getDoneWorkT("day",historydir,ts)
week=Utils.getDoneWorkT("week",historydir,ts)
month=Utils.getDoneWorkT("month",historydir,ts)
year=Utils.getDoneWorkT("year",historydir,ts)
now=year
return {
"now":now,
"day":day,
"week":week,
"month":month,
"year":year
}
@staticmethod
def getDoneWorkT(type,historydir,ts):
'''
Returns the done work depending on <type> in seconds
starting from year_swap as long as <ts> is above year_swap
type can be:
"year"
"month"
"week"
"day"
"until"
'''
work=0
wdos=[]
if(type=="year"):
wdos=Workday.loadYear(historydir,ts)
elif(type=="month"):
wdos=Workday.loadMonth(historydir,ts)
elif(type=="week"):
wdos=Workday.loadWeek(historydir,ts)
elif(type=="day"):
wdd=Workday.loadDay(historydir,ts)
            if(not wdd.get("workday") and datetime.fromtimestamp(time.time()).date() == datetime.fromtimestamp(ts).date()):
wdos.append(Workday.loadLast(historydir)) #this happens when a workday is longer than 12pm
else:
wdos.append(wdd)
elif(type=="until"):
wdos=Workday.loadYear(historydir,ts)
givenDate=datetime.fromtimestamp(ts).date()
for wdo in wdos:
if(not type=="until"):
if(not wdo.get("workday") or not Utils.inCalc(wdo.get("workday").start)):
continue #Just a day without a saved workday object or out of our calc range
work+=Utils.getWDStats(wdo.get("workday")).get("worktime")
else:#type==until
if(ts<wdos[0].get("timestamp")):# year_swap workaround
return work
if(wdo.get("workday") and Utils.inCalc(wdo.get("workday").start)):
work+=Utils.getWDStats(wdo.get("workday")).get("worktime")
if(type=="until" and wdo.get("date") == givenDate):
break # for "until" we break here
return work
@staticmethod
def getRequiredWork(ts):
'''
Returns a dict with the work required for the current:
{
"now":<utc_seconds>
"day":<utc_seconds>
"week":<utc_seconds>
"month":<utc_seconds>
"year":<utc_seconds>
}
'''
now=Utils.getRequiredWorkT("until",time.time())
day=Utils.getRequiredWorkT("day",ts)
week=Utils.getRequiredWorkT("week",ts)
month=Utils.getRequiredWorkT("month",ts)
year=Utils.getRequiredWorkT("year",ts)
return{
"now":now,
"day":day,
"week":week,
"month":month,
"year":year
}
@staticmethod
def getRequiredWorkT(type,ts):
'''
Returns the required work depending on <type> in seconds
starting from year_swap as long as <ts> is above year_swap
type can be:
"year"
"month"
"week"
"day"
"until"
'''
work=0
dates=[]
if(type=="year"):
dates=Utils.getYearDates(ts)
elif(type=="month"):
dates=Utils.getMonthdates(ts)
elif(type=="week"):
dates=Utils.getWeekdates(ts)
elif(type=="day"):
return Utils.getMinutesPerDay(ts)
elif(type=="until"):
dates=Utils.getYearDates(ts)
if(ts<dates[0].get("timestamp")):# year_swap workaround
return work
givenDate=datetime.fromtimestamp(ts).date()
for dat in dates:
if(not Utils.isFree(dat.get("timestamp"))):
work+=Utils.getMinutesPerDay(dat.get("timestamp"))
#We want to stop here for "until" otherwise we
#take all values.
if(type=="until" and dat.get("date")==givenDate):
break
return work
@staticmethod
def getPreviousLastMonthDay(ts):
'''
Returns the last day of the previous month as a timestamp
'''
dat=datetime.fromtimestamp(ts).date()
lYear=dat.year
lMonth=dat.month-1
if(lMonth<1):
lMonth=12
lYear-=1
lDay=monthrange(lYear,lMonth)[1]
return datetime.strptime(str(lDay)+"."+str(lMonth)+"."+str(lYear),"%d.%m.%Y").timestamp()
@staticmethod
def getPreviousLastYearDay(ts):
'''
Returns the last day of the previous "year" as a timestamp
The start of the year is defined by year_swap, so this is one day
before year_swap
'''
datnow=datetime.fromtimestamp(ts).date()
swap=settings.get("year_swap").split(".")
yearstart=datetime.strptime(swap[0]+"."+swap[1]+"."+str(datnow.year), "%d.%m.%Y").timestamp()
return yearstart-86400
@staticmethod
def getPreviousLastWeekDay(ts):
'''
Returns the last day of the previous week as a timestamp
'''
while True:
ts-=3600
dat=datetime.fromtimestamp(ts).date()
if(dat.weekday()==6):#sunday
#strip time
return datetime.strptime(str(dat.day)+"."+str(dat.month)+"."+str(dat.year), "%d.%m.%Y").timestamp()
@staticmethod
def inCalc(ts):
'''
Returns true when the day of the date is in the calc cycle.
'''
dat=datetime.fromtimestamp(ts).date()
if settings.get("calc_cycles").get(dat):
return True
@staticmethod
def getMinutesPerDay(ts):
'''
Returns the required worktime in minutes for the date at the given timestamp
'''
return settings.get("calc_cycles").get(datetime.fromtimestamp(ts).date()).get("minutes_per_day")*60
@staticmethod
def isFree(dt):
'''
Returns true when the day of the date is either a holiday,
specialday, non workday or not within the calc cycle.
'''
dat = datetime.fromtimestamp(dt).date()
daystr = datetime.fromtimestamp(dt).strftime("%A").lower()
datestr = datetime.fromtimestamp(dt).strftime("%d.%m.%Y")
yearstr = datetime.fromtimestamp(dt).strftime("%Y")
#holidays from settings
s_holidays=settings.get("holidays")
#Specialdays are free
s_specialdays=[]
for specialday in settings.get("specialdays"):
s_specialdays.append(specialday+"."+yearstr)#Special days in the settings miss the year
#Everything what is not in the calc_cycle is a free day
if(not settings.get("calc_cycles").get(dat)):#only dates from a user defined range are there
return True
if(daystr not in settings.get("calc_cycles").get(dat).get("workdays") or (datestr in s_holidays or datestr in s_specialdays)):
return True
return False
@staticmethod
def getWDStats(wd):
'''
Returns information about the workday in a dictionary
{
worktime:<utc_seconds>
breaktime:<utc_seconds>
}
'''
last_stamp=time.time()
if(wd.end):
last_stamp=wd.end
breaktime=0
for breaks in wd.breaks:
if(breaks.get("start") and breaks.get("end")):
breaktime+=breaks.get("end")-breaks.get("start")
elif(breaks.get("start") and not breaks.get("end") and not wd.end):
breaktime+=time.time()-breaks.get("start")
last_stamp=breaks.get("start")
worktime=last_stamp-wd.start
worktime-=breaktime
return {
"worktime":worktime,
"breaktime":breaktime
}
@staticmethod
def evalEditDay(historydir, ts=None, noOpenDays=False):
'''
When no timestamp is given, 1st. the last open day is looked up, then
2nd. the last closed day is looked up.
if noOpenDays is True it will ignore not yet closed days
In case a timestamp is given the specified day is looked up.
returns <Workday> or None
'''
wd = None
if(not ts):
wdObj = Workday.loadLast(historydir)
if(noOpenDays == True or not wdObj.get("workday")):
wd = Workday.loadLastNDay(historydir,time.time());
else:
wd = wdObj.get("workday")
else:
wd = Workday.loadDay(historydir, ts).get("workday")
return wd
@staticmethod
def pb(rawstring):
'''
Returns text decorated with border
'''
out=""
if rawstring:
while len(rawstring) > 0:
out+="| "
fill=""
end=len(rawstring)
if(end > settings.get("border_width")-4):
end=settings.get("border_width")-4
elif(end < settings.get("border_width")-4):
fill=" "*((settings.get("border_width")-4)-end)
out+=rawstring[0:end]+fill+"| \n"
rawstring=rawstring[end:len(rawstring)+1]
return out
@staticmethod
def pbn():
'''
Returns a newline with decorated border
'''
return "|"+(" "*(settings.get("border_width")-3))+"|\n"
@staticmethod
def pbdiv():
'''
Returns a dividing line
'''
return "|"+("-"*(settings.get("border_width")-3))+"|\n"
@staticmethod
def pf(text,size):
'''
Fills the given string with spaces up to the given count
'''
if(len(text)<size):
return text+(" "*(size-len(text)))
return text
@staticmethod
def pfb(text="",size=settings.get("border_width")-5,symbol="#"):
'''
Fills the given string with # up to the given count
'''
if(len(text)<size):
return text+(symbol*(size-len(text)))
return text
@staticmethod
def pfl(text="",size=settings.get("border_width")-5):
'''
Fills the given string with - up to the given count
'''
if(len(text)<size):
return text+("-"*(size-len(text)))
return text
@staticmethod
def head(text, underline=settings.get("border_width")-5,symbol="#"):
'''
Returns a head
'''
str=""
str+=Utils.pbn()
str+=Utils.pb(text)
str+=Utils.pb(Utils.pfb("",size=underline,symbol=symbol))
return str
@staticmethod
def formatDecH(ts):
return str(round(ts/3600,2))+"h"
@staticmethod
def formatDHM(ts,posSign=False):
'''
Formats seconds to a day, hours and minutes string
'''
txt=""
if(ts<0):
txt+="-"
ts=abs(ts)
elif(posSign and ts>=0):
txt+="+"
wd=86400
daysf=ts/wd
if(daysf > 0):
days=floor(daysf)
else:
days=ceil(daysf)
txt+=str(days)+"d "
ts=ts-(days*wd)
hoursf=ts/3600
if(hoursf > 0):
hours=floor(hoursf)
else:
hours=ceil(hoursf)
txt+=str(hours)+"h "
minutesf=ts%3600/60
if(minutesf > 0):
minutes=floor(minutesf)
else:
minutes=ceil(minutesf)
txt+=str(minutes)+"m "
return txt
@staticmethod
def formatHM(ts,posSign=False):
'''
Formats seconds to a hours and minutes string
'''
txt=""
if(ts<0):
txt+="-"
ts=abs(ts)
elif(posSign and ts>=0):
txt+="+"
hoursf=ts/3600
if(hoursf > 0):
hours=floor(hoursf)
else:
hours=ceil(hoursf)
txt+=str(hours)+"h "
minutesf=ts%3600/60
if(minutesf > 0):
minutes=floor(minutesf)
else:
minutes=ceil(minutesf)
txt+=str(minutes)+"m "
return txt
@staticmethod
def convertHMToSeconds(str, separator=":"):
if(not str.count(separator) == 1):
raise ValueError("Invalid format")
arr = str.split(separator)
s = 0
s+=int(arr[0])*3600
s+=int(arr[1])*60
return s
|
Nexolight/wtstamp
|
src/utils.py
|
Python
|
gpl-2.0
| 17,014 | 0.020336 |
# Copyright (C) 2010,2011,2012 Chris Lalancette <clalance@redhat.com>
# Copyright (C) 2012,2013 Chris Lalancette <clalancette@gmail.com>
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation;
# version 2.1 of the License.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# Originally borrowed from Oz 29-Mar-2013
"""
Miscellaneous utility functions.
"""
import os
import random
import subprocess
import tempfile
import errno
import stat
import shutil
import pycurl
import collections
def generate_full_auto_path(relative):
"""
Function to find the absolute path to an unattended installation file.
"""
# all of the automated installation paths are installed to $pkg_path/auto,
# so we just need to find it and generate the right path here
if relative is None:
raise Exception("The relative path cannot be None")
pkg_path = os.path.dirname(__file__)
return os.path.abspath(os.path.join(pkg_path, "auto", relative))
def executable_exists(program):
"""
Function to find out whether an executable exists in the PATH
of the user. If so, the absolute path to the executable is returned.
If not, an exception is raised.
"""
def is_exe(fpath):
"""
Helper method to check if a file exists and is executable
"""
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
if program is None:
raise Exception("Invalid program name passed")
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
raise Exception("Could not find %s" % (program))
def copyfile_sparse(src, dest):
"""
Function to copy a file sparsely if possible. The logic here is
all taken from coreutils cp, specifically the 'sparse_copy' function.
"""
if src is None:
raise Exception("Source of copy cannot be None")
if dest is None:
raise Exception("Destination of copy cannot be None")
src_fd = os.open(src, os.O_RDONLY)
dest_fd = os.open(dest, os.O_WRONLY|os.O_CREAT|os.O_TRUNC)
sb = os.fstat(src_fd)
# See io_blksize() in coreutils for an explanation of why 32*1024
buf_size = max(32*1024, sb.st_blksize)
size = sb.st_size
destlen = 0
while size != 0:
buf = os.read(src_fd, min(buf_size, size))
if len(buf) == 0:
break
buflen = len(buf)
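        # a block that is entirely zero bytes can be skipped with lseek(),
        # leaving a hole in the destination file instead of writing zeros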
if buf == '\0'*buflen:
os.lseek(dest_fd, buflen, os.SEEK_CUR)
else:
# FIXME: check out the python implementation of write, we might have
# to handle EINTR here
os.write(dest_fd, buf)
destlen += len(buf)
size -= len(buf)
os.ftruncate(dest_fd, destlen)
os.close(src_fd)
os.close(dest_fd)
def bsd_split(line, digest_type):
"""
Function to split a BSD-style checksum line into a filename and
checksum.
"""
current = len(digest_type)
if line[current] == ' ':
current += 1
if line[current] != '(':
return None, None
current += 1
# find end of filename. The BSD 'md5' and 'sha1' commands do not escape
# filenames, so search backwards for the last ')'
file_end = line.rfind(')')
if file_end == -1:
# could not find the ending ), fail
return None, None
filename = line[current:file_end]
line = line[(file_end + 1):]
line = line.lstrip()
if line[0] != '=':
return None, None
line = line[1:]
line = line.lstrip()
if line[-1] == '\n':
line = line[:-1]
return line, filename
def sum_split(line, digest_bits):
"""
Function to split a normal Linux checksum line into a filename and
checksum.
"""
digest_hex_bytes = digest_bits / 4
min_digest_line_length = digest_hex_bytes + 2 + 1 # length of hex message digest + blank and binary indicator (2 bytes) + minimum file length (1 byte)
min_length = min_digest_line_length
if line[0] == '\\':
min_length = min_length + 1
if len(line) < min_length:
# if the line is too short, skip it
return None, None
if line[0] == '\\':
current = digest_hex_bytes + 1
hex_digest = line[1:current]
escaped_filename = True
else:
current = digest_hex_bytes
hex_digest = line[0:current]
escaped_filename = False
# if the digest is not immediately followed by a white space, it is an
# error
if line[current] != ' ' and line[current] != '\t':
return None, None
current += 1
# if the whitespace is not immediately followed by another space or a *,
# it is an error
if line[current] != ' ' and line[current] != '*':
return None, None
if line[current] == '*':
binary = True
current += 1
if line[-1] == '\n':
filename = line[current:-1]
else:
filename = line[current:]
if escaped_filename:
# FIXME: a \0 is not allowed in the sum file format, but
# string_escape allows it. We'd probably have to implement our
# own codec to fix this
filename = filename.decode('string_escape')
return hex_digest, filename
def get_sum_from_file(sumfile, file_to_find, digest_bits, digest_type):
"""
Function to get a checksum digest out of a checksum file given a
filename.
"""
retval = None
f = open(sumfile, 'r')
for line in f:
binary = False
# remove any leading whitespace
line = line.lstrip()
# ignore blank lines
if len(line) == 0:
continue
# ignore comment lines
if line[0] == '#':
continue
if line.startswith(digest_type):
# OK, if it starts with a string of ["MD5", "SHA1", "SHA256"], then
# this is a BSD-style sumfile
hex_digest, filename = bsd_split(line, digest_type)
else:
# regular sumfile
hex_digest, filename = sum_split(line, digest_bits)
if hex_digest is None or filename is None:
continue
if filename == file_to_find:
retval = hex_digest
break
f.close()
return retval
def get_md5sum_from_file(sumfile, file_to_find):
"""
Function to get an MD5 checksum out of a checksum file given a filename.
"""
return get_sum_from_file(sumfile, file_to_find, 128, "MD5")
def get_sha1sum_from_file(sumfile, file_to_find):
"""
Function to get a SHA1 checksum out of a checksum file given a filename.
"""
return get_sum_from_file(sumfile, file_to_find, 160, "SHA1")
def get_sha256sum_from_file(sumfile, file_to_find):
"""
Function to get a SHA256 checksum out of a checksum file given a
filename.
"""
return get_sum_from_file(sumfile, file_to_find, 256, "SHA256")
def string_to_bool(instr):
"""
Function to take a string and determine whether it is True, Yes, False,
or No. It takes a single argument, which is the string to examine.
Returns True if instr is "Yes" or "True", False if instr is "No"
or "False", and None otherwise.
"""
if instr is None:
raise Exception("Input string was None!")
lower = instr.lower()
if lower == 'no' or lower == 'false':
return False
if lower == 'yes' or lower == 'true':
return True
return None
def generate_macaddress():
"""
Function to generate a random MAC address.
"""
mac = [0x52, 0x54, 0x00, random.randint(0x00, 0xff),
random.randint(0x00, 0xff), random.randint(0x00, 0xff)]
return ':'.join(["%02x" % x for x in mac])
class SubprocessException(Exception):
"""
Class for subprocess exceptions. In addition to a error message, it
also has a retcode member that has the returncode from the command.
"""
def __init__(self, msg, retcode):
Exception.__init__(self, msg)
self.retcode = retcode
def subprocess_check_output(*popenargs, **kwargs):
"""
Function to call a subprocess and gather the output.
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
if 'stderr' in kwargs:
raise ValueError('stderr argument not allowed, it will be overridden.')
executable_exists(popenargs[0][0])
# NOTE: it is very, very important that we use temporary files for
# collecting stdout and stderr here. There is a nasty bug in python
# subprocess; if your process produces more than 64k of data on an fd that
# is using subprocess.PIPE, the whole thing will hang. To avoid this, we
# use temporary fds to capture the data
stdouttmp = tempfile.TemporaryFile()
stderrtmp = tempfile.TemporaryFile()
process = subprocess.Popen(stdout=stdouttmp, stderr=stderrtmp, *popenargs,
**kwargs)
process.communicate()
retcode = process.poll()
stdouttmp.seek(0, 0)
stdout = stdouttmp.read()
stdouttmp.close()
stderrtmp.seek(0, 0)
stderr = stderrtmp.read()
stderrtmp.close()
if retcode:
cmd = ' '.join(*popenargs)
raise SubprocessException("'%s' failed(%d): %s" % (cmd, retcode, stderr), retcode)
return (stdout, stderr, retcode)
def ssh_execute_command(guestaddr, sshprivkey, command, timeout=10,
tunnels=None):
"""
Function to execute a command on the guest using SSH and return the
output.
"""
# ServerAliveInterval protects against NAT firewall timeouts
# on long-running commands with no output
#
# PasswordAuthentication=no prevents us from falling back to
# keyboard-interactive password prompting
#
# -F /dev/null makes sure that we don't use the global or per-user
# configuration files
cmd = ["ssh", "-i", sshprivkey,
"-F", "/dev/null",
"-o", "ServerAliveInterval=30",
"-o", "StrictHostKeyChecking=no",
"-o", "ConnectTimeout=" + str(timeout),
"-o", "UserKnownHostsFile=/dev/null",
"-o", "PasswordAuthentication=no"]
if tunnels:
for host in tunnels:
for port in tunnels[host]:
cmd.append("-R %s:%s:%s" % (tunnels[host][port], host, port))
cmd.extend( ["root@" + guestaddr, command] )
return subprocess_check_output(cmd)
def scp_copy_file(guestaddr, sshprivkey, file_to_upload, destination,
timeout=10):
"""
Function to upload a file to the guest using scp.
"""
ssh_execute_command(guestaddr, sshprivkey,
"mkdir -p " + os.path.dirname(destination), timeout)
# ServerAliveInterval protects against NAT firewall timeouts
# on long-running commands with no output
#
# PasswordAuthentication=no prevents us from falling back to
# keyboard-interactive password prompting
#
# -F /dev/null makes sure that we don't use the global or per-user
# configuration files
return subprocess_check_output(["scp", "-i", sshprivkey,
"-F", "/dev/null",
"-o", "ServerAliveInterval=30",
"-o", "StrictHostKeyChecking=no",
"-o", "ConnectTimeout=" + str(timeout),
"-o", "UserKnownHostsFile=/dev/null",
"-o", "PasswordAuthentication=no",
file_to_upload,
"root@" + guestaddr + ":" + destination])
def mkdir_p(path):
"""
Function to make a directory and all intermediate directories as
necessary. The functionality differs from os.makedirs slightly, in
that this function does *not* raise an error if the directory already
exists.
"""
if path is None:
raise Exception("Path cannot be None")
try:
os.makedirs(path)
except OSError as err:
if err.errno != errno.EEXIST or not os.path.isdir(path):
raise
def copy_modify_file(inname, outname, subfunc):
"""
Function to copy a file from inname to outname, passing each line
through subfunc first. subfunc is expected to be a method that takes
a single argument in (the next line), and returns a string to be
written to the output file after modification (if any).
"""
if inname is None:
raise Exception("input filename is None")
if outname is None:
raise Exception("output filename is None")
if subfunc is None:
raise Exception("subfunction is None")
if not isinstance(subfunc, collections.Callable):
raise Exception("subfunction is not callable")
infile = open(inname, 'r')
outfile = open(outname, 'w')
for line in infile:
outfile.write(subfunc(line))
infile.close()
outfile.close()
def write_cpio(inputdict, outputfile):
"""
Function to write a CPIO archive in the "New ASCII Format". The
inputlist is a dictionary of files to put in the archive, where the
dictionary key is the path to the file on the local filesystem and the
dictionary value is the location that the file should have in the cpio
archive. The outputfile is the location of the final cpio archive that
will be written.
"""
if inputdict is None:
raise Exception("input dictionary was None")
if outputfile is None:
raise Exception("output file was None")
outf = open(outputfile, "w")
try:
for inputfile, destfile in list(inputdict.items()):
inf = open(inputfile, 'r')
st = os.fstat(inf.fileno())
# 070701 is the magic for new CPIO (newc in cpio parlance)
outf.write("070701")
# inode (really just needs to be unique)
outf.write("%08x" % (st[stat.ST_INO]))
# mode
outf.write("%08x" % (st[stat.ST_MODE]))
# uid is 0
outf.write("00000000")
# gid is 0
outf.write("00000000")
# nlink (always a single link for a single file)
outf.write("00000001")
# mtime
outf.write("%08x" % (st[stat.ST_MTIME]))
# filesize
outf.write("%08x" % (st[stat.ST_SIZE]))
# devmajor
outf.write("%08x" % (os.major(st[stat.ST_DEV])))
# dev minor
outf.write("%08x" % (os.minor(st[stat.ST_DEV])))
# rdevmajor (always 0)
outf.write("00000000")
# rdevminor (always 0)
outf.write("00000000")
# namesize (the length of the name plus 1 for the NUL padding)
outf.write("%08x" % (len(destfile) + 1))
# check (always 0)
outf.write("00000000")
# write the name of the inputfile minus the leading /
stripped = destfile.lstrip('/')
outf.write(stripped)
# we now need to write sentinel NUL byte(s). We need to make the
# header (110 bytes) plus the filename, plus the sentinel a
# multiple of 4 bytes. Note that we always need at *least* one NUL,
# so if it is exactly a multiple of 4 we need to write 4 NULs
outf.write("\x00"*(4 - ((110+len(stripped)) % 4)))
# now write the data from the input file
outf.writelines(inf)
inf.close()
# we now need to write out NUL byte(s) to make it a multiple of 4.
# note that unlike the name, we do *not* have to have any NUL bytes,
# so if it is already aligned on 4 bytes do nothing
remainder = st[stat.ST_SIZE] % 4
if remainder != 0:
outf.write("\x00"*(4 - remainder))
# now that we have written all of the file entries, write the trailer
outf.write("070701")
# zero inode
outf.write("00000000")
# zero mode
outf.write("00000000")
# zero uid
outf.write("00000000")
# zero gid
outf.write("00000000")
# one nlink
outf.write("00000001")
# zero mtime
outf.write("00000000")
# zero filesize
outf.write("00000000")
# zero devmajor
outf.write("00000000")
# zero devminor
outf.write("00000000")
# zero rdevmajor
outf.write("00000000")
# zero rdevminor
outf.write("00000000")
# 0xB namesize
outf.write("0000000B")
# zero check
outf.write("00000000")
# trailer
outf.write("TRAILER!!!")
# finally, we need to pad to the closest 512 bytes
outf.write("\x00"*(512 - (outf.tell() % 512)))
except:
os.unlink(outputfile)
raise
outf.close()
def config_get_key(config, section, key, default):
"""
Function to retrieve config parameters out of the config file.
"""
if config is not None and config.has_section(section) and config.has_option(section, key):
return config.get(section, key)
else:
return default
def config_get_boolean_key(config, section, key, default):
"""
Function to retrieve boolean config parameters out of the config file.
"""
value = config_get_key(config, section, key, None)
if value is None:
return default
retval = string_to_bool(value)
if retval is None:
raise Exception("Configuration parameter '%s' must be True, Yes, False, or No" % (key))
return retval
def rmtree_and_sync(directory):
shutil.rmtree(directory)
# after we do the rmtree, there are usually a lot of metadata updates
# pending. This can cause the next steps (especially the steps where
# libvirt is launching the guest) to fail, just because they timeout. To
# try to workaround this, fsync the directory, which will cause us to wait
# until those updates have made it to disk. Note that this cannot save us
# if the system is extremely busy for other reasons, but at least the
# problem won't be self-inflicted.
fd = os.open(os.path.dirname(directory), os.O_RDONLY)
os.fsync(fd)
os.close(fd)
def default_output_dir():
if os.geteuid() == 0:
directory = "/var/lib/libvirt/images"
else:
directory = "~/.oz/images"
return os.path.expanduser(directory)
def default_data_dir():
if os.geteuid() == 0:
directory = "/var/lib/oz"
else:
directory = "~/.oz"
return os.path.expanduser(directory)
def default_screenshot_dir():
return os.path.join(default_data_dir(), "screenshots")
def http_get_header(url, redirect=True):
"""
Function to get the HTTP headers from a URL. The available headers will be
returned in a dictionary. If redirect=True (the default), then this
function will automatically follow http redirects through to the final
destination, entirely transparently to the caller. If redirect=False, then
this function will follow http redirects through to the final destination,
and also store that information in the 'Redirect-URL' key. Note that
'Redirect-URL' will always be None in the redirect=True case, and may be
None in the redirect=True case if no redirects were required.
"""
info = {}
def _header(buf):
buf = buf.strip()
if len(buf) == 0:
return
split = buf.split(':')
if len(split) < 2:
# not a valid header; skip
return
key = split[0].strip()
value = split[1].strip()
info[key] = value
def _data(buf):
pass
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.NOBODY, True)
c.setopt(c.HEADERFUNCTION, _header)
c.setopt(c.HEADER, True)
c.setopt(c.WRITEFUNCTION, _data)
if redirect:
c.setopt(c.FOLLOWLOCATION, True)
c.perform()
info['HTTP-Code'] = c.getinfo(c.HTTP_CODE)
if info['HTTP-Code'] == 0:
# if this was a file:/// URL, then the HTTP_CODE returned 0.
# set it to 200 to be compatible with http
info['HTTP-Code'] = 200
if not redirect:
info['Redirect-URL'] = c.getinfo(c.REDIRECT_URL)
c.close()
return info
def http_download_file(url, fd, show_progress, logger):
"""
Function to download a file from url to file descriptor fd.
"""
class Progress(object):
def __init__(self):
self.last_mb = -1
def _progress(self, down_total, down_current, up_total, up_current):
"""
Function that is called back from the pycurl perform() method to
update the progress information.
"""
if down_total == 0:
return
current_mb = int(down_current) / 10485760
if current_mb > self.last_mb or down_current == down_total:
self.last_mb = current_mb
logger.debug("%dkB of %dkB" % (down_current/1024, down_total/1024))
def _data(buf):
"""
Function that is called back from the pycurl perform() method to
actually write data to disk.
"""
os.write(fd, buf)
progress = Progress()
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.CONNECTTIMEOUT, 5)
c.setopt(c.WRITEFUNCTION, _data)
c.setopt(c.FOLLOWLOCATION, 1)
if show_progress:
c.setopt(c.NOPROGRESS, 0)
c.setopt(c.PROGRESSFUNCTION, progress._progress)
c.perform()
c.close()
|
imcleod/anaconda-ec2
|
ozutil.py
|
Python
|
lgpl-2.1
| 22,263 | 0.001887 |
"""Test adding external statistics from Tibber."""
from unittest.mock import AsyncMock
from homeassistant.components.recorder.statistics import statistics_during_period
from homeassistant.components.tibber.sensor import TibberDataCoordinator
from homeassistant.util import dt as dt_util
from .test_common import CONSUMPTION_DATA_1, mock_get_homes
from tests.common import async_init_recorder_component
from tests.components.recorder.common import async_wait_recording_done_without_instance
async def test_async_setup_entry(hass):
"""Test setup Tibber."""
await async_init_recorder_component(hass)
tibber_connection = AsyncMock()
tibber_connection.name = "tibber"
tibber_connection.fetch_consumption_data_active_homes.return_value = None
tibber_connection.get_homes = mock_get_homes
coordinator = TibberDataCoordinator(hass, tibber_connection)
await coordinator._async_update_data()
await async_wait_recording_done_without_instance(hass)
# Validate consumption
statistic_id = "tibber:energy_consumption_home_id"
stats = await hass.async_add_executor_job(
statistics_during_period,
hass,
dt_util.parse_datetime(CONSUMPTION_DATA_1[0]["from"]),
None,
[statistic_id],
"hour",
True,
)
assert len(stats) == 1
assert len(stats[statistic_id]) == 3
_sum = 0
for k, stat in enumerate(stats[statistic_id]):
assert stat["start"] == dt_util.parse_datetime(CONSUMPTION_DATA_1[k]["from"])
assert stat["state"] == CONSUMPTION_DATA_1[k]["consumption"]
assert stat["mean"] is None
assert stat["min"] is None
assert stat["max"] is None
assert stat["last_reset"] is None
_sum += CONSUMPTION_DATA_1[k]["consumption"]
assert stat["sum"] == _sum
# Validate cost
statistic_id = "tibber:energy_totalcost_home_id"
stats = await hass.async_add_executor_job(
statistics_during_period,
hass,
dt_util.parse_datetime(CONSUMPTION_DATA_1[0]["from"]),
None,
[statistic_id],
"hour",
True,
)
assert len(stats) == 1
assert len(stats[statistic_id]) == 3
_sum = 0
for k, stat in enumerate(stats[statistic_id]):
assert stat["start"] == dt_util.parse_datetime(CONSUMPTION_DATA_1[k]["from"])
assert stat["state"] == CONSUMPTION_DATA_1[k]["totalCost"]
assert stat["mean"] is None
assert stat["min"] is None
assert stat["max"] is None
assert stat["last_reset"] is None
_sum += CONSUMPTION_DATA_1[k]["totalCost"]
assert stat["sum"] == _sum
|
rohitranjan1991/home-assistant
|
tests/components/tibber/test_statistics.py
|
Python
|
mit
| 2,649 | 0.00151 |
""" Various global variables """
import os
PROG_NAME = "gBuilder"
PROG_VERSION = "2.0.0"
environ = {"os":"Windows",
"path":os.environ["GINI_HOME"]+"/",
"remotepath":"./",
"images":os.environ["GINI_HOME"]+"/share/gbuilder/images/",
"config":os.environ["GINI_HOME"]+"/etc/",
"sav":os.environ["GINI_HOME"]+"/sav/",
"tmp":os.environ["GINI_HOME"]+"/tmp/",
"doc":os.environ["GINI_HOME"]+"/doc/"}
options = {"names":True,
"systray":False,
"elasticMode":False, "keepElasticMode":False,
"smoothing":True, "glowingLights":True, "style":"Plastique",
"grid":True, "gridColor":"(240,240,240)",
"background":environ["images"] + "background.jpg",
"windowTheme":environ["images"] + "background2.jpg",
"baseTheme":environ["images"] + "background3.jpg",
"autorouting":True, "autogen":True, "autocompile":True,
"graphing":True, "username":"",
"server":"localhost", "session":"GINI", "autoconnect":True,
"localPort":"10001", "remotePort":"10000",
"restore":True}
mainWidgets = {"app":None,
"main":None,
"canvas":None,
"tab":None,
"popup":None,
"log":None,
"tm":None,
"properties":None,
"interfaces":None,
"routes":None,
"drop":None,
"client":None}
defaultOptions = {"palette":None}
|
sciyoshi/gini
|
frontend/src/gbuilder/Core/globals.py
|
Python
|
mit
| 1,542 | 0.029183 |
"""This module contains tests that exercise the canned VMware Automate stuff."""
from textwrap import dedent
import fauxfactory
import pytest
from widgetastic.widget import View
from widgetastic_patternfly import Dropdown
from cfme import test_requirements
from cfme.common import BaseLoggedInPage
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.wait import wait_for
pytestmark = [
test_requirements.automate,
pytest.mark.meta(server_roles="+automate"),
pytest.mark.long_running,
pytest.mark.ignore_stream("upstream"),
pytest.mark.tier(3),
pytest.mark.provider(
[VMwareProvider], required_fields=[['provisioning', 'template']],
scope="module")
]
@pytest.fixture(scope="module")
def cls(domain):
original_class = domain.parent\
.instantiate(name='ManageIQ')\
.namespaces.instantiate(name='System')\
.classes.instantiate(name='Request')
original_class.copy_to(domain=domain)
return domain.namespaces.instantiate(name='System').classes.instantiate(name='Request')
@pytest.fixture(scope="module")
def testing_group(appliance):
group_desc = fauxfactory.gen_alphanumeric()
group = appliance.collections.button_groups.create(
text=group_desc,
hover=group_desc,
type=appliance.collections.button_groups.VM_INSTANCE
)
yield group
group.delete_if_exists()
@pytest.fixture(scope="function")
def testing_vm(setup_provider, provider):
collection = provider.appliance.provider_based_collection(provider)
try:
template_name = provider.data['templates']['full_template']['name']
except KeyError:
pytest.skip('Unable to identify full_template for provider: {}'.format(provider))
vm = collection.instantiate(
random_vm_name("ae-hd"),
provider,
template_name=template_name
)
try:
vm.create_on_provider(find_in_cfme=True, allow_skip="default")
yield vm
finally:
vm.cleanup_on_provider()
def test_vmware_vimapi_hotadd_disk(
appliance, request, testing_group, testing_vm, domain, cls):
"""Tests hot adding a disk to vmware vm. This test exercises the `VMware_HotAdd_Disk` method,
located in `/Integration/VMware/VimApi`
Polarion:
assignee: ghubale
initialEstimate: 1/8h
casecomponent: Automate
caseimportance: critical
tags: automate
testSteps:
1. It creates an instance in ``System/Request`` that can be accessible from eg. button
2. Then it creates a button, that refers to the ``VMware_HotAdd_Disk`` in ``Request``.
The button shall belong in the VM and instance button group.
3. After the button is created, it goes to a VM's summary page, clicks the button.
4. The test waits until the capacity of disks is raised.
Bugzilla:
1211627
1311221
"""
meth = cls.methods.create(
name=fauxfactory.gen_alpha(15, start="load_value_"),
script=dedent('''\
# Sets the capacity of the new disk.
$evm.root['size'] = 1 # GB
exit MIQ_OK
'''))
request.addfinalizer(meth.delete_if_exists)
# Instance that calls the method and is accessible from the button
instance = cls.instances.create(
name=fauxfactory.gen_alpha(23, start="VMware_HotAdd_Disk_"),
fields={
"meth4": {'value': meth.name}, # To get the value
"rel5": {'value': "/Integration/VMware/VimApi/VMware_HotAdd_Disk"},
},
)
request.addfinalizer(instance.delete_if_exists)
# Button that will invoke the dialog and action
button_name = fauxfactory.gen_alphanumeric()
button = testing_group.buttons.create(
text=button_name,
hover=button_name,
system="Request",
request=instance.name)
request.addfinalizer(button.delete_if_exists)
def _get_disk_capacity():
view = testing_vm.load_details(refresh=True)
return view.entities.summary('Datastore Allocation Summary').get_text_of('Total Allocation')
original_disk_capacity = _get_disk_capacity()
logger.info('Initial disk allocation: %s', original_disk_capacity)
class CustomButtonView(View):
custom_button = Dropdown(testing_group.text)
view = appliance.browser.create_view(CustomButtonView)
view.custom_button.item_select(button.text)
view = appliance.browser.create_view(BaseLoggedInPage)
view.flash.assert_no_error()
try:
wait_for(
lambda: _get_disk_capacity() > original_disk_capacity, num_sec=180, delay=5)
finally:
logger.info('End disk capacity: %s', _get_disk_capacity())
|
izapolsk/integration_tests
|
cfme/tests/automate/test_vmware_methods.py
|
Python
|
gpl-2.0
| 4,838 | 0.00186 |
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
# Parameters
# ==================================================
# Data Parameters
# tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
# tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the positive data.")
# Eval Parameters
# tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "./runs/1495355705/checkpoints", "Checkpoint directory from training run")
# tf.flags.DEFINE_boolean("eval_train", False, "Evaluate on all training data")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
# Evaluation
# ==================================================
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
embeddings_ts = graph.get_operation_by_name("embedding/W").outputs[0]
embeddings = sess.run(embeddings_ts)[1:]
np.save("embedding", embeddings)
print("embedding saved to embedding.npy")
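# Illustrative note (not part of the original script): np.save("embedding", ...)
# writes the array to "embedding.npy", so a downstream consumer could reload it
# later with, e.g., `embeddings = np.load("embedding.npy")`.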
|
kevinlee9/cnn-text-classification-tf
|
load.py
|
Python
|
apache-2.0
| 1,773 | 0.003384 |
from bot_constant import FORWARD_LIST
import global_vars
from utils import send_both_side
from command import command_listener
import telegram
import logging
logger = logging.getLogger("CTBPlugin." + __name__)
logger.debug(__name__ + " loading")
global_vars.create_variable('group_members', [[]] * len(FORWARD_LIST))
def reload_all_qq_namelist():
for i in range(len(FORWARD_LIST)):
global_vars.group_members[i] = global_vars.qq_bot.get_group_member_list(group_id=FORWARD_LIST[i]['QQ'])
@command_listener('update namelist', 'name', description='update namelist for current group')
def update_namelist(forward_index: int,
tg_group_id: int=None,
tg_user: telegram.User=None,
tg_message_id: int=None,
tg_reply_to: telegram.Message=None,
qq_group_id: int=None,
qq_discuss_id: int=None,
qq_user: int=None):
global_vars.group_members[forward_index] = global_vars.qq_bot.get_group_member_list(group_id=FORWARD_LIST[forward_index]['QQ'])
    message = 'QQ群名片已重新加载'  # i.e. "QQ group name list reloaded"
return send_both_side(forward_index,
message,
qq_group_id,
qq_discuss_id,
tg_group_id,
tg_message_id)
reload_all_qq_namelist()
|
nickyc4/coolq-telegram-bot
|
plugins/qq_namelist.py
|
Python
|
gpl-3.0
| 1,401 | 0.012274 |
from setuptools import setup
with open('README.md') as f:
long_description = f.read()
setup(
name='Flask-pyoidc',
version='3.9.0',
packages=['flask_pyoidc'],
package_dir={'': 'src'},
url='https://github.com/zamzterz/flask-pyoidc',
license='Apache 2.0',
author='Samuel Gulliksson',
author_email='samuel.gulliksson@gmail.com',
description='Flask extension for OpenID Connect authentication.',
install_requires=[
'oic>=1.2.1',
'Flask',
'requests',
'importlib_resources'
],
package_data={'flask_pyoidc': ['parse_fragment.html']},
long_description=long_description,
long_description_content_type='text/markdown',
)
|
its-dirg/Flask-pyoidc
|
setup.py
|
Python
|
apache-2.0
| 704 | 0 |
#!/usr/bin/env python
"""RDFValues used to communicate with Chipsec."""
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_proto import chipsec_pb2
class DumpFlashImageRequest(rdf_structs.RDFProtoStruct):
"""A request to Chipsec to dump the flash image (BIOS)."""
protobuf = chipsec_pb2.DumpFlashImageRequest
class DumpFlashImageResponse(rdf_structs.RDFProtoStruct):
"""A response from Chipsec to dump the flash image (BIOS)."""
protobuf = chipsec_pb2.DumpFlashImageResponse
rdf_deps = [
rdf_paths.PathSpec,
]
class ACPITableData(rdf_structs.RDFProtoStruct):
"""Response from Chipsec for one ACPI table."""
protobuf = chipsec_pb2.ACPITableData
rdf_deps = [
rdfvalue.RDFBytes,
]
class DumpACPITableRequest(rdf_structs.RDFProtoStruct):
"""A request to Chipsec to dump an ACPI table."""
protobuf = chipsec_pb2.DumpACPITableRequest
class DumpACPITableResponse(rdf_structs.RDFProtoStruct):
"""A response from Chipsec to dump an ACPI table."""
protobuf = chipsec_pb2.DumpACPITableResponse
rdf_deps = [
ACPITableData,
]
|
google/grr
|
grr/core/grr_response_core/lib/rdfvalues/chipsec_types.py
|
Python
|
apache-2.0
| 1,216 | 0.010691 |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import httpretty
import pytest
from selenium.common.exceptions import InvalidArgumentException
from appium.webdriver.webdriver import WebDriver
from test.unit.helper.test_helper import android_w3c_driver, appium_command, get_httpretty_request_body
class TestWebDriverRemoteFs(object):
@httpretty.activate
def test_push_file(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/push_file'),
)
dest_path = '/path/to/file.txt'
data = base64.b64encode(bytes('HelloWorld', 'utf-8')).decode('utf-8')
assert isinstance(driver.push_file(dest_path, data), WebDriver)
d = get_httpretty_request_body(httpretty.last_request())
assert d['path'] == dest_path
assert d['data'] == str(data)
@httpretty.activate
def test_push_file_invalid_arg_exception_without_src_path_and_base64data(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/push_file'),
)
dest_path = '/path/to/file.txt'
with pytest.raises(InvalidArgumentException):
driver.push_file(dest_path)
@httpretty.activate
def test_push_file_invalid_arg_exception_with_src_file_not_found(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/push_file'),
)
dest_path = '/dest_path/to/file.txt'
src_path = '/src_path/to/file.txt'
with pytest.raises(InvalidArgumentException):
driver.push_file(dest_path, source_path=src_path)
@httpretty.activate
def test_pull_file(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/pull_file'),
body='{"value": "SGVsbG9Xb3JsZA=="}',
)
dest_path = '/path/to/file.txt'
assert driver.pull_file(dest_path) == str(base64.b64encode(bytes('HelloWorld', 'utf-8')).decode('utf-8'))
d = get_httpretty_request_body(httpretty.last_request())
assert d['path'] == dest_path
@httpretty.activate
def test_pull_folder(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/pull_folder'),
body='{"value": "base64EncodedZippedFolderData"}',
)
dest_path = '/path/to/file.txt'
assert driver.pull_folder(dest_path) == 'base64EncodedZippedFolderData'
d = get_httpretty_request_body(httpretty.last_request())
assert d['path'] == dest_path
|
appium/python-client
|
test/unit/webdriver/device/remote_fs_test.py
|
Python
|
apache-2.0
| 3,434 | 0.000874 |
# This file is part of PRAW.
#
# PRAW is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# PRAW is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# PRAW. If not, see <http://www.gnu.org/licenses/>.
"""
Error classes.
Includes two main exceptions: ClientException, for when something goes
wrong on our end, and APIException, for when something goes wrong on the
server side. A number of classes extend these two main exceptions for more
specific exceptions.
"""
from __future__ import print_function, unicode_literals
import inspect
import six
import sys
class PRAWException(Exception):
"""The base PRAW Exception class.
Ideally, this can be caught to handle any exception from PRAW.
"""
class ClientException(PRAWException):
"""Base exception class for errors that don't involve the remote API."""
def __init__(self, message=None):
"""Construct a ClientException.
:param message: The error message to display.
"""
if not message:
message = 'Clientside error'
super(ClientException, self).__init__()
self.message = message
def __str__(self):
"""Return the message of the error."""
return self.message
class OAuthScopeRequired(ClientException):
"""Indicates that an OAuth2 scope is required to make the function call.
The attribute `scope` will contain the name of the necessary scope.
"""
def __init__(self, function, scope, message=None):
"""Contruct an OAuthScopeRequiredClientException.
:param function: The function that requires a scope.
:param scope: The scope required for the function.
:param message: A custom message to associate with the
exception. Default: `function` requires the OAuth2 scope `scope`
"""
if not message:
message = '`{0}` requires the OAuth2 scope `{1}`'.format(function,
scope)
super(OAuthScopeRequired, self).__init__(message)
self.scope = scope
class LoginRequired(ClientException):
"""Indicates that a logged in session is required.
This exception is raised on a preemptive basis, whereas NotLoggedIn occurs
in response to a lack of credentials on a privileged API call.
"""
def __init__(self, function, message=None):
"""Construct a LoginRequired exception.
:param function: The function that requires login-based authentication.
:param message: A custom message to associate with the exception.
Default: `function` requires a logged in session
"""
if not message:
message = '`{0}` requires a logged in session'.format(function)
super(LoginRequired, self).__init__(message)
class LoginOrScopeRequired(OAuthScopeRequired, LoginRequired):
"""Indicates that either a logged in session or OAuth2 scope is required.
The attribute `scope` will contain the name of the necessary scope.
"""
def __init__(self, function, scope, message=None):
"""Construct a LoginOrScopeRequired exception.
:param function: The function that requires authentication.
:param scope: The scope that is required if not logged in.
:param message: A custom message to associate with the exception.
Default: `function` requires a logged in session or the OAuth2
scope `scope`
"""
if not message:
message = ('`{0}` requires a logged in session or the '
'OAuth2 scope `{1}`').format(function, scope)
super(LoginOrScopeRequired, self).__init__(function, scope, message)
class ModeratorRequired(LoginRequired):
"""Indicates that a moderator of the subreddit is required."""
def __init__(self, function):
"""Construct a ModeratorRequired exception.
:param function: The function that requires moderator access.
"""
message = ('`{0}` requires a moderator '
'of the subreddit').format(function)
super(ModeratorRequired, self).__init__(message)
class ModeratorOrScopeRequired(LoginOrScopeRequired, ModeratorRequired):
"""Indicates that a moderator of the sub or OAuth2 scope is required.
The attribute `scope` will contain the name of the necessary scope.
"""
def __init__(self, function, scope):
"""Construct a ModeratorOrScopeRequired exception.
        :param function: The function that requires moderator authentication or
            a moderator scope.
        :param scope: The scope that is required if not logged in with
            moderator access.
"""
message = ('`{0}` requires a moderator of the subreddit or the '
'OAuth2 scope `{1}`').format(function, scope)
super(ModeratorOrScopeRequired, self).__init__(function, scope,
message)
class OAuthAppRequired(ClientException):
"""Raised when an OAuth client cannot be initialized.
This occurs when any one of the OAuth config values are not set.
"""
class HTTPException(PRAWException):
"""Base class for HTTP related exceptions."""
def __init__(self, _raw, message=None):
"""Construct a HTTPException.
        :param _raw: The internal request library response object. This object
is mapped to attribute `_raw` whose format may change at any time.
"""
if not message:
message = 'HTTP error'
super(HTTPException, self).__init__()
self._raw = _raw
self.message = message
def __str__(self):
"""Return the message of the error."""
return self.message
class Forbidden(HTTPException):
"""Raised when the user does not have permission to the entity."""
class NotFound(HTTPException):
"""Raised when the requested entity is not found."""
class InvalidComment(PRAWException):
"""Indicate that the comment is no longer available on reddit."""
ERROR_TYPE = 'DELETED_COMMENT'
def __str__(self):
"""Return the message of the error."""
return self.ERROR_TYPE
class InvalidSubmission(PRAWException):
"""Indicates that the submission is no longer available on reddit."""
ERROR_TYPE = 'DELETED_LINK'
def __str__(self):
"""Return the message of the error."""
return self.ERROR_TYPE
class InvalidSubreddit(PRAWException):
"""Indicates that an invalid subreddit name was supplied."""
ERROR_TYPE = 'SUBREDDIT_NOEXIST'
def __str__(self):
"""Return the message of the error."""
return self.ERROR_TYPE
class RedirectException(PRAWException):
"""Raised when a redirect response occurs that is not expected."""
def __init__(self, request_url, response_url, message=None):
"""Construct a RedirectException.
:param request_url: The url requested.
:param response_url: The url being redirected to.
:param message: A custom message to associate with the exception.
"""
if not message:
message = ('Unexpected redirect '
'from {0} to {1}').format(request_url, response_url)
super(RedirectException, self).__init__()
self.request_url = request_url
self.response_url = response_url
self.message = message
def __str__(self):
"""Return the message of the error."""
return self.message
class OAuthException(PRAWException):
"""Base exception class for OAuth API calls.
Attribute `message` contains the error message.
Attribute `url` contains the url that resulted in the error.
"""
def __init__(self, message, url):
"""Construct a OAuthException.
:param message: The message associated with the exception.
:param url: The url that resulted in error.
"""
super(OAuthException, self).__init__()
self.message = message
self.url = url
def __str__(self):
"""Return the message along with the url."""
return self.message + " on url {0}".format(self.url)
class OAuthInsufficientScope(OAuthException):
"""Raised when the current OAuth scope is not sufficient for the action.
This indicates the access token is valid, but not for the desired action.
"""
class OAuthInvalidGrant(OAuthException):
"""Raised when the code to retrieve access information is not valid."""
class OAuthInvalidToken(OAuthException):
"""Raised when the current OAuth access token is not valid."""
class APIException(PRAWException):
"""Base exception class for the reddit API error message exceptions.
All exceptions of this type should have their own subclass.
"""
def __init__(self, error_type, message, field='', response=None):
"""Construct an APIException.
:param error_type: The error type set on reddit's end.
:param message: The associated message for the error.
:param field: The input field associated with the error, or ''.
:param response: The HTTP response that resulted in the exception.
"""
super(APIException, self).__init__()
self.error_type = error_type
self.message = message
self.field = field
self.response = response
def __str__(self):
"""Return a string containing the error message and field."""
if hasattr(self, 'ERROR_TYPE'):
return '`{0}` on field `{1}`'.format(self.message, self.field)
else:
return '({0}) `{1}` on field `{2}`'.format(self.error_type,
self.message,
self.field)
class ExceptionList(APIException):
"""Raised when more than one exception occurred."""
def __init__(self, errors):
"""Construct an ExceptionList.
:param errors: The list of errors.
"""
super(ExceptionList, self).__init__(None, None)
self.errors = errors
def __str__(self):
"""Return a string representation for all the errors."""
ret = '\n'
for i, error in enumerate(self.errors):
ret += '\tError {0}) {1}\n'.format(i, six.text_type(error))
return ret
class AlreadySubmitted(APIException):
"""An exception to indicate that a URL was previously submitted."""
ERROR_TYPE = 'ALREADY_SUB'
class AlreadyModerator(APIException):
"""Used to indicate that a user is already a moderator of a subreddit."""
ERROR_TYPE = 'ALREADY_MODERATOR'
class BadCSS(APIException):
"""An exception to indicate bad CSS (such as invalid) was used."""
ERROR_TYPE = 'BAD_CSS'
class BadCSSName(APIException):
"""An exception to indicate a bad CSS name (such as invalid) was used."""
ERROR_TYPE = 'BAD_CSS_NAME'
class BadUsername(APIException):
"""An exception to indicate an invalid username was used."""
ERROR_TYPE = 'BAD_USERNAME'
class InvalidCaptcha(APIException):
"""An exception for when an incorrect captcha error is returned."""
ERROR_TYPE = 'BAD_CAPTCHA'
class InvalidEmails(APIException):
"""An exception for when invalid emails are provided."""
ERROR_TYPE = 'BAD_EMAILS'
class InvalidFlairTarget(APIException):
"""An exception raised when an invalid user is passed as a flair target."""
ERROR_TYPE = 'BAD_FLAIR_TARGET'
class InvalidInvite(APIException):
"""Raised when attempting to accept a nonexistent moderator invite."""
ERROR_TYPE = 'NO_INVITE_FOUND'
class InvalidUser(APIException):
"""An exception for when a user doesn't exist."""
ERROR_TYPE = 'USER_DOESNT_EXIST'
class InvalidUserPass(APIException):
"""An exception for failed logins."""
ERROR_TYPE = 'WRONG_PASSWORD'
class InsufficientCreddits(APIException):
"""Raised when there are not enough creddits to complete the action."""
ERROR_TYPE = 'INSUFFICIENT_CREDDITS'
class NotLoggedIn(APIException):
"""An exception for when a Reddit user isn't logged in."""
ERROR_TYPE = 'USER_REQUIRED'
class NotModified(APIException):
"""An exception raised when reddit returns {'error': 304}.
This error indicates that the requested content was not modified and is
being requested too frequently. Such an error usually occurs when multiple
instances of PRAW are running concurrently or in rapid succession.
"""
def __init__(self, response):
"""Construct an instance of the NotModified exception.
This error does not have an error_type, message, nor field.
"""
super(NotModified, self).__init__(None, None, response=response)
def __str__(self):
"""Return: That page has not been modified."""
return 'That page has not been modified.'
class RateLimitExceeded(APIException):
"""An exception for when something has happened too frequently.
Contains a `sleep_time` attribute for the number of seconds that must
transpire prior to the next request.
"""
ERROR_TYPE = 'RATELIMIT'
def __init__(self, error_type, message, field, response):
"""Construct an instance of the RateLimitExceeded exception.
The parameters match that of :class:`APIException`.
The `sleep_time` attribute is extracted from the response object.
"""
super(RateLimitExceeded, self).__init__(error_type, message,
field, response)
self.sleep_time = self.response['ratelimit']
class SubredditExists(APIException):
"""An exception to indicate that a subreddit name is not available."""
ERROR_TYPE = 'SUBREDDIT_EXISTS'
class UsernameExists(APIException):
"""An exception to indicate that a username is not available."""
ERROR_TYPE = 'USERNAME_TAKEN'
def _build_error_mapping():
def predicate(obj):
return inspect.isclass(obj) and hasattr(obj, 'ERROR_TYPE')
tmp = {}
for _, obj in inspect.getmembers(sys.modules[__name__], predicate):
tmp[obj.ERROR_TYPE] = obj
return tmp
ERROR_MAPPING = _build_error_mapping()
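# Illustrative lookup (not part of the original module): ERROR_MAPPING maps the
# ERROR_TYPE strings returned by reddit to the exception classes defined above,
# so an error payload could be translated roughly like this:
#
#   exc_class = ERROR_MAPPING.get('SUBREDDIT_EXISTS', APIException)
#   raise exc_class('SUBREDDIT_EXISTS', 'that subreddit already exists', 'name')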
|
dmarx/praw
|
praw/errors.py
|
Python
|
gpl-3.0
| 14,656 | 0.000068 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Batch Uploader core functions. Uploading metadata and documents.
"""
import os
import pwd
import grp
import sys
import time
import tempfile
import cgi
import re
from invenio.dbquery import run_sql, Error
from invenio.access_control_engine import acc_authorize_action
from invenio.webuser import collect_user_info, page_not_authorized
from invenio.config import CFG_BINDIR, CFG_TMPSHAREDDIR, CFG_LOGDIR, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, \
CFG_OAI_ID_FIELD, CFG_BATCHUPLOADER_DAEMON_DIR, \
CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS, \
CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS, \
CFG_PREFIX, CFG_SITE_LANG
from invenio.textutils import encode_for_xml
from invenio.bibtask import task_low_level_submission
from invenio.messages import gettext_set_language
from invenio.textmarc2xmlmarc import transform_file
from invenio.shellutils import run_shell_command
from invenio.bibupload import xml_marc_to_records, bibupload
import invenio.bibupload as bibupload_module
from invenio.bibrecord import create_records, \
record_strip_empty_volatile_subfields, \
record_strip_empty_fields
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
PERMITTED_MODES = ['-i', '-r', '-c', '-a', '-ir',
'--insert', '--replace', '--correct', '--append']
_CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS_RE = re.compile(CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS)
def cli_allocate_record(req):
req.content_type = "text/plain"
req.send_http_header()
# check IP and useragent:
if not _check_client_ip(req):
msg = "[ERROR] Sorry, client IP %s cannot use the service." % _get_client_ip(req)
_log(msg)
return _write(req, msg)
if not _check_client_useragent(req):
msg = '[ERROR] Sorry, the "%s" useragent cannot use the service.' % _get_useragent(req)
_log(msg)
return _write(req, msg)
recid = run_sql("insert into bibrec (creation_date,modification_date) values(NOW(),NOW())")
return recid
def cli_upload(req, file_content=None, mode=None, callback_url=None, nonce=None, special_treatment=None):
""" Robot interface for uploading MARC files
"""
req.content_type = "text/plain"
req.send_http_header()
# check IP and useragent:
if not _check_client_ip(req):
msg = "[ERROR] Sorry, client IP %s cannot use the service." % _get_client_ip(req)
_log(msg)
return _write(req, msg)
if not _check_client_useragent(req):
msg = "[ERROR] Sorry, the %s useragent cannot use the service." % _get_useragent(req)
_log(msg)
return _write(req, msg)
arg_mode = mode
if not arg_mode:
msg = "[ERROR] Please specify upload mode to use."
_log(msg)
return _write(req, msg)
if not arg_mode in PERMITTED_MODES:
msg = "[ERROR] Invalid upload mode."
_log(msg)
return _write(req, msg)
arg_file = file_content
if hasattr(arg_file, 'read'):
## We've been passed a readable file, e.g. req
arg_file = arg_file.read()
if not arg_file:
msg = "[ERROR] Please provide a body to your request."
_log(msg)
return _write(req, msg)
else:
if not arg_file:
msg = "[ERROR] Please specify file body to input."
_log(msg)
return _write(req, msg)
if hasattr(arg_file, "filename"):
arg_file = arg_file.value
else:
msg = "[ERROR] 'file' parameter must be a (single) file"
_log(msg)
return _write(req, msg)
# write temporary file:
tempfile.tempdir = CFG_TMPSHAREDDIR
filename = tempfile.mktemp(prefix="batchupload_" + \
time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_")
filedesc = open(filename, 'w')
filedesc.write(arg_file)
filedesc.close()
# check if this client can run this file:
client_ip = _get_client_ip(req)
permitted_dbcollids = CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS[client_ip]
if permitted_dbcollids != ['*']: # wildcard
allow = _check_client_can_submit_file(client_ip, filename, req, 0)
if not allow:
msg = "[ERROR] Cannot submit such a file from this IP. (Wrong collection.)"
_log(msg)
return _write(req, msg)
# check validity of marcxml
xmlmarclint_path = CFG_BINDIR + '/xmlmarclint'
xmlmarclint_output, dummy1, dummy2 = run_shell_command('%s %s' % (xmlmarclint_path, filename))
if xmlmarclint_output != 0:
msg = "[ERROR] MARCXML is not valid."
_log(msg)
return _write(req, msg)
args = ['bibupload', "batchupload", arg_mode, filename]
# run upload command
if callback_url:
args += ["--callback-url", callback_url]
if nonce:
args += ["--nonce", nonce]
if special_treatment:
args += ["--special-treatment", special_treatment]
task_low_level_submission(*args)
msg = "[INFO] %s" % ' '.join(args)
_log(msg)
return _write(req, msg)
def metadata_upload(req, metafile=None, filetype=None, mode=None, exec_date=None,
exec_time=None, metafilename=None, ln=CFG_SITE_LANG,
priority="1", email_logs_to=None):
"""
Metadata web upload service. Get upload parameters and exec bibupload for the given file.
Finally, write upload history.
@return: tuple (error code, message)
        error code: code that indicates if an error occurred
message: message describing the error
"""
# start output:
req.content_type = "text/html"
req.send_http_header()
error_codes = {'not_authorized': 1}
# write temporary file:
if filetype != 'marcxml':
metafile = _transform_input_to_marcxml(file_input=metafile)
user_info = collect_user_info(req)
tempfile.tempdir = CFG_TMPSHAREDDIR
filename = tempfile.mktemp(prefix="batchupload_" + \
user_info['nickname'] + "_" + time.strftime("%Y%m%d%H%M%S",
time.localtime()) + "_")
filedesc = open(filename, 'w')
filedesc.write(metafile)
filedesc.close()
# check if this client can run this file:
if req is not None:
allow = _check_client_can_submit_file(req=req, metafile=metafile, webupload=1, ln=ln)
if allow[0] != 0:
return (error_codes['not_authorized'], allow[1])
# run upload command:
task_arguments = ('bibupload', user_info['nickname'], mode, "--name=" + metafilename, "--priority=" + priority)
if exec_date:
date = exec_date
if exec_time:
date += ' ' + exec_time
task_arguments += ("-t", date)
if email_logs_to:
task_arguments += ('--email-logs-to', email_logs_to)
task_arguments += (filename, )
jobid = task_low_level_submission(*task_arguments)
# write batch upload history
run_sql("""INSERT INTO hstBATCHUPLOAD (user, submitdate,
filename, execdate, id_schTASK, batch_mode)
VALUES (%s, NOW(), %s, %s, %s, "metadata")""",
(user_info['nickname'], metafilename,
exec_date != "" and (exec_date + ' ' + exec_time)
or time.strftime("%Y-%m-%d %H:%M:%S"), str(jobid), ))
return (0, "Task %s queued" % str(jobid))
def document_upload(req=None, folder="", matching="", mode="", exec_date="", exec_time="", ln=CFG_SITE_LANG, priority="1", email_logs_to=None):
""" Take files from the given directory and upload them with the appropiate mode.
@parameters:
+ folder: Folder where the files to upload are stored
+ matching: How to match file names with record fields (report number, barcode,...)
+ mode: Upload mode (append, revise, replace)
@return: tuple (file, error code)
file: file name causing the error to notify the user
error code:
                1 - More than one possible recID, ambiguous behaviour
                2 - No records match that file name
                3 - File already exists
                4 - A file with the same name and format already exists
                5 - No rights to upload to the collection
    """
import sys
if sys.hexversion < 0x2060000:
from md5 import md5
else:
from hashlib import md5
from invenio.bibdocfile import BibRecDocs, file_strip_ext
import shutil
from invenio.search_engine import perform_request_search, \
search_pattern, \
guess_collection_of_a_record
_ = gettext_set_language(ln)
errors = []
info = [0, []] # Number of files read, name of the files
try:
files = os.listdir(folder)
except OSError, error:
errors.append(("", error))
return errors, info
err_desc = {1: _("More than one possible recID, ambiguous behaviour"), 2: _("No records match that file name"),
3: _("File already exists"), 4: _("A file with the same name and format already exists"),
5: _("No rights to upload to collection '%s'")}
# Create directory DONE/ if doesn't exist
folder = (folder[-1] == "/") and folder or (folder + "/")
files_done_dir = folder + "DONE/"
try:
os.mkdir(files_done_dir)
except OSError:
# Directory exists or no write permission
pass
for docfile in files:
if os.path.isfile(os.path.join(folder, docfile)):
info[0] += 1
identifier = file_strip_ext(docfile)
extension = docfile[len(identifier):]
rec_id = None
if identifier:
rec_id = search_pattern(p=identifier, f=matching, m='e')
if not rec_id:
errors.append((docfile, err_desc[2]))
continue
elif len(rec_id) > 1:
errors.append((docfile, err_desc[1]))
continue
else:
rec_id = str(list(rec_id)[0])
rec_info = BibRecDocs(rec_id)
if rec_info.bibdocs:
for bibdoc in rec_info.bibdocs:
attached_files = bibdoc.list_all_files()
file_md5 = md5(open(os.path.join(folder, docfile), "rb").read()).hexdigest()
num_errors = len(errors)
for attached_file in attached_files:
if attached_file.checksum == file_md5:
errors.append((docfile, err_desc[3]))
break
elif attached_file.get_full_name() == docfile:
errors.append((docfile, err_desc[4]))
break
if len(errors) > num_errors:
continue
# Check if user has rights to upload file
if req is not None:
file_collection = guess_collection_of_a_record(int(rec_id))
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=file_collection)
if auth_code != 0:
error_msg = err_desc[5] % file_collection
errors.append((docfile, error_msg))
continue
tempfile.tempdir = CFG_TMPSHAREDDIR
# Move document to be uploaded to temporary folder
tmp_file = tempfile.mktemp(prefix=identifier + "_" + time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_", suffix=extension)
shutil.copy(os.path.join(folder, docfile), tmp_file)
# Create MARC temporary file with FFT tag and call bibupload
filename = tempfile.mktemp(prefix=identifier + '_')
filedesc = open(filename, 'w')
marc_content = """ <record>
<controlfield tag="001">%(rec_id)s</controlfield>
<datafield tag="FFT" ind1=" " ind2=" ">
<subfield code="n">%(name)s</subfield>
<subfield code="a">%(path)s</subfield>
</datafield>
</record> """ % {'rec_id': rec_id,
'name': encode_for_xml(identifier),
'path': encode_for_xml(tmp_file),
}
filedesc.write(marc_content)
filedesc.close()
info[1].append(docfile)
user = ""
if req is not None:
user_info = collect_user_info(req)
user = user_info['nickname']
if not user:
user = "batchupload"
            # Execute bibupload with the appropriate mode
task_arguments = ('bibupload', user, "--" + mode, "--name=" + docfile, "--priority=" + priority)
if exec_date:
date = '--runtime=' + "\'" + exec_date + ' ' + exec_time + "\'"
task_arguments += (date, )
if email_logs_to:
task_arguments += ("--email-logs-to", email_logs_to)
task_arguments += (filename, )
jobid = task_low_level_submission(*task_arguments)
# write batch upload history
run_sql("""INSERT INTO hstBATCHUPLOAD (user, submitdate,
filename, execdate, id_schTASK, batch_mode)
VALUES (%s, NOW(), %s, %s, %s, "document")""",
(user_info['nickname'], docfile,
exec_date != "" and (exec_date + ' ' + exec_time)
or time.strftime("%Y-%m-%d %H:%M:%S"), str(jobid)))
# Move file to DONE folder
done_filename = docfile + "_" + time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_" + str(jobid)
try:
os.rename(os.path.join(folder, docfile), os.path.join(files_done_dir, done_filename))
except OSError:
                errors.append((docfile, 'MoveError'))
return errors, info
def get_user_metadata_uploads(req):
"""Retrieve all metadata upload history information for a given user"""
user_info = collect_user_info(req)
upload_list = run_sql("""SELECT DATE_FORMAT(h.submitdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
h.filename, DATE_FORMAT(h.execdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
s.status \
FROM hstBATCHUPLOAD h INNER JOIN schTASK s \
ON h.id_schTASK = s.id \
WHERE h.user=%s and h.batch_mode="metadata"
ORDER BY h.submitdate DESC""", (user_info['nickname'],))
return upload_list
def get_user_document_uploads(req):
"""Retrieve all document upload history information for a given user"""
user_info = collect_user_info(req)
upload_list = run_sql("""SELECT DATE_FORMAT(h.submitdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
h.filename, DATE_FORMAT(h.execdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
s.status \
FROM hstBATCHUPLOAD h INNER JOIN schTASK s \
ON h.id_schTASK = s.id \
WHERE h.user=%s and h.batch_mode="document"
ORDER BY h.submitdate DESC""", (user_info['nickname'],))
return upload_list
def get_daemon_doc_files():
""" Return all files found in batchuploader document folders """
files = {}
for folder in ['/revise', '/append']:
try:
daemon_dir = CFG_BATCHUPLOADER_DAEMON_DIR[0] == '/' and CFG_BATCHUPLOADER_DAEMON_DIR \
or CFG_PREFIX + '/' + CFG_BATCHUPLOADER_DAEMON_DIR
directory = daemon_dir + '/documents' + folder
files[directory] = [(filename, []) for filename in os.listdir(directory) if os.path.isfile(os.path.join(directory, filename))]
for file_instance, info in files[directory]:
stat_info = os.lstat(os.path.join(directory, file_instance))
info.append("%s" % pwd.getpwuid(stat_info.st_uid)[0]) # Owner
info.append("%s" % grp.getgrgid(stat_info.st_gid)[0]) # Group
info.append("%d" % stat_info.st_size) # Size
time_stat = stat_info.st_mtime
time_fmt = "%Y-%m-%d %R"
info.append(time.strftime(time_fmt, time.gmtime(time_stat))) # Last modified
except OSError:
pass
return files
def get_daemon_meta_files():
""" Return all files found in batchuploader metadata folders """
files = {}
for folder in ['/correct', '/replace', '/insert', '/append']:
try:
daemon_dir = CFG_BATCHUPLOADER_DAEMON_DIR[0] == '/' and CFG_BATCHUPLOADER_DAEMON_DIR \
or CFG_PREFIX + '/' + CFG_BATCHUPLOADER_DAEMON_DIR
directory = daemon_dir + '/metadata' + folder
files[directory] = [(filename, []) for filename in os.listdir(directory) if os.path.isfile(os.path.join(directory, filename))]
for file_instance, info in files[directory]:
stat_info = os.lstat(os.path.join(directory, file_instance))
info.append("%s" % pwd.getpwuid(stat_info.st_uid)[0]) # Owner
info.append("%s" % grp.getgrgid(stat_info.st_gid)[0]) # Group
info.append("%d" % stat_info.st_size) # Size
time_stat = stat_info.st_mtime
time_fmt = "%Y-%m-%d %R"
info.append(time.strftime(time_fmt, time.gmtime(time_stat))) # Last modified
except OSError:
pass
return files
def user_authorization(req, ln):
""" Check user authorization to visit page """
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader')
if auth_code != 0:
referer = '/batchuploader/'
return page_not_authorized(req=req, referer=referer,
text=auth_message, navmenuid="batchuploader")
else:
return None
def perform_basic_upload_checks(xml_record):
""" Performs tests that would provoke the bibupload task to fail with
an exit status 1, to prevent batchupload from crashing while alarming
the user wabout the issue
"""
from invenio.bibupload import writing_rights_p
errors = []
if not writing_rights_p():
errors.append("Error: BibUpload does not have rights to write fulltext files.")
recs = create_records(xml_record, 1, 1)
if recs == []:
errors.append("Error: Cannot parse MARCXML file.")
elif recs[0][0] is None:
errors.append("Error: MARCXML file has wrong format: %s" % recs)
return errors
def perform_upload_check(xml_record, mode):
""" Performs a upload simulation with the given record and mode
@return: string describing errors
@rtype: string
"""
error_cache = []
def my_writer(msg, stream=sys.stdout, verbose=1):
if verbose == 1:
if 'DONE' not in msg:
error_cache.append(msg.strip())
orig_writer = bibupload_module.write_message
bibupload_module.write_message = my_writer
error_cache.extend(perform_basic_upload_checks(xml_record))
if error_cache:
# There has been some critical error
return '\n'.join(error_cache)
recs = xml_marc_to_records(xml_record)
try:
upload_mode = mode[2:]
# Adapt input data for bibupload function
if upload_mode == "r insert-or-replace":
upload_mode = "replace_or_insert"
for record in recs:
if record:
record_strip_empty_volatile_subfields(record)
record_strip_empty_fields(record)
bibupload(record, opt_mode=upload_mode, pretend=True)
finally:
bibupload_module.write_message = orig_writer
return '\n'.join(error_cache)
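# Illustrative call (not part of the original module; the MARCXML snippet and
# mode below are made-up examples): given a record string and one of the
# permitted upload modes, the simulation returns a newline-joined error report:
#
#   problems = perform_upload_check('<record>...</record>', '--insert')
#   if problems:
#       _log(problems)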
def _get_useragent(req):
"""Return client user agent from req object."""
user_info = collect_user_info(req)
return user_info['agent']
def _get_client_ip(req):
"""Return client IP address from req object."""
return str(req.remote_ip)
def _check_client_ip(req):
"""
Is this client permitted to use the service?
"""
client_ip = _get_client_ip(req)
if client_ip in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS.keys():
return True
return False
def _check_client_useragent(req):
"""
Is this user agent permitted to use the service?
"""
client_useragent = _get_useragent(req)
if _CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS_RE.match(client_useragent):
return True
return False
def _check_client_can_submit_file(client_ip="", metafile="", req=None, webupload=0, ln=CFG_SITE_LANG):
"""
Is this client able to upload such a FILENAME?
    Check 980 $a values and collection tags in the file to see if they are among the
    permitted ones as specified by CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS and ACC_AUTHORIZE_ACTION.
    Useful to make sure that the client does not overwrite other records by
    mistake.
"""
_ = gettext_set_language(ln)
recs = create_records(metafile, 0, 0)
user_info = collect_user_info(req)
filename_tag980_values = _detect_980_values_from_marcxml_file(recs)
for filename_tag980_value in filename_tag980_values:
if not filename_tag980_value:
if not webupload:
return False
else:
return(1, "Invalid collection in tag 980")
if not webupload:
if not filename_tag980_value in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS[client_ip]:
return False
else:
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=filename_tag980_value)
if auth_code != 0:
error_msg = _("The user '%(x_user)s' is not authorized to modify collection '%(x_coll)s'") % \
{'x_user': user_info['nickname'], 'x_coll': filename_tag980_value}
return (auth_code, error_msg)
filename_rec_id_collections = _detect_collections_from_marcxml_file(recs)
for filename_rec_id_collection in filename_rec_id_collections:
if not webupload:
if not filename_rec_id_collection in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS[client_ip]:
return False
else:
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=filename_rec_id_collection)
if auth_code != 0:
error_msg = _("The user '%(x_user)s' is not authorized to modify collection '%(x_coll)s'") % \
{'x_user': user_info['nickname'], 'x_coll': filename_rec_id_collection}
return (auth_code, error_msg)
if not webupload:
return True
else:
return (0, " ")
def _detect_980_values_from_marcxml_file(recs):
"""
Read MARCXML file and return list of 980 $a values found in that file.
Useful for checking rights.
"""
from invenio.bibrecord import record_get_field_values
collection_tag = run_sql("SELECT value FROM tag, field_tag, field \
WHERE tag.id=field_tag.id_tag AND \
field_tag.id_field=field.id AND \
field.code='collection'")
collection_tag = collection_tag[0][0]
dbcollids = {}
for rec, dummy1, dummy2 in recs:
if rec:
for tag980 in record_get_field_values(rec,
tag=collection_tag[:3],
ind1=collection_tag[3],
ind2=collection_tag[4],
code=collection_tag[5]):
dbcollids[tag980] = 1
return dbcollids.keys()
def _detect_collections_from_marcxml_file(recs):
"""
Extract all possible recIDs from MARCXML file and guess collections
for these recIDs.
"""
from invenio.bibrecord import record_get_field_values
from invenio.search_engine import guess_collection_of_a_record
from invenio.bibupload import find_record_from_sysno, \
find_records_from_extoaiid, \
find_record_from_oaiid
dbcollids = {}
sysno_tag = CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG
oaiid_tag = CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG
oai_tag = CFG_OAI_ID_FIELD
for rec, dummy1, dummy2 in recs:
if rec:
for tag001 in record_get_field_values(rec, '001'):
collection = guess_collection_of_a_record(int(tag001))
dbcollids[collection] = 1
for tag_sysno in record_get_field_values(rec, tag=sysno_tag[:3],
ind1=sysno_tag[3],
ind2=sysno_tag[4],
code=sysno_tag[5]):
record = find_record_from_sysno(tag_sysno)
if record:
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
for tag_oaiid in record_get_field_values(rec, tag=oaiid_tag[:3],
ind1=oaiid_tag[3],
ind2=oaiid_tag[4],
code=oaiid_tag[5]):
try:
records = find_records_from_extoaiid(tag_oaiid)
except Error:
records = []
if records:
record = records.pop()
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
for tag_oai in record_get_field_values(rec, tag=oai_tag[0:3],
ind1=oai_tag[3],
ind2=oai_tag[4],
code=oai_tag[5]):
record = find_record_from_oaiid(tag_oai)
if record:
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
return dbcollids.keys()
def _transform_input_to_marcxml(file_input=""):
"""
Takes text-marc as input and transforms it
to MARCXML.
"""
# Create temporary file to read from
tmp_fd, filename = tempfile.mkstemp(dir=CFG_TMPSHAREDDIR)
os.write(tmp_fd, file_input)
os.close(tmp_fd)
try:
# Redirect output, transform, restore old references
old_stdout = sys.stdout
new_stdout = StringIO()
sys.stdout = new_stdout
transform_file(filename)
finally:
sys.stdout = old_stdout
return new_stdout.getvalue()
def _log(msg, logfile="webupload.log"):
"""
Log MSG into LOGFILE with timestamp.
"""
filedesc = open(CFG_LOGDIR + "/" + logfile, "a")
filedesc.write(time.strftime("%Y-%m-%d %H:%M:%S") + " --> " + msg + "\n")
filedesc.close()
return
def _write(req, msg):
"""
Write MSG to the output stream for the end user.
"""
req.write(msg + "\n")
return
|
AlbertoPeon/invenio
|
modules/bibupload/lib/batchuploader_engine.py
|
Python
|
gpl-2.0
| 28,252 | 0.004071 |
"""update sample prep steps
Revision ID: 45f4b2dbc41a
Revises: d1653e552ab
Create Date: 2018-07-18 10:01:26.668385
"""
# revision identifiers, used by Alembic.
revision = '45f4b2dbc41a'
down_revision = 'd1653e552ab'
import sqlalchemy as sa
from alembic import op
def upgrade():
for step in ('mount', 'gold_table', 'us_wand', 'eds', 'cl', 'bse', 'se'):
op.add_column('SamplePrepStepTbl',
sa.Column(step, sa.String(140)))
def downgrade():
for step in ('mount', 'gold_table', 'us_wand', 'eds', 'cl', 'bse', 'se'):
op.drop_column('SamplePrepStepTbl', step)
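# Illustrative note (not part of the original migration): with a configured
# alembic.ini, this revision would normally be applied with the standard
# Alembic CLI, e.g. `alembic upgrade 45f4b2dbc41a`, and rolled back with
# `alembic downgrade d1653e552ab`; the surrounding project may wrap these calls.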
|
UManPychron/pychron
|
alembic_dvc/versions/45f4b2dbc41a_update_sample_prep_s.py
|
Python
|
apache-2.0
| 607 | 0.003295 |