repo_name | path | copies | size | content | license
---|---|---|---|---|---
akhilpm/Masters-Project | kpcaWithTreeFS/mnistKPCA.py | 1 | 3216 |
'''
KPCA based feature engineering for MNIST handwritten digits classification
Author : Akhil P M
Kernel used : Arc-cosine Kernel
'''
import numpy as np
import matplotlib.pyplot as plt
import time
from sklearn import svm, datasets
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets.mldata import fetch_mldata
from sklearn.metrics import accuracy_score
from sklearn.cross_validation import train_test_split
def compute_J(N, theta):
if N == 0:
return np.pi - theta
elif N == 1:
return np.sin(theta) + (np.pi - theta) * np.cos(theta)
elif N == 2:
return 3*np.sin(theta)*np.cos(theta) + (np.pi - theta)*(1 + 2*pow(np.cos(theta), 2))
elif N == 3:
return 4*pow(np.sin(theta), 3) + 15*np.sin(theta)*pow(np.cos(theta), 2) + \
(np.pi- theta)*(9*pow(np.sin(theta),2)*np.cos(theta) + 15*pow(np.cos(theta),3))
else:
return np.zeros(theta.shape)
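# Note (illustrative, not part of the original script): compute_J implements the
# closed-form J_n(theta) terms of the arc-cosine kernel (Cho & Saul) for degrees
# n = 0..3; any other degree falls back to an all-zero array. Quick sanity check
# at theta = 0:
#   compute_J(0, np.zeros(1))   # -> array([pi]), since pi - 0 = pi
#   compute_J(1, np.zeros(1))   # -> array([pi]), since sin(0) + pi*cos(0) = pi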
def arc_cosine_vector(X, Y):
"""param = a vector of n(degree) values at each layer """
param = np.array([1,1,1])
no_of_layers = len(param)
M = np.dot(X, Y.T)
temp1 = np.diag(np.dot(X, X.T))
temp2 = np.diag(np.dot(Y, Y.T))
for i in xrange(no_of_layers):
norm_matrix = np.outer(temp1,temp2) #the matix of k_xx, and K_yy's
theta = np.arccos( np.maximum( np.minimum(M/np.sqrt(norm_matrix), 1.0), -1.0))
n_l = param[i]
M = np.multiply(np.power(norm_matrix, n_l/2.0), compute_J(n_l, theta)) / np.pi
if i < no_of_layers-1:
zero1 = np.zeros(len(temp1))
zero2 = np.zeros(len(temp2))
temp1 = np.multiply(np.power(temp1, n_l), compute_J(n_l, zero1)) / np.pi
temp2 = np.multiply(np.power(temp2, n_l), compute_J(n_l, zero2)) / np.pi
return M
def arc_cosine(X, Y):
lenX = X.shape[0]
incr = 1000
M = np.zeros((lenX, Y.shape[0]))
for i in range(0,lenX,incr):
M[i:i+incr] = arc_cosine_vector(X[i:i+incr], Y)
return M
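# Illustrative usage sketch (not part of the original script): arc_cosine builds
# the full Gram matrix in row blocks of 1000 by delegating to arc_cosine_vector.
# A small self-consistency check could look like
#   X = np.random.rand(50, 20)
#   K = arc_cosine(X, X)
#   assert K.shape == (50, 50) and np.allclose(K, K.T)
# since a kernel (Gram) matrix computed on a single set of points must be
# symmetric.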
def main():
#set the timer
start = time.time()
#load the data
mnist = fetch_mldata('MNIST original')
mnist.target = mnist.target.astype(np.int32)
seed = np.random.randint(1,30000)
rand = np.random.RandomState(seed)
items = len(mnist.target)
indices = rand.randint(items, size = 70000)
trindex = indices[0:30000]
tsindex = indices[30000:]
#scale down features to the range [0, 1]
mnist.data = mnist.data/255.0
mnist.data = mnist.data.astype(np.float32)
trainX = mnist.data[trindex]
testX = mnist.data[tsindex]
trainY = mnist.target[trindex]
testY = mnist.target[tsindex]
#extract the features using KPCA
kpca = KernelPCA(kernel='precomputed')
kpca_train = arc_cosine(trainX[0:1000], trainX[0:1000])
#Fit the model from data in X
kpca.fit(kpca_train)
kernel_train = arc_cosine(trainX, trainX[0:1000])
kernel_test = arc_cosine(testX, trainX[0:1000])
trainX_kpca = kpca.transform(kernel_train)
testX_kpca = kpca.transform(kernel_test)
print testX_kpca.shape
#fit the svm model and compute the accuracy measure
clf = svm.SVC(kernel=arc_cosine)
clf.fit(trainX_kpca, trainY)
pred = clf.predict(testX_kpca)
print accuracy_score(testY, pred)
print('total : %d, correct : %d, incorrect : %d\n' %(len(pred), np.sum(pred == testY), np.sum(pred != testY)))
print('Test Time : %f Minutes\n' %((time.time()-start)/60))
if __name__ == '__main__':
main()
| mit
yl565/statsmodels | statsmodels/graphics/_regressionplots_doc.py | 31 | 3795 |
_plot_added_variable_doc = """\
Create an added variable plot for a fitted regression model.
Parameters
----------
%(extra_params_doc)sfocus_exog : int or string
The column index of exog, or a variable name, indicating the
variable whose role in the regression is to be assessed.
resid_type : string
The type of residuals to use for the dependent variable. If
None, uses `resid_deviance` for GLM/GEE and `resid` otherwise.
use_glm_weights : bool
Only used if the model is a GLM or GEE. If True, the
residuals for the focus predictor are computed using WLS, with
the weights obtained from the IRLS calculations for fitting
the GLM. If False, unweighted regression is used.
fit_kwargs : dict, optional
Keyword arguments to be passed to fit when refitting the
model.
ax : Axes instance
Matplotlib Axes instance
Returns
-------
fig : matplotlib Figure
A matplotlib figure instance.
"""
_plot_partial_residuals_doc = """\
Create a partial residual, or 'component plus residual' plot for a
fitted regression model.
Parameters
----------
%(extra_params_doc)sfocus_exog : int or string
The column index of exog, or variable name, indicating the
variable whose role in the regression is to be assessed.
ax : Axes instance
Matplotlib Axes instance
Returns
-------
fig : matplotlib Figure
A matplotlib figure instance.
"""
_plot_ceres_residuals_doc = """\
Produces a CERES (Conditional Expectation Partial Residuals)
plot for a fitted regression model.
Parameters
----------
%(extra_params_doc)sfocus_exog : integer or string
The column index of results.model.exog, or the variable name,
indicating the variable whose role in the regression is to be
assessed.
frac : float
Lowess tuning parameter for the adjusted model used in the
CERES analysis. Not used if `cond_means` is provided.
cond_means : array-like, optional
If provided, the columns of this array span the space of the
conditional means E[exog | focus exog], where exog ranges over
some or all of the columns of exog (other than the focus exog).
ax : matplotlib.Axes instance, optional
The axes on which to draw the plot. If not provided, a new
axes instance is created.
Returns
-------
fig : matplotlib.Figure instance
The figure on which the partial residual plot is drawn.
References
----------
RD Cook and R Croos-Dabrera (1998). Partial residual plots in
generalized linear models. Journal of the American
Statistical Association, 93:442.
RD Cook (1993). Partial residual plots. Technometrics 35:4.
Notes
-----
`cond_means` is intended to capture the behavior of E[x1 |
x2], where x2 is the focus exog and x1 are all the other exog
variables. If all the conditional mean relationships are
linear, it is sufficient to set cond_means equal to the focus
exog. Alternatively, cond_means may consist of one or more
columns containing functional transformations of the focus
exog (e.g. x2^2) that are thought to capture E[x1 | x2].
If nothing is known or suspected about the form of E[x1 | x2],
set `cond_means` to None, and it will be estimated by
smoothing each non-focus exog against the focus exog. The
values of `frac` control these lowess smooths.
If cond_means contains only the focus exog, the results are
equivalent to a partial residual plot.
If the focus variable is believed to be independent of the
other exog variables, `cond_means` can be set to an (empty)
nx0 array.
"""
| bsd-3-clause
ryfeus/lambda-packs | Keras_tensorflow_nightly/source2.7/tensorflow/contrib/learn/python/learn/estimators/estimator.py | 14 | 62939 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import copy
import os
import tempfile
import numpy as np
import six
from google.protobuf import message
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.meta_graph_transform import meta_graph_transform
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary as core_summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existence of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if tensor_util.is_tensor(x) or y is not None and tensor_util.is_tensor(y):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
@deprecated(None, 'Please specify feature columns explicitly.')
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
@deprecated(None, 'Please specify feature columns explicitly.')
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
def _model_fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments
"""
_, fn = tf_decorator.unwrap(fn)
if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'):
# Handle functools.partial and similar objects.
return tuple([
arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):]
if arg not in set(fn.keywords.keys())
])
# Handle function.
return tuple(tf_inspect.getargspec(fn).args)
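# Illustrative sketch (not part of the original source): for a plain function
#   def my_model_fn(features, labels, mode, params): ...
# _model_fn_args(my_model_fn) returns ('features', 'labels', 'mode', 'params');
# for functools.partial(my_model_fn, params={'lr': 0.1}) the bound 'params'
# keyword is filtered out, yielding ('features', 'labels', 'mode').
# `my_model_fn` is a hypothetical name used only for this example.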
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableV2', 'MutableHashTableOfTensors',
'MutableHashTableOfTensorsV2', 'MutableDenseHashTable',
'MutableDenseHashTableV2', 'VarHandleOp'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas,
worker_device=worker_device,
merge_devices=True,
ps_ops=ps_ops,
cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
A dict mapping the friendly names given in `metrics` to the result of calling the
given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
`features`, `labels`, or `predictions` provided. Most commonly, a dict is
given but no pred_name is specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError('Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics,
predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError('Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics,
labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
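# Illustrative sketch (not part of the original source): a typical `metrics`
# argument in the preferred MetricSpec form; `my_accuracy_fn` and the 'classes'
# prediction key are hypothetical names.
#   metrics = {
#       'accuracy': metric_spec.MetricSpec(metric_fn=my_accuracy_fn,
#                                          prediction_key='classes'),
#   }
# The deprecated forms handled in the loop above are a bare metric function, or
# a dict keyed by a (friendly_name, prediction_name) tuple whose value is a bare
# metric function when `predictions` is a dict.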
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
results = []
for k, v in sorted(dictionary.items()):
if isinstance(v, float) or isinstance(v, np.float32) or isinstance(
v, int) or isinstance(v, np.int64) or isinstance(v, np.int32):
results.append('%s = %s' % (k, v))
else:
results.append('Type of %s = %s' % (k, type(v)))
return ', '.join(results)
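# Illustrative example (not part of the original source): keys are sorted, and
# non-numeric values are reported only by type, e.g.
#   _dict_to_str({'loss': 0.25, 'global_step': 100})
#   -> 'global_step = 100, loss = 0.25'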
def _write_dict_to_summary(output_dir, dictionary, current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = core_summary.FileWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
if key == 'global_step':
continue
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
summary_proto.value.add(tag=key, simple_value=float(dictionary[key]))
elif (isinstance(dictionary[key], np.int64) or
isinstance(dictionary[key], np.int32) or
isinstance(dictionary[key], int)):
summary_proto.value.add(tag=key, simple_value=int(dictionary[key]))
elif isinstance(dictionary[key], six.string_types):
try:
summ = summary_pb2.Summary.FromString(dictionary[key])
for i, _ in enumerate(summ.value):
summ.value[i].tag = key
summary_proto.value.extend(summ.value)
except message.DecodeError:
logging.warn('Skipping summary for %s, cannot parse string to Summary.',
key)
continue
elif isinstance(dictionary[key], np.ndarray):
value = summary_proto.value.add()
value.tag = key
value.node_name = key
tensor_proto = tensor_util.make_tensor_proto(dictionary[key])
value.tensor.CopyFrom(tensor_proto)
logging.info(
'Summary for np.ndarray is not visible in Tensorboard by default. '
'Consider using a Tensorboard plugin for visualization (see '
'https://github.com/tensorflow/tensorboard-plugin-example/blob/master/README.md'
' for more information).')
else:
logging.warn(
'Skipping summary for %s, must be a float, np.float32, np.int64, '
'np.int32 or int or np.ndarray or a serialized string of Summary.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
GraphRewriteSpec = collections.namedtuple('GraphRewriteSpec',
['tags', 'transforms'])
class BaseEstimator(sklearn.BaseEstimator, evaluable.Evaluable,
trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
Users should not instantiate or subclass this class. Instead, use an
`Estimator`.
"""
__metaclass__ = abc.ABCMeta
# Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
@deprecated(None, 'Please replace uses of any Estimator from tf.contrib.learn'
' with an Estimator from tf.estimator.*')
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be same.
config: A RunConfig instance.
"""
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(b/9965722): remove this suppression after it is no longer
# necessary.
# pylint: disable=g-doc-exception
raise ValueError(
'model_dir are set both in constructor and RunConfig, but with '
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
# pylint: enable=g-doc-exception
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@property
def model_fn(self):
"""Returns the model_fn which is bound to self.params.
Returns:
The model_fn with the following signature:
`def model_fn(features, labels, mode, metrics)`
"""
def public_model_fn(features, labels, mode, config):
return self._call_model_fn(features, labels, mode, config=config)
return public_model_fn
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('y', None), ('batch_size', None))
def fit(self,
x=None,
y=None,
input_fn=None,
steps=None,
batch_size=None,
monitors=None,
max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
logging.info('Skipping training since max_steps has already saved.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('y', None), ('batch_size', None))
def partial_fit(self,
x=None,
y=None,
input_fn=None,
steps=1,
batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This either can
implement iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
fit in memory at the same time, or when the model is taking a long time
to converge and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(
x=x,
y=y,
input_fn=input_fn,
steps=steps,
batch_size=batch_size,
monitors=monitors)
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('y', None), ('batch_size', None))
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is not `None` or `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics, name)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('batch_size', None), ('as_iterable', True))
def predict(self,
x=None,
input_fn=None,
batch_size=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
iterate_batches: If True, yield the whole batch at once instead of
decomposing the batch into individual samples. Only relevant when
as_iterable is True.
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable,
iterate_batches=iterate_batches)
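# Illustrative sketch (not part of the original source): with as_iterable=True
# predictions are yielded one example at a time, e.g.
#   for pred in estimator.predict(input_fn=predict_input_fn, as_iterable=True):
#       handle(pred)
# `predict_input_fn` and `handle` are hypothetical; as noted in the docstring,
# the input must terminate (e.g. num_epochs=1) for the loop to finish.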
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def export(
self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
key into the features dict returned by `input_fn` that corresponds to
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.', str(labels),
str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
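# Illustrative example (not part of the original source): an eval_dict entry
# such as {'accuracy': (acc_value_op, acc_update_op)} is split into
# value_ops = {'accuracy': acc_value_op}, while acc_update_op is grouped into
# the single returned update op; the op names here are hypothetical.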
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError(
"Couldn't find trained model at %s." % self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval'
if not name else 'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = training_util.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_results = self._get_eval_ops(features, labels, metrics)
eval_dict = model_fn_results.eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps == 0:
logging.warning('evaluation steps are 0. If `input_fn` does not raise '
'`OutOfRangeError`, the evaluation will never stop. '
'Use steps=None if intended.')
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=model_fn_results.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=self._session_config)
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError(
"Couldn't find trained model at %s." % self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
training_util.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=infer_ops.scaffold,
config=self._session_config))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions)
if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = training_util.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
training_util._get_or_create_global_step_read() # pylint: disable=protected-access
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend(hooks)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
keep_checkpoint_every_n_hours=(
self._config.keep_checkpoint_every_n_hours),
defer_build=True,
save_relative_paths=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
* `mode`: Optional. Specifies if this is training, evaluation, or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
is passed to Estimator in the `params` parameter. This allows
Estimators to be configured through hyperparameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
* `model_dir`: Optional directory where model parameters, graph etc
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
Supports next three signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _model_fn_args(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) does not have a params '
'argument, but params (%s) were passed to the '
'Estimator\'s constructor.' % (model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
def _call_model_fn(self, features, labels, mode, metrics=None, config=None):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
metrics: Dict of metrics.
config: RunConfig.
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _model_fn_args(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
if config:
kwargs['config'] = config
else:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
model_fn_ops = model_fn_results
else:
# Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
model_fn_ops = model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(
_make_metrics_ops(metrics, features, labels,
model_fn_ops.predictions))
return model_fn_ops
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(features, labels,
model_fn_lib.ModeKeys.EVAL, metrics)
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(self,
export_dir_base,
serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None,
graph_rewrite_specs=(GraphRewriteSpec(
(tag_constants.SERVING,), ()),),
strip_default_attrs=False):
# pylint: disable=line-too-long
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
graph_rewrite_specs: an iterable of `GraphRewriteSpec`. Each element will
produce a separate MetaGraphDef within the exported SavedModel, tagged
and rewritten as specified. Defaults to a single entry using the
default serving tag ("serve") and no rewriting.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued
Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
Returns:
The string path to the exported directory.
Raises:
ValueError: if an unrecognized export_type is requested.
"""
# pylint: enable=line-too-long
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError(
"Couldn't find trained model at %s." % self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
# We'll write the SavedModel to a temporary directory and then atomically
# rename it at the end. This helps to avoid corrupt / incomplete outputs,
# which could otherwise occur if the job is preempted or otherwise fails
# in the middle of SavedModel creation.
temp_export_dir = saved_model_export_utils.get_temp_export_dir(export_dir)
builder = saved_model_builder.SavedModelBuilder(temp_export_dir)
# Build the base graph
with ops.Graph().as_default() as g:
training_util.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# TODO(b/34388557) This is a stopgap, pending recording model provenance.
# Record which features are expected at serving time. It is assumed that
# these are the features that were used in training.
for feature_key in input_ops.features.keys():
ops.add_to_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key)
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
init_op = control_flow_ops.group(variables.local_variables_initializer(),
resources.initialize_resources(
resources.shared_resources()),
lookup_ops.tables_initializer())
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
# Export the first MetaGraphDef with variables, assets etc.
with tf_session.Session('') as session:
# pylint: disable=protected-access
saveables = variables._all_saveable_objects()
# pylint: enable=protected-access
if (model_fn_ops.scaffold is not None and
model_fn_ops.scaffold.saver is not None):
saver_for_restore = model_fn_ops.scaffold.saver
elif saveables:
saver_for_restore = saver.Saver(saveables, sharded=True)
saver_for_restore.restore(session, checkpoint_path)
# Perform the export
if not graph_rewrite_specs or graph_rewrite_specs[0].transforms:
raise ValueError('The first element of graph_rewrite_specs '
'must specify no transforms.')
untransformed_tags = graph_rewrite_specs[0].tags
# TODO(soergel): switch to main_op or otherwise update when dust settles
builder.add_meta_graph_and_variables(
session,
untransformed_tags,
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op,
strip_default_attrs=strip_default_attrs)
# pylint: disable=protected-access
base_meta_graph_def = builder._saved_model.meta_graphs[0]
# pylint: enable=protected-access
if graph_rewrite_specs[1:]:
# Prepare the input_names and output_names needed for the
# meta_graph_transform call below.
input_names = [
tensor.name
for input_dict in input_alternatives.values()
for tensor in input_dict.values()
]
output_names = [
tensor.name
for output_alternative in output_alternatives.values()
for tensor in output_alternative[1].values()
]
# Write the additional MetaGraphDefs
for graph_rewrite_spec in graph_rewrite_specs[1:]:
# TODO(soergel) consider moving most of this to saved_model.builder_impl
# as e.g. builder.add_rewritten_meta_graph(rewritten_graph_def, tags)
transformed_meta_graph_def = meta_graph_transform.meta_graph_transform(
base_meta_graph_def, input_names, output_names,
graph_rewrite_spec.transforms, graph_rewrite_spec.tags)
# pylint: disable=protected-access
meta_graph_def = builder._saved_model.meta_graphs.add()
# pylint: enable=protected-access
meta_graph_def.CopyFrom(transformed_meta_graph_def)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(
compat.as_bytes(temp_export_dir), compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(
compat.as_bytes(assets_extra_path), compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
builder.save(as_text)
gfile.Rename(temp_export_dir, export_dir)
return export_dir
# During the deprecation period, allow direct x, y access from Estimator.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
@deprecated(None, 'Please switch to the Estimator interface.')
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(
x,
y,
input_fn=None,
feed_fn=None,
batch_size=batch_size,
shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(
input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None, name=None):
input_fn, feed_fn = _get_input_fn(
x,
y,
input_fn=None,
feed_fn=None,
batch_size=batch_size,
shuffle=False,
epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x,
None,
input_fn=None,
feed_fn=None,
batch_size=batch_size,
shuffle=False,
epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate([output[key] for output in results], axis=0)
for key in results[0]
}
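# Minimal usage sketch (illustrative only; `my_estimator` is a placeholder for
# any already-constructed tf.contrib.learn Estimator):
#
#   classifier = SKCompat(my_estimator)
#   classifier.fit(x_train, y_train, batch_size=128, steps=1000)
#   eval_results = classifier.score(x_test, y_test)
#   predictions = classifier.predict(x_test)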
|
mit
|
geome-mitbbs/QTS_Research
|
Trade_Algo.py
|
1
|
7950
|
try:
from . import Portfolio
from . import Data_API
from .Quant_Indicators import *
except:
import Portfolio
import Data_API
from Quant_Indicators import *
class Trade_Algo:
def __init__(self,command=None):
self.command = command
if not self.safety_check():
self.command = """raise Exception("not safe to run")"""
def filter_string(self):
# first strip out all triple-quoted ("""...""") string literals.
new_command = ""
left_quotes_saw = 0
left_quotes_pos = []
for i in range(len(self.command)):
if(self.command[i] != "\""):
left_quotes_saw = 0
else:
if(left_quotes_saw<3):
left_quotes_saw += 1
if(left_quotes_saw==3):
left_quotes_pos.append(i-2)
left_quotes_saw = 0
if(len(left_quotes_pos)//2 * 2 != len(left_quotes_pos)):
raise Exception("Not proper string")
if(len(left_quotes_pos)==0):
return self.command
for i in range(len(left_quotes_pos)//2):
if i==0:
new_command += self.command[0:left_quotes_pos[2*i]]
else:
new_command += self.command[left_quotes_pos[2*i-1]+3:left_quotes_pos[2*i]]
if i== len(left_quotes_pos)//2-1:
new_command += self.command[left_quotes_pos[2*i+1]+3:]
return new_command
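# Illustrative example (not part of the original class): given
#   self.command = 'x = """scratch note"""; t = "AAPL"'
# filter_string returns 'x = ; t = "AAPL"', so find_tickers and find_used_vars
# only scan text outside triple-quoted literals.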
def find_used_vars(self):
new_command = self.filter_string()
ret = dict()
candidates = ['portfolio','portfolio1','portfolio2','portfolio3','quant_index','quant_index1','quant_index2','quant_index3']
for item in candidates:
if item in new_command:
ret[item] = None
self.used_vars = ret
def find_tickers(self):
# find all the "ABC" in the command and they should be all the tickers
if(self.command == """raise Exception("not safe to run")"""):
self.tickers = []
return
new_command = self.filter_string()
tickers = []
current_ticker = ""
saw_left_quote = False
for c in new_command:
if not saw_left_quote:
if c != "\"":
pass
else:
saw_left_quote = True
else:
if c != "\"":
current_ticker += c
else:
tickers.append(current_ticker)
current_ticker = ""
saw_left_quote = False
self.tickers = tickers
def safety_check(self):
# check whether self.command is safe to run; a real check is needed before going to production
return True
def back_test(self,start_date=None,end_date=None,initial_cash=0,initial_portfolio=None):
if end_date is None:
end_date = Data_API.Pricing_Database.pricing_date
if isinstance(start_date,int):
start_date = Data_API.add_pricing_date(start_date,in_place=False)
if isinstance(end_date,int):
end_date = Data_API.add_pricing_date(end_date,in_place=False)
#for pnl
if initial_portfolio is None:
portfolio = Portfolio.Portfolio(initial_cash)
portfolio1=Portfolio.Portfolio(initial_cash)
portfolio2=Portfolio.Portfolio(initial_cash)
portfolio3=Portfolio.Portfolio(initial_cash)
else:
portfolio = initial_portfolio
portfolio1 = initial_portfolio
portfolio2 = initial_portfolio
portfolio3 = initial_portfolio
#for information
quant_index=[]
quant_index1=[]
quant_index2=[]
quant_index3=[]
self.find_tickers()
self.find_used_vars()
cache = Data_API.Cache()
for ticker in self.tickers:
cache.get_ticker_data(ticker)
#set back the date.
orig_pd = Data_API.Pricing_Database.pricing_date
try:
Data_API.set_pricing_date(start_date)
while Data_API.Pricing_Database.pricing_date <= end_date:
exec(self.command)
portfolio.record_pnl()
portfolio1.record_pnl()
portfolio2.record_pnl()
portfolio3.record_pnl()
Data_API.add_pricing_date(1)
self.portfolio = portfolio
self.portfolio1 = portfolio1
self.portfolio2 = portfolio2
self.portfolio3 = portfolio3
self.quant_index = quant_index
self.quant_index1=quant_index1
self.quant_index2=quant_index2
self.quant_index3=quant_index3
Data_API.set_pricing_date(orig_pd)
self.pnls = {'portfolio':self.portfolio.pnl_as_of_date,\
'portfolio1':self.portfolio1.pnl_as_of_date,\
'portfolio2':self.portfolio2.pnl_as_of_date,\
'portfolio3':self.portfolio3.pnl_as_of_date}
self.quant_indices = {'quant_index':self.quant_index,\
'quant_index1':self.quant_index1,\
'quant_index2':self.quant_index2,\
'quant_index3':self.quant_index3}
except Exception as e:
Data_API.set_pricing_date(orig_pd)
raise e
def back_test_summary(self):
output = ""
if "portfolio" in self.used_vars:
output += """portfolio:\n""" + str(self.portfolio.get_measures()) + """\n"""
if "portfolio1" in self.used_vars:
output += """portfolio1:\n""" + str(self.portfolio1.get_measures()) + """\n"""
if "portfolio2" in self.used_vars:
output += """portfolio2:\n""" + str(self.portfolio2.get_measures()) + """\n"""
if "portfolio3" in self.used_vars:
output += """portfolio3:\n""" + str(self.portfolio3.get_measures()) + """\n"""
return output
def back_test_plot(self):
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
fig = plt.figure()
all_lines = []
ax = fig.add_subplot(111)
ax.set_ylabel('PnL')
has_right_ax = False
if 'quant_index' in self.used_vars or \
'quant_index1' in self.used_vars or \
'quant_index2' in self.used_vars or \
'quant_index3' in self.used_vars:
has_right_ax = True
dates = [ x[0] for x in self.pnls['portfolio'] ]
for v in self.used_vars:
if 'portfolio' in v:
all_lines += ax.plot(dates, [x[1] for x in self.pnls[v]],label=v,linewidth=1)
if has_right_ax:
right_ax = ax.twinx()
for v in self.used_vars:
if 'index' in v:
all_lines += right_ax.plot(dates, self.quant_indices[v],label=v,linewidth=1,ls='dotted')
right_ax.set_ylabel('quant_index')
# format the ticks
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
yearsFmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
datemin = min(dates)
datemax = max(dates)
ax.set_xlim(datemin, datemax)
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.grid(True)
# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
fig.autofmt_xdate()
fig.tight_layout()
plt.legend(all_lines,[l.get_label() for l in all_lines],loc='best')
plt.show()
|
mit
|
synthicity/urbansim
|
urbansim/utils/sampling.py
|
4
|
7852
|
import math
import numpy as np
import pandas as pd
def get_probs(data, prob_column=None):
"""
Checks for presence of a probability column and returns the result
as a numpy array. If the probabilities are weights (i.e. they don't
sum to 1), then this will be recalculated.
Parameters
----------
data: pandas.DataFrame
Table to sample from.
prob_column: string, optional, default None
Name of the column in the data to provide probabilities or weights.
Returns
-------
numpy.array
"""
if prob_column is None:
p = None
else:
p = data[prob_column].fillna(0).values
if p.sum() == 0:
p = np.ones(len(p))
if abs(p.sum() - 1.0) > 1e-8:
p = p / (1.0 * p.sum())
return p
def accounting_sample_replace(total, data, accounting_column, prob_column=None, max_iterations=50):
"""
Sample rows with accounting with replacement.
Parameters
----------
total : int
The control total the sampled rows will attempt to match.
data: pandas.DataFrame
Table to sample from.
accounting_column: string
Name of column with accounting totals/quantities to apply towards the control.
prob_column: string, optional, default None
Name of the column in the data to provide probabilities or weights.
max_iterations: int, optional, default 50
When using an accounting attribute, the maximum number of sampling iterations
that will be applied.
Returns
-------
sample_rows : pandas.DataFrame
Table containing the sample.
matched: bool
Indicates if the total was matched exactly.
"""
# check for probabilities
p = get_probs(data, prob_column)
# determine avg number of accounting items per sample (e.g. persons per household)
per_sample = data[accounting_column].sum() / (1.0 * len(data.index.values))
curr_total = 0
remaining = total
sample_rows = pd.DataFrame()
closest = None
closest_remain = total
matched = False
for i in range(0, max_iterations):
# stop if we've hit the control
if remaining == 0:
matched = True
break
# if sampling with probabilities, recalculate the # of items per sample
# after the initial sample so that the sample size reflects the probabilities
if p is not None and i == 1:
per_sample = sample_rows[accounting_column].sum() / (1.0 * len(sample_rows))
# update the sample
num_samples = int(math.ceil(math.fabs(remaining) / per_sample))
if remaining > 0:
# we're short, add to the sample
curr_ids = np.random.choice(data.index.values, num_samples, p=p)
sample_rows = pd.concat([sample_rows, data.loc[curr_ids]])
else:
# we've overshot, remove from existing samples (FIFO)
sample_rows = sample_rows.iloc[num_samples:].copy()
# update the total and check for the closest result
curr_total = sample_rows[accounting_column].sum()
remaining = total - curr_total
if abs(remaining) < closest_remain:
closest_remain = abs(remaining)
closest = sample_rows
return closest, matched
def accounting_sample_no_replace(total, data, accounting_column, prob_column=None):
"""
Samples rows with accounting without replacement.
Parameters
----------
total : int
The control total the sampled rows will attempt to match.
data: pandas.DataFrame
Table to sample from.
accounting_column: string
Name of column with accounting totals/quantities to apply towards the control.
prob_column: string, optional, default None
Name of the column in the data to provide probabilities or weights.
Returns
-------
sample_rows : pandas.DataFrame
Table containing the sample.
matched: bool
Indicates if the total was matched exactly.
"""
# make sure this is even feasible
if total > data[accounting_column].sum():
raise ValueError('Control total exceeds the available samples')
# check for probabilities
p = get_probs(data, prob_column)
# shuffle the rows
if p is None:
# random shuffle
shuff_idx = np.random.permutation(data.index.values)
else:
# weighted shuffle
ran_p = pd.Series(np.power(np.random.rand(len(p)), 1.0 / p), index=data.index)
# sort_values is not in-place, so re-assign to obtain the weighted ordering
ran_p = ran_p.sort_values(ascending=False)
shuff_idx = ran_p.index.values
# get the initial sample
shuffle = data.loc[shuff_idx]
csum = np.cumsum(shuffle[accounting_column].values)
pos = np.searchsorted(csum, total, 'right')
sample = shuffle.iloc[:pos]
# refine the sample
sample_idx = sample.index.values
sample_total = sample[accounting_column].sum()
shortage = total - sample_total
matched = False
for idx, row in shuffle.iloc[pos:].iterrows():
if shortage == 0:
# we've matched
matched = True
break
# add the current element if it doesn't exceed the total
cnt = row[accounting_column]
if cnt <= shortage:
sample_idx = np.append(sample_idx, idx)
shortage -= cnt
return shuffle.loc[sample_idx].copy(), matched
def sample_rows(total, data, replace=True, accounting_column=None,
max_iterations=50, prob_column=None, return_status=False):
"""
Samples and returns rows from a data frame while matching a desired control total. The total may
represent a simple row count or may attempt to match a sum/quantity from an accounting column.
Parameters
----------
total : int
The control total the sampled rows will attempt to match.
data: pandas.DataFrame
Table to sample from.
replace: bool, optional, default True
Indicates if sampling with or without replacement.
accounting_column: string, optional
Name of column with accounting totals/quantities to apply towards the control.
If not provided then row counts will be used for accounting.
max_iterations: int, optional, default 50
When using an accounting attribute, the maximum number of sampling iterations
that will be applied. Only applicable when sampling with replacement.
prob_column: string, optional, default None
If provided, name of the column in the data frame to provide probabilities
or weights. If not provided, the sampling is random.
return_status: bool, optional, default True
If True, will also return a bool indicating if the total was matched exactly.
Returns
-------
sample_rows : pandas.DataFrame
Table containing the sample.
matched: bool
If return_status is True, returns True if total is matched exactly.
"""
if not data.index.is_unique:
raise ValueError('Data must have a unique index')
# simplest case, just return n random rows
if accounting_column is None:
if replace is False and total > len(data.index.values):
raise ValueError('Control total exceeds the available samples')
p = get_probs(data, prob_column)
rows = data.loc[np.random.choice(
data.index.values, int(total), replace=replace, p=p)].copy()
matched = True
# sample with accounting
else:
if replace:
rows, matched = accounting_sample_replace(
total, data, accounting_column, prob_column, max_iterations)
else:
rows, matched = accounting_sample_no_replace(
total, data, accounting_column, prob_column)
# return the results
if return_status:
return rows, matched
else:
return rows
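# Minimal usage sketch (not part of the original module). The table, column
# names and control total below are illustrative placeholders only.
if __name__ == '__main__':
    hh = pd.DataFrame({
        'persons': [1, 2, 3, 4, 5],
        'weight': [0.5, 1.0, 1.0, 2.0, 0.5],
    }, index=list('abcde'))
    # sample households (with replacement) so that sampled persons sum to ~20
    sampled, matched = sample_rows(20, hh, replace=True,
                                   accounting_column='persons',
                                   prob_column='weight', return_status=True)
    print(sampled['persons'].sum(), matched)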
|
bsd-3-clause
|
3manuek/scikit-learn
|
examples/manifold/plot_mds.py
|
261
|
2616
|
"""
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
|
bsd-3-clause
|
GOFAI/glasstone
|
examples/wseg10.py
|
1
|
1738
|
import matplotlib.pyplot as plt
from matplotlib import cm, colors, colorbar
import numpy as np
from glasstone.fallout import WSEG10
# ground zero x & y locations (st. mi)
gzx = 1
gzy = 1
# yield in megatons
yld = 0.01
# fission fraction
ff = 1.0
# wind speed (mph)
wind_speed = 1.151515 * 2.0
# wind direction (in degrees with wind from north = 0)
wind_direction = 225
# wind shear (change in mph per kilofoot change in altitude)
wind_shear = 0.23
x = np.arange(-1, 10, 0.1)
y = np.arange(-1, 10, 0.1)
X, Y = np.meshgrid(x, y)
# use WSEG10's native units
w = WSEG10(gzx, gzy, yld, ff, wind_speed, wind_direction, wind_shear, dunits='mi', wunits='mph', yunits='MT', shearunits='mph/kilofoot')
dose = np.vectorize(w.D_Hplus1)
Z = dose(X, Y, dunits='mi', doseunits='Roentgen')
fig = plt.figure()
ax1 = fig.add_axes([0.1, 0.1, 0.7, 0.8])
ax2 = fig.add_axes([0.85, 0.1, 0.05, 0.75])
CS = ax1.contour(X, Y, Z, [100, 300, 500, 1000, 3000], colors=('b', 'g', 'c', 'y', 'r'), linewidths=2)
cmap = colors.ListedColormap(['b', 'g', 'c', 'y'])
cmap.set_over('r')
cmap.set_under('w')
norm = colors.BoundaryNorm([100, 300, 500, 1000, 3000], cmap.N)
cb = colorbar.ColorbarBase(ax2, cmap=cmap,
norm=norm,
boundaries=[0] + [100, 300, 500, 1000, 3000] + [5000],
extend='both',
extendfrac='auto')
cb.set_label(r'$H+1$ dose rate $(R/hr)$')
ax1.grid(True)
ax1.set_title('WSEG-10 $H+1$ dose rate contours for 10kT burst')
ax1.text(-0.5, 7.5, '$Wind: SW, 2.30303 mi/hr$\n$Shear: 0.23 mi/hr-kilofeet$\n$Yield: 10kT$\n$GZ:1,1$\n$FF: 1.0$\n$HOB: 0$')
ax1.set_ylim([-0.5, 11])
ax1.set_ylabel('$st.$ $miles$')
ax1.set_xlabel('$st.$ $miles$')
plt.show()
|
mit
|
jundongl/PyFeaST
|
skfeature/function/sparse_learning_based/UDFS.py
|
3
|
3369
|
import numpy as np
import scipy
import math
from skfeature.utility.sparse_learning import generate_diagonal_matrix, calculate_l21_norm
from sklearn.metrics.pairwise import pairwise_distances
def udfs(X, **kwargs):
"""
This function implements l2,1-norm regularized discriminative feature
selection for unsupervised learning, i.e., min_W Tr(W^T M W) + gamma ||W||_{2,1}, s.t. W^T W = I
Input
-----
X: {numpy array}, shape (n_samples, n_features)
input data
kwargs: {dictionary}
gamma: {float}
parameter in the objective function of UDFS (default is 1)
n_clusters: {int}
Number of clusters
k: {int}
number of nearest neighbor
verbose: {boolean}
True if want to display the objective function value, false if not
Output
------
W: {numpy array}, shape(n_features, n_clusters)
feature weight matrix
Reference
Yang, Yi et al. "l2,1-Norm Regularized Discriminative Feature Selection for Unsupervised Learning." AAAI 2012.
"""
# default gamma is 0.1
if 'gamma' not in kwargs:
gamma = 0.1
else:
gamma = kwargs['gamma']
# default k is set to be 5
if 'k' not in kwargs:
k = 5
else:
k = kwargs['k']
if 'n_clusters' not in kwargs:
n_clusters = 5
else:
n_clusters = kwargs['n_clusters']
if 'verbose' not in kwargs:
verbose = False
else:
verbose = kwargs['verbose']
# construct M
n_sample, n_feature = X.shape
M = construct_M(X, k, gamma)
D = np.eye(n_feature)
max_iter = 1000
obj = np.zeros(max_iter)
for iter_step in range(max_iter):
# update W as the eigenvectors of P corresponding to the first n_clusters
# smallest eigenvalues
P = M + gamma*D
eigen_value, eigen_vector = scipy.linalg.eigh(a=P)
W = eigen_vector[:, 0:n_clusters]
# update D as D_ii = 1 / 2 / ||W(i,:)||
D = generate_diagonal_matrix(W)
obj[iter_step] = calculate_obj(X, W, M, gamma)
if verbose:
print('obj at iter {0}: {1}'.format(iter_step+1, obj[iter_step]))
if iter_step >= 1 and math.fabs(obj[iter_step] - obj[iter_step-1]) < 1e-3:
break
return W
def construct_M(X, k, gamma):
"""
This function constructs the M matrix described in the paper
"""
n_sample, n_feature = X.shape
Xt = X.T
D = pairwise_distances(X)
# sort the distance matrix D in ascending order
idx = np.argsort(D, axis=1)
# choose the k-nearest neighbors for each instance
idx_new = idx[:, 0:k+1]
H = np.eye(k+1) - 1/(k+1) * np.ones((k+1, k+1))
I = np.eye(k+1)
Mi = np.zeros((n_sample, n_sample))
for i in range(n_sample):
Xi = Xt[:, idx_new[i, :]]
Xi_tilde = np.dot(Xi, H)
Bi = np.linalg.inv(np.dot(Xi_tilde.T, Xi_tilde) + gamma*I)
Si = np.zeros((n_sample, k+1))
for q in range(k+1):
# select the (k+1)-neighborhood of sample i
Si[idx_new[i, q], q] = 1
Mi = Mi + np.dot(np.dot(Si, np.dot(np.dot(H, Bi), H)), Si.T)
M = np.dot(np.dot(X.T, Mi), X)
return M
def calculate_obj(X, W, M, gamma):
"""
This function calculates the objective function of ls_l21 described in the paper
"""
return np.trace(np.dot(np.dot(W.T, M), W)) + gamma*calculate_l21_norm(W)
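# Minimal usage sketch (not part of the original module): run UDFS on a small
# random matrix and rank features by the row-wise l2-norm of the weight matrix.
# The shapes and parameter values below are illustrative only.
if __name__ == '__main__':
    np.random.seed(0)
    X = np.random.rand(50, 20)
    W = udfs(X, gamma=0.1, k=5, n_clusters=3, verbose=False)
    # larger row norm -> more important feature
    feature_ranking = np.argsort(-np.linalg.norm(W, axis=1))
    print(feature_ranking[:5])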
|
gpl-2.0
|
tochikuji/pyPyrTools
|
pyrtools/showIm.py
|
1
|
3701
|
import numpy
from PIL import Image
import scipy.stats
import matplotlib.pyplot as plt
def showIm(*args):
# check and set input parameters
if len(args) == 0:
print("showIm( matrix, range, zoom, label, nshades )")
print(" matrix is string. It should be the name of a 2D array.")
print(" range is a two element tuple. It specifies the values that ")
print(" map to the min and max colormap values. Passing a value ")
print(" of 'auto' (default) sets range=[min,max]. 'auto2' sets ")
print(" range=[mean-2*stdev, mean+2*stdev]. 'auto3' sets ")
print(" range=[p1-(p2-p1)/8, p2+(p2-p1)/8], where p1 is the 10th ")
print(" percientile value of the sorted matix samples, and p2 is ")
print(" the 90th percentile value.")
print(" zoom specifies the number of matrix samples per screen pixel.")
print(" It will be rounded to an integer, or 1 divided by an ")
print(" integer.")
# print " A value of 'same' or 'auto' (default) causes the "
# print " zoom value to be chosen automatically to fit the image into"
# print " the current axes."
# print " A value of 'full' fills the axis region "
# print " (leaving no room for labels)."
print(" label - A string that is used as a figure title.")
print(" NSHADES (optional) specifies the number of gray shades, ")
print(" and defaults to the size of the current colormap. ")
if len(args) > 0: # matrix entered
matrix = numpy.array(args[0])
if len(args) > 1: # range entered
if isinstance(args[1], str):
if args[1] is "auto":
imRange = (numpy.amin(matrix), numpy.amax(matrix))
elif args[1] is "auto2":
imRange = (matrix.mean() - 2 * matrix.std(),
matrix.mean() + 2 * matrix.std())
elif args[1] is "auto3":
# p1 = numpy.percentile(matrix, 10) not in python 2.6.6?!
#p2 = numpy.percentile(matrix, 90)
p1 = scipy.stats.scoreatpercentile(numpy.hstack(matrix), 10)
p2 = scipy.stats.scoreatpercentile(numpy.hstack(matrix), 90)
imRange = (p1 - (p2 - p1) / 8.0, p2 + (p2 - p1) / 8.0)
else:
print("Error: range of %s is not recognized." % args[1])
print(" please use a two element tuple or ")
print(" 'auto', 'auto2' or 'auto3'")
print(" enter 'showIm' for more info about options")
return
else:
imRange = args[1][0], args[1][1]
else:
imRange = (numpy.amin(matrix), numpy.amax(matrix))
if len(args) > 2: # zoom entered
zoom = args[2]
else:
zoom = 1
if len(args) > 3: # label entered
label = args[3]
else:
label = ''
if len(args) > 4: # colormap entered
nshades = args[4]
print("colormap parameter is not supported.")
print("Such specification does not make any sense.")
else:
nshades = 256 # NOQA
# show image
# create canvas (mpl)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_title(label)
width = matrix.shape[0] * zoom
height = matrix.shape[1] * zoom
# normalize image to [0, 255]
pmin, pmax = matrix.min(), matrix.max()
matrix = (matrix - pmin) / (pmax - pmin) * 255
img = Image.fromarray(matrix.astype(numpy.uint8))
# zoom
if zoom != 1:
img.thumbnail((width, height), Image.BICUBIC)
ax.imshow(img, cmap='gray')
plt.show()
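# Minimal usage sketch (not part of the original module): show a random matrix
# using the 'auto2' range, 2x zoom and a title.
if __name__ == '__main__':
    demo = numpy.random.rand(64, 64)
    showIm(demo, 'auto2', 2, 'random test image')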
|
mit
|
keras-team/keras-io
|
examples/vision/xray_classification_with_tpus.py
|
1
|
12745
|
"""
Title: Pneumonia Classification on TPU
Author: Amy MiHyun Jang
Date created: 2020/07/28
Last modified: 2020/08/24
Description: Medical image classification on TPU.
"""
"""
## Introduction + Set-up
This tutorial will explain how to build an X-ray image classification model
to predict whether an X-ray scan shows presence of pneumonia.
"""
import re
import os
import random
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print("Device:", tpu.master())
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
except:
strategy = tf.distribute.get_strategy()
print("Number of replicas:", strategy.num_replicas_in_sync)
"""
We need a Google Cloud link to our data to load the data using a TPU.
Below, we define key configuration parameters we'll use in this example.
To run on TPU, this example must be on Colab with the TPU runtime selected.
"""
AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 25 * strategy.num_replicas_in_sync
IMAGE_SIZE = [180, 180]
CLASS_NAMES = ["NORMAL", "PNEUMONIA"]
"""
## Load the data
The Chest X-ray data we are using from
[*Cell*](https://www.cell.com/cell/fulltext/S0092-8674(18)30154-5) divides the data into
training and test files. Let's first load in the training TFRecords.
"""
train_images = tf.data.TFRecordDataset(
"gs://download.tensorflow.org/data/ChestXRay2017/train/images.tfrec"
)
train_paths = tf.data.TFRecordDataset(
"gs://download.tensorflow.org/data/ChestXRay2017/train/paths.tfrec"
)
ds = tf.data.Dataset.zip((train_images, train_paths))
"""
Let's count how many healthy/normal chest X-rays we have and how many
pneumonia chest X-rays we have:
"""
COUNT_NORMAL = len(
[
filename
for filename in train_paths
if "NORMAL" in filename.numpy().decode("utf-8")
]
)
print("Normal images count in training set: " + str(COUNT_NORMAL))
COUNT_PNEUMONIA = len(
[
filename
for filename in train_paths
if "PNEUMONIA" in filename.numpy().decode("utf-8")
]
)
print("Pneumonia images count in training set: " + str(COUNT_PNEUMONIA))
"""
Notice that there are way more images that are classified as pneumonia than normal. This
shows that we have an imbalance in our data. We will correct for this imbalance later on
in our notebook.
"""
"""
We want to map each filename to the corresponding (image, label) pair. The following
methods will help us do that.
As we only have two labels, we will encode the label so that `1` or `True` indicates
pneumonia and `0` or `False` indicates normal.
"""
def get_label(file_path):
# convert the path to a list of path components
parts = tf.strings.split(file_path, "/")
# The second to last is the class-directory
return parts[-2] == "PNEUMONIA"
def decode_img(img):
# convert the compressed string to a 3D uint8 tensor
img = tf.image.decode_jpeg(img, channels=3)
# resize the image to the desired size.
return tf.image.resize(img, IMAGE_SIZE)
def process_path(image, path):
label = get_label(path)
# load the raw data from the file as a string
img = decode_img(image)
return img, label
ds = ds.map(process_path, num_parallel_calls=AUTOTUNE)
"""
Let's split the data into a training and validation datasets.
"""
ds = ds.shuffle(10000)
train_ds = ds.take(4200)
val_ds = ds.skip(4200)
"""
Let's visualize the shape of an (image, label) pair.
"""
for image, label in train_ds.take(1):
print("Image shape: ", image.numpy().shape)
print("Label: ", label.numpy())
"""
Load and format the test data as well.
"""
test_images = tf.data.TFRecordDataset(
"gs://download.tensorflow.org/data/ChestXRay2017/test/images.tfrec"
)
test_paths = tf.data.TFRecordDataset(
"gs://download.tensorflow.org/data/ChestXRay2017/test/paths.tfrec"
)
test_ds = tf.data.Dataset.zip((test_images, test_paths))
test_ds = test_ds.map(process_path, num_parallel_calls=AUTOTUNE)
test_ds = test_ds.batch(BATCH_SIZE)
"""
## Visualize the dataset
First, let's use buffered prefetching so we can yield data from disk without having I/O
become blocking.
Please note that large image datasets should not be cached in memory. We do it here
because the dataset is not very large and we want to train on TPU.
"""
def prepare_for_training(ds, cache=True):
# This is a small dataset, only load it once, and keep it in memory.
# use `.cache(filename)` to cache preprocessing work for datasets that don't
# fit in memory.
if cache:
if isinstance(cache, str):
ds = ds.cache(cache)
else:
ds = ds.cache()
ds = ds.batch(BATCH_SIZE)
# `prefetch` lets the dataset fetch batches in the background while the model
# is training.
ds = ds.prefetch(buffer_size=AUTOTUNE)
return ds
"""
Call the next batch iteration of the training data.
"""
train_ds = prepare_for_training(train_ds)
val_ds = prepare_for_training(val_ds)
image_batch, label_batch = next(iter(train_ds))
"""
Define the method to show the images in the batch.
"""
def show_batch(image_batch, label_batch):
plt.figure(figsize=(10, 10))
for n in range(25):
ax = plt.subplot(5, 5, n + 1)
plt.imshow(image_batch[n] / 255)
if label_batch[n]:
plt.title("PNEUMONIA")
else:
plt.title("NORMAL")
plt.axis("off")
"""
As the method takes in NumPy arrays as its parameters, call the numpy function on the
batches to return the tensor in NumPy array form.
"""
show_batch(image_batch.numpy(), label_batch.numpy())
"""
## Build the CNN
To make our model more modular and easier to understand, let's define some blocks. As
we're building a convolutional neural network, we'll create a convolution block and a dense
layer block.
The architecture for this CNN has been inspired by this
[article](https://towardsdatascience.com/deep-learning-for-detecting-pneumonia-from-x-ray-images-fc9a3d9fdba8).
"""
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
def conv_block(filters, inputs):
x = layers.SeparableConv2D(filters, 3, activation="relu", padding="same")(inputs)
x = layers.SeparableConv2D(filters, 3, activation="relu", padding="same")(x)
x = layers.BatchNormalization()(x)
outputs = layers.MaxPool2D()(x)
return outputs
def dense_block(units, dropout_rate, inputs):
x = layers.Dense(units, activation="relu")(inputs)
x = layers.BatchNormalization()(x)
outputs = layers.Dropout(dropout_rate)(x)
return outputs
"""
The following method will define the function to build our model for us.
The images originally have values that range from [0, 255]. CNNs work better with smaller
numbers so we will scale this down for our input.
The Dropout layers are important, as they
reduce the likelihood of the model overfitting. We want to end the model with a `Dense`
layer with one node, as this will be the binary output that determines if an X-ray shows
presence of pneumonia.
"""
def build_model():
inputs = keras.Input(shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3))
x = preprocessing.Rescaling(1.0 / 255)(inputs)
x = layers.Conv2D(16, 3, activation="relu", padding="same")(x)
x = layers.Conv2D(16, 3, activation="relu", padding="same")(x)
x = layers.MaxPool2D()(x)
x = conv_block(32, x)
x = conv_block(64, x)
x = conv_block(128, x)
x = layers.Dropout(0.2)(x)
x = conv_block(256, x)
x = layers.Dropout(0.2)(x)
x = layers.Flatten()(x)
x = dense_block(512, 0.7, x)
x = dense_block(128, 0.5, x)
x = dense_block(64, 0.3, x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
"""
## Correct for data imbalance
We saw earlier in this example that the data was imbalanced, with more images classified
as pneumonia than normal. We will correct for that by using class weighting:
"""
initial_bias = np.log([COUNT_PNEUMONIA / COUNT_NORMAL])
print("Initial bias: {:.5f}".format(initial_bias[0]))
TRAIN_IMG_COUNT = COUNT_NORMAL + COUNT_PNEUMONIA
weight_for_0 = (1 / COUNT_NORMAL) * (TRAIN_IMG_COUNT) / 2.0
weight_for_1 = (1 / COUNT_PNEUMONIA) * (TRAIN_IMG_COUNT) / 2.0
class_weight = {0: weight_for_0, 1: weight_for_1}
print("Weight for class 0: {:.2f}".format(weight_for_0))
print("Weight for class 1: {:.2f}".format(weight_for_1))
"""
The weight for class `0` (Normal) is a lot higher than the weight for class `1`
(Pneumonia). Because there are fewer normal images, each normal image will be weighted
more to balance the data as the CNN works best when the training data is balanced.
"""
"""
## Train the model
"""
"""
### Defining callbacks
The checkpoint callback saves the best weights of the model, so next time we want to use
the model, we do not have to spend time training it. The early stopping callback stops
the training process when the model starts becoming stagnant, or even worse, when the
model starts overfitting.
"""
checkpoint_cb = tf.keras.callbacks.ModelCheckpoint("xray_model.h5", save_best_only=True)
early_stopping_cb = tf.keras.callbacks.EarlyStopping(
patience=10, restore_best_weights=True
)
"""
We also want to tune our learning rate. Too high of a learning rate will cause the model
to diverge. Too small of a learning rate will cause the model to be too slow. We
implement the exponential learning rate scheduling method below.
"""
initial_learning_rate = 0.015
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True
)
"""
### Fit the model
For our metrics, we want to include precision and recall as they will provide us with a
more informed picture of how good our model is. Accuracy tells us what fraction of the
labels is correct. Since our data is not balanced, accuracy might give a skewed sense of
a good model (i.e. a model that always predicts PNEUMONIA will be 74% accurate but is not
a good model).
Precision is the number of true positives (TP) over the sum of TP and false positives
(FP). It shows what fraction of labeled positives are actually correct.
Recall is the number of TP over the sum of TP and false negatives (FN). It shows what
fraction of actual positives are correctly identified.
Since there are only two possible labels for the image, we will be using the
binary crossentropy loss. When we fit the model, remember to specify the class weights,
which we defined earlier. Because we are using a TPU, training will be quick - less than
2 minutes.
"""
with strategy.scope():
model = build_model()
METRICS = [
tf.keras.metrics.BinaryAccuracy(),
tf.keras.metrics.Precision(name="precision"),
tf.keras.metrics.Recall(name="recall"),
]
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule),
loss="binary_crossentropy",
metrics=METRICS,
)
history = model.fit(
train_ds,
epochs=100,
validation_data=val_ds,
class_weight=class_weight,
callbacks=[checkpoint_cb, early_stopping_cb],
)
"""
## Visualizing model performance
Let's plot the model accuracy and loss for the training and the validating set. Note that
no random seed is specified for this notebook. For your notebook, there might be slight
variance.
"""
fig, ax = plt.subplots(1, 4, figsize=(20, 3))
ax = ax.ravel()
for i, met in enumerate(["precision", "recall", "binary_accuracy", "loss"]):
ax[i].plot(history.history[met])
ax[i].plot(history.history["val_" + met])
ax[i].set_title("Model {}".format(met))
ax[i].set_xlabel("epochs")
ax[i].set_ylabel(met)
ax[i].legend(["train", "val"])
"""
We see that the accuracy for our model is around 95%.
"""
"""
## Predict and evaluate results
Let's evaluate the model on our test data!
"""
model.evaluate(test_ds, return_dict=True)
"""
We see that our accuracy on our test data is lower than the accuracy for our validating
set. This may indicate overfitting.
Our recall is greater than our precision, indicating that almost all pneumonia images are
correctly identified but some normal images are falsely identified. We should aim to
increase our precision.
"""
for image, label in test_ds.take(1):
plt.imshow(image[0] / 255.0)
plt.title(CLASS_NAMES[label[0].numpy()])
prediction = model.predict(test_ds.take(1))[0]
scores = [1 - prediction, prediction]
for score, name in zip(scores, CLASS_NAMES):
print("This image is %.2f percent %s" % ((100 * score), name))
|
apache-2.0
|
wjw12/emc
|
density.py
|
1
|
3126
|
from particle import *
@mlab.show
def show(a):
mlab.pipeline.volume(mlab.pipeline.scalar_field(a))
@mlab.show
def drawp(points):
mlab.points3d(points[:,0],points[:,1],points[:,2])
def test0():
# rotate a point
a = 17
r = 8
p = np.ones((a,a))
m = np.zeros((a,a,a))
m[r,:,:] = p
new_m = np.zeros((a,a,a))
points = fibonacciSphere(500)
rot = []
p0 = points[0]
eye = np.eye(3)
for p1 in points[1:]:
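# Rodrigues-style rotation matrix taking p0 onto p1:
# R = I + [v]_x + ((1 - c) / s**2) * [v]_x**2, where v = p0 x p1, s = |v|, c = p0 . p1
# (assumes p0 and p1 are unit vectors and not (anti)parallel, so s != 0)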
v = np.cross(p0,p1)
s = np.linalg.norm(v) # sin of vectors
c = np.dot(p0,p1) # cos of vectors
v = makeCrossMatrix(v)
vv = (1-c)/(s*s) * np.dot(v,v)
rot_mat = np.eye(3) + v + vv
rot.append(rot_mat)
ro_points = np.zeros((500,3))
ro_points[0] = p0
for i in range(1,500):
p_ = np.dot(rot[i-1], p0)
ro_points[i] = p_
drawp(ro_points)
def test():
import matplotlib.pyplot as plt
# rotate a plane, without rotating around an axis
a = 17
r = 8
p = np.ones((a,a))
m = np.zeros((a,a,a))
m[:,r,:] = p
new_m = np.zeros((a,a,a))
points = fibonacciSphere(500)
rot = []
eye = np.eye(3)
p0 = points[0]
for p1 in points[1:]:
v = np.cross(p0,p1)
s = np.linalg.norm(v) # sin of vectors
c = np.dot(p0,p1) # cos of vectors
v = makeCrossMatrix(v)
vv = (1-c)/(s*s) * np.dot(v,v)
rot_mat = np.eye(3) + v + vv
rot.append(rot_mat)
show(m)
# compress all planes of random directions by simply adding them together
c = 0
for i in rot:
displace = np.array([r,r,r])
offset = -np.dot(i,displace) + displace
mm = affine_transform(m,i,offset)
new_m += mm
c += 1
show(new_m)
y = new_m[r,r,:]
x = [i for i in range(len(y))]
plt.plot(x,y)
plt.show()
def test__():
a = 17
r = 8
p = np.ones((a,a))
m = np.zeros((a,a,a))
m[:,r,:] = p
new_m = np.zeros((a,a,a))
points = fibonacciSphere(200)
rot = []
ang = 50
sin_value = np.sin(np.linspace(0, 2*np.pi, ang))
cos_value = np.cos(np.linspace(0, 2*np.pi, ang)) # store the values to reuse
eye = np.eye(3)
for p1 in points:
k = makeCrossMatrix(p1)
kk = np.dot(k,k)
for i in range(ang):
rot.append(eye + k*sin_value[i] + kk*(1-cos_value[i]))
# compress all planes of random directions by simply adding them together
for i in rot:
displace = np.array([r,r,r])
offset = -np.dot(i,displace) + displace
mm = affine_transform(m,i,offset,order=5)
new_m += mm
show(m)
show(new_m)
def test2(n):
from numpy.linalg import qr
a = 17
R = 8
p = np.ones((a,a))
m = np.zeros((a,a,a))
m[:,R,:] = p
new_m = np.zeros((a,a,a))
for i in range(n):
q, r = qr(np.random.randn(3,3))
d = np.diagonal(r)
d = d/np.abs(d)
q = np.multiply(q,d)
displace = np.array([R,R,R])
offset = -np.dot(q,displace) + displace
mm = affine_transform(m,q,offset,order=5)
new_m += mm
show(new_m)
|
gpl-2.0
|
cxhernandez/osprey
|
osprey/tests/test_cli_worker_and_dump.py
|
1
|
2065
|
from __future__ import print_function, absolute_import, division
import os
import sys
import json
import shutil
import subprocess
import tempfile
from distutils.spawn import find_executable
from numpy.testing.decorators import skipif
try:
__import__('msmbuilder')
HAVE_MSMBUILDER = True
except:
HAVE_MSMBUILDER = False
OSPREY_BIN = find_executable('osprey')
@skipif(not HAVE_MSMBUILDER, 'this test requires MSMBuilder')
def test_1():
from msmbuilder.example_datasets import FsPeptide
assert OSPREY_BIN is not None
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
FsPeptide(dirname).get()
try:
os.chdir(dirname)
subprocess.check_call([OSPREY_BIN, 'skeleton', '-t', 'msmbuilder',
'-f', 'config.yaml'])
subprocess.check_call([OSPREY_BIN, 'worker', 'config.yaml', '-n', '1'])
assert os.path.exists('osprey-trials.db')
yield _test_dump_1
yield _test_plot_1
finally:
os.chdir(cwd)
shutil.rmtree(dirname)
def test_2():
assert OSPREY_BIN is not None
cwd = os.path.abspath(os.curdir)
dirname = tempfile.mkdtemp()
try:
os.chdir(dirname)
subprocess.check_call([OSPREY_BIN, 'skeleton', '-t', 'sklearn',
'-f', 'config.yaml'])
subprocess.check_call([OSPREY_BIN, 'worker', 'config.yaml', '-n', '1'])
assert os.path.exists('osprey-trials.db')
subprocess.check_call([OSPREY_BIN, 'current_best', 'config.yaml'])
yield _test_dump_1
yield _test_plot_1
finally:
os.chdir(cwd)
shutil.rmtree(dirname)
def _test_dump_1():
out = subprocess.check_output(
[OSPREY_BIN, 'dump', 'config.yaml', '-o', 'json'])
if sys.version_info >= (3, 0):
out = out.decode()
json.loads(out)
def _test_plot_1():
out = subprocess.check_output(
[OSPREY_BIN, 'plot', 'config.yaml', '--no-browser'])
if not os.path.isfile('./plot.html'):
raise ValueError('Plot not created')
|
apache-2.0
|
yarikoptic/pystatsmodels
|
tools/examples_rst.py
|
30
|
5894
|
#! /usr/bin/env python
import os
import sys
import re
import subprocess
import pickle
from StringIO import StringIO
# 3rd party
from matplotlib import pyplot as plt
# Ours
import hash_funcs
#----------------------------------------------------
# Globals
#----------------------------------------------------
# these files do not get made into .rst files because of
# some problems, they may need a simple cleaning up
exclude_list = ['run_all.py',
# these need to be cleaned up
'example_ols_tftest.py',
'example_glsar.py',
'example_ols_table.py',
#not finished yet
'example_arima.py',
'try_wls.py']
file_path = os.path.dirname(__file__)
docs_rst_dir = os.path.realpath(os.path.join(file_path,
'../docs/source/examples/generated/'))
example_dir = os.path.realpath(os.path.join(file_path,
'../examples/'))
def check_script(filename):
"""
Run all the files in filelist from run_all. Add any with problems
to exclude_list and return it.
"""
file_to_run = "python -c\"import warnings; "
file_to_run += "warnings.simplefilter('ignore'); "
file_to_run += "from matplotlib import use; use('Agg'); "
file_to_run += "execfile(r'%s')\"" % os.path.join(example_dir, filename)
proc = subprocess.Popen(file_to_run, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
#NOTE: use communicate to wait for process termination
stdout, stderr = proc.communicate()
result = proc.returncode
if result != 0: # raised an error
msg = "Not generating reST from %s. An error occurred.\n" % filename
msg += stderr
print msg
return False
return True
def parse_docstring(block):
"""
Strips the docstring from a string representation of the file.
Returns the docstring and block without it
"""
ds = "\"{3}|'{3}"
try:
start = re.search(ds, block).end()
end = re.search(ds, block[start:]).start()
except: #TODO: make more informative
raise IOError("File %s does not have a docstring?")
docstring = block[start:start+end]
block = block[start+end+3:]
return docstring.strip(), block
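# Illustrative example (not part of the original script): for a file beginning
#   """Ordinary Least Squares"""
#   import numpy as np
# parse_docstring returns ("Ordinary Least Squares", "\nimport numpy as np\n...").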
def parse_file(block):
"""
Block is a raw string file.
"""
docstring, block = parse_docstring(block)
# just get the first line from the docstring
docstring = docstring.split('\n')[0] or docstring.split('\n')[1]
outfile = [docstring,'='*len(docstring),'']
block = block.split('\n')
# iterate through the rest of block, anything in comments is stripped of #
# anything else is fair game to go in an ipython directive
code_snippet = False
for line in block:
#if not len(line):
# continue
# preserve blank lines
if line.startswith('#') and not (line.startswith('#%') or
line.startswith('#@')):
# on some ReST text
if code_snippet: # were on a code snippet
outfile.append('')
code_snippet = False
line = line.strip()
# try to remove lines like # hello -> #hello
line = re.sub("(?<=#) (?!\s)", "", line)
# make sure commented out things have a space
line = re.sub("#\.\.(?!\s)", "#.. ", line)
line = re.sub("^#+", "", line) # strip multiple hashes
outfile.append(line)
else:
if not code_snippet: # new code block
outfile.append('\n.. ipython:: python\n')
code_snippet = True
# handle decorators and magic functions
if line.startswith('#%') or line.startswith('#@'):
line = line[1:]
outfile.append(' '+line.strip('\n'))
return '\n'.join(outfile)
def write_file(outfile, rst_file_pth):
"""
Write outfile to rst_file_pth
"""
print "Writing ", os.path.basename(rst_file_pth)
write_file = open(rst_file_pth, 'w')
write_file.writelines(outfile)
write_file.close()
def restify(example_file, filehash, fname):
"""
Takes a whole file ie., the result of file.read(), its md5 hash, and
the filename
Parse the file
Write the new .rst
Update the hash_dict
"""
write_filename = os.path.join(docs_rst_dir, fname[:-2] + 'rst')
try:
rst_file = parse_file(example_file)
except IOError as err:
raise IOError(err.message % fname)
write_file(rst_file, write_filename)
if filehash is not None:
hash_funcs.update_hash_dict(filehash, fname)
if __name__ == "__main__":
sys.path.insert(0, example_dir)
from run_all import filelist
sys.path.remove(example_dir)
if not os.path.exists(docs_rst_dir):
os.makedirs(docs_rst_dir)
if len(sys.argv) > 1: # given a file,files to process, no help flag yet
for example_file in sys.argv[1:]:
whole_file = open(example_file, 'r').read()
restify(whole_file, None, example_file)
else: # process the whole directory
for root, dirnames, filenames in os.walk(example_dir):
if 'notebooks' in root:
continue
for example in filenames:
example_file = os.path.join(root, example)
whole_file = open(example_file, 'r').read()
to_write, filehash = hash_funcs.check_hash(whole_file,
example)
if not to_write:
print "Hash has not changed for file %s" % example
continue
elif (not example.endswith('.py') or example in exclude_list or
not check_script(example_file)):
continue
restify(whole_file, filehash, example)
|
bsd-3-clause
|
pytroll/pygac
|
pygac/utils.py
|
1
|
11310
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author(s):
# Stephan Finkensieper <stephan.finkensieper@dwd.de>
# Carlos Horn <carlos.horn@external.eumetsat.int>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gzip
import io
import logging
from contextlib import contextmanager, nullcontext
import numpy as np
LOG = logging.getLogger(__name__)
def gzip_inspected(open_file):
"""Try to gzip decompress the file object if applicable."""
try:
file_object = gzip.GzipFile(mode='rb', fileobj=open_file)
file_object.read(1)
except OSError:
file_object = open_file
finally:
file_object.seek(0)
return file_object
@contextmanager
def file_opener(file):
if isinstance(file, io.IOBase) and file.seekable():
# avoid closing the file using nullcontext
open_file = nullcontext(file)
elif hasattr(file, 'open'):
try:
open_file = file.open(mode='rb')
except TypeError:
open_file = file.open()
else:
open_file = open(file, mode='rb')
# set open_file into context in case of lazy loading in __enter__ method.
with open_file as file_object:
yield gzip_inspected(file_object)
def get_absolute_azimuth_angle_diff(sat_azi, sun_azi):
"""Calculates absolute azimuth difference angle. """
rel_azi = abs(sat_azi - sun_azi)
rel_azi = rel_azi % 360
# Not using np.where to avoid copying array
rel_azi[rel_azi > 180] = 360.0 - rel_azi[rel_azi > 180]
return rel_azi
def centered_modulus(array, divisor):
"""Transform array to half open range ]-divisor/2, divisor/2]."""
arr = array % divisor
arr[arr > divisor / 2] -= divisor
return arr
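# Illustrative values (not part of the original module):
# get_absolute_azimuth_angle_diff(np.array([10.0]), np.array([350.0])) -> array([20.])
# centered_modulus(np.array([350.0, 10.0]), 360.0) -> array([-10., 10.])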
def calculate_sun_earth_distance_correction(jday):
"""Calculate the sun earth distance correction.
In 2008 3-4 different equations of ESD were considered.
This one was chosen as it at the time gave reflectances most closely
matching the PATMOS-x data provided then by Andy Heidinger.
Formula might need to be reconsidered if jday is updated to a float.
"""
# Earth-Sun distance correction factor
corr = 1.0 - 0.0334 * np.cos(2.0 * np.pi * (jday - 2) / 365.25)
return corr
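# Illustrative values (not part of the original module): the factor is smallest
# near perihelion (early January) and largest near aphelion (early July), e.g.
# calculate_sun_earth_distance_correction(2) -> ~0.9666
# calculate_sun_earth_distance_correction(185) -> ~1.0334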
def check_user_scanlines(start_line, end_line, first_valid_lat=None,
last_valid_lat=None, along_track=None):
"""Check user-defined scanlines.
Can be used by both pygac and satpy.
Args:
start_line: User-defined start line (after stripping, if enabled)
end_line: User-defined end line (after stripping, if enabled)
first_valid_lat: First scanline with valid latitudes
last_valid_lat: Last scanline with valid latitudes
along_track: Number of scanlines (only needed if stripping
is disabled)
"""
if first_valid_lat is not None and last_valid_lat is not None:
num_valid_lines = last_valid_lat - first_valid_lat + 1
else:
if along_track is None:
raise ValueError('Need along_track')
num_valid_lines = along_track
start_line = int(start_line)
end_line = int(end_line)
if end_line == 0:
# If the user specifies 0 as the last scanline, process all
# scanlines with valid coordinates
end_line = num_valid_lines - 1
elif end_line >= num_valid_lines:
end_line = num_valid_lines - 1
LOG.warning('Given end line exceeds scanline range, resetting '
'to {}'.format(end_line))
if start_line > num_valid_lines:
raise ValueError('Given start line {} exceeds scanline range {}'
.format(start_line, num_valid_lines))
return start_line, end_line
def strip_invalid_lat(lats):
"""Strip invalid latitudes at the end and beginning of the orbit."""
no_wrong_lat = np.where(np.logical_not(np.isnan(lats)))
return min(no_wrong_lat[0]), max(no_wrong_lat[0])
def slice_channel(ch, start_line, end_line, first_valid_lat=None,
last_valid_lat=None, midnight_scanline=None,
miss_lines=None, qual_flags=None):
"""Slice channel data using user-defined start/end line.
If valid_lat_start/end are given, strip scanlines with invalid
coordinates at the beginning and end of the orbit.
Can be used by both pygac and satpy.
Args:
ch: Channel data
start_line: User-defined start line (after stripping, if enabled)
end_line: User-defined end line (after stripping, if enabled)
first_valid_lat: First scanline with valid latitudes
last_valid_lat: Last scanline with valid latitudes.
midnight_scanline: If given, update midnight scanline to the new
scanline range.
miss_lines: If given, update list of missing lines with the ones
that have been stripped due to invalid coordinates
qual_flags: Quality flags, needed to updated missing lines.
"""
if first_valid_lat is not None and last_valid_lat is not None:
# Strip invalid coordinates and update midnight scanline as well as
# user-defined start/end lines
ch, updated = _slice(ch,
start_line=first_valid_lat,
end_line=last_valid_lat,
update=[midnight_scanline])
midnight_scanline = updated[0]
# Reset user-defined end line, if it has been removed
end_line = min(end_line, ch.shape[0] - 1)
start_line = min(start_line, ch.shape[0] - 1)
# Update missing scanlines
if miss_lines is not None:
miss_lines = _update_missing_scanlines(
miss_lines=miss_lines,
qual_flags=qual_flags,
start_line=first_valid_lat,
end_line=last_valid_lat)
# Slice data using user-defined start/end lines
ch_slc, updated = _slice(ch, start_line=start_line, end_line=end_line,
update=[midnight_scanline])
midnight_scanline = updated[0]
return ch_slc, miss_lines, midnight_scanline
def _slice(ch, start_line, end_line, update=None):
"""Slice the given channel.
Args:
start_line: New start line
end_line: New end line
update: List of scanlines to be updated to the new range
"""
# Slice data using new start/end lines
if len(ch.shape) == 1:
ch_slc = ch[start_line:end_line + 1].copy()
else:
ch_slc = ch[start_line:end_line + 1, :].copy()
if update:
updated = [_update_scanline(l, start_line, end_line)
if l is not None else None
for l in update]
return ch_slc, updated
return ch_slc
def _update_scanline(scanline, new_start_line, new_end_line):
"""Update the given scanline to the new range.
Set scanline to None if it lies outside the new range.
"""
scanline -= new_start_line
num_lines = new_end_line - new_start_line + 1
if scanline < 0 or scanline >= num_lines:
scanline = None
return scanline
def _update_missing_scanlines(miss_lines, qual_flags, start_line, end_line):
"""Add scanlines excluded by slicing to the list of missing scanlines.
Args:
miss_lines: List of missing scanlines
qual_flags: Quality flags
start_line: New start line of the slice
end_line: New end line of the slice
"""
return np.sort(np.unique(
qual_flags[0:start_line, 0].tolist() +
miss_lines.tolist() +
qual_flags[end_line + 1:, 0].tolist()
))
def plot_correct_times_thresh(res, filename=None):
"""Visualize results of GACReader.correct_times_thresh."""
import matplotlib.pyplot as plt
t = res['t']
tcorr = res.get('tcorr')
n = res['n']
offsets = res.get('offsets')
t0_head = res.get('t0_head')
max_diff_from_t0_head = res.get('max_diff_from_t0_head')
fail_reason = res.get('fail_reason', 'Failed for unknown reason')
# Setup figure
along_track = np.arange(t.size)
_, (ax0, ax1, ax2) = plt.subplots(nrows=3, sharex=True,
figsize=(8, 10))
# Plot original vs corrected timestamps
ax0.plot(along_track, t, "b-", label="original")
if tcorr is not None:
ax0.plot(along_track, tcorr, color="red", linestyle="--",
label="corrected")
else:
ax0.set_title(fail_reason)
ax0.set_ylabel("Time")
ax0.set_ylim(t.min() - np.timedelta64(30, "m"),
t.max() + np.timedelta64(30, "m"))
ax0.legend(loc="best")
# Plot offset (original time - ideal time)
if offsets is not None:
ax1.plot(along_track, offsets)
ax1.fill_between(
along_track,
t0_head - np.ones(along_track.size) * max_diff_from_t0_head,
t0_head + np.ones(along_track.size) * max_diff_from_t0_head,
facecolor="g", alpha=0.33)
ax1.axhline(y=t0_head, color="g", linestyle="--",
label="Header timestamp")
ax1.set_ylim(t0_head - 5 * max_diff_from_t0_head,
t0_head + 5 * max_diff_from_t0_head)
ax1.set_ylabel("Offset t-tn [ms]")
ax1.legend(loc="best")
# Plot scanline number
ax2.plot(along_track, n)
ax2.set_ylabel("Scanline number")
ax2.set_xlabel("Along Track")
if filename:
plt.savefig(filename, bbox_inches="tight", dpi=100)
else:
plt.show()
def plot_correct_scanline_numbers(res, filename=None):
"""Visualize results of GACReader.correct_scanline_numbers."""
import matplotlib.pyplot as plt
along_track = res['along_track']
n_orig = res['n_orig']
n_corr = res['n_corr']
within_range = res['within_range']
thresh = res['thresh']
diffs = res['diffs']
nz_diffs = res['nz_diffs']
# Setup figure
_, (ax0, ax1) = plt.subplots(nrows=2)
# Plot original vs corrected scanline numbers
ax0.plot(along_track, n_orig, "b-", label="original")
along_track_corr = along_track.copy()
along_track_corr = along_track_corr[within_range]
along_track_corr = along_track_corr[diffs <= thresh]
ax0.plot(along_track_corr, n_corr, "r--", label="corrected")
ax0.set_ylabel("Scanline Number")
ax0.set_xlabel("Along Track")
ax0.legend(loc="best")
# Plot difference from ideal
ax1.plot(np.arange(len(nz_diffs)), nz_diffs)
ax1.axhline(thresh, color="r", label="thresh={0:.2f}"
.format(thresh))
ax1.set_xlabel("Index")
ax1.set_ylabel("nonzero |n - n'|")
ax1.legend()
plt.tight_layout()
if filename:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
|
gpl-3.0
|
fabianp/scikit-learn
|
benchmarks/bench_multilabel_metrics.py
|
86
|
7286
|
#!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
    density : array-like of floats (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
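# Added usage sketch, not part of the original benchmark script: time only the
# micro-averaged F1 score on dense and CSR targets for two sample sizes. The
# helper name `_example_benchmark_run` is hypothetical.
def _example_benchmark_run():
    out = benchmark(metrics=[METRICS['f1']],
                    formats=[FORMATS['dense'], FORMATS['csr']],
                    samples=[200, 400], classes=4, density=.2, n_times=2)
    # axes: (metrics, formats, samples, classes, density)
    assert out.shape == (1, 2, 2, 1, 1)
    return out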
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
|
bsd-3-clause
|
zaxliu/deepnap
|
experiments/kdd-exps/experiment_QNN_legacy_template.py
|
1
|
4398
|
# System built-in modules
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
sys.path.append('../')
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
sys_stdout = sys.stdout
log_file_name = "Default"
# Composite classes
class Phi_QAgentNN(PhiMixin, QAgentNN):
def __init__(self, **kwargs):
super(Phi_QAgentNN, self).__init__(**kwargs)
# Parameters
# |- Agent
# |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.9, 0.9 # TD backup
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
# |- QAgentNN
# | - Phi
phi_length = 5
dim_state = (1, phi_length, 3+2)
range_state_slice = [(0, 10), (0, 10), (0, 10), (0, 1), (0, 1)]
range_state = [[range_state_slice]*phi_length]
# | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size = 2, 200
reward_scaling, reward_scaling_update = 1, 'adaptive'
batch_size, update_period, freeze_period, rs_period = 100, 4, 16, 32
# |- Env model
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, 0.0
beta = None # R = (1-beta)*ServiceReward + beta*Cost
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- Env
# |- Time
start_time = pd.to_datetime("2014-10-15 09:20:00")
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
# |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
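# Added sanity-check sketch, not part of the original experiment script: the
# expanded range_state must agree with dim_state, i.e. one stack of phi_length
# frames, each carrying the five clipped features defined by range_state_slice.
# The name `_check_phi_config` is hypothetical and the experiment never calls it.
def _check_phi_config():
    assert dim_state == (1, phi_length, len(range_state_slice))
    assert len(range_state) == 1 and len(range_state[0]) == phi_length
    assert all(len(frame) == len(range_state_slice) for frame in range_state[0])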
# load from processed data
session_df = pd.read_csv(
filepath_or_buffer='../data/trace_dh3.dat',
parse_dates=['startTime_datetime', 'endTime_datetime']
)
te = TrafficEmulator(
session_df=session_df, time_step=time_step,
head_datetime=head_datetime, tail_datetime=tail_datetime,
rewarding=rewarding,
verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
agent = Phi_QAgentNN(
phi_length=phi_length,
dim_state=dim_state, range_state=range_state,
f_build_net = None,
batch_size=batch_size, learning_rate=learning_rate, momentum=momentum,
reward_scaling=reward_scaling, reward_scaling_update=reward_scaling_update, rs_period=rs_period,
update_period=update_period, freeze_period=freeze_period,
memory_size=memory_size, num_buffer=num_buffer,
# Below is QAgent params
actions=actions, alpha=alpha, gamma=gamma,
explore_strategy=explore_strategy, epsilon=epsilon,
verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavyliftings
t = time.time()
sys.stdout = sys_stdout
log_path = './log/'
if os.path.isfile(log_path+log_file_name):
    print "Log file {} already exists. Experiment cancelled.".format(log_file_name)
else:
log_file = open(log_path+log_file_name,"w")
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
# log time
print "Epoch {},".format(emu.epoch),
left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
right = left + emu.te.time_step
print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
emu.step()
print
if emu.epoch%(0.05*TOTAL_EPOCHS)==0:
sys.stdout = sys_stdout
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
sys.stdout = sys_stdout
log_file.close()
print
print log_file_name,
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
|
bsd-3-clause
|
Stargrazer82301/CAAPR
|
CAAPR/CAAPR_AstroMagic/PTS/pts/eagle/plotdensitycurves.py
|
2
|
4043
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.eagle.plotdensitycurves Plot stellar, gas and dust
# densities in function of the galaxy radius for an EAGLE SKIRT-run.
#
# The facilities in this module serve to plot stellar, gas and dust
# densities in function of the galaxy radius for a particular EAGLE SKIRT-run.
# The data is calculated based on the SPH particles in the input files,
# assuming all mass is concentrated in a particle's center position.
# ----------------------------------------------------------------------
# use a non-interactive back-end to generate high-quality vector graphics
import matplotlib.pyplot as plt
# import standard modules
import os.path
import numpy as np
# import pts modules
from ..core.tools import archive as arch
# ----------------------------------------------------------------------
# load columns text file in given directory and with name ending with given extension
def loadfile(inpath, extension):
filenames = arch.listdir(inpath, extension)
if len(filenames)!=1: raise ValueError("input file not found")
filepath = os.path.join(inpath, filenames[0])
return np.loadtxt(arch.opentext(filepath), unpack=True)
# ----------------------------------------------------------------------
## This function creates a PDF plot with histograms of stellar, gas and dust
# densities in function of the galaxy radius for a particular EAGLE SKIRT-run.
# The data is calculated based on the SPH particles in the input files,
# assuming all mass is concentrated in a particle's center position.
# The output plot is placed in the SKIRT-run's visualization directory.
def plotdensitycurves(skirtrun):
# setup the figure
figure = plt.figure(figsize=(10,6))
rmax = 50 # kpc
# load and plot the stars
x,y,z,h,M,Z,t = loadfile(skirtrun.inpath(), "_stars.dat")
r = np.sqrt(x*x + y*y + z*z)/1000 # kpc
r[r>rmax] = rmax
plt.hist(r, weights=M, bins=25, range=(0,rmax), histtype='step', log=True, color='b', label="stars")
# load and plot the gas
x,y,z,h,Mgas,Z,T = loadfile(skirtrun.inpath(), "_gas.dat")
r = np.sqrt(x*x + y*y + z*z)/1000 # kpc
r[r>rmax] = rmax
M = Mgas.copy()
M[np.abs(T)>75000] = 0
if np.any(M):
plt.hist(r, weights=M*Z, bins=25, range=(0,rmax), histtype='step', log=True, color='m', ls='dashed', label="metals (T<75000K)")
M = Mgas.copy()
M[np.abs(T)>8000] = 0
if np.any(M):
plt.hist(r, weights=M*Z, bins=25, range=(0,rmax), histtype='step', log=True, color='m', ls='dotted', label="metals (T<8000K)")
M = Mgas.copy()
M[T>8000] = 0
if np.any(M):
plt.hist(r, weights=M*Z, bins=25, range=(0,rmax), histtype='step', log=True, color='m', ls='solid', label="metals (T<8000K or SFR>0)")
# load and plot the hii regions
try:
x,y,z,h,SFR,Z,logC,P,fPDR = loadfile(skirtrun.inpath(), "_hii.dat")
r = np.sqrt(x*x + y*y + z*z)/1000 # kpc
r[r>rmax] = rmax
plt.hist(r, weights=SFR*1e7, bins=25, range=(0,rmax), histtype='step', log=True, color='c', label="hii regions")
except ValueError:
pass
# add axis labels, legend and title
plt.grid('on')
plt.xlabel("r (kpc)", fontsize='medium')
plt.ylabel("Mass (Msun)", fontsize='medium')
plt.ylim(1e4, 1e9)
plt.legend(loc='upper right', prop={'size':'small'})
plt.title("runid {} -- {}".format(skirtrun.runid(), skirtrun.prefix()), fontsize='medium')
# save the figure
plotpath = os.path.join(skirtrun.vispath(), skirtrun.prefix()+"_density_curves.pdf")
plt.savefig(plotpath, bbox_inches='tight', pad_inches=0.25)
plt.close()
print "Created PDF plot file " + plotpath
# ----------------------------------------------------------------------
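# Added self-contained sketch, not part of PTS: the same clip-then-histogram
# pattern used in plotdensitycurves above, applied to randomly generated particle
# positions. All names below are illustrative assumptions.
def _demo_radial_histogram(rmax=50):
    x, y, z = np.random.uniform(-100, 100, size=(3, 1000))
    M = np.random.uniform(0, 1e6, size=1000)
    r = np.sqrt(x*x + y*y + z*z)
    r[r > rmax] = rmax   # pile everything beyond rmax into the outermost bin
    plt.hist(r, weights=M, bins=25, range=(0, rmax), histtype='step', log=True)
    return r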
|
mit
|
moonboots/tensorflow
|
tensorflow/python/client/notebook.py
|
26
|
4596
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import socket
import sys
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
"password", None,
"Password to require. If set, the server will allow public access."
" Only used if notebook config file does not exist.")
flags.DEFINE_string("notebook_dir", "experimental/brain/notebooks",
"root location where to store notebooks")
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
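# Added illustrative helper (not part of the original file): mirrors the
# IS_KERNEL check above for an arbitrary argv list such as
# ["notebook.py", "kernel", "--flagfile=..."].
def _is_kernel_invocation(argv):
  return len(argv) > 1 and argv[1] == "kernel"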
def main(unused_argv):
sys.argv = ORIG_ARGV
if not IS_KERNEL:
# Drop all flags.
sys.argv = [sys.argv[0]]
# NOTE(sadovsky): For some reason, putting this import at the top level
# breaks inline plotting. It's probably a bug in the stone-age version of
# matplotlib.
from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top
notebookapp = NotebookApp.instance()
notebookapp.open_browser = True
# password functionality adopted from quality/ranklab/main/tools/notebook.py
# add options to run with "password"
if FLAGS.password:
from IPython.lib import passwd # pylint: disable=g-import-not-at-top
notebookapp.ip = "0.0.0.0"
notebookapp.password = passwd(FLAGS.password)
else:
print ("\nNo password specified; Notebook server will only be available"
" on the local machine.\n")
notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
if notebookapp.ip == "0.0.0.0":
proto = "https" if notebookapp.certfile else "http"
url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
notebookapp.base_project_url)
print("\nNotebook server will be publicly available at: %s\n" % url)
notebookapp.start()
return
# Drop the --flagfile flag so that notebook doesn't complain about an
# "unrecognized alias" when parsing sys.argv.
sys.argv = ([sys.argv[0]] +
[z for z in sys.argv[1:] if not z.startswith("--flagfile")])
from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top
kernelapp = IPKernelApp.instance()
kernelapp.initialize()
# Enable inline plotting. Equivalent to running "%matplotlib inline".
ipshell = kernelapp.shell
ipshell.enable_matplotlib("inline")
kernelapp.start()
if __name__ == "__main__":
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = ([sys.argv[0]] +
[x for x in sys.argv[1:] if x.startswith("--flagfile")])
app.run()
|
apache-2.0
|
joergdietrich/astropy
|
astropy/table/table.py
|
2
|
101482
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..extern import six
from ..extern.six.moves import zip, range
from .index import TableIndices, TableLoc, TableILoc
import re
import sys
from collections import OrderedDict, Mapping
import warnings
from copy import deepcopy
import numpy as np
from numpy import ma
from .. import log
from ..io import registry as io_registry
from ..units import Quantity
from ..utils import isiterable, ShapedLikeNDArray
from ..utils.compat.numpy import broadcast_to as np_broadcast_to
from ..utils.console import color_print
from ..utils.metadata import MetaData
from ..utils.data_info import BaseColumnInfo, MixinInfo, ParentDtypeInfo, DataInfo
from . import groups
from .pprint import TableFormatter
from .column import (BaseColumn, Column, MaskedColumn, _auto_names, FalseArray,
col_copy)
from .row import Row
from .np_utils import fix_column_name, recarray_fromrecords
from .info import TableInfo
from .index import Index, _IndexModeContext, get_index
from . import conf
__doctest_skip__ = ['Table.read', 'Table.write',
'Table.convert_bytestring_to_unicode',
'Table.convert_unicode_to_bytestring',
]
class TableReplaceWarning(UserWarning):
"""
Warning class for cases when a table column is replaced via the
Table.__setitem__ syntax e.g. t['a'] = val.
This does not inherit from AstropyWarning because we want to use
stacklevel=3 to show the user where the issue occurred in their code.
"""
pass
def descr(col):
"""Array-interface compliant full description of a column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
col_dtype = 'O' if (col.info.dtype is None) else col.info.dtype
col_shape = col.shape[1:] if hasattr(col, 'shape') else ()
return (col.info.name, col_dtype, col_shape)
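# Added illustrative sketch, not part of astropy: what `descr` returns for a
# plain 1-d column and for a column with a non-trivial per-row shape. The helper
# name `_demo_descr` is hypothetical.
def _demo_descr():
    c1 = Column(name='a', data=[1, 2, 3])
    c2 = Column(name='b', data=np.zeros((3, 2)))
    # e.g. (('a', dtype('int64'), ()), ('b', dtype('float64'), (2,)))
    return descr(c1), descr(c2)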
def has_info_class(obj, cls):
return hasattr(obj, 'info') and isinstance(obj.info, cls)
class TableColumns(OrderedDict):
"""OrderedDict subclass for a set of columns.
This class enhances item access to provide convenient access to columns
by name or index, including slice access. It also handles renaming
of columns.
The initialization argument ``cols`` can be a list of ``Column`` objects
or any structure that is valid for initializing a Python dict. This
includes a dict, list of (key, val) tuples or [key, val] lists, etc.
Parameters
----------
cols : dict, list, tuple; optional
Column objects as data structure that can init dict (see above)
"""
def __init__(self, cols={}):
if isinstance(cols, (list, tuple)):
# `cols` should be a list of two-tuples, but it is allowed to have
# columns (BaseColumn or mixins) in the list.
newcols = []
for col in cols:
if has_info_class(col, BaseColumnInfo):
newcols.append((col.info.name, col))
else:
newcols.append(col)
cols = newcols
super(TableColumns, self).__init__(cols)
def __getitem__(self, item):
"""Get items from a TableColumns object.
::
tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])
tc['a'] # Column('a')
tc[1] # Column('b')
tc['a', 'b'] # <TableColumns names=('a', 'b')>
tc[1:3] # <TableColumns names=('b', 'c')>
"""
if isinstance(item, six.string_types):
return OrderedDict.__getitem__(self, item)
elif isinstance(item, (int, np.integer)):
return self.values()[item]
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.values()[item.item()]
elif isinstance(item, tuple):
return self.__class__([self[x] for x in item])
elif isinstance(item, slice):
return self.__class__([self[x] for x in list(self)[item]])
else:
raise IndexError('Illegal key or index value for {} object'
.format(self.__class__.__name__))
def __setitem__(self, item, value):
if item in self:
raise ValueError("Cannot replace column '{0}'. Use Table.replace_column() instead."
.format(item))
super(TableColumns, self).__setitem__(item, value)
def __repr__(self):
names = ("'{0}'".format(x) for x in six.iterkeys(self))
return "<{1} names=({0})>".format(",".join(names), self.__class__.__name__)
def _rename_column(self, name, new_name):
if name == new_name:
return
if new_name in self:
raise KeyError("Column {0} already exists".format(new_name))
mapper = {name: new_name}
new_names = [mapper.get(name, name) for name in self]
cols = list(six.itervalues(self))
self.clear()
self.update(list(zip(new_names, cols)))
# Define keys and values for Python 2 and 3 source compatibility
def keys(self):
return list(OrderedDict.keys(self))
def values(self):
return list(OrderedDict.values(self))
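# Added runnable version of the access patterns shown in the
# TableColumns.__getitem__ docstring above (illustration only, not part of
# astropy). The helper name `_demo_tablecolumns_access` is hypothetical.
def _demo_tablecolumns_access():
    tc = TableColumns([Column(name='a'), Column(name='b'), Column(name='c')])
    assert tc['a'] is tc[0]                   # access by name or by position
    assert tc['a', 'b'].keys() == ['a', 'b']  # tuple of names -> sub-TableColumns
    assert tc[1:3].keys() == ['b', 'c']       # slice -> sub-TableColumns
    return tc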
class Table(object):
"""A class to represent tables of heterogeneous data.
`Table` provides a class for heterogeneous tabular data, making use of a
`numpy` structured array internally to store the data values. A key
enhancement provided by the `Table` class is the ability to easily modify
the structure of the table by adding or removing columns, or adding new
rows of data. In addition table and column metadata are fully supported.
`Table` differs from `~astropy.nddata.NDData` by the assumption that the
input data consists of columns of homogeneous data, where each column
has a unique identifier and may contain additional metadata such as the
data unit, format, and description.
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names
dtype : list, optional
Specify column data types
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data (default=True).
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument
copy_indices : bool, optional
Copy any indices in the input data (default=True)
**kwargs : dict, optional
Additional keyword args when converting table-like object
.. note::
If the input is a Table the ``meta`` is always copied regardless of the
``copy`` parameter.
"""
meta = MetaData()
# Define class attributes for core container objects to allow for subclass
# customization.
Row = Row
Column = Column
MaskedColumn = MaskedColumn
TableColumns = TableColumns
TableFormatter = TableFormatter
def as_array(self, keep_byteorder=False):
"""
Return a new copy of the table in the form of a structured np.ndarray or
np.ma.MaskedArray object (as appropriate).
Parameters
----------
keep_byteorder : bool, optional
By default the returned array has all columns in native byte
order. However, if this option is `True` this preserves the
byte order of all columns (if any are non-native).
Returns
-------
table_array : np.ndarray (unmasked) or np.ma.MaskedArray (masked)
Copy of table as a numpy structured array
"""
if len(self.columns) == 0:
return None
sys_byteorder = ('>', '<')[sys.byteorder == 'little']
native_order = ('=', sys_byteorder)
dtype = []
cols = self.columns.values()
for col in cols:
col_descr = descr(col)
byteorder = col.info.dtype.byteorder
if not keep_byteorder and byteorder not in native_order:
new_dt = np.dtype(col_descr[1]).newbyteorder('=')
col_descr = (col_descr[0], new_dt, col_descr[2])
dtype.append(col_descr)
empty_init = ma.empty if self.masked else np.empty
data = empty_init(len(self), dtype=dtype)
for col in cols:
# When assigning from one array into a field of a structured array,
# Numpy will automatically swap those columns to their destination
# byte order where applicable
data[col.info.name] = col
return data
def __init__(self, data=None, masked=None, names=None, dtype=None,
meta=None, copy=True, rows=None, copy_indices=True,
**kwargs):
# Set up a placeholder empty table
self._set_masked(masked)
self.columns = self.TableColumns()
self.meta = meta
self.formatter = self.TableFormatter()
self._copy_indices = True # copy indices from this Table by default
self._init_indices = copy_indices # whether to copy indices in init
self.primary_key = None
# Must copy if dtype are changing
if not copy and dtype is not None:
raise ValueError('Cannot specify dtype when copy=False')
# Row-oriented input, e.g. list of lists or list of tuples, list of
# dict, Row instance. Set data to something that the subsequent code
# will parse correctly.
is_list_of_dict = False
if rows is not None:
if data is not None:
raise ValueError('Cannot supply both `data` and `rows` values')
if all(isinstance(row, dict) for row in rows):
is_list_of_dict = True # Avoid doing the all(...) test twice.
data = rows
elif isinstance(rows, self.Row):
data = rows
else:
rec_data = recarray_fromrecords(rows)
data = [rec_data[name] for name in rec_data.dtype.names]
# Infer the type of the input data and set up the initialization
# function, number of columns, and potentially the default col names
default_names = None
if hasattr(data, '__astropy_table__'):
# Data object implements the __astropy_table__ interface method.
# Calling that method returns an appropriate instance of
# self.__class__ and respects the `copy` arg. The returned
# Table object should NOT then be copied (though the meta
# will be deep-copied anyway).
data = data.__astropy_table__(self.__class__, copy, **kwargs)
copy = False
elif kwargs:
raise TypeError('__init__() got unexpected keyword argument {!r}'
.format(list(kwargs.keys())[0]))
if (isinstance(data, np.ndarray) and
data.shape == (0,) and
not data.dtype.names):
data = None
if isinstance(data, self.Row):
data = data._table[data._index:data._index + 1]
if isinstance(data, (list, tuple)):
init_func = self._init_from_list
if data and (is_list_of_dict or all(isinstance(row, dict) for row in data)):
n_cols = len(data[0])
else:
n_cols = len(data)
elif isinstance(data, np.ndarray):
if data.dtype.names:
init_func = self._init_from_ndarray # _struct
n_cols = len(data.dtype.names)
default_names = data.dtype.names
else:
init_func = self._init_from_ndarray # _homog
if data.shape == ():
raise ValueError('Can not initialize a Table with a scalar')
elif len(data.shape) == 1:
data = data[np.newaxis, :]
n_cols = data.shape[1]
elif isinstance(data, Mapping):
init_func = self._init_from_dict
default_names = list(data)
n_cols = len(default_names)
elif isinstance(data, Table):
init_func = self._init_from_table
n_cols = len(data.colnames)
default_names = data.colnames
# don't copy indices if the input Table is in non-copy mode
self._init_indices = self._init_indices and data._copy_indices
elif data is None:
if names is None:
if dtype is None:
return # Empty table
try:
# No data nor names but dtype is available. This must be
# valid to initialize a structured array.
dtype = np.dtype(dtype)
names = dtype.names
dtype = [dtype[name] for name in names]
except Exception:
raise ValueError('dtype was specified but could not be '
'parsed for column names')
# names is guaranteed to be set at this point
init_func = self._init_from_list
n_cols = len(names)
data = [[]] * n_cols
else:
raise ValueError('Data type {0} not allowed to init Table'
.format(type(data)))
# Set up defaults if names and/or dtype are not specified.
# A value of None means the actual value will be inferred
# within the appropriate initialization routine, either from
# existing specification or auto-generated.
if names is None:
names = default_names or [None] * n_cols
if dtype is None:
dtype = [None] * n_cols
# Numpy does not support Unicode column names on Python 2, or
# bytes column names on Python 3, so fix them up now.
names = [fix_column_name(name) for name in names]
self._check_names_dtype(names, dtype, n_cols)
# Finally do the real initialization
init_func(data, names, dtype, n_cols, copy)
# Whatever happens above, the masked property should be set to a boolean
if type(self.masked) is not bool:
raise TypeError("masked property has not been set to True or False")
def __getstate__(self):
columns = OrderedDict((key, col if isinstance(col, BaseColumn) else col_copy(col))
for key, col in self.columns.items())
return (columns, self.meta)
def __setstate__(self, state):
columns, meta = state
self.__init__(columns, meta=meta)
@property
def mask(self):
# Dynamic view of available masks
if self.masked:
mask_table = Table([col.mask for col in self.columns.values()],
names=self.colnames, copy=False)
# Set hidden attribute to force inplace setitem so that code like
# t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.
# See #5556 for discussion.
mask_table._setitem_inplace = True
else:
mask_table = None
return mask_table
@mask.setter
def mask(self, val):
self.mask[:] = val
@property
def _mask(self):
"""This is needed so that comparison of a masked Table and a
MaskedArray works. The requirement comes from numpy.ma.core
so don't remove this property."""
return self.as_array().mask
def filled(self, fill_value=None):
"""Return a copy of self, with masked values filled.
If input ``fill_value`` supplied then that value is used for all
masked entries in the table. Otherwise the individual
``fill_value`` defined for each table column is used.
Parameters
----------
fill_value : str
If supplied, this ``fill_value`` is used for all masked entries
in the entire table.
Returns
-------
filled_table : Table
New table with masked values filled
"""
if self.masked:
data = [col.filled(fill_value) for col in six.itervalues(self.columns)]
else:
data = self
return self.__class__(data, meta=deepcopy(self.meta))
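    # Added usage sketch (illustration only, not part of astropy's Table API):
    # fill every masked entry of a small masked table with a sentinel value.
    @staticmethod
    def _example_filled():
        t = Table([[1, 2], [3, 4]], names=('a', 'b'), masked=True)
        t['a'].mask = [True, False]
        return t.filled(-99)   # column 'a' becomes [-99, 2]; 'b' is unchanged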
@property
def indices(self):
'''
Return the indices associated with columns of the table
as a TableIndices object.
'''
lst = []
for column in self.columns.values():
for index in column.info.indices:
if sum([index is x for x in lst]) == 0: # ensure uniqueness
lst.append(index)
return TableIndices(lst)
@property
def loc(self):
'''
Return a TableLoc object that can be used for retrieving
rows by index in a given data range. Note that both loc
and iloc work only with single-column indices.
'''
return TableLoc(self)
@property
def iloc(self):
'''
Return a TableILoc object that can be used for retrieving
indexed rows in the order they appear in the index.
'''
return TableILoc(self)
def add_index(self, colnames, engine=None, unique=False):
'''
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, from among SortedArray, BST,
FastBST, and FastRBT. If the supplied argument is None (by
default), use SortedArray.
unique : bool (defaults to False)
Whether the values of the index must be unique
'''
if isinstance(colnames, six.string_types):
colnames = (colnames,)
columns = self.columns[tuple(colnames)].values()
# make sure all columns support indexing
for col in columns:
if not getattr(col.info, '_supports_indexing', False):
raise ValueError('Cannot create an index on column "{0}", of '
'type "{1}"'.format(col.info.name, type(col)))
index = Index(columns, engine=engine, unique=unique)
if not self.indices:
self.primary_key = colnames
for col in columns:
col.info.indices.append(index)
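    # Added usage sketch (illustration only, not part of astropy's Table API):
    # index a small table on column 'a', then fetch a row by value through `loc`.
    @staticmethod
    def _example_add_index():
        t = Table({'a': [3, 1, 2], 'b': ['x', 'y', 'z']})
        t.add_index('a')
        return t.loc[2]   # the row where a == 2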
def remove_indices(self, colname):
'''
Remove all indices involving the given column.
If the primary index is removed, the new primary
index will be the most recently added remaining
index.
Parameters
----------
colname : str
Name of column
'''
col = self.columns[colname]
for index in self.indices:
try:
index.col_position(col.info.name)
except ValueError:
pass
else:
for c in index.columns:
c.info.indices.remove(index)
def index_mode(self, mode):
'''
Return a context manager for an indexing mode.
Parameters
----------
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications in an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
'''
return _IndexModeContext(self, mode)
def __array__(self, dtype=None):
"""Support converting Table to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
"""
if dtype is not None:
raise ValueError('Datatype coercion is not allowed')
# This limitation is because of the following unexpected result that
# should have made a table copy while changing the column names.
#
# >>> d = astropy.table.Table([[1,2],[3,4]])
# >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])
# array([(0, 0), (0, 0)],
# dtype=[('a', '<i8'), ('b', '<i8')])
return self.as_array().data if self.masked else self.as_array()
def _check_names_dtype(self, names, dtype, n_cols):
"""Make sure that names and dtype are both iterable and have
the same length as data.
"""
for inp_list, inp_str in ((dtype, 'dtype'), (names, 'names')):
if not isiterable(inp_list):
raise ValueError('{0} must be a list or None'.format(inp_str))
if len(names) != n_cols or len(dtype) != n_cols:
raise ValueError(
                'Arguments "names" and "dtype" must match number of columns')
def _set_masked_from_cols(self, cols):
if self.masked is None:
if any(isinstance(col, (MaskedColumn, ma.MaskedArray)) for col in cols):
self._set_masked(True)
else:
self._set_masked(False)
elif not self.masked:
if any(np.any(col.mask) for col in cols if isinstance(col, (MaskedColumn, ma.MaskedArray))):
self._set_masked(True)
def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):
names_from_data = set()
for row in data:
names_from_data.update(row)
cols = {}
for name in names_from_data:
cols[name] = []
for i, row in enumerate(data):
try:
cols[name].append(row[name])
except KeyError:
raise ValueError('Row {0} has no value for column {1}'.format(i, name))
if all(name is None for name in names):
names = sorted(names_from_data)
self._init_from_dict(cols, names, dtype, n_cols, copy)
return
def _init_from_list(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of columns. A column can be a
Column object, np.ndarray, mixin, or any other iterable object.
"""
if data and all(isinstance(row, dict) for row in data):
self._init_from_list_of_dicts(data, names, dtype, n_cols, copy)
return
# Set self.masked appropriately, then get class to create column instances.
self._set_masked_from_cols(data)
cols = []
def_names = _auto_names(n_cols)
for col, name, def_name, dtype in zip(data, names, def_names, dtype):
# Structured ndarray gets viewed as a mixin
if isinstance(col, np.ndarray) and len(col.dtype) > 1:
col = col.view(NdarrayMixin)
if isinstance(col, (Column, MaskedColumn)):
col = self.ColumnClass(name=(name or col.info.name or def_name),
data=col, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
elif self._add_as_mixin_column(col):
# Copy the mixin column attributes if they exist since the copy below
# may not get this attribute.
if copy:
col = col_copy(col, copy_indices=self._init_indices)
col.info.name = name or col.info.name or def_name
elif isinstance(col, np.ndarray) or isiterable(col):
col = self.ColumnClass(name=(name or def_name), data=col, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
else:
raise ValueError('Elements in list initialization must be '
'either Column or list-like')
cols.append(col)
self._init_from_cols(cols)
def _init_from_ndarray(self, data, names, dtype, n_cols, copy):
"""Initialize table from an ndarray structured array"""
data_names = data.dtype.names or _auto_names(n_cols)
struct = data.dtype.names is not None
names = [name or data_names[i] for i, name in enumerate(names)]
cols = ([data[name] for name in data_names] if struct else
[data[:, i] for i in range(n_cols)])
# Set self.masked appropriately, then get class to create column instances.
self._set_masked_from_cols(cols)
if copy:
self._init_from_list(cols, names, dtype, n_cols, copy)
else:
dtype = [(name, col.dtype, col.shape[1:]) for name, col in zip(names, cols)]
newdata = data.view(dtype).ravel()
columns = self.TableColumns()
for name in names:
columns[name] = self.ColumnClass(name=name, data=newdata[name])
columns[name].info.parent_table = self
self.columns = columns
def _init_from_dict(self, data, names, dtype, n_cols, copy):
"""Initialize table from a dictionary of columns"""
# TODO: is this restriction still needed with no ndarray?
if not copy:
raise ValueError('Cannot use copy=False with a dict data input')
data_list = [data[name] for name in names]
self._init_from_list(data_list, names, dtype, n_cols, copy)
def _init_from_table(self, data, names, dtype, n_cols, copy):
"""Initialize table from an existing Table object """
table = data # data is really a Table, rename for clarity
self.meta.clear()
self.meta.update(deepcopy(table.meta))
self.primary_key = table.primary_key
cols = list(table.columns.values())
self._init_from_list(cols, names, dtype, n_cols, copy)
def _convert_col_for_table(self, col):
"""
Make sure that all Column objects have correct class for this type of
Table. For a base Table this most commonly means setting to
MaskedColumn if the table is masked. Table subclasses like QTable
override this method.
"""
if col.__class__ is not self.ColumnClass and isinstance(col, Column):
col = self.ColumnClass(col) # copy attributes and reference data
return col
def _init_from_cols(self, cols):
"""Initialize table from a list of Column or mixin objects"""
lengths = set(len(col) for col in cols)
if len(lengths) != 1:
raise ValueError('Inconsistent data column lengths: {0}'
.format(lengths))
# Set the table masking
self._set_masked_from_cols(cols)
# Make sure that all Column-based objects have correct class. For
# plain Table this is self.ColumnClass, but for instance QTable will
# convert columns with units to a Quantity mixin.
newcols = [self._convert_col_for_table(col) for col in cols]
self._make_table_from_cols(self, newcols)
# Deduplicate indices. It may happen that after pickling or when
# initing from an existing table that column indices which had been
# references to a single index object got *copied* into an independent
# object. This results in duplicates which will cause downstream problems.
index_dict = {}
for col in self.itercols():
for i, index in enumerate(col.info.indices or []):
names = tuple(ind_col.info.name for ind_col in index.columns)
if names in index_dict:
col.info.indices[i] = index_dict[names]
else:
index_dict[names] = index
def _new_from_slice(self, slice_):
"""Create a new table as a referenced slice from self."""
table = self.__class__(masked=self.masked)
table.meta.clear()
table.meta.update(deepcopy(self.meta))
table.primary_key = self.primary_key
cols = self.columns.values()
newcols = []
for col in cols:
col.info._copy_indices = self._copy_indices
newcol = col[slice_]
if col.info.indices:
newcol = col.info.slice_indices(newcol, slice_, len(col))
newcols.append(newcol)
col.info._copy_indices = True
self._make_table_from_cols(table, newcols)
return table
@staticmethod
def _make_table_from_cols(table, cols):
"""
Make ``table`` in-place so that it represents the given list of ``cols``.
"""
colnames = set(col.info.name for col in cols)
if None in colnames:
raise TypeError('Cannot have None for column name')
if len(colnames) != len(cols):
raise ValueError('Duplicate column names')
columns = table.TableColumns((col.info.name, col) for col in cols)
for col in cols:
col.info.parent_table = table
if table.masked and not hasattr(col, 'mask'):
col.mask = FalseArray(col.shape)
table.columns = columns
def itercols(self):
"""
Iterate over the columns of this table.
Examples
--------
To iterate over the columns of a table::
>>> t = Table([[1], [2]])
>>> for col in t.itercols():
... print(col)
col0
----
1
col1
----
2
Using ``itercols()`` is similar to ``for col in t.columns.values()``
but is syntactically preferred.
"""
for colname in self.columns:
yield self[colname]
def _base_repr_(self, html=False, descr_vals=None, max_width=None,
tableid=None, show_dtype=True, max_lines=None,
tableclass=None):
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append('masked=True')
descr_vals.append('length={0}'.format(len(self)))
descr = '<' + ' '.join(descr_vals) + '>\n'
if html:
from ..utils.xml.writer import xml_escape
descr = xml_escape(descr)
if tableid is None:
tableid = 'table{id}'.format(id=id(self))
data_lines, outs = self.formatter._pformat_table(
self, tableid=tableid, html=html, max_width=max_width,
show_name=True, show_unit=None, show_dtype=show_dtype,
max_lines=max_lines, tableclass=tableclass)
out = descr + '\n'.join(data_lines)
if six.PY2 and isinstance(out, six.text_type):
out = out.encode('utf-8')
return out
def _repr_html_(self):
return self._base_repr_(html=True, max_width=-1,
tableclass=conf.default_notebook_table_class)
def __repr__(self):
return self._base_repr_(html=False, max_width=None)
def __unicode__(self):
return '\n'.join(self.pformat())
if not six.PY2:
__str__ = __unicode__
def __bytes__(self):
return six.text_type(self).encode('utf-8')
if six.PY2:
__str__ = __bytes__
@property
def has_mixin_columns(self):
"""
True if table has any mixin columns (defined as columns that are not Column
subclasses)
"""
return any(has_info_class(col, MixinInfo) for col in self.columns.values())
def _add_as_mixin_column(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
if isinstance(col, BaseColumn):
return False
        # Is it a mixin but not a Quantity (which gets converted to Column with
# unit set).
return has_info_class(col, MixinInfo) and not isinstance(col, Quantity)
def pprint(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, align=None):
"""Print a formatted string representation of the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for max_width except the configuration item is
``astropy.conf.max_width``.
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names (default=True)
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
            Include a header row for column dtypes (default=False)
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
"""
lines, outs = self.formatter._pformat_table(self, max_lines, max_width,
show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype, align=align)
if outs['show_length']:
lines.append('Length = {0} rows'.format(len(self)))
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
def _make_index_row_display_table(self, index_row_name):
if index_row_name not in self.columns:
idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self)))
return self.__class__([idx_col] + self.columns.values(),
copy=False)
else:
return self
def show_in_notebook(self, tableid=None, css=None, display_length=50,
table_class='astropy-default', show_row_index='idx'):
"""Render the table in HTML and show it in the IPython notebook.
Parameters
----------
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}-XXX``, where
id is the unique integer id of the table object, id(self), and XXX
is a random number to avoid conflicts when printing the same table
multiple times.
table_class : str or `None`
A string with a list of HTML classes used to style the table.
The special default string ('astropy-default') means that the string
will be retrieved from the configuration item
``astropy.table.default_notebook_table_class``. Note that these
table classes may make use of bootstrap, as this is loaded with the
notebook. See `this page <http://getbootstrap.com/css/#tables>`_
for the list of classes.
css : string
A valid CSS string declaring the formatting for the table. Default
to ``astropy.table.jsviewer.DEFAULT_CSS_NB``.
display_length : int, optional
            Number of rows to show. Defaults to 50.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
Notes
-----
Currently, unlike `show_in_browser` (with ``jsviewer=True``), this
method needs to access online javascript code repositories. This is due
to modern browsers' limitations on accessing local files. Hence, if you
call this method while offline (and don't have a cached version of
jquery and jquery.dataTables), you will not get the jsviewer features.
"""
from .jsviewer import JSViewer
from IPython.display import HTML
if tableid is None:
tableid = 'table{0}-{1}'.format(id(self),
np.random.randint(1, 1e6))
jsv = JSViewer(display_length=display_length)
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
if table_class == 'astropy-default':
table_class = conf.default_notebook_table_class
html = display_table._base_repr_(html=True, max_width=-1, tableid=tableid,
max_lines=-1, show_dtype=False,
tableclass=table_class)
columns = display_table.columns.values()
sortable_columns = [i for i, col in enumerate(columns)
if col.dtype.kind in 'iufc']
html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns)
return HTML(html)
def show_in_browser(self, max_lines=5000, jsviewer=False,
browser='default', jskwargs={'use_local_files': True},
tableid=None, table_class="display compact",
css=None, show_row_index='idx'):
"""Render the table in HTML and show it in a web browser.
Parameters
----------
max_lines : int
Maximum number of rows to export to the table (set low by default
to avoid memory issues, since the browser view requires duplicating
the table in memory). A negative value of ``max_lines`` indicates
no row limit.
jsviewer : bool
If `True`, prepends some javascript headers so that the table is
rendered as a `DataTables <https://datatables.net>`_ data table.
This allows in-browser searching & sorting.
browser : str
Any legal browser name, e.g. ``'firefox'``, ``'chrome'``,
``'safari'`` (for mac, you may need to use ``'open -a
"/Applications/Google Chrome.app" {}'`` for Chrome). If
``'default'``, will use the system default browser.
jskwargs : dict
Passed to the `astropy.table.JSViewer` init. Defaults to
``{'use_local_files': True}`` which means that the JavaScript
libraries will be served from local copies.
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}``, where id
is the unique integer id of the table object, id(self).
table_class : str or `None`
A string with a list of HTML classes used to style the table.
Default is "display compact", and other possible values can be
found in http://www.datatables.net/manual/styling/classes
css : string
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS``.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
"""
import os
import webbrowser
import tempfile
from ..extern.six.moves.urllib.parse import urljoin
from ..extern.six.moves.urllib.request import pathname2url
from .jsviewer import DEFAULT_CSS
if css is None:
css = DEFAULT_CSS
# We can't use NamedTemporaryFile here because it gets deleted as
# soon as it gets garbage collected.
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, 'table.html')
with open(path, 'w') as tmp:
if jsviewer:
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
display_table.write(tmp, format='jsviewer', css=css,
max_lines=max_lines, jskwargs=jskwargs,
table_id=tableid, table_class=table_class)
else:
self.write(tmp, format='html')
try:
br = webbrowser.get(None if browser == 'default' else browser)
except webbrowser.Error:
log.error("Browser '{}' not found.".format(browser))
else:
br.open(urljoin('file:', pathname2url(path)))
def pformat(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, html=False, tableid=None,
align=None, tableclass=None):
"""Return a list of lines for the formatted string representation of
the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
Parameters
----------
max_lines : int or `None`
Maximum number of rows to output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names (default=True)
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
            Include a header row for column dtypes (default=False)
html : bool
Format the output as an HTML table (default=False)
tableid : str or `None`
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(self)
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
tableclass : str or list of str or `None`
CSS classes for the table; only used if html is set. Default is
none
Returns
-------
lines : list
Formatted table as a list of strings
"""
lines, outs = self.formatter._pformat_table(
self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype, html=html,
tableid=tableid, tableclass=tableclass, align=align)
if outs['show_length']:
lines.append('Length = {0} rows'.format(len(self)))
return lines
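    # Added usage sketch (illustration only, not part of astropy's Table API):
    # capture the formatted table as a list of strings instead of printing it.
    @staticmethod
    def _example_pformat():
        t = Table({'a': [1, 2], 'b': [3.5, 4.5]})
        return t.pformat(show_dtype=True)   # list of strings: header, dtype row, separator, data rows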
def more(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False):
"""Interactively browse table with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names (default=True)
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
            Include a header row for column dtypes (default=False)
"""
self.formatter._more_tabcol(self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype)
def __getitem__(self, item):
if isinstance(item, six.string_types):
return self.columns[item]
elif isinstance(item, (int, np.integer)):
return self.Row(self, item)
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.Row(self, item.item())
elif (isinstance(item, (tuple, list)) and item and
all(isinstance(x, six.string_types) for x in item)):
bad_names = [x for x in item if x not in self.colnames]
if bad_names:
raise ValueError('Slice name(s) {0} not valid column name(s)'
.format(', '.join(bad_names)))
out = self.__class__([self[x] for x in item],
meta=deepcopy(self.meta),
copy_indices=self._copy_indices)
out._groups = groups.TableGroups(out, indices=self.groups._indices,
keys=self.groups._keys)
return out
elif ((isinstance(item, np.ndarray) and item.size == 0) or
(isinstance(item, (tuple, list)) and not item)):
# If item is an empty array/list/tuple then return the table with no rows
return self._new_from_slice([])
elif (isinstance(item, slice) or
isinstance(item, np.ndarray) or
isinstance(item, list) or
isinstance(item, tuple) and all(isinstance(x, np.ndarray)
for x in item)):
# here for the many ways to give a slice; a tuple of ndarray
# is produced by np.where, as in t[np.where(t['a'] > 2)]
# For all, a new table is constructed with slice of all columns
return self._new_from_slice(item)
else:
raise ValueError('Illegal type {0} for table item access'
.format(type(item)))
def __setitem__(self, item, value):
# If the item is a string then it must be the name of a column.
# If that column doesn't already exist then create it now.
if isinstance(item, six.string_types) and item not in self.colnames:
NewColumn = self.MaskedColumn if self.masked else self.Column
# If value doesn't have a dtype and won't be added as a mixin then
# convert to a numpy array.
if not hasattr(value, 'dtype') and not self._add_as_mixin_column(value):
value = np.asarray(value)
# Structured ndarray gets viewed as a mixin
if isinstance(value, np.ndarray) and len(value.dtype) > 1:
value = value.view(NdarrayMixin)
# Make new column and assign the value. If the table currently
            # has no rows (len=0) or the value is already a Column then
# define new column directly from value. In the latter case
# this allows for propagation of Column metadata. Otherwise
# define a new column with the right length and shape and then
# set it from value. This allows for broadcasting, e.g. t['a']
# = 1.
name = item
# If this is a column-like object that could be added directly to table
if isinstance(value, BaseColumn) or self._add_as_mixin_column(value):
# If we're setting a new column to a scalar, broadcast it.
# (things will fail in _init_from_cols if this doesn't work)
if (len(self) > 0 and (getattr(value, 'isscalar', False) or
getattr(value, 'shape', None) == () or
len(value) == 1)):
new_shape = (len(self),) + getattr(value, 'shape', ())[1:]
if isinstance(value, np.ndarray):
value = np_broadcast_to(value, shape=new_shape,
subok=True)
elif isinstance(value, ShapedLikeNDArray):
value = value._apply(np_broadcast_to, shape=new_shape,
subok=True)
new_column = col_copy(value)
new_column.info.name = name
elif len(self) == 0:
new_column = NewColumn(value, name=name)
else:
new_column = NewColumn(name=name, length=len(self), dtype=value.dtype,
shape=value.shape[1:],
unit=getattr(value, 'unit', None))
new_column[:] = value
# Now add new column to the table
self.add_columns([new_column], copy=False)
else:
n_cols = len(self.columns)
if isinstance(item, six.string_types):
# Set an existing column by first trying to replace, and if
# this fails do an in-place update. See definition of mask
# property for discussion of the _setitem_inplace attribute.
if (not getattr(self, '_setitem_inplace', False)
and not conf.replace_inplace):
try:
self._replace_column_warnings(item, value)
return
except Exception:
pass
self.columns[item][:] = value
elif isinstance(item, (int, np.integer)):
# Set the corresponding row assuming value is an iterable.
if not hasattr(value, '__len__'):
raise TypeError('Right side value must be iterable')
if len(value) != n_cols:
raise ValueError('Right side value needs {0} elements (one for each column)'
.format(n_cols))
for col, val in zip(self.columns.values(), value):
col[item] = val
elif (isinstance(item, slice) or
isinstance(item, np.ndarray) or
isinstance(item, list) or
(isinstance(item, tuple) and # output from np.where
all(isinstance(x, np.ndarray) for x in item))):
if isinstance(value, Table):
vals = (col for col in value.columns.values())
elif isinstance(value, np.ndarray) and value.dtype.names:
vals = (value[name] for name in value.dtype.names)
elif np.isscalar(value):
import itertools
vals = itertools.repeat(value, n_cols)
else: # Assume this is an iterable that will work
if len(value) != n_cols:
raise ValueError('Right side value needs {0} elements (one for each column)'
.format(n_cols))
vals = value
for col, val in zip(self.columns.values(), vals):
col[item] = val
else:
raise ValueError('Illegal type {0} for table item access'
.format(type(item)))
def __delitem__(self, item):
if isinstance(item, six.string_types):
self.remove_column(item)
elif isinstance(item, tuple):
self.remove_columns(item)
def field(self, item):
"""Return column[item] for recarray compatibility."""
return self.columns[item]
@property
def masked(self):
return self._masked
@masked.setter
def masked(self, masked):
raise Exception('Masked attribute is read-only (use t = Table(t, masked=True)'
' to convert to a masked table)')
def _set_masked(self, masked):
"""
Set the table masked property.
Parameters
----------
masked : bool
State of table masking (`True` or `False`)
"""
if hasattr(self, '_masked'):
# The only allowed change is from None to False or True, or False to True
if self._masked is None and masked in [False, True]:
self._masked = masked
elif self._masked is False and masked is True:
log.info("Upgrading Table to masked Table. Use Table.filled() to convert to unmasked table.")
self._masked = masked
elif self._masked is masked:
raise Exception("Masked attribute is already set to {0}".format(masked))
else:
raise Exception("Cannot change masked attribute to {0} once it is set to {1}"
.format(masked, self._masked))
else:
if masked in [True, False, None]:
self._masked = masked
else:
raise ValueError("masked should be one of True, False, None")
if self._masked:
self._column_class = self.MaskedColumn
else:
self._column_class = self.Column
@property
def ColumnClass(self):
if self._column_class is None:
return self.Column
else:
return self._column_class
@property
def dtype(self):
return np.dtype([descr(col) for col in self.columns.values()])
@property
def colnames(self):
return list(self.columns.keys())
def keys(self):
return list(self.columns.keys())
def __len__(self):
if len(self.columns) == 0:
return 0
lengths = set(len(col) for col in self.columns.values())
if len(lengths) != 1:
len_strs = [' {0} : {1}'.format(name, len(col)) for name, col in self.columns.items()]
raise ValueError('Column length mismatch:\n{0}'.format('\n'.join(len_strs)))
return lengths.pop()
def index_column(self, name):
"""
Return the positional index of column ``name``.
Parameters
----------
name : str
column name
Returns
-------
index : int
Positional index of column ``name``.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Get index of column 'b' of the table::
>>> t.index_column('b')
1
"""
try:
return self.colnames.index(name)
except ValueError:
raise ValueError("Column {0} does not exist".format(name))
def add_column(self, col, index=None, rename_duplicate=False):
"""
Add a new Column object ``col`` to the table. If ``index``
is supplied then insert column before ``index`` position
in the list of columns, otherwise append column to the end
of the list.
Parameters
----------
col : Column
Column object to add.
index : int or `None`
Insert column before this position or at end (default)
rename_duplicate : bool
Uniquify the column name if it already exists (default=False)
Examples
--------
Create a table with two columns 'a' and 'b'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> print(t)
a b
--- ---
1 0.1
2 0.2
3 0.3
Create a third column 'c' and append it to the end of the table::
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> t.add_column(col_c)
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Add column 'd' at position 1. Note that the column is inserted
before the given index::
>>> col_d = Column(name='d', data=['a', 'b', 'c'])
>>> t.add_column(col_d, 1)
>>> print(t)
a d b c
--- --- --- ---
1 a 0.1 x
2 b 0.2 y
3 c 0.3 z
Add second column named 'b' with rename_duplicate::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_b = Column(name='b', data=[1.1, 1.2, 1.3])
>>> t.add_column(col_b, rename_duplicate=True)
>>> print(t)
a b b_1
--- --- ---
1 0.1 1.1
2 0.2 1.2
3 0.3 1.3
To add several columns use add_columns.
"""
if index is None:
index = len(self.columns)
self.add_columns([col], [index], rename_duplicate=rename_duplicate)
def add_columns(self, cols, indexes=None, copy=True, rename_duplicate=False):
"""
Add a list of new Column objects ``cols`` to the table. If a
corresponding list of ``indexes`` is supplied then insert column
before each ``index`` position in the *original* list of columns,
otherwise append columns to the end of the list.
Parameters
----------
cols : list of Columns
Column objects to add.
indexes : list of ints or `None`
Insert column before this position or at end (default)
copy : bool
Make a copy of the new columns (default=True)
rename_duplicate : bool
Uniquify new column names if they duplicate the existing ones
(default=False)
Examples
--------
Create a table with two columns 'a' and 'b'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> print(t)
a b
--- ---
1 0.1
2 0.2
3 0.3
Create column 'c' and 'd' and append them to the end of the table::
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> col_d = Column(name='d', data=['u', 'v', 'w'])
>>> t.add_columns([col_c, col_d])
>>> print(t)
a b c d
--- --- --- ---
1 0.1 x u
2 0.2 y v
3 0.3 z w
Add column 'c' at position 0 and column 'd' at position 1. Note that
the columns are inserted before the given position::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> col_d = Column(name='d', data=['u', 'v', 'w'])
>>> t.add_columns([col_c, col_d], [0, 1])
>>> print(t)
c a d b
--- --- --- ---
x 1 u 0.1
y 2 v 0.2
z 3 w 0.3
Add second column 'b' and column 'c' with ``rename_duplicate``::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_b = Column(name='b', data=[1.1, 1.2, 1.3])
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> t.add_columns([col_b, col_c], rename_duplicate=True)
>>> print(t)
a b b_1 c
--- --- --- ---
1 0.1 1.1 x
2 0.2 1.2 y
3 0.3 1.3 z
"""
if indexes is None:
indexes = [len(self.columns)] * len(cols)
elif len(indexes) != len(cols):
raise ValueError('Number of indexes must match number of cols')
if copy:
cols = [col_copy(col) for col in cols]
if len(self.columns) == 0:
# No existing table data, init from cols
newcols = cols
else:
newcols = list(self.columns.values())
new_indexes = list(range(len(newcols) + 1))
for col, index in zip(cols, indexes):
i = new_indexes.index(index)
new_indexes.insert(i, None)
newcols.insert(i, col)
if rename_duplicate:
existing_names = set(self.colnames)
for col in cols:
i = 1
orig_name = col.info.name
while col.info.name in existing_names:
# If the column belongs to another table then copy it
# before renaming
if col.info.parent_table is not None:
col = col_copy(col)
new_name = '{0}_{1}'.format(orig_name, i)
col.info.name = new_name
i += 1
existing_names.add(new_name)
self._init_from_cols(newcols)
def _replace_column_warnings(self, name, col):
"""
Same as replace_column but issues warnings under various circumstances.
"""
warns = conf.replace_warnings
if 'refcount' in warns and name in self.colnames:
refcount = sys.getrefcount(self[name])
if name in self.colnames:
old_col = self[name]
# This may raise an exception (e.g. t['a'] = 1) in which case none of
# the downstream code runs.
self.replace_column(name, col)
if 'always' in warns:
warnings.warn("replaced column '{}'".format(name),
TableReplaceWarning, stacklevel=3)
if 'slice' in warns:
try:
# Check for ndarray-subclass slice. An unsliced instance
# has an ndarray for the base while sliced has the same class
# as parent.
if isinstance(old_col.base, old_col.__class__):
msg = ("replaced column '{}' which looks like an array slice. "
"The new column no longer shares memory with the "
"original array.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
except AttributeError:
pass
if 'refcount' in warns:
# Did reference count change?
new_refcount = sys.getrefcount(self[name])
if refcount != new_refcount:
msg = ("replaced column '{}' and the number of references "
"to the column changed.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
if 'attributes' in warns:
# Any of the standard column attributes changed?
changed_attrs = []
new_col = self[name]
# Check base DataInfo attributes that any column will have
for attr in DataInfo.attr_names:
if getattr(old_col.info, attr) != getattr(new_col.info, attr):
changed_attrs.append(attr)
if changed_attrs:
msg = ("replaced column '{}' and column attributes {} changed."
.format(name, changed_attrs))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
def replace_column(self, name, col):
"""
Replace column ``name`` with the new ``col`` object.
Parameters
----------
name : str
Name of column to replace
col : column object (list, ndarray, Column, etc)
New column object to replace the existing column
Examples
--------
Replace column 'a' with a float version of itself::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> float_a = t['a'].astype(float)
>>> t.replace_column('a', float_a)
"""
if name not in self.colnames:
raise ValueError('column name {0} is not in the table'.format(name))
if self[name].info.indices:
raise ValueError('cannot replace a table index column')
t = self.__class__([col], names=[name])
cols = OrderedDict(self.columns)
cols[name] = t[name]
self._init_from_cols(cols.values())
def remove_row(self, index):
"""
Remove a row from the table.
Parameters
----------
index : int
Index of row to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove row 1 from the table::
>>> t.remove_row(1)
>>> print(t)
a b c
--- --- ---
1 0.1 x
3 0.3 z
To remove several rows at the same time use remove_rows.
"""
# check the index against the types that work with np.delete
if not isinstance(index, (six.integer_types, np.integer)):
raise TypeError("Row index must be an integer")
self.remove_rows(index)
def remove_rows(self, row_specifier):
"""
Remove rows from the table.
Parameters
----------
row_specifier : slice, int, or array of ints
Specification for rows to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove rows 0 and 2 from the table::
>>> t.remove_rows([0, 2])
>>> print(t)
a b c
--- --- ---
2 0.2 y
Note that there are no warnings if the slice operator extends
outside the data::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_rows(slice(10, 20, 1))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
"""
# Update indices
for index in self.indices:
index.remove_rows(row_specifier)
keep_mask = np.ones(len(self), dtype=np.bool)
keep_mask[row_specifier] = False
columns = self.TableColumns()
for name, col in self.columns.items():
newcol = col[keep_mask]
newcol.info.parent_table = self
columns[name] = newcol
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def remove_column(self, name):
"""
Remove a column from the table.
This can also be done with::
del table[name]
Parameters
----------
name : str
Name of column to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove column 'b' from the table::
>>> t.remove_column('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
To remove several columns at the same time use remove_columns.
"""
self.remove_columns([name])
def remove_columns(self, names):
'''
Remove several columns from the table.
Parameters
----------
names : list
A list containing the names of the columns to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove columns 'b' and 'c' from the table::
>>> t.remove_columns(['b', 'c'])
>>> print(t)
a
---
1
2
3
Specifying only a single column also works. Remove column 'b' from the table::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_columns('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
This gives the same result as using remove_column.
'''
if isinstance(names, six.string_types):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError("Column {0} does not exist".format(name))
for name in names:
self.columns.pop(name)
def _convert_string_dtype(self, in_kind, out_kind, python3_only):
"""
Convert string-like columns to/from bytestring and unicode (internal only).
Parameters
----------
in_kind : str
Input dtype.kind
out_kind : str
Output dtype.kind
python3_only : bool
Only do this operation for Python 3
"""
if python3_only and six.PY2:
return
# If there are no `in_kind` columns then do nothing
cols = self.columns.values()
if not any(col.dtype.kind == in_kind for col in cols):
return
newcols = []
for col in cols:
if col.dtype.kind == in_kind:
newdtype = re.sub(in_kind, out_kind, col.dtype.str)
newcol = col.__class__(col, dtype=newdtype)
else:
newcol = col
newcols.append(newcol)
self._init_from_cols(newcols)
def convert_bytestring_to_unicode(self, python3_only=False):
"""
Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U') assuming
ASCII encoding.
Internally this changes string columns to represent each character in the string
with a 4-byte UCS-4 equivalent, so it is inefficient for memory but allows Python
3 scripts to manipulate string arrays with natural syntax.
The ``python3_only`` parameter is provided as a convenience so that code can
be written in a Python 2 / 3 compatible way::
>>> t = Table.read('my_data.fits')
>>> t.convert_bytestring_to_unicode(python3_only=True)
Parameters
----------
python3_only : bool
Only do this operation for Python 3
"""
self._convert_string_dtype('S', 'U', python3_only)
def convert_unicode_to_bytestring(self, python3_only=False):
"""
Convert ASCII-only unicode columns (dtype.kind='U') to bytestring (dtype.kind='S').
When exporting a unicode string array to a file in Python 3, it may be desirable
to encode unicode columns as bytestrings. This routine takes advantage of numpy
automated conversion which works for strings that are pure ASCII.
The ``python3_only`` parameter is provided as a convenience so that code can
be written in a Python 2 / 3 compatible way::
>>> t.convert_unicode_to_bytestring(python3_only=True)
>>> t.write('my_data.fits')
Parameters
----------
python3_only : bool
Only do this operation for Python 3
"""
self._convert_string_dtype('U', 'S', python3_only)
def keep_columns(self, names):
'''
Keep only the columns specified (remove the others).
Parameters
----------
names : list
A list containing the names of the columns to keep. All other
columns will be removed.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Specifying only a single column name keeps only this column.
Keep only column 'a' of the table::
>>> t.keep_columns('a')
>>> print(t)
a
---
1
2
3
Specifying a list of column names is also possible.
Keep columns 'a' and 'c' of the table::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.keep_columns(['a', 'c'])
>>> print(t)
a c
--- ---
1 x
2 y
3 z
'''
if isinstance(names, six.string_types):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError("Column {0} does not exist".format(name))
remove = list(set(self.keys()) - set(names))
self.remove_columns(remove)
def rename_column(self, name, new_name):
'''
Rename a column.
This can also be done directly by setting the ``name`` attribute
for a column::
table[name].name = new_name
TODO: this won't work for mixins
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming column 'a' to 'aa'::
>>> t.rename_column('a' , 'aa')
>>> print(t)
aa b c
--- --- ---
1 3 5
2 4 6
'''
if name not in self.keys():
raise KeyError("Column {0} does not exist".format(name))
self.columns[name].info.name = new_name
def add_row(self, vals=None, mask=None):
"""Add a new row to the end of the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
This method requires that the Table object "owns" the underlying array
data. In particular one cannot add a row to a Table that was
initialized with copy=False from an existing array.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::
>>> t.add_row([3,6,9])
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
3 6 9
"""
self.insert_row(len(self), vals, mask)
def insert_row(self, index, vals=None, mask=None):
"""Add a new row before the given ``index`` position in the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
"""
colnames = self.colnames
N = len(self)
if index < -N or index > N:
raise IndexError("Index {0} is out of bounds for table with length {1}"
.format(index, N))
if index < 0:
index += N
def _is_mapping(obj):
"""Minimal checker for mapping (dict-like) interface for obj"""
attrs = ('__getitem__', '__len__', '__iter__', 'keys', 'values', 'items')
return all(hasattr(obj, attr) for attr in attrs)
if mask is not None and not self.masked:
# Possibly issue upgrade warning and update self.ColumnClass. This
# does not change the existing columns.
self._set_masked(True)
if _is_mapping(vals) or vals is None:
# From the vals and/or mask mappings create the corresponding lists
# that have entries for each table column.
if mask is not None and not _is_mapping(mask):
raise TypeError("Mismatch between type of vals and mask")
# Now check that the mask is specified for the same keys as the
# values, otherwise things get really confusing.
if mask is not None and set(vals.keys()) != set(mask.keys()):
raise ValueError('keys in mask should match keys in vals')
if vals and any(name not in colnames for name in vals):
raise ValueError('Keys in vals must all be valid column names')
vals_list = []
mask_list = []
for name in colnames:
if vals and name in vals:
vals_list.append(vals[name])
mask_list.append(False if mask is None else mask[name])
else:
col = self[name]
if hasattr(col, 'dtype'):
# Make a placeholder zero element of the right type which is masked.
# This assumes the appropriate insert() method will broadcast a
# numpy scalar to the right shape.
vals_list.append(np.zeros(shape=(), dtype=col.dtype))
# For masked table any unsupplied values are masked by default.
mask_list.append(self.masked and vals is not None)
else:
raise ValueError("Value must be supplied for column '{0}'".format(name))
vals = vals_list
mask = mask_list
if isiterable(vals):
if mask is not None and (not isiterable(mask) or _is_mapping(mask)):
raise TypeError("Mismatch between type of vals and mask")
if len(self.columns) != len(vals):
raise ValueError('Mismatch between number of vals and columns')
if mask is not None:
if len(self.columns) != len(mask):
raise ValueError('Mismatch between number of masks and columns')
else:
mask = [False] * len(self.columns)
else:
raise TypeError('Vals must be an iterable or mapping or None')
columns = self.TableColumns()
try:
# Insert val at index for each column
for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask):
# If the new row caused a change in self.ColumnClass then
# Column-based classes need to be converted first. This is
# typical for adding a row with mask values to an unmasked table.
if isinstance(col, Column) and not isinstance(col, self.ColumnClass):
col = self.ColumnClass(col, copy=False)
newcol = col.insert(index, val)
if not isinstance(newcol, BaseColumn):
newcol.info.name = name
if self.masked:
newcol.mask = FalseArray(newcol.shape)
if len(newcol) != N + 1:
raise ValueError('Incorrect length for column {0} after inserting {1}'
' (expected {2}, got {3})'
.format(name, val, len(newcol), N + 1))
newcol.info.parent_table = self
# Set mask if needed
if self.masked:
newcol.mask[index] = mask_
columns[name] = newcol
# insert row in indices
for table_index in self.indices:
table_index.insert_row(index, vals, self.columns.values())
except Exception as err:
raise ValueError("Unable to insert row because of exception in column '{0}':\n{1}"
.format(name, err))
else:
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
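# Hedged usage sketch for insert_row (illustrative comment, not part of the library;
# the column names and values below are made up for demonstration):
# >>> t = Table([[1, 2], [4, 5]], names=('a', 'b'))
# >>> t.insert_row(1, {'a': 10, 'b': 40})
# The new row (10, 40) now sits between the two original rows. Omitting a column
# from the dict fills that column with a zero of the right dtype (masked if the
# table is masked), as handled in the mapping branch above.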
def _replace_cols(self, columns):
for col, new_col in zip(self.columns.values(), columns.values()):
new_col.info.indices = []
for index in col.info.indices:
index.columns[index.col_position(col.info.name)] = new_col
new_col.info.indices.append(index)
self.columns = columns
def argsort(self, keys=None, kind=None):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
"""
if isinstance(keys, six.string_types):
keys = [keys]
# use index sorted order if possible
if keys is not None:
index = get_index(self, self[keys])
if index is not None:
return index.sorted_data()
kwargs = {}
if keys:
kwargs['order'] = keys
if kind:
kwargs['kind'] = kind
if keys:
data = self[keys].as_array()
else:
data = self.as_array()
return data.argsort(**kwargs)
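# Hedged usage sketch for argsort (illustrative comment, not part of the library):
# >>> t = Table([[3, 1, 2], ['c', 'a', 'b']], names=('x', 'y'))
# >>> t.argsort('x')        # array([1, 2, 0])
# >>> t[t.argsort('x')]     # new table with rows ordered by ascending 'x'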
def sort(self, keys=None):
'''
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name','firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
if keys is None:
if not self.indices:
raise ValueError("Table sort requires input keys or a table index")
keys = [x.info.name for x in self.indices[0].columns]
if isinstance(keys, six.string_types):
keys = [keys]
indexes = self.argsort(keys)
sort_index = get_index(self, self[keys])
if sort_index is not None:
# avoid inefficient relabelling of sorted index
prev_frozen = sort_index._frozen
sort_index._frozen = True
for col in self.columns.values():
col[:] = col.take(indexes, axis=0)
if sort_index is not None:
# undo index freeze
sort_index._frozen = prev_frozen
# now relabel the sort index appropriately
sort_index.sort()
def reverse(self):
'''
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
for col in self.columns.values():
col[:] = col[::-1]
for index in self.indices:
index.reverse()
@classmethod
def read(cls, *args, **kwargs):
"""
Read and parse a data table and return as a Table.
This function provides the Table interface to the astropy unified I/O
layer. This allows easily reading a file in many supported data formats
using syntax such as::
>>> from astropy.table import Table
>>> dat = Table.read('table.dat', format='ascii')
>>> events = Table.read('events.fits', format='fits')
The arguments and keywords (other than ``format``) provided to this function are
passed through to the underlying data reader (e.g. `~astropy.io.ascii.read`).
"""
return io_registry.read(cls, *args, **kwargs)
def write(self, *args, **kwargs):
"""
Write this Table object out in the specified format.
This function provides the Table interface to the astropy unified I/O
layer. This allows easily writing a file in many supported data formats
using syntax such as::
>>> from astropy.table import Table
>>> dat = Table([[1, 2], [3, 4]], names=('a', 'b'))
>>> dat.write('table.dat', format='ascii')
The arguments and keywords (other than ``format``) provided to this function are
passed through to the underlying data reader (e.g. `~astropy.io.ascii.write`).
"""
io_registry.write(self, *args, **kwargs)
def copy(self, copy_data=True):
'''
Return a copy of the table.
Parameters
----------
copy_data : bool
If `True` (the default), copy the underlying data array.
Otherwise, use the same data array
.. note::
The ``meta`` is always deepcopied regardless of the value for
``copy_data``.
'''
out = self.__class__(self, copy=copy_data)
# If the current table is grouped then do the same in the copy
if hasattr(self, '_groups'):
out._groups = groups.TableGroups(out, indices=self._groups._indices,
keys=self._groups._keys)
return out
def __deepcopy__(self, memo=None):
return self.copy(True)
def __copy__(self):
return self.copy(False)
def __lt__(self, other):
if six.PY2:
raise TypeError("unorderable types: Table() < {0}".
format(str(type(other))))
else:
return super(Table, self).__lt__(other)
def __gt__(self, other):
if six.PY2:
raise TypeError("unorderable types: Table() > {0}".
format(str(type(other))))
else:
return super(Table, self).__gt__(other)
def __le__(self, other):
if six.PY2:
raise TypeError("unorderable types: Table() <= {0}".
format(str(type(other))))
else:
return super(Table, self).__le__(other)
def __ge__(self, other):
if six.PY2:
raise TypeError("unorderable types: Table() >= {0}".
format(str(type(other))))
else:
return super(Table, self).__ge__(other)
def __eq__(self, other):
if isinstance(other, Table):
other = other.as_array()
if self.masked:
if isinstance(other, np.ma.MaskedArray):
result = self.as_array() == other
else:
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
result = (self.as_array().data == other) & (self.mask == false_mask)
else:
if isinstance(other, np.ma.MaskedArray):
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
result = (self.as_array() == other.data) & (other.mask == false_mask)
else:
result = self.as_array() == other
return result
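# Hedged note on __eq__ (illustrative comment): comparing a Table against another
# Table or structured array of the same dtype yields a boolean array with one
# entry per row, e.g.
# >>> t1 = Table([[1, 2]], names=('a',))
# >>> t2 = Table([[1, 3]], names=('a',))
# >>> t1 == t2              # array([ True, False])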
def __ne__(self, other):
return ~self.__eq__(other)
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.TableGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this table by the specified ``keys``
This effectively splits the table into groups which correspond to
unique values of the ``keys`` grouping object. The output is a new
`TableGroups` which contains a copy of this table but sorted by row
according to ``keys``.
The ``keys`` input to `group_by` can be specified in different ways:
- String or list of strings corresponding to table column name(s)
- Numpy array (homogeneous or structured) with same length as this table
- `Table` with same length as this table
Parameters
----------
keys : str, list of str, numpy array, or `Table`
Key grouping object
Returns
-------
out : `Table`
New table with groups set
"""
if self.has_mixin_columns:
raise NotImplementedError('group_by not available for tables with mixin columns')
return groups.table_group_by(self, keys)
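# Hedged usage sketch for group_by (illustrative comment, not part of the library):
# >>> t = Table([['a', 'b', 'a'], [1, 2, 3]], names=('key', 'val'))
# >>> g = t.group_by('key')
# >>> [grp['val'].sum() for grp in g.groups]    # [4, 2]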
def to_pandas(self):
"""
Return a :class:`pandas.DataFrame` instance
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
Raises
------
ImportError
If pandas is not installed
ValueError
If the Table contains mixin or multi-dimensional columns
"""
from pandas import DataFrame
if self.has_mixin_columns:
raise ValueError("Cannot convert a table with mixin columns to a pandas DataFrame")
if any(getattr(col, 'ndim', 1) > 1 for col in self.columns.values()):
raise ValueError("Cannot convert a table with multi-dimensional columns to a pandas DataFrame")
out = OrderedDict()
for name, column in self.columns.items():
if isinstance(column, MaskedColumn):
if column.dtype.kind in ['i', 'u']:
out[name] = column.astype(float).filled(np.nan)
elif column.dtype.kind in ['f', 'c']:
out[name] = column.filled(np.nan)
else:
out[name] = column.astype(np.object).filled(np.nan)
else:
out[name] = column
if out[name].dtype.byteorder not in ('=', '|'):
out[name] = out[name].byteswap().newbyteorder()
return DataFrame(out)
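# Hedged usage sketch for to_pandas (illustrative comment; assumes pandas is installed):
# >>> t = Table([[1, 2], [3.0, 4.0]], names=('a', 'b'))
# >>> df = t.to_pandas()
# >>> list(df.columns)      # ['a', 'b']
# Masked integer columns come back as float columns with NaN in the masked slots,
# as handled in the MaskedColumn branch above.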
@classmethod
def from_pandas(cls, dataframe):
"""
Create a `Table` from a :class:`pandas.DataFrame` instance
Parameters
----------
dataframe : :class:`pandas.DataFrame`
The pandas :class:`pandas.DataFrame` instance
Returns
-------
table : `Table`
A `Table` (or subclass) instance
"""
out = OrderedDict()
for name in dataframe.columns:
column = dataframe[name]
mask = np.array(column.isnull())
data = np.array(column)
if data.dtype.kind == 'O':
# If all elements of an object array are string-like or np.nan
# then coerce back to a native numpy str/unicode array.
string_types = six.string_types
if not six.PY2:
string_types += (bytes,)
nan = np.nan
if all(isinstance(x, string_types) or x is nan for x in data):
# Force any missing (null) values to b''. Numpy will
# upcast to str/unicode as needed.
data[mask] = b''
# When the numpy object array is represented as a list then
# numpy initializes to the correct string or unicode type.
data = np.array([x for x in data])
if np.any(mask):
out[name] = MaskedColumn(data=data, name=name, mask=mask)
else:
out[name] = Column(data=data, name=name)
return cls(out)
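# Hedged usage sketch for from_pandas (illustrative comment; assumes pandas is installed):
# >>> df = pd.DataFrame({'a': [1, 2], 'b': [1.0, np.nan]})
# >>> t = Table.from_pandas(df)
# Column 'a' becomes a plain Column; 'b' becomes a MaskedColumn with the NaN value
# masked, mirroring the null-handling branch above.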
info = TableInfo()
class QTable(Table):
"""A class to represent tables of heterogeneous data.
`QTable` provides a class for heterogeneous tabular data which can be
easily modified, for instance adding columns or new rows.
The `QTable` class is identical to `Table` except that columns with an
associated ``unit`` attribute are converted to `~astropy.units.Quantity`
objects.
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names
dtype : list, optional
Specify column data types
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data (default=True).
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument
copy_indices : bool, optional
Copy any indices in the input data (default=True)
**kwargs : dict, optional
Additional keyword args when converting table-like object
"""
def _add_as_mixin_column(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
return has_info_class(col, MixinInfo)
def _convert_col_for_table(self, col):
if (isinstance(col, Column) and getattr(col, 'unit', None) is not None):
# We need to turn the column into a quantity, or a subclass
# identified in the unit (such as u.mag()).
q_cls = getattr(col.unit, '_quantity_class', Quantity)
qcol = q_cls(col.data, col.unit, copy=False)
qcol.info = col.info
col = qcol
else:
col = super(QTable, self)._convert_col_for_table(col)
return col
class NdarrayMixin(np.ndarray):
"""
Mixin column class to allow storage of arbitrary numpy
ndarrays within a Table. This is a subclass of numpy.ndarray
and has the same initialization options as ndarray().
"""
info = ParentDtypeInfo()
def __new__(cls, obj, *args, **kwargs):
self = np.array(obj, *args, **kwargs).view(cls)
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
return self
def __array_finalize__(self, obj):
if obj is None:
return
if six.callable(super(NdarrayMixin, self).__array_finalize__):
super(NdarrayMixin, self).__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html
object_state = list(super(NdarrayMixin, self).__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle NdarrayMixin objects (ndarray subclasses), see
# http://www.mail-archive.com/numpy-discussion@scipy.org/msg02446.html
nd_state, own_state = state
super(NdarrayMixin, self).__setstate__(nd_state)
self.__dict__.update(own_state)
|
bsd-3-clause
|
ankurankan/pgmpy
|
pgmpy/inference/bn_inference.py
|
2
|
8890
|
from pgmpy.inference import Inference
from pgmpy.models import BayesianNetwork
import pandas as pd
import numpy as np
import networkx as nx
import itertools
class BayesianModelInference(Inference):
"""
Inference class specific to Bayesian Models
"""
def __init__(self, model):
"""
Class to calculate probability (pmf) values specific to Bayesian Models
Parameters
----------
model: Bayesian Model
model on which inference queries will be computed
"""
if not isinstance(model, BayesianNetwork):
raise TypeError(
"Model expected type: BayesianNetwork, got type: ", type(model)
)
super(BayesianModelInference, self).__init__(model)
self._initialize_structures()
self.topological_order = list(nx.topological_sort(model))
def pre_compute_reduce(self, variable):
"""
Get probability arrays for a node as a function of its conditional dependencies.
Internal function used for Bayesian networks, e.g. in BayesianModelSampling
and BayesianModelProbability.
Parameters
----------
variable: Bayesian Model Node
node of the Bayesian network
Returns
-------
dict: dictionary with the probability array for the node
as a function of the conditional dependency values
"""
variable_cpd = self.model.get_cpds(variable)
variable_evid = variable_cpd.variables[:0:-1]
cached_values = {}
for state_combination in itertools.product(
*[range(self.cardinality[var]) for var in variable_evid]
):
states = list(zip(variable_evid, state_combination))
cached_values[state_combination] = variable_cpd.reduce(
states, inplace=False, show_warnings=False
).values
return cached_values
def pre_compute_reduce_maps(self, variable):
"""
Get probability array-maps for a node as a function of its conditional dependencies.
Internal function used for Bayesian networks, e.g. in BayesianModelSampling
and BayesianModelProbability.
Parameters
----------
variable: Bayesian Model Node
node of the Bayesian network
Returns
-------
tuple of dict: a dictionary mapping each combination of conditional dependency
values to a probability array-index, and a dictionary mapping each array-index
to its probability array.
"""
variable_cpd = self.model.get_cpds(variable)
variable_evid = variable_cpd.variables[:0:-1]
state_combinations = [
tuple(sc)
for sc in itertools.product(
*[range(self.cardinality[var]) for var in variable_evid]
)
]
weights_list = np.array(
[
variable_cpd.reduce(
list(zip(variable_evid, sc)), inplace=False, show_warnings=False
).values
for sc in state_combinations
]
)
unique_weights, weights_indices = np.unique(
weights_list, axis=0, return_inverse=True
)
# convert weights to index; make mapping of state to index
state_to_index = dict(zip(state_combinations, weights_indices))
# make mapping of index to weights
index_to_weight = dict(enumerate(unique_weights))
# return mappings of state to index, and index to weight
return state_to_index, index_to_weight
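# Hedged illustration of the maps above (comment only; the values are made up):
# for a node X with a single binary parent P, pre_compute_reduce_maps(X) would return
#   state_to_index  == {(0,): 0, (1,): 1}
#   index_to_weight == {0: p(X | P=0), 1: p(X | P=1)}
# so p(X | P=p) is looked up as index_to_weight[state_to_index[(p,)]]; identical
# conditional distributions collapse onto the same index via np.unique.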
class BayesianModelProbability(BayesianModelInference):
"""
Class to calculate probability (pmf) values specific to Bayesian Models
"""
def __init__(self, model):
"""
Class to calculate probability (pmf) values specific to Bayesian Models
Parameters
----------
model: Bayesian Model
model on which inference queries will be computed
"""
super(BayesianModelProbability, self).__init__(model)
def _log_probability_node(self, data, ordering, node):
"""
Evaluate the log probability of each datapoint for a specific node.
Internal function used by log_probability().
Parameters
----------
data: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
ordering: list
ordering of columns in data, used by the Bayesian model.
default is topological ordering used by model.
node: Bayesian Model Node
node from the Bayesian network.
Returns
-------
ndarray: having shape (n_samples,)
The array of log(density) evaluations. These are normalized to be
probability densities, so values will be low for high-dimensional
data.
"""
def vec_translate(a, my_dict):
return np.vectorize(my_dict.__getitem__)(a)
cpd = self.model.get_cpds(node)
# variable to probe: data[n], where n is the node number
current = cpd.variables[0]
current_idx = ordering.index(current)
current_val = data[:, current_idx]
current_no = vec_translate(current_val, cpd.name_to_no[current])
# conditional dependencies E of the probed variable
evidence = cpd.variables[:0:-1]
evidence_idx = [ordering.index(ev) for ev in evidence]
evidence_val = data[:, evidence_idx]
evidence_no = np.empty_like(evidence_val)
for i, ev in enumerate(evidence):
evidence_no[:, i] = vec_translate(evidence_val[:, i], cpd.name_to_no[ev])
if evidence:
# there are conditional dependencies E for data[n] for this node
# Here we retrieve the array: p(x[n]|E). We do this for each x in data.
# We pick the specific node value from the arrays below.
state_to_index, index_to_weight = self.pre_compute_reduce_maps(
variable=node
)
unique, inverse = np.unique(evidence_no, axis=0, return_inverse=True)
weights = np.array(
[index_to_weight[state_to_index[tuple(u)]] for u in unique]
)[inverse]
else:
# there are NO conditional dependencies for this node
# retrieve array: p(x[n]). We do this for each x in data.
# We pick the specific node value from the arrays below.
weights = np.array([cpd.values] * len(data))
# pick the specific node value x[n] from the array p(x[n]|E) or p(x[n])
# We do this for each x in data.
probability_node = np.array([weights[i][cn] for i, cn in enumerate(current_no)])
return np.log(probability_node)
def log_probability(self, data, ordering=None):
"""
Evaluate the logarithmic probability of each point in a data set.
Parameters
----------
data: pandas dataframe OR array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
ordering: list
ordering of columns in data, used by the Bayesian model.
default is topological ordering used by model.
Returns
-------
ndarray: having shape (n_samples,)
The array of log(density) evaluations. These are normalized to be
probability densities, so values will be low for high-dimensional
data.
"""
if isinstance(data, pd.DataFrame):
# use numpy array from now on.
ordering = data.columns.to_list()
data = data.values
if ordering is None:
ordering = self.topological_order
logp = np.array(
[
self._log_probability_node(data, ordering, node)
for node in self.topological_order
]
)
return np.sum(logp, axis=0)
def score(self, data, ordering=None):
"""
Compute the total log probability density under the model.
Parameters
----------
data: pandas dataframe OR array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
ordering: list
ordering of columns in data, used by the Bayesian model.
default is topological ordering used by model.
Returns
-------
float: total log-likelihood of the data in data.
This is normalized to be a probability density, so the value
will be low for high-dimensional data.
"""
return np.sum(self.log_probability(data, ordering))
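# Hedged usage sketch (not part of pgmpy): a two-node network scored with
# BayesianModelProbability. The TabularCPD import path, the state layout and the
# numbers below are assumptions made for illustration only.
if __name__ == "__main__":
    from pgmpy.factors.discrete import TabularCPD

    demo_model = BayesianNetwork([("A", "B")])
    cpd_a = TabularCPD("A", 2, [[0.6], [0.4]])
    cpd_b = TabularCPD(
        "B", 2, [[0.7, 0.2], [0.3, 0.8]], evidence=["A"], evidence_card=[2]
    )
    demo_model.add_cpds(cpd_a, cpd_b)

    demo_data = pd.DataFrame({"A": [0, 1, 0], "B": [0, 1, 1]})
    bmp = BayesianModelProbability(demo_model)
    print(bmp.log_probability(demo_data))  # per-row log p(A, B)
    print(bmp.score(demo_data))            # total log-likelihood of demo_data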
|
mit
|
wanggang3333/scikit-learn
|
sklearn/manifold/tests/test_isomap.py
|
226
|
3941
|
from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
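# Hedged usage sketch (not part of the test suite): fitting Isomap directly on a
# small S-curve; the parameter values are arbitrary choices for illustration.
if __name__ == '__main__':
    X_demo, _ = datasets.make_s_curve(100, random_state=0)
    emb = manifold.Isomap(n_neighbors=10, n_components=2).fit_transform(X_demo)
    print(emb.shape)  # expected (100, 2)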
|
bsd-3-clause
|
mrry/tensorflow
|
tensorflow/contrib/learn/python/learn/tests/estimators_test.py
|
7
|
5456
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
# TODO(b/29580537): Remove when we deprecate feature column inference.
class InferredfeatureColumnTest(tf.test.TestCase):
"""Custom optimizer tests."""
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
def custom_optimizer():
return tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
cont_features = [
tf.contrib.layers.real_valued_column("", dimension=4)]
classifier = learn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[10, 20, 10],
n_classes=3,
optimizer=custom_optimizer,
config=learn.RunConfig(tf_random_seed=1))
classifier.fit(x_train, y_train, steps=400)
score = accuracy_score(y_test, classifier.predict(x_test))
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
class FeatureEngineeringFunctionTest(tf.test.TestCase):
"""Tests feature_engineering_fn."""
def testFeatureEngineeringFn(self):
def input_fn():
return {"x": tf.constant([1.])}, {"y": tf.constant([11.])}
def feature_engineering_fn(features, targets):
_, _ = features, targets
return {
"transformed_x": tf.constant([9.])
}, {
"transformed_y": tf.constant([99.])
}
def model_fn(features, targets):
# dummy variable:
_ = tf.Variable([0.])
_ = targets
predictions = features["transformed_x"]
loss = tf.constant([2.])
return predictions, loss, tf.no_op()
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
def testNoneFeatureEngineeringFn(self):
def input_fn():
return {"x": tf.constant([1.])}, {"y": tf.constant([11.])}
def feature_engineering_fn(features, targets):
_, _ = features, targets
return {"x": tf.constant([9.])}, {"y": tf.constant([99.])}
def model_fn(features, targets):
# dummy variable:
_ = tf.Variable([0.])
_ = targets
predictions = features["x"]
loss = tf.constant([2.])
return predictions, loss, tf.no_op()
estimator_with_fe_fn = tf.contrib.learn.Estimator(
model_fn=model_fn,
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
estimator_without_fe_fn = tf.contrib.learn.Estimator(model_fn=model_fn)
estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)
# predictions = x
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict(input_fn=input_fn, as_iterable=True))
self.assertEqual(9., prediction_with_fe_fn)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict(input_fn=input_fn, as_iterable=True))
self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(tf.test.TestCase):
"""Custom optimizer tests."""
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
def custom_optimizer():
return tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
classifier = learn.DNNClassifier(
hidden_units=[10, 20, 10],
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
n_classes=3,
optimizer=custom_optimizer,
config=learn.RunConfig(tf_random_seed=1))
classifier.fit(x_train, y_train, steps=400)
score = accuracy_score(y_test, classifier.predict(x_test))
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
tf.test.main()
|
apache-2.0
|
MyRookie/SentimentAnalyse
|
venv/lib/python2.7/site-packages/numpy/linalg/linalg.py
|
11
|
75845
|
"""Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
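# Illustrative sketch, not part of the original source: in this lite build every
# computation is promoted to double precision, while result_type remembers the
# precision the answer should be cast back to, e.g. (assuming numpy is imported
# as np at the call site):
#
#   _commonType(np.arange(3))               # -> (double, double)    ints promote
#   _commonType(np.ones(2, np.float32))     # -> (double, single)    compute in double, return float32
#   _commonType(np.ones(2, np.complex64))   # -> (cdouble, csingle)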
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
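# Illustrative sketch, not part of the original source: the helper casts each
# array to the computation type and returns a transposed, contiguous copy --
# the column-major layout the f2c'd LAPACK routines used further down expect:
#
#   at = _fastCopyAndTranspose(double, np.arange(6.).reshape(2, 3))
#   # at.shape == (3, 2), and at is a fresh copy, so LAPACK may overwrite it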
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic only if the number of extra dimensions
# matches exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
# QR decomposition
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated.",
warnings.warn(msg, DeprecationWarning)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Same as `lower`, with 'L' for lower and 'U' for upper triangular.
Deprecated.
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _syevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[..., 0]/s[..., -1]
else:
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
if isscalar(sign):
sign = sign.astype(result_t)
else:
sign = sign.astype(result_t, copy=False)
if isscalar(logdet):
logdet = logdet.astype(real_t)
else:
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
if isscalar(r):
r = r.astype(result_t)
else:
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
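# Illustrative sketch, not part of the original source: this helper backs the
# matrix-norm branches of norm() below. For a stack x of shape (4, 3, 3), the
# public calls reduce to it as follows:
#
#   np.linalg.norm(x, ord='nuc', axis=(-2, -1))   # sum of singular values,
#                                                 # i.e. _multi_svd_norm(x, 1, 2, sum)
#   np.linalg.norm(x, ord=2, axis=(-2, -1))       # largest singular value (amax)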
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
if not issubclass(x.dtype.type, inexact):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if row_axis < 0:
row_axis += nd
if col_axis < 0:
col_axis += nd
if not (0 <= row_axis < nd and 0 <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
>>> A = np.random.random((10000, 100))
>>> B = np.random.random((100, 1000))
>>> C = np.random.random((1000, 5))
>>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Example: multiplication costs of different parenthesizations
------------------------------------------------------------
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
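# Hedged usage sketch (an editorial addition, not part of the original numpy
# module): multi_dot should agree with an explicit chain of dot products.
# The shapes below are made up purely for illustration.
def _multi_dot_usage_sketch():
    import numpy as np
    A = np.random.random((10, 100))
    B = np.random.random((100, 5))
    C = np.random.random((5, 50))
    chained = np.dot(np.dot(A, B), C)     # fixed left-to-right parenthesization
    optimized = multi_dot([A, B, C])      # order chosen by the chain-order algorithm
    assert np.allclose(chained, optimized)
    return optimized.shape                # (10, 50)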
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
# cost1 = cost((AB)C)
cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
    # cost2 = cost(A(BC))
cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
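# Hedged sketch (an editorial addition): reproduce the cost arithmetic from
# the multi_dot docstring for A_{10x100}, B_{100x5}, C_{5x50}, using the same
# cost(A, B) = A.shape[0] * A.shape[1] * B.shape[1] rule as above.
def _chain_cost_sketch():
    def cost(a_shape, b_shape):
        # scalar multiplications for an (m x k) times (k x n) product
        return a_shape[0] * a_shape[1] * b_shape[1]
    A, B, C = (10, 100), (100, 5), (5, 50)
    cost_ab_c = cost(A, B) + cost((A[0], B[1]), C)   # 5000 + 2500 = 7500
    cost_a_bc = cost(B, C) + cost(A, (B[0], C[1]))   # 25000 + 50000 = 75000
    return cost_ab_c, cost_a_bc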
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
    Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
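# Hedged sketch (an editorial addition): for the docstring example
# A_{10x100}, B_{100x5}, C_{5x50}, the chain-order routine should choose to
# multiply A and B first, at a minimal cost of 7500 scalar multiplications.
def _chain_order_sketch():
    import numpy as np
    arrays = [np.ones((10, 100)), np.ones((100, 5)), np.ones((5, 50))]
    order, costs = _multi_dot_matrix_chain_order(arrays, return_costs=True)
    return order[0, 2], costs[0, 2]   # split index 1, i.e. (AB)C, cost 7500.0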
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
|
mit
|
superphy/backend
|
app/modules/qc/qc.py
|
1
|
4021
|
from __future__ import division
import os
import tempfile
import subprocess
import argparse
import pandas as pd
from middleware.graphers.turtle_grapher import generate_turtle_skeleton
def create_blast_db(query_file):
'''
:param query_file: genome file that was given by the user.
'''
tempdir = tempfile.mkdtemp()
blast_db_path = os.path.join(tempdir, 'ecoli_blastdb')
ret_code = subprocess.call(["makeblastdb",
"-in", query_file,
"-dbtype", "nucl",
"-title", "ecoli_blastdb",
"-out", blast_db_path])
if ret_code == 0:
return blast_db_path
else:
raise Exception("Could not create blast db")
def run_blast(blast_db):
'''
    Compares the BLAST db built from the user-submitted genome against
    https://raw.githubusercontent.com/superphy/version-1/master/Sequences/genome_content_panseq/putative_e_coli_specific.fasta
    The reference contains 10 E. coli-specific gene sequences.
    Output format is set to '10' (csv).
'''
ecoli_ref = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + '/' + 'putative_e_coli_specific.fasta')
blast_output_file = blast_db + '.output'
ret_code = subprocess.call(["blastn",
"-query", ecoli_ref,
"-db", blast_db,
"-out", blast_output_file,
"-outfmt", '10 " qseqid qlen sseqid length pident sstart send sframe "',
"-word_size", "11"])
if ret_code == 0:
return blast_output_file
else:
raise Exception("Could not run blast")
def parse_blast_records(blast_output_file):
'''
Recall, headers are: https://edwards.sdsu.edu/research/blast-output-8/
    For QC, we keep only hits with at least 90% identity and at least 90%
    query length coverage against our reference.
    Returns the unique reference query ids that pass both checks.
'''
print blast_output_file
blast_records = pd.read_csv(blast_output_file, header=None)
blast_records.columns = ['qseqid','qlen','sseqid','length','pident','sstart','send','sframe']
# filter for results with percent identity >= 90%
blast_records_pi_passed = blast_records[blast_records['pident']>=90]
print blast_records_pi_passed
# calculate percent length
blast_records_pi_passed['pl'] = blast_records_pi_passed['length']/blast_records_pi_passed['qlen'] * 100
# filter for results with percent length >= 90%
blast_records_pi_pl_passed = blast_records_pi_passed[blast_records_pi_passed['pl'] >= 90]
print blast_records_pi_pl_passed
# take only unique hits of the reference sequence that pass pi/pl checks (we don't count repeats)
unique_hits = blast_records_pi_pl_passed['qseqid'].unique()
print unique_hits
return unique_hits
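# Hedged sketch (an editorial addition, not part of the original module): the
# same pident/percent-length filtering applied to a tiny, made-up DataFrame,
# showing which rows survive the two 90% thresholds used above.
def _filter_sketch():
    import pandas as pd
    records = pd.DataFrame({
        'qseqid': ['gene1', 'gene1', 'gene2'],
        'qlen': [100, 100, 200],
        'length': [95, 50, 190],
        'pident': [99.0, 99.0, 85.0],
    })
    passed = records[records['pident'] >= 90].copy()
    passed['pl'] = passed['length'] / passed['qlen'] * 100
    passed = passed[passed['pl'] >= 90]
    return passed['qseqid'].unique()   # only 'gene1' passes both checks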
def check_header_parsing(query_file):
'''
Checks that SeqIO can parse the file okay before continuing.
'''
try:
graph = generate_turtle_skeleton(query_file)
return True
except:
return False
def check_ecoli(query_file):
# Checks if the query_file is an E.Coli genome.
# run blast for ecoli specific sequences
blast_db = create_blast_db(query_file)
blast_output_file = run_blast(blast_db)
unique_hits = parse_blast_records(blast_output_file)
if len(unique_hits) >= 3:
return True
else:
return False
def qc(query_file):
'''
Compares the query_file against a reference db of ecoli-specific gene sequences.
We consider a "pass" if the query_file has >=3 of the sequences.
    Returns True for a pass, False when the file cannot be parsed or the
    genome does not look like E. coli.
'''
return check_header_parsing(query_file) and check_ecoli(query_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", required=True)
args = parser.parse_args()
print qc(args.i)
|
apache-2.0
|
guschmue/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/estimator_input_test.py
|
72
|
12865
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator input."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
class EstimatorInputTest(test.TestCase):
def testContinueTrainingDictionaryInput(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=50)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores2['MSE'], scores['MSE'])
predictions = np.array(list(est2.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions,
float64_target['labels'])
self.assertAllClose(other_score, scores['MSE'])
def testBostonAll(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = base.load_boston()
est = estimator.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = base.load_iris()
est = estimator.SKCompat(
estimator.Estimator(model_fn=logistic_model_no_mode_fn))
est.fit(iris.data, iris.target, steps=100)
scores = est.score(
x=iris.data,
y=iris.target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class)
self.assertAllClose(
predictions['class'], np.argmax(
predictions['prob'], axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(scores['accuracy'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
iris_data = {'input': iris.data}
iris_target = {'labels': iris.target}
est.fit(iris_data, iris_target, steps=100)
scores = est.evaluate(
x=iris_data,
y=iris_target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = list(est.predict(x=iris_data))
predictions_class = list(est.predict(x=iris_data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(
classes_batch,
np.argmax(
np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisInputFnLabelsDict(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
_ = est.evaluate(
input_fn=iris_input_fn_labels_dict,
steps=1,
metrics={
'accuracy':
metric_spec.MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='class',
label_key='labels')
})
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testTrainInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testPredictInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFnWithQueue(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0] * 2)
def testPredictConstInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
def input_fn():
features = array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
if __name__ == '__main__':
test.main()
|
apache-2.0
|
ltiao/scikit-learn
|
examples/svm/plot_svm_kernels.py
|
329
|
1971
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
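# Hedged sketch (an editorial addition, not part of the original example): on
# a tiny XOR-style dataset a linear kernel cannot fit the labels perfectly,
# while an RBF kernel can, which is the point made in the docstring above.
def _kernel_comparison_sketch():
    import numpy as np
    from sklearn import svm
    X_xor = np.array([[0, 0], [1, 1], [0, 1], [1, 0]], dtype=float)
    y_xor = [0, 0, 1, 1]
    linear_acc = svm.SVC(kernel='linear').fit(X_xor, y_xor).score(X_xor, y_xor)
    rbf_acc = svm.SVC(kernel='rbf', gamma=2).fit(X_xor, y_xor).score(X_xor, y_xor)
    return linear_acc, rbf_acc   # linear stays below 1.0; rbf typically reaches 1.0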
|
bsd-3-clause
|
LeeMendelowitz/basketball-reference
|
run_games.py
|
1
|
1036
|
"""
Download all 2013 boxscores.
"""
import os
import sys
from file_utils import make_dirs
import pandas
import time
import requests
from basketball_reference import games_table
from basketball_reference.globals import URL_BASE
def games_to_csv():
src = os.path.join('pages', 'NBA_2013_games.html')
games_table.to_csv(src, 'games.csv')
def download_boxscores():
"""
Download all boxscores.
"""
df = pandas.read_csv('games.csv')
urls = df['box_score_url']
#import pdb; pdb.set_trace()
n = len(urls)
for i, url in enumerate(urls):
url = url.lstrip('/')
dirpath = os.path.dirname(url)
make_dirs(dirpath)
full_url = '%s/%s'%(URL_BASE, url)
sys.stderr.write("Downloading file %i of %i: %s..."%(i, n, full_url))
response = requests.get(full_url)
with open(url, 'w') as f:
f.write(response.text.encode('utf-8', errors='ignore'))
sys.stderr.write('done!\n')
# Be kind to Basketball-Reference.com
time.sleep(1)
if __name__ == '__main__':
download_boxscores()
|
gpl-3.0
|
UPenn-RoboCup/UPennalizers
|
Lib/Modules/Util/Python/monitor_shm.py
|
3
|
2295
|
#!/usr/bin/env python
import matplotlib.pyplot as mpl
import numpy as np
from scipy.misc import pilutil
import time
import shm
import os
vcmImage = shm.ShmWrapper('vcmImage181%s' % str(os.getenv('USER')));
def draw_data(rgb, labelA):
mpl.subplot(2,2,1);
mpl.imshow(rgb)
# disp('Received image.')
mpl.subplot(2,2,2);
# labelA = sw.vcmImage.get_labelA();
# labelA = typecast( labelA, 'uint8' );
# labelA = reshape( labelA, [80,60] );
# labelA = permute( labelA, [2 1] );
mpl.imshow(labelA);
# TODO: Port the Matlab Colormap
# cbk=[0 0 0];cr=[1 0 0];cg=[0 1 0];cb=[0 0 1];cy=[1 1 0];cw=[1 1 1];
# cmap=[cbk;cr;cy;cy;cb;cb;cb;cb;cg;cg;cg;cg;cg;cg;cg;cg;cw];
# colormap(cmap);
# hold on;
# plot_ball( sw.vcmBall );
# plot_goalposts( sw.vcmGoal );
    # print 'Received Label A.'
mpl.subplot(2,2,3);
# Draw the field for localization reasons
#plot_field();
# hold on;
# plot robots
# for t in range( len(teamNumbers) ):
# for p in range(nPlayers):
# if (~isempty(robots{p, t})):
# plot_robot_struct(robots{p, t});
mpl.subplot(2,2,4);
# What to draw here?
#plot(10,10);
#hold on;
#plot_goalposts( sw.vcmGoal );
mpl.draw();
def on_button_press(event):
global vcmImage
# get the yuyv image data
yuyv = vcmImage.get_yuyv();
    # data is actually uint32 (packed YUYV format), not float64
yuyv.dtype = 'uint32';
n = yuyv.shape[0];
    # convert to uint8 to separate out YUYV
yuyv.dtype = 'uint8';
# reshape to Nx4
yuyv_u8 = yuyv.reshape((120, 80, 4));
# convert to ycbcr (approx.)
ycbcr = yuyv_u8[0:-1:2, :, [0,1,3]];
# convert to rgb
# there is probably a better way to do this...
rgb = np.asarray(pilutil.toimage(ycbcr, mode='YCbCr').convert('RGB').getdata());
rgb = rgb.reshape((60, 80, 3))/255.0;
# Get the labelA data
labelA = vcmImage.get_labelA();
# data is actually uint8 (one bit per label)
labelA.dtype = 'uint8';
n = yuyv.shape[0];
labelA = labelA.reshape( (60,80) );
# labelA = permute( labelA, [2 1] );
# display image
draw_data(rgb, labelA)
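# Hedged sketch (an editorial addition, not part of the original script): how
# a packed YUYV buffer can be reinterpreted as bytes and reshaped, mirroring
# the dtype tricks used in on_button_press above. The buffer here is fake.
def _yuyv_layout_sketch():
    import numpy as np
    packed = np.zeros(120 * 80, dtype=np.uint32)   # fake packed YUYV buffer
    as_bytes = packed.view(np.uint8)               # 4 bytes per 32-bit word
    yuyv = as_bytes.reshape((120, 80, 4))          # rows x macropixels x (Y0, Cb, Y1, Cr)
    ycbcr = yuyv[0:-1:2, :, [0, 1, 3]]             # keep Y0, Cb, Cr; take every other row
    return ycbcr.shape                             # (60, 80, 3)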
if __name__=='__main__':
# create connection to image shm
print('Click on the image to update...');
fig = mpl.figure();
fig.canvas.mpl_connect('button_press_event', on_button_press);
mpl.show();
time.sleep(0.1);
|
gpl-3.0
|
niketanpansare/systemml
|
projects/breast_cancer/breastcancer/visualization.py
|
18
|
2001
|
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
"""
Visualization -- Predicting Breast Cancer Proliferation Scores with
Apache SystemML
This module contains functions for visualizing data for the breast
cancer project.
"""
import matplotlib.pyplot as plt
def visualize_tile(tile):
"""
Plot a tissue tile.
Args:
tile: A 3D NumPy array of shape (tile_size, tile_size, channels).
Returns:
None
"""
plt.imshow(tile)
plt.show()
def visualize_sample(sample, size=256):
"""
Plot a tissue sample.
Args:
sample: A square sample flattened to a vector of size
(channels*size_x*size_y).
size: The width and height of the square samples.
Returns:
None
"""
# Change type, reshape, transpose to (size_x, size_y, channels).
length = sample.shape[0]
channels = int(length / (size * size))
if channels > 1:
sample = sample.astype('uint8').reshape((channels, size, size)).transpose(1,2,0)
plt.imshow(sample)
else:
vmax = 255 if sample.max() > 1 else 1
sample = sample.reshape((size, size))
plt.imshow(sample, cmap="gray", vmin=0, vmax=vmax)
plt.show()
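# Hedged usage sketch (an editorial addition, not part of the original
# module): build a fake flattened RGB sample and hand it to visualize_sample,
# matching the (channels*size_x*size_y) layout described in its docstring.
def _visualize_sample_usage_sketch(size=256):
    import numpy as np
    fake = np.random.randint(0, 256, size=3 * size * size).astype('uint8')
    visualize_sample(fake, size=size)   # displays a size x size RGB tile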
|
apache-2.0
|
superbobry/pymc3
|
pymc3/examples/ARM5_4.py
|
14
|
1026
|
'''
Created on May 18, 2012
@author: jsalvatier
'''
import numpy as np
from pymc3 import *
import theano.tensor as t
import pandas as pd
wells = get_data_file('pymc3.examples', 'data/wells.dat')
data = pd.read_csv(wells, delimiter=u' ', index_col=u'id',
dtype={u'switch': np.int8})
data.dist /= 100
data.educ /= 4
col = data.columns
P = data[col[1:]]
P = P - P.mean()
P['1'] = 1
Pa = np.array(P)
with Model() as model:
effects = Normal(
'effects', mu=0, tau=100. ** -2, shape=len(P.columns))
p = sigmoid(dot(Pa, effects))
s = Bernoulli('s', p, observed=np.array(data.switch))
def run(n=3000):
if n == "short":
n = 50
with model:
# move the chain to the MAP which should be a good starting point
start = find_MAP()
H = model.fastd2logp() # find a good orientation using the hessian at the MAP
h = H(start)
step = HamiltonianMC(model.vars, h)
trace = sample(n, step, start)
if __name__ == '__main__':
run()
|
apache-2.0
|
valexandersaulys/airbnb_kaggle_contest
|
venv/lib/python3.4/site-packages/sklearn/tests/test_grid_search.py
|
53
|
28730
|
"""
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
    # Make a dataset with a lot of noise to get various kinds of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
|
gpl-2.0
|
noam09/deluge-telegramer
|
telegramer/include/future/utils/__init__.py
|
8
|
20325
|
"""
A selection of cross-compatible functions for Python 2 and 3.
This module exports useful functions for 2/3 compatible code:
* bind_method: binds functions to classes
* ``native_str_to_bytes`` and ``bytes_to_native_str``
* ``native_str``: always equal to the native platform string object (because
this may be shadowed by imports from future.builtins)
* lists: lrange(), lmap(), lzip(), lfilter()
* iterable method compatibility:
- iteritems, iterkeys, itervalues
- viewitems, viewkeys, viewvalues
These use the original method if available, otherwise they use items,
keys, values.
* types:
* text_type: unicode in Python 2, str in Python 3
* binary_type: str in Python 2, bytes in Python 3
* string_types: basestring in Python 2, str in Python 3
* bchr(c):
Take an integer and make a 1-character byte string
* bord(c)
Take the result of indexing on a byte string and make an integer
* tobytes(s)
Take a text string, a byte string, or a sequence of characters taken
from a byte string, and make a byte string.
* raise_from()
* raise_with_traceback()
This module also defines these decorators:
* ``python_2_unicode_compatible``
* ``with_metaclass``
* ``implements_iterator``
Some of the functions in this module come from the following sources:
* Jinja2 (BSD licensed: see
https://github.com/mitsuhiko/jinja2/blob/master/LICENSE)
* Pandas compatibility module pandas.compat
* six.py by Benjamin Peterson
* Django
"""
import types
import sys
import numbers
import functools
import copy
import inspect
PY3 = sys.version_info[0] == 3
PY35_PLUS = sys.version_info[0:2] >= (3, 5)
PY36_PLUS = sys.version_info[0:2] >= (3, 6)
PY2 = sys.version_info[0] == 2
PY26 = sys.version_info[0:2] == (2, 6)
PY27 = sys.version_info[0:2] == (2, 7)
PYPY = hasattr(sys, 'pypy_translation_info')
def python_2_unicode_compatible(cls):
"""
A decorator that defines __unicode__ and __str__ methods under Python
2. Under Python 3, this decorator is a no-op.
To support Python 2 and 3 with a single code base, define a __str__
method returning unicode text and apply this decorator to the class, like
this::
>>> from future.utils import python_2_unicode_compatible
>>> @python_2_unicode_compatible
... class MyClass(object):
... def __str__(self):
... return u'Unicode string: \u5b54\u5b50'
>>> a = MyClass()
Then, after this import:
>>> from future.builtins import str
the following is ``True`` on both Python 3 and 2::
>>> str(a) == a.encode('utf-8').decode('utf-8')
True
and, on a Unicode-enabled terminal with the right fonts, these both print the
Chinese characters for Confucius::
>>> print(a)
>>> print(str(a))
The implementation comes from django.utils.encoding.
"""
if not PY3:
cls.__unicode__ = cls.__str__
cls.__str__ = lambda self: self.__unicode__().encode('utf-8')
return cls
def with_metaclass(meta, *bases):
"""
Function from jinja2/_compat.py. License: BSD.
Use it like this::
class BaseForm(object):
pass
class FormType(type):
pass
class Form(with_metaclass(FormType, BaseForm)):
pass
This requires a bit of explanation: the basic idea is to make a
dummy metaclass for one level of class instantiation that replaces
itself with the actual metaclass. Because of internal type checks
we also need to make sure that we downgrade the custom metaclass
for one level to something closer to type (that's why __call__ and
__init__ comes back from type etc.).
This has the advantage over six.with_metaclass of not introducing
dummy classes into the final MRO.
"""
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
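# Hedged sketch (an editorial addition, not part of the original module):
# with_metaclass applies the real metaclass while keeping the temporary class
# out of the final MRO, which is the advantage described in the docstring.
def _with_metaclass_sketch():
    class Meta(type):
        pass
    class Base(object):
        pass
    class Form(with_metaclass(Meta, Base)):
        pass
    assert type(Form) is Meta
    assert Form.__mro__ == (Form, Base, object)
    return Form.__mro__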
# Definitions from pandas.compat and six.py follow:
if PY3:
def bchr(s):
return bytes([s])
def bstr(s):
if isinstance(s, str):
return bytes(s, 'latin-1')
else:
return bytes(s)
def bord(s):
return s
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
else:
# Python 2
def bchr(s):
return chr(s)
def bstr(s):
return str(s)
def bord(s):
return ord(s)
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
###
if PY3:
def tobytes(s):
if isinstance(s, bytes):
return s
else:
if isinstance(s, str):
return s.encode('latin-1')
else:
return bytes(s)
else:
# Python 2
def tobytes(s):
if isinstance(s, unicode):
return s.encode('latin-1')
else:
return ''.join(s)
tobytes.__doc__ = """
    Encodes to latin-1 (whose 256 code points match the first 256 Unicode
    code points, and whose first 128 match ASCII.)
"""
if PY3:
def native_str_to_bytes(s, encoding='utf-8'):
return s.encode(encoding)
def bytes_to_native_str(b, encoding='utf-8'):
return b.decode(encoding)
def text_to_native_str(t, encoding=None):
return t
else:
# Python 2
def native_str_to_bytes(s, encoding=None):
from future.types import newbytes # to avoid a circular import
return newbytes(s)
def bytes_to_native_str(b, encoding=None):
return native(b)
def text_to_native_str(t, encoding='ascii'):
"""
Use this to create a Py2 native string when "from __future__ import
unicode_literals" is in effect.
"""
return unicode(t).encode(encoding)
native_str_to_bytes.__doc__ = """
On Py3, returns an encoded string.
On Py2, returns a newbytes type, ignoring the ``encoding`` argument.
"""
if PY3:
# list-producing versions of the major Python iterating functions
def lrange(*args, **kwargs):
return list(range(*args, **kwargs))
def lzip(*args, **kwargs):
return list(zip(*args, **kwargs))
def lmap(*args, **kwargs):
return list(map(*args, **kwargs))
def lfilter(*args, **kwargs):
return list(filter(*args, **kwargs))
else:
import __builtin__
# Python 2-builtin ranges produce lists
lrange = __builtin__.range
lzip = __builtin__.zip
lmap = __builtin__.map
lfilter = __builtin__.filter
def isidentifier(s, dotted=False):
'''
A function equivalent to the str.isidentifier method on Py3
'''
if dotted:
return all(isidentifier(a) for a in s.split('.'))
if PY3:
return s.isidentifier()
else:
import re
_name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
return bool(_name_re.match(s))
def viewitems(obj, **kwargs):
"""
Function for iterating over dictionary items with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewitems", None)
if not func:
func = obj.items
return func(**kwargs)
def viewkeys(obj, **kwargs):
"""
Function for iterating over dictionary keys with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewkeys", None)
if not func:
func = obj.keys
return func(**kwargs)
def viewvalues(obj, **kwargs):
"""
Function for iterating over dictionary values with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewvalues", None)
if not func:
func = obj.values
return func(**kwargs)
def iteritems(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewitems().
"""
func = getattr(obj, "iteritems", None)
if not func:
func = obj.items
return func(**kwargs)
def iterkeys(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewkeys().
"""
func = getattr(obj, "iterkeys", None)
if not func:
func = obj.keys
return func(**kwargs)
def itervalues(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewvalues().
"""
func = getattr(obj, "itervalues", None)
if not func:
func = obj.values
return func(**kwargs)
def bind_method(cls, name, func):
"""Bind a method to class, python 2 and python 3 compatible.
Parameters
----------
cls : type
class to receive bound method
name : basestring
name of method on class instance
func : function
function to be bound as method
Returns
-------
None
"""
# only python 2 has an issue with bound/unbound methods
if not PY3:
setattr(cls, name, types.MethodType(func, None, cls))
else:
setattr(cls, name, func)
def getexception():
return sys.exc_info()[1]
def _get_caller_globals_and_locals():
"""
Returns the globals and locals of the calling frame.
Is there an alternative to frame hacking here?
"""
caller_frame = inspect.stack()[2]
myglobals = caller_frame[0].f_globals
mylocals = caller_frame[0].f_locals
return myglobals, mylocals
def _repr_strip(mystring):
"""
Returns the string without any initial or final quotes.
"""
r = repr(mystring)
if r.startswith("'") and r.endswith("'"):
return r[1:-1]
else:
return r
if PY3:
def raise_from(exc, cause):
"""
Equivalent to:
raise EXCEPTION from CAUSE
on Python 3. (See PEP 3134).
"""
myglobals, mylocals = _get_caller_globals_and_locals()
# We pass the exception and cause along with other globals
# when we exec():
myglobals = myglobals.copy()
myglobals['__python_future_raise_from_exc'] = exc
myglobals['__python_future_raise_from_cause'] = cause
execstr = "raise __python_future_raise_from_exc from __python_future_raise_from_cause"
exec(execstr, myglobals, mylocals)
def raise_(tp, value=None, tb=None):
"""
A function that matches the Python 2.x ``raise`` statement. This
allows re-raising exceptions with the cls value and traceback on
Python 2 and 3.
"""
if value is not None and isinstance(tp, Exception):
raise TypeError("instance exception may not have a separate value")
if value is not None:
exc = tp(value)
else:
exc = tp
if exc.__traceback__ is not tb:
raise exc.with_traceback(tb)
raise exc
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc.with_traceback(traceback)
else:
def raise_from(exc, cause):
"""
Equivalent to:
raise EXCEPTION from CAUSE
on Python 3. (See PEP 3134).
"""
# Is either arg an exception class (e.g. IndexError) rather than
# instance (e.g. IndexError('my message here')? If so, pass the
# name of the class undisturbed through to "raise ... from ...".
if isinstance(exc, type) and issubclass(exc, Exception):
e = exc()
# exc = exc.__name__
# execstr = "e = " + _repr_strip(exc) + "()"
# myglobals, mylocals = _get_caller_globals_and_locals()
# exec(execstr, myglobals, mylocals)
else:
e = exc
e.__suppress_context__ = False
if isinstance(cause, type) and issubclass(cause, Exception):
e.__cause__ = cause()
e.__suppress_context__ = True
elif cause is None:
e.__cause__ = None
e.__suppress_context__ = True
elif isinstance(cause, BaseException):
e.__cause__ = cause
e.__suppress_context__ = True
else:
raise TypeError("exception causes must derive from BaseException")
e.__context__ = sys.exc_info()[1]
raise e
exec('''
def raise_(tp, value=None, tb=None):
raise tp, value, tb
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc, None, traceback
'''.strip())
raise_with_traceback.__doc__ = (
"""Raise exception with existing traceback.
If traceback is not passed, uses sys.exc_info() to get traceback."""
)
# Deprecated alias for backward compatibility with ``future`` versions < 0.11:
reraise = raise_
def implements_iterator(cls):
'''
From jinja2/_compat.py. License: BSD.
Use as a decorator like this::
@implements_iterator
class UppercasingIterator(object):
def __init__(self, iterable):
self._iter = iter(iterable)
def __iter__(self):
return self
def __next__(self):
return next(self._iter).upper()
'''
if PY3:
return cls
else:
cls.next = cls.__next__
del cls.__next__
return cls
if PY3:
get_next = lambda x: x.next
else:
get_next = lambda x: x.__next__
def encode_filename(filename):
if PY3:
return filename
else:
if isinstance(filename, unicode):
return filename.encode('utf-8')
return filename
def is_new_style(cls):
"""
Python 2.7 has both new-style and old-style classes. Old-style classes can
be pesky in some circumstances, such as when using inheritance. Use this
function to test for whether a class is new-style. (Python 3 only has
new-style classes.)
"""
return hasattr(cls, '__class__') and ('__dict__' in dir(cls)
or hasattr(cls, '__slots__'))
# The native platform string and bytes types. Useful because ``str`` and
# ``bytes`` are redefined on Py2 by ``from future.builtins import *``.
native_str = str
native_bytes = bytes
def istext(obj):
"""
Deprecated. Use::
>>> isinstance(obj, str)
after this import:
>>> from future.builtins import str
"""
return isinstance(obj, type(u''))
def isbytes(obj):
"""
Deprecated. Use::
>>> isinstance(obj, bytes)
after this import:
>>> from future.builtins import bytes
"""
return isinstance(obj, type(b''))
def isnewbytes(obj):
"""
Equivalent to the result of ``isinstance(obj, newbytes)`` were
``__instancecheck__`` not overridden on the newbytes subclass. In
    other words, is it REALLY a newbytes instance, not a Py2 native str
object?
"""
# TODO: generalize this so that it works with subclasses of newbytes
# Import is here to avoid circular imports:
from future.types.newbytes import newbytes
return type(obj) == newbytes
def isint(obj):
"""
Deprecated. Tests whether an object is a Py3 ``int`` or either a Py2 ``int`` or
``long``.
Instead of using this function, you can use:
>>> from future.builtins import int
>>> isinstance(obj, int)
The following idiom is equivalent:
>>> from numbers import Integral
>>> isinstance(obj, Integral)
"""
return isinstance(obj, numbers.Integral)
def native(obj):
"""
On Py3, this is a no-op: native(obj) -> obj
On Py2, returns the corresponding native Py2 types that are
superclasses for backported objects from Py3:
>>> from builtins import str, bytes, int
>>> native(str(u'ABC'))
u'ABC'
>>> type(native(str(u'ABC')))
unicode
>>> native(bytes(b'ABC'))
b'ABC'
>>> type(native(bytes(b'ABC')))
bytes
>>> native(int(10**20))
100000000000000000000L
>>> type(native(int(10**20)))
long
Existing native types on Py2 will be returned unchanged:
>>> type(native(u'ABC'))
unicode
"""
if hasattr(obj, '__native__'):
return obj.__native__()
else:
return obj
# Implementation of exec_ is from ``six``:
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
# Defined here for backward compatibility:
def old_div(a, b):
"""
DEPRECATED: import ``old_div`` from ``past.utils`` instead.
Equivalent to ``a / b`` on Python 2 without ``from __future__ import
division``.
TODO: generalize this to other objects (like arrays etc.)
"""
if isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral):
return a // b
else:
return a / b
def as_native_str(encoding='utf-8'):
'''
A decorator to turn a function or method call that returns text, i.e.
unicode, into one that returns a native platform str.
Use it as a decorator like this::
from __future__ import unicode_literals
class MyClass(object):
@as_native_str(encoding='ascii')
def __repr__(self):
return next(self._iter).upper()
'''
if PY3:
return lambda f: f
else:
def encoder(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs).encode(encoding=encoding)
return wrapper
return encoder
# listvalues and listitems definitions from Nick Coghlan's (withdrawn)
# PEP 496:
try:
dict.iteritems
except AttributeError:
# Python 3
def listvalues(d):
return list(d.values())
def listitems(d):
return list(d.items())
else:
# Python 2
def listvalues(d):
return d.values()
def listitems(d):
return d.items()
if PY3:
def ensure_new_type(obj):
return obj
else:
def ensure_new_type(obj):
from future.types.newbytes import newbytes
from future.types.newstr import newstr
from future.types.newint import newint
from future.types.newdict import newdict
native_type = type(native(obj))
# Upcast only if the type is already a native (non-future) type
if issubclass(native_type, type(obj)):
# Upcast
if native_type == str: # i.e. Py2 8-bit str
return newbytes(obj)
elif native_type == unicode:
return newstr(obj)
elif native_type == int:
return newint(obj)
elif native_type == long:
return newint(obj)
elif native_type == dict:
return newdict(obj)
else:
return obj
else:
# Already a new type
assert type(obj) in [newbytes, newstr]
return obj
__all__ = ['PY2', 'PY26', 'PY3', 'PYPY',
'as_native_str', 'bind_method', 'bord', 'bstr',
'bytes_to_native_str', 'encode_filename', 'ensure_new_type',
'exec_', 'get_next', 'getexception', 'implements_iterator',
'is_new_style', 'isbytes', 'isidentifier', 'isint',
'isnewbytes', 'istext', 'iteritems', 'iterkeys', 'itervalues',
'lfilter', 'listitems', 'listvalues', 'lmap', 'lrange',
'lzip', 'native', 'native_bytes', 'native_str',
'native_str_to_bytes', 'old_div',
'python_2_unicode_compatible', 'raise_',
'raise_with_traceback', 'reraise', 'text_to_native_str',
'tobytes', 'viewitems', 'viewkeys', 'viewvalues',
'with_metaclass'
]
|
gpl-3.0
|
severinson/Coded-Shuffling
|
tests/simulationtests.py
|
2
|
4673
|
############################################################################
# Copyright 2016 Albin Severinson #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
'''This module contains tests of the simulation module.
'''
import os
import math
import unittest
import tempfile
import pandas as pd
import simulation
from functools import partial
from model import SystemParameters
from solvers.heuristicsolver import HeuristicSolver
from evaluation.binsearch import SampleEvaluator
class EvaluationTests(unittest.TestCase):
'''Tests of the simulation module.'''
def verify_result(self, result, correct_result, delta=0.1):
'''Check the results against known correct results.
Args:
result: Measured result.
correct_result: Dict with correct results.
delta: Correct result must be within a delta fraction of the
measured result.
'''
for key, value in correct_result.items():
if value == math.inf:
self.assertAlmostEqual(result[key].mean(), value, places=1,
msg='key={}, value={}'.format(str(key), str(value)))
else:
self.assertAlmostEqual(result[key].mean(), value, delta=value*delta,
msg='key={}, value={}'.format(str(key), str(value)))
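    # Rough numeric illustration of the check above (hypothetical numbers):
    # with delta=0.1 and a correct value of 2.83, any measured mean within
    # +/- 10% of 2.83 passes; infinite correct values are compared with
    # assertAlmostEqual, which only accepts a measured mean that is also
    # infinite.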
def verify_solver(self, solver, parameters, correct_results):
'''Check the results from evaluating the assignment produced by some
solver against known correct results.
Args:
solver: Assignment solver.
parameters: System parameters.
correct_results: List of dicts with correct results.
'''
        evaluator = SampleEvaluator(num_samples=1000)
for par, correct_result in zip(parameters, correct_results):
assignment = solver.solve(par)
self.assertTrue(assignment.is_valid())
result = evaluator.evaluate(par, assignment)
self.verify_result(result, correct_result)
return
def test_simulation(self):
'''Test basic functionality.'''
parameters = SystemParameters(rows_per_batch=5, num_servers=10, q=9, num_outputs=9,
server_storage=1/3, num_partitions=5)
correct = {'servers': 9, 'batches': 324, 'delay': 25.460714285714285/9,
'unicast_load_1': 720/540/9, 'multicast_load_1': 840/540/9,
'unicast_load_2': 0, 'multicast_load_2': 1470/540/9}
solver = HeuristicSolver()
evaluator = SampleEvaluator(num_samples=1000)
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, parameters.identifier() + '.csv')
dataframe = simulation.simulate(
parameters,
directory=tmpdir,
rerun=False,
samples=10,
solver=solver,
assignment_eval=evaluator,
)
self.verify_result(dataframe, correct)
simulate_fun = partial(
simulation.simulate,
directory=tmpdir,
rerun=False,
samples=10,
solver=solver,
assignment_eval=evaluator,
)
dataframe = simulation.simulate_parameter_list(
parameter_list=[parameters],
simulate_fun=simulate_fun,
map_complexity_fun=lambda x: 1,
encode_delay_fun=lambda x: 0,
reduce_delay_fun=lambda x: 0,
)
self.verify_result(dataframe, correct)
return
|
apache-2.0
|
paulocoding/DataScienceMachineLearning
|
DataMunging/DataProcessing.py
|
2
|
1705
|
import pandas as pd
def sum_of_digits(str_value):
"""
Sum up all the digits in a number till it is single digit
Eg:
1 => 1
11 => 2
123 => 6
1235 => 2
98 => 8
"""
total = 0
for num in str_value:
total += int(num)
if total > 9:
return sum_of_digits(str(total))
return total
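# Quick sanity checks mirroring the docstring examples above (these run at
# import time and are expected to hold for the function as written):
assert sum_of_digits('123') == 6
assert sum_of_digits('1235') == 2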
def do_processing(file_name):
    # Read an excel file. Make sure it is in XLSX format and
# you have XlsxWriter package installed.
# Pandas uses this package to read excel files.
df = pd.read_excel(file_name)
# Split the name into first name and last name
    # You will get a pandas Series with a list [first_name, last_name] in each row
fn_ln_list = df['Name'].str.split(' ')
# Use list comprehension to build a list for the first name and last name
df['first_name'] = [name[0] for name in fn_ln_list]
df['last_name'] = [name[1] for name in fn_ln_list]
# Pandas DataFrame automatically recognizes the date field and converts
# it into a datetime object. Using strftime to convert the datetime object
# to a string in the format DDMMYYYY
df['dob'] = df['Date Of Birth'].apply(lambda x: x.strftime('%d%m%Y'))
# Sum the numbers in DOB to a single digit
# Create a new field to save the sum of digits
df['sum_dob'] = df['dob'].apply(sum_of_digits)
print "\n\n\nDataFrame:\n"
print "----------\n", df
print "\n\n\nDataFrame Columns:\n"
print "------------------\n", df.columns
print "\n\n\nDataFrame Data Types:\n"
print "---------------------\n", df.dtypes
if __name__ == '__main__':
file_name = 'person_details.xlsx'
do_processing(file_name)
|
mit
|
cortedeltimo/SickRage
|
lib/future/utils/__init__.py
|
36
|
20238
|
"""
A selection of cross-compatible functions for Python 2 and 3.
This module exports useful functions for 2/3 compatible code:
* bind_method: binds functions to classes
* ``native_str_to_bytes`` and ``bytes_to_native_str``
* ``native_str``: always equal to the native platform string object (because
this may be shadowed by imports from future.builtins)
* lists: lrange(), lmap(), lzip(), lfilter()
* iterable method compatibility:
- iteritems, iterkeys, itervalues
- viewitems, viewkeys, viewvalues
These use the original method if available, otherwise they use items,
keys, values.
* types:
* text_type: unicode in Python 2, str in Python 3
    * binary_type: str in Python 2, bytes in Python 3
* string_types: basestring in Python 2, str in Python 3
* bchr(c):
Take an integer and make a 1-character byte string
* bord(c)
Take the result of indexing on a byte string and make an integer
* tobytes(s)
Take a text string, a byte string, or a sequence of characters taken
from a byte string, and make a byte string.
* raise_from()
* raise_with_traceback()
This module also defines these decorators:
* ``python_2_unicode_compatible``
* ``with_metaclass``
* ``implements_iterator``
Some of the functions in this module come from the following sources:
* Jinja2 (BSD licensed: see
https://github.com/mitsuhiko/jinja2/blob/master/LICENSE)
* Pandas compatibility module pandas.compat
* six.py by Benjamin Peterson
* Django
"""
import types
import sys
import numbers
import functools
import copy
import inspect
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
PY26 = sys.version_info[0:2] == (2, 6)
PY27 = sys.version_info[0:2] == (2, 7)
PYPY = hasattr(sys, 'pypy_translation_info')
def python_2_unicode_compatible(cls):
"""
A decorator that defines __unicode__ and __str__ methods under Python
2. Under Python 3, this decorator is a no-op.
To support Python 2 and 3 with a single code base, define a __str__
method returning unicode text and apply this decorator to the class, like
this::
>>> from future.utils import python_2_unicode_compatible
>>> @python_2_unicode_compatible
... class MyClass(object):
... def __str__(self):
... return u'Unicode string: \u5b54\u5b50'
>>> a = MyClass()
Then, after this import:
>>> from future.builtins import str
the following is ``True`` on both Python 3 and 2::
>>> str(a) == a.encode('utf-8').decode('utf-8')
True
and, on a Unicode-enabled terminal with the right fonts, these both print the
Chinese characters for Confucius::
>>> print(a)
>>> print(str(a))
The implementation comes from django.utils.encoding.
"""
if not PY3:
cls.__unicode__ = cls.__str__
cls.__str__ = lambda self: self.__unicode__().encode('utf-8')
return cls
def with_metaclass(meta, *bases):
"""
Function from jinja2/_compat.py. License: BSD.
Use it like this::
class BaseForm(object):
pass
class FormType(type):
pass
class Form(with_metaclass(FormType, BaseForm)):
pass
This requires a bit of explanation: the basic idea is to make a
dummy metaclass for one level of class instantiation that replaces
itself with the actual metaclass. Because of internal type checks
we also need to make sure that we downgrade the custom metaclass
for one level to something closer to type (that's why __call__ and
__init__ comes back from type etc.).
This has the advantage over six.with_metaclass of not introducing
dummy classes into the final MRO.
"""
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
# Definitions from pandas.compat and six.py follow:
if PY3:
def bchr(s):
return bytes([s])
def bstr(s):
if isinstance(s, str):
return bytes(s, 'latin-1')
else:
return bytes(s)
def bord(s):
return s
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
else:
# Python 2
def bchr(s):
return chr(s)
def bstr(s):
return str(s)
def bord(s):
return ord(s)
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
###
if PY3:
def tobytes(s):
if isinstance(s, bytes):
return s
else:
if isinstance(s, str):
return s.encode('latin-1')
else:
return bytes(s)
else:
# Python 2
def tobytes(s):
if isinstance(s, unicode):
return s.encode('latin-1')
else:
return ''.join(s)
tobytes.__doc__ = """
Encodes to latin-1 (where the first 256 chars are the same as
ASCII.)
"""
if PY3:
def native_str_to_bytes(s, encoding='utf-8'):
return s.encode(encoding)
def bytes_to_native_str(b, encoding='utf-8'):
return b.decode(encoding)
def text_to_native_str(t, encoding=None):
return t
else:
# Python 2
def native_str_to_bytes(s, encoding=None):
from future.types import newbytes # to avoid a circular import
return newbytes(s)
def bytes_to_native_str(b, encoding=None):
return native(b)
def text_to_native_str(t, encoding='ascii'):
"""
Use this to create a Py2 native string when "from __future__ import
unicode_literals" is in effect.
"""
return unicode(t).encode(encoding)
native_str_to_bytes.__doc__ = """
On Py3, returns an encoded string.
On Py2, returns a newbytes type, ignoring the ``encoding`` argument.
"""
if PY3:
# list-producing versions of the major Python iterating functions
def lrange(*args, **kwargs):
return list(range(*args, **kwargs))
def lzip(*args, **kwargs):
return list(zip(*args, **kwargs))
def lmap(*args, **kwargs):
return list(map(*args, **kwargs))
def lfilter(*args, **kwargs):
return list(filter(*args, **kwargs))
else:
import __builtin__
# Python 2-builtin ranges produce lists
lrange = __builtin__.range
lzip = __builtin__.zip
lmap = __builtin__.map
lfilter = __builtin__.filter
def isidentifier(s, dotted=False):
'''
A function equivalent to the str.isidentifier method on Py3
'''
if dotted:
return all(isidentifier(a) for a in s.split('.'))
if PY3:
return s.isidentifier()
else:
import re
_name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
return bool(_name_re.match(s))
def viewitems(obj, **kwargs):
"""
Function for iterating over dictionary items with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewitems", None)
if not func:
func = obj.items
return func(**kwargs)
def viewkeys(obj, **kwargs):
"""
Function for iterating over dictionary keys with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewkeys", None)
if not func:
func = obj.keys
return func(**kwargs)
def viewvalues(obj, **kwargs):
"""
Function for iterating over dictionary values with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewvalues", None)
if not func:
func = obj.values
return func(**kwargs)
def iteritems(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewitems().
"""
func = getattr(obj, "iteritems", None)
if not func:
func = obj.items
return func(**kwargs)
def iterkeys(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewkeys().
"""
func = getattr(obj, "iterkeys", None)
if not func:
func = obj.keys
return func(**kwargs)
def itervalues(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewvalues().
"""
func = getattr(obj, "itervalues", None)
if not func:
func = obj.values
return func(**kwargs)
def bind_method(cls, name, func):
"""Bind a method to class, python 2 and python 3 compatible.
Parameters
----------
cls : type
class to receive bound method
name : basestring
name of method on class instance
func : function
function to be bound as method
Returns
-------
None
"""
# only python 2 has an issue with bound/unbound methods
if not PY3:
setattr(cls, name, types.MethodType(func, None, cls))
else:
setattr(cls, name, func)
def getexception():
return sys.exc_info()[1]
def _get_caller_globals_and_locals():
"""
Returns the globals and locals of the calling frame.
Is there an alternative to frame hacking here?
"""
caller_frame = inspect.stack()[2]
myglobals = caller_frame[0].f_globals
mylocals = caller_frame[0].f_locals
return myglobals, mylocals
def _repr_strip(mystring):
"""
Returns the string without any initial or final quotes.
"""
r = repr(mystring)
if r.startswith("'") and r.endswith("'"):
return r[1:-1]
else:
return r
if PY3:
def raise_from(exc, cause):
"""
Equivalent to:
raise EXCEPTION from CAUSE
on Python 3. (See PEP 3134).
"""
myglobals, mylocals = _get_caller_globals_and_locals()
# We pass the exception and cause along with other globals
# when we exec():
myglobals = myglobals.copy()
myglobals['__python_future_raise_from_exc'] = exc
myglobals['__python_future_raise_from_cause'] = cause
execstr = "raise __python_future_raise_from_exc from __python_future_raise_from_cause"
exec(execstr, myglobals, mylocals)
def raise_(tp, value=None, tb=None):
"""
A function that matches the Python 2.x ``raise`` statement. This
allows re-raising exceptions with the cls value and traceback on
Python 2 and 3.
"""
if value is not None and isinstance(tp, Exception):
raise TypeError("instance exception may not have a separate value")
if value is not None:
exc = tp(value)
else:
exc = tp
if exc.__traceback__ is not tb:
raise exc.with_traceback(tb)
raise exc
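    # Usage sketch (illustrative, not from the original file):
    #   raise_(ValueError, "bad input")   # raises ValueError("bad input")
    # Passing an already-constructed exception as ``tp`` with value=None
    # re-raises it, optionally attaching the supplied traceback.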
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc.with_traceback(traceback)
else:
def raise_from(exc, cause):
"""
Equivalent to:
raise EXCEPTION from CAUSE
on Python 3. (See PEP 3134).
"""
# Is either arg an exception class (e.g. IndexError) rather than
# instance (e.g. IndexError('my message here')? If so, pass the
# name of the class undisturbed through to "raise ... from ...".
if isinstance(exc, type) and issubclass(exc, Exception):
e = exc()
# exc = exc.__name__
# execstr = "e = " + _repr_strip(exc) + "()"
# myglobals, mylocals = _get_caller_globals_and_locals()
# exec(execstr, myglobals, mylocals)
else:
e = exc
e.__suppress_context__ = False
if isinstance(cause, type) and issubclass(cause, Exception):
e.__cause__ = cause()
e.__suppress_context__ = True
elif cause is None:
e.__cause__ = None
e.__suppress_context__ = True
elif isinstance(cause, BaseException):
e.__cause__ = cause
e.__suppress_context__ = True
else:
raise TypeError("exception causes must derive from BaseException")
e.__context__ = sys.exc_info()[1]
raise e
exec('''
def raise_(tp, value=None, tb=None):
raise tp, value, tb
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc, None, traceback
'''.strip())
raise_with_traceback.__doc__ = (
"""Raise exception with existing traceback.
If traceback is not passed, uses sys.exc_info() to get traceback."""
)
# Deprecated alias for backward compatibility with ``future`` versions < 0.11:
reraise = raise_
def implements_iterator(cls):
'''
From jinja2/_compat.py. License: BSD.
Use as a decorator like this::
@implements_iterator
class UppercasingIterator(object):
def __init__(self, iterable):
self._iter = iter(iterable)
def __iter__(self):
return self
def __next__(self):
return next(self._iter).upper()
'''
if PY3:
return cls
else:
cls.next = cls.__next__
del cls.__next__
return cls
if PY3:
get_next = lambda x: x.next
else:
get_next = lambda x: x.__next__
def encode_filename(filename):
if PY3:
return filename
else:
if isinstance(filename, unicode):
return filename.encode('utf-8')
return filename
def is_new_style(cls):
"""
Python 2.7 has both new-style and old-style classes. Old-style classes can
be pesky in some circumstances, such as when using inheritance. Use this
function to test for whether a class is new-style. (Python 3 only has
new-style classes.)
"""
return hasattr(cls, '__class__') and ('__dict__' in dir(cls)
or hasattr(cls, '__slots__'))
# The native platform string and bytes types. Useful because ``str`` and
# ``bytes`` are redefined on Py2 by ``from future.builtins import *``.
native_str = str
native_bytes = bytes
def istext(obj):
"""
Deprecated. Use::
>>> isinstance(obj, str)
after this import:
>>> from future.builtins import str
"""
return isinstance(obj, type(u''))
def isbytes(obj):
"""
Deprecated. Use::
>>> isinstance(obj, bytes)
after this import:
>>> from future.builtins import bytes
"""
return isinstance(obj, type(b''))
def isnewbytes(obj):
"""
Equivalent to the result of ``isinstance(obj, newbytes)`` were
``__instancecheck__`` not overridden on the newbytes subclass. In
    other words, is it REALLY a newbytes instance, not a Py2 native str
object?
"""
# TODO: generalize this so that it works with subclasses of newbytes
# Import is here to avoid circular imports:
from future.types.newbytes import newbytes
return type(obj) == newbytes
def isint(obj):
"""
Deprecated. Tests whether an object is a Py3 ``int`` or either a Py2 ``int`` or
``long``.
Instead of using this function, you can use:
>>> from future.builtins import int
>>> isinstance(obj, int)
The following idiom is equivalent:
>>> from numbers import Integral
>>> isinstance(obj, Integral)
"""
return isinstance(obj, numbers.Integral)
def native(obj):
"""
On Py3, this is a no-op: native(obj) -> obj
On Py2, returns the corresponding native Py2 types that are
superclasses for backported objects from Py3:
>>> from builtins import str, bytes, int
>>> native(str(u'ABC'))
u'ABC'
>>> type(native(str(u'ABC')))
unicode
>>> native(bytes(b'ABC'))
b'ABC'
>>> type(native(bytes(b'ABC')))
bytes
>>> native(int(10**20))
100000000000000000000L
>>> type(native(int(10**20)))
long
Existing native types on Py2 will be returned unchanged:
>>> type(native(u'ABC'))
unicode
"""
if hasattr(obj, '__native__'):
return obj.__native__()
else:
return obj
# Implementation of exec_ is from ``six``:
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
# Defined here for backward compatibility:
def old_div(a, b):
"""
DEPRECATED: import ``old_div`` from ``past.utils`` instead.
Equivalent to ``a / b`` on Python 2 without ``from __future__ import
division``.
TODO: generalize this to other objects (like arrays etc.)
"""
if isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral):
return a // b
else:
return a / b
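# Illustration of the behaviour documented above (hedged example):
#   old_div(7, 2)   -> 3    (both operands integral, so floor division)
#   old_div(7.0, 2) -> 3.5  (otherwise true division)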
def as_native_str(encoding='utf-8'):
'''
A decorator to turn a function or method call that returns text, i.e.
unicode, into one that returns a native platform str.
Use it as a decorator like this::
from __future__ import unicode_literals
class MyClass(object):
@as_native_str(encoding='ascii')
def __repr__(self):
return next(self._iter).upper()
'''
if PY3:
return lambda f: f
else:
def encoder(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs).encode(encoding=encoding)
return wrapper
return encoder
# listvalues and listitems definitions from Nick Coghlan's (withdrawn)
# PEP 496:
try:
dict.iteritems
except AttributeError:
# Python 3
def listvalues(d):
return list(d.values())
def listitems(d):
return list(d.items())
else:
# Python 2
def listvalues(d):
return d.values()
def listitems(d):
return d.items()
if PY3:
def ensure_new_type(obj):
return obj
else:
def ensure_new_type(obj):
from future.types.newbytes import newbytes
from future.types.newstr import newstr
from future.types.newint import newint
from future.types.newdict import newdict
native_type = type(native(obj))
# Upcast only if the type is already a native (non-future) type
if issubclass(native_type, type(obj)):
# Upcast
if native_type == str: # i.e. Py2 8-bit str
return newbytes(obj)
elif native_type == unicode:
return newstr(obj)
elif native_type == int:
return newint(obj)
elif native_type == long:
return newint(obj)
elif native_type == dict:
return newdict(obj)
else:
return obj
else:
# Already a new type
assert type(obj) in [newbytes, newstr]
return obj
__all__ = ['PY2', 'PY26', 'PY3', 'PYPY',
'as_native_str', 'bind_method', 'bord', 'bstr',
'bytes_to_native_str', 'encode_filename', 'ensure_new_type',
'exec_', 'get_next', 'getexception', 'implements_iterator',
'is_new_style', 'isbytes', 'isidentifier', 'isint',
'isnewbytes', 'istext', 'iteritems', 'iterkeys', 'itervalues',
'lfilter', 'listitems', 'listvalues', 'lmap', 'lrange',
'lzip', 'native', 'native_bytes', 'native_str',
'native_str_to_bytes', 'old_div',
'python_2_unicode_compatible', 'raise_',
'raise_with_traceback', 'reraise', 'text_to_native_str',
'tobytes', 'viewitems', 'viewkeys', 'viewvalues',
'with_metaclass'
]
|
gpl-3.0
|
musically-ut/statsmodels
|
statsmodels/examples/tsa/ex_arma_all.py
|
34
|
1982
|
from __future__ import print_function
import numpy as np
from numpy.testing import assert_almost_equal
import matplotlib.pyplot as plt
import statsmodels.sandbox.tsa.fftarma as fa
from statsmodels.tsa.descriptivestats import TsaDescriptive
from statsmodels.tsa.arma_mle import Arma
x = fa.ArmaFft([1, -0.5], [1., 0.4], 40).generate_sample(size=200, burnin=1000)
d = TsaDescriptive(x)
d.plot4()
#d.fit(order=(1,1))
d.fit((1,1), trend='nc')
print(d.res.params)
modc = Arma(x)
resls = modc.fit(order=(1,1))
print(resls[0])
rescm = modc.fit_mle(order=(1,1), start_params=[-0.4,0.4, 1.])
print(rescm.params)
#decimal 1 corresponds to threshold of 5% difference
assert_almost_equal(resls[0] / d.res.params, 1, decimal=1)
assert_almost_equal(rescm.params[:-1] / d.res.params, 1, decimal=1)
#copied to tsa.tests
plt.figure()
plt.plot(x, 'b-o')
plt.plot(modc.predicted(), 'r-')
plt.figure()
plt.plot(modc.error_estimate)
#plt.show()
from statsmodels.miscmodels.tmodel import TArma
modct = TArma(x)
reslst = modc.fit(order=(1,1))
print(reslst[0])
rescmt = modct.fit_mle(order=(1,1), start_params=[-0.4,0.4, 10, 1.],maxiter=500,
maxfun=500)
print(rescmt.params)
from statsmodels.tsa.arima_model import ARMA
mkf = ARMA(x)
##rkf = mkf.fit((1,1))
##rkf.params
rkf = mkf.fit((1,1), trend='nc')
print(rkf.params)
from statsmodels.tsa.arima_process import arma_generate_sample
np.random.seed(12345)
y_arma22 = arma_generate_sample([1.,-.85,.35, -0.1],[1,.25,-.7], nsample=1000)
##arma22 = ARMA(y_arma22)
##res22 = arma22.fit(trend = 'nc', order=(2,2))
##print 'kf ',res22.params
##res22css = arma22.fit(method='css',trend = 'nc', order=(2,2))
##print 'css', res22css.params
mod22 = Arma(y_arma22)
resls22 = mod22.fit(order=(2,2))
print('ls ', resls22[0])
resmle22 = mod22.fit_mle(order=(2,2), maxfun=2000)
print('mle', resmle22.params)
f = mod22.forecast()
f3 = mod22.forecast3(start=900)[-20:]
print(y_arma22[-10:])
print(f[-20:])
print(f3[-109:-90])
plt.show()
|
bsd-3-clause
|
talbrecht/pism_pik
|
test/vnreport.py
|
1
|
8967
|
#!/usr/bin/env python
from pylab import close, figure, clf, hold, plot, xlabel, ylabel, xticks, yticks, axis, legend, title, grid, show, savefig
from numpy import array, polyfit, polyval, log10, floor, ceil, unique
import sys
try:
from netCDF4 import Dataset as NC
except:
print "netCDF4 is not installed!"
sys.exit(1)
class Plotter:
def __init__(self, save_figures, nc, file_format):
self.save_figures = save_figures
self.nc = nc
self.file_format = file_format
def plot(self, x, vars, testname, plot_title):
# This mask lets us choose data corresponding to a particular test:
test = array(map(chr, self.nc.variables['test'][:]))
mask = (test == testname)
# If we have less than 2 points to plot, then bail.
if (sum(mask) < 2):
print "Skipping Test %s %s (not enough data to plot)" % (testname, plot_title)
return
# Get the independent variable and transform it. Note that everywhere here
# I assume that neither dx (dy, dz) nor errors can be zero or negative.
dx = self.nc.variables[x][mask]
dim = log10(dx)
figure(figsize=(10, 6))
clf()
hold(True)
colors = ['red', 'blue', 'green', 'black', 'brown', 'cyan']
for (v, c) in zip(vars, colors):
# Get a particular variable, transform and fit a line through it:
data = log10(self.nc.variables[v][mask])
p = polyfit(dim, data, 1)
# Try to get the long_name, use short_name if it fails:
try:
name = self.nc.variables[v].long_name
except:
name = v
# Create a label for the independent variable:
if (x == "dx"):
dim_name = "\Delta x"
if (x == "dy"):
dim_name = "\Delta y"
if (x == "dz"):
dim_name = "\Delta z"
if (x == "dzb"):
dim_name = "\Delta z_{bed}"
# Variable label:
var_label = "%s, $O(%s^{%1.2f})$" % (name, dim_name, p[0])
print "Test {} {}: convergence rate: O(dx^{:1.4f})".format(testname, name, p[0])
# Plot errors and the linear fit:
plot(dim, data, label=var_label, marker='o', color=c)
plot(dim, polyval(p, dim), ls="--", color=c)
# Shrink axes, then expand vertically to have integer powers of 10:
axis('tight')
_, _, ymin, ymax = axis()
axis(ymin=floor(ymin), ymax=ceil(ymax))
# Switch to km if dx (dy, dz) are big:
units = self.nc.variables[x].units
if (dx.min() > 1000.0 and (units == "meters")):
dx = dx / 1000.0
units = "km"
# Round grid spacing in x-ticks:
xticks(dim, map(lambda(x): "%d" % x, dx))
xlabel("$%s$ (%s)" % (dim_name, units))
# Use default (figured out by matplotlib) locations, but change labels for y-ticks:
loc, _ = yticks()
yticks(loc, map(lambda(x): "$10^{%1.1f}$" % x, loc))
# Make sure that all variables given have the same units:
try:
ylabels = array(map(lambda(x): self.nc.variables[x].units, vars))
if (any(ylabels != ylabels[0])):
print "Incompatible units!"
else:
ylabel(ylabels[0])
except:
pass
# Legend, grid and the title:
legend(loc='best', borderpad=1, labelspacing=0.5, handletextpad=0.75, handlelength=0.02)
# prop = FontProperties(size='smaller'),
grid(True)
title("Test %s %s (%s)" % (testname, plot_title, self.nc.source))
if self.save_figures:
filename = "%s_%s_%s.%s" % (self.nc.source.replace(" ", "_"),
testname.replace(" ", "_"),
plot_title.replace(" ", "_"),
self.file_format)
savefig(filename)
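    # Note on the fit used above: polyfit on log10(error) against log10(dx)
    # returns a slope p[0] that is the observed order of convergence, since
    # error ~ C * dx**p implies log10(error) = p*log10(dx) + log10(C).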
def plot_tests(self, list_of_tests):
for test_name in list_of_tests:
# thickness, volume and eta errors:
if test_name in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'L']:
self.plot('dx', ["maximum_thickness", "average_thickness"], test_name, "ice thickness errors")
self.plot('dx', ["relative_volume"], test_name, "relative ice volume errors")
self.plot('dx', ["relative_max_eta"], test_name, r"relative max eta errors")
# errors that are reported for test E only:
if (test_name == 'E'):
self.plot('dx', ["maximum_basal_velocity", "average_basal_velocity"], 'E', r"basal velocity errors")
self.plot('dx', ["maximum_basal_u", "maximum_basal_v"], 'E', "basal velocity (ub and vb) errors")
self.plot('dx', ["relative_basal_velocity"], 'E', "relative basal velocity errors")
# F and G temperature, sigma and velocity errors:
if test_name in ['F', 'G']:
self.plot('dx', ["maximum_sigma", "average_sigma"],
test_name, "strain heating errors")
self.plot('dx', ["maximum_temperature", "average_temperature",
"maximum_basal_temperature", "average_basal_temperature"],
test_name, "ice temperature errors")
self.plot('dx', ["maximum_surface_velocity", "maximum_surface_w"],
test_name, "maximum ice surface velocity errors")
self.plot('dx', ["average_surface_velocity", "average_surface_w"],
test_name, "average ice surface velocity errors")
# test I: plot only the u component
if test_name == 'I':
self.plot('dy', ["relative_velocity"],
test_name, "relative velocity errors")
self.plot('dy', ["maximum_u", "average_u"],
test_name, "velocity errors")
# tests J and M:
if test_name in ['J', 'M']:
self.plot('dx', ["relative_velocity"],
test_name, "relative velocity errors")
self.plot('dx', ["max_velocity", "maximum_u", "average_u", "maximum_v", "average_v"],
test_name, "velocity errors")
# test K temperature errors:
if (test_name == 'K'):
self.plot('dz', ["maximum_temperature", "average_temperature",
"maximum_bedrock_temperature", "average_bedrock_temperature"],
'K', "temperature errors")
# test O temperature and basal melt rate errors:
if (test_name == 'O'):
self.plot('dz', ["maximum_temperature", "average_temperature",
"maximum_bedrock_temperature", "average_bedrock_temperature"],
'K', "temperature errors")
self.plot('dz', ["maximum_basal_melt_rate"],
'O', "basal melt rate errors")
# test V: plot only the u component
if test_name == 'V':
self.plot('dx', ["relative_velocity"],
test_name, "relative velocity errors")
self.plot('dx', ["maximum_u", "average_u"],
test_name, "velocity errors")
from argparse import ArgumentParser
parser = ArgumentParser()
parser.description = """Plot script for PISM verification results."""
parser.add_argument("filename",
help="The NetCDF error report file name, usually produces by running vfnow.py")
parser.add_argument("-t", nargs="+", dest="tests_to_plot", default=None,
help="Test results to plot (space-delimited list)")
parser.add_argument("--save_figures", dest="save_figures", action="store_true",
help="Save figures to .png files")
parser.add_argument("--file_format", dest="file_format", default="png",
help="File format for --save_figures (png, pdf, jpg, ...)")
options = parser.parse_args()
input_file = NC(options.filename, 'r')
available_tests = unique(array(map(chr, input_file.variables['test'][:])))
tests_to_plot = options.tests_to_plot
if len(available_tests) == 1:
if tests_to_plot == None:
tests_to_plot = available_tests
else:
if (tests_to_plot == None):
print """Please choose tests to plot using the -t option.
(Input file %s has reports for tests %s available.)""" % (options.filename, str(available_tests))
sys.exit(0)
if (tests_to_plot[0] == "all"):
tests_to_plot = available_tests
close('all')
p = Plotter(options.save_figures, input_file, options.file_format)
p.plot_tests(tests_to_plot)
try:
# show() will break if we didn't plot anything
if not options.save_figures:
show()
except:
pass
|
gpl-3.0
|
pranavtbhat/EE219
|
project2/c.py
|
2
|
4916
|
import cPickle
from sklearn.feature_extraction import text
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
from pandas import DataFrame
import nltk
import operator
import os
import numpy as np
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize import RegexpTokenizer
import math
# RegExpTokenizer reduces term count from 29k to 25k
class StemTokenizer(object):
def __init__(self):
self.wnl = WordNetLemmatizer()
self.snowball_stemmer = SnowballStemmer("english", ignore_stopwords=True)
self.regex_tokenizer = RegexpTokenizer(r'\w+')
def __call__(self, doc):
# tmp = [self.wnl.lemmatize(t) for t in word_tokenize(doc)]
tmp = [self.snowball_stemmer.stem(t) for t in self.regex_tokenizer.tokenize(doc)]
return tmp
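# Rough usage sketch for the tokenizer above (output is approximate and depends
# on the installed NLTK data):
#   StemTokenizer()("graphics cards and graphics drivers")
#   -> something like ['graphic', 'card', 'and', 'graphic', 'driver']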
def calculate_tcicf(freq, maxFreq, categories, categories_per_term):
val= ((0.5+(0.5*(freq/float(maxFreq))))*math.log10(categories/float(1+categories_per_term)))
return val
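# Worked example for the tc-icf weight above (hypothetical numbers): a term at
# its category's maximum frequency (freq == maxFreq) that appears in 4 of the
# 20 categories gets
#   (0.5 + 0.5*1.0) * log10(20 / (1 + 4)) = log10(4) ~= 0.602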
all_categories=['comp.graphics',
'comp.os.ms-windows.misc',
'comp.sys.ibm.pc.hardware',
'comp.sys.mac.hardware',
'comp.windows.x',
'rec.autos',
'rec.motorcycles',
'rec.sport.baseball',
'rec.sport.hockey',
'alt.atheism',
'sci.crypt',
'sci.electronics',
'sci.med',
'sci.space',
'soc.religion.christian',
'misc.forsale',
'talk.politics.guns',
'talk.politics.mideast',
'talk.politics.misc',
'talk.religion.misc'
]
all_docs_per_category=[]
for cat in all_categories:
categories=[cat]
all_data = fetch_20newsgroups(subset='train',categories=categories).data
temp = ""
for doc in all_data:
temp= temp + " "+doc
all_docs_per_category.append(temp)
stop_words = text.ENGLISH_STOP_WORDS
# Ignore words appearing in less than 2 documents or more than 99% documents.
# min_df reduces from 100k to 29k
vectorizer = CountVectorizer(analyzer='word',stop_words=stop_words,ngram_range=(1, 1), tokenizer=StemTokenizer(),
lowercase=True,max_df=0.99, min_df=2)
#
# test_corpus = [
# 'This document is the first document.',
# 'This is the second second document.',
# 'And the third one with extra extra extra text.',
# 'Is this the first document?',
# ]
vectorized_newsgroups_train = vectorizer.fit_transform(all_docs_per_category)
#print "All terms:", vectorizer.get_feature_names()
#print vectorized_newsgroups_train.shape
#print vectorized_newsgroups_train
def calculate():
max_term_freq_per_category=[0]*vectorized_newsgroups_train.shape[0]
category_count_per_term=[0]*vectorized_newsgroups_train.shape[1]
for i in range(0,vectorized_newsgroups_train.shape[0],1):
max_term_freq_per_category[i]=np.amax(vectorized_newsgroups_train[i,:])
for i in range(0,vectorized_newsgroups_train.shape[1],1):
for j in range(0,vectorized_newsgroups_train.shape[0],1):
category_count_per_term[i]+= (0 if vectorized_newsgroups_train[j,i]==0 else 1)
# print vectorized_newsgroups_train.shape
#
# print len(max_term_freq_per_category)
# print len(category_count_per_term)
# Calculate tc-icf - Notice the matrix is sparse!
# print len(vectorizer.get_feature_names())
    # one row per term, one column per category
    tf_icf = np.zeros((len(vectorizer.get_feature_names()), vectorized_newsgroups_train.shape[0]))
for i in range(vectorized_newsgroups_train.shape[1]):
row = vectorized_newsgroups_train[:,i].toarray()
for j in range(vectorized_newsgroups_train.shape[0]):
# print row[j,0],max_term_freq_per_category[j],len(all_categories),category_count_per_term[i]
tf_icf[i][j] = calculate_tcicf(row[j,0],max_term_freq_per_category[j],len(all_categories),category_count_per_term[i])
# cPickle.dump(tf_icf,open("data/tc_icf.pkl", "wb"))
return tf_icf
# if not (os.path.isfile("data/tc_icf.pkl")):
# print "Calculating"
# tf_icf=calculate()
# else:
# tf_icf=cPickle.load(open("data/tc_icf.pkl", "rb"))
tf_icf=calculate()
# print top 10 significant term for this class
for category in [2,3,14,15]:
tficf={}
term_index=0;
for term in vectorizer.get_feature_names():
tficf[term]=tf_icf[term_index][category]
term_index+=1
significant_terms = dict(sorted(tficf.iteritems(), key=operator.itemgetter(1), reverse=True)[:10]) #get 10 significant terms
print significant_terms.keys()
|
unlicense
|
ron1818/Singaboat_RobotX2016
|
robotx_nav/nodes/task1_toplevel.py
|
3
|
9161
|
#!/usr/bin/env python
""" task 1:
-----------------
Created by Ren Ye @ 2016-11-06
Authors: Ren Ye, Reinaldo
-----------------
<put the descriptions from robotx.org pdf file>
<put the algorithms in natural language, can use bullet points, best is to use markdown format>
<if you have plan b, can put it here>
## example ##
+ Go to start point
+ Rotate in position to detect red_1 and green_1 buoys
+ Plot perpendicular waypoints wrt to position of red and green buoys
+ Move towards waypoints move_base_forward
+ meanwhile requesting positions of red_2 and green_2
+ shutdown move_base_forward, create new move_base_forward towards mid of red_2 and green_2
<change log put here>
### @ 2016-11-06 ###
+ create template
renye's approach:
1. drive to gps waypoint
2. slowly in place rotate # noneed
3. detect red and green totems by any camera
4. rotate to bow to red and green totems
5. roi of red in bow/left and roi of green in bow/right, calculate center
6. drive until roi vanishes from both bow cameras, detect totem from port and starboard
7. see new roi from bow
8. drive with 5 and 6
reinaldo's approach:
1. fill bucket of markers array until full
2. do k-means clustering to differentiate monocolor totems
3. get closest pairs
4. plan based on pairs, replan if new plan is far from old plan
5. loop to 2.
6. terminate if displacement from start to end > termination_distance
"""
import rospy
import multiprocessing as mp
import math
import time
import numpy as np
import os
import tf
from sklearn.cluster import KMeans
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point, Pose, Quaternion
from visualization_msgs.msg import MarkerArray, Marker
from move_base_forward import Forward
from move_base_force_cancel import ForceCancel
from tf.transformations import euler_from_quaternion
def constant_heading(goal):
constant_obj = Forward(nodename="constant_heading", target=goal, waypoint_separation=5, is_relative=False)
def cancel_forward():
os.system('rosnode kill constant_heading')
class PassGates(object):
pool = mp.Pool()
x0, y0, yaw0= 0, 0, 0
MAX_DATA=30
markers_array=MarkerArray()
red_totem=np.zeros((MAX_DATA, 2)) #unordered list
green_totem=np.zeros((MAX_DATA, 2))
red_centers=np.zeros((2, 2)) #ordered list of centers x, y
green_centers=np.zeros((2, 2))
red_position=np.zeros((2, 2)) #ordered list of centers x, y
green_position=np.zeros((2, 2))
red_counter=0
green_counter=0
replan_min=5
termination_displacement=60
def __init__(self):
print("starting task 1")
rospy.init_node('task_1', anonymous=True)
rospy.Subscriber("/filtered_marker_array", MarkerArray, self.marker_callback, queue_size = 50)
self.marker_pub= rospy.Publisher('waypoint_markers', Marker, queue_size=5)
self.odom_received = False
self.base_frame = rospy.get_param("~base_frame", "base_link")
self.fixed_frame = rospy.get_param("~fixed_frame", "map")
# tf_listener
self.tf_listener = tf.TransformListener()
rospy.wait_for_message("/odometry/filtered/global", Odometry)
rospy.Subscriber("/odometry/filtered/global", Odometry, self.odom_callback, queue_size=50)
while not self.odom_received:
rospy.sleep(1)
print("odom received")
init_position =np.array([self.x0, self.y0, 0])
prev_target=np.array([self.x0, self.y0, 0])
while(self.red_counter<self.MAX_DATA and self.green_counter<self.MAX_DATA):
#wait for data bucket to fill up
time.sleep(1)
print("bucket full")
while not rospy.is_shutdown():
self.matrix_reorder()
print("reorder complete")
target = self.plan_waypoint()
print(target)
if self.euclid_distance(target, prev_target)>self.replan_min:
#replan
#force cancel
self.pool.apply(cancel_forward)
#plan new constant heading
print("replan")
self.pool.apply_async(constant_heading, args = (target, ))
prev_target=target
else:
pass
#termination condition
if self.euclid_distance(np.array([self.x0, self.y0, 0]), init_position)>self.termination_displacement:
self.pool.apply(cancel_forward)
print("Task 1 Completed")
break
time.sleep(1)
self.pool.close()
self.pool.join()
def plan_waypoint(self):
distance=20
dis_red=1000
dis_green=1000
#find closest available totem pairs
for m in self.red_position:
if self.distance_from_boat(m) < dis_red:
nearest_red=m
dis_red=self.distance_from_boat(m)
for n in self.green_position:
if self.distance_from_boat(n) < dis_green:
nearest_green=n
dis_green=self.distance_from_boat(n)
#plan
dis=nearest_red-nearest_green
[x_center, y_center]=[(nearest_red[0]+nearest_green[0])/2, (nearest_red[1]+nearest_green[1])/2]
if math.sqrt(dis.dot(dis.T)) <20:
theta=math.atan2(math.sin(math.atan2(nearest_green[1]-nearest_red[1], nearest_green[0]-nearest_red[0])+math.pi/2), math.cos(math.atan2(nearest_green[1]-nearest_red[1], nearest_green[0]-nearest_red[0])+math.pi/2))
#theta = math.atan2(nearest_green[1]-nearest_red[1], nearest_green[0]-nearest_red[0])+math.pi/2
else:
theta = math.atan2(nearest_green[1]-nearest_red[1], nearest_green[0]-nearest_red[0])+math.atan2(10,30)
return np.array([x_center+distance*math.cos(theta), y_center+distance*math.sin(theta), theta])
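    # Descriptive note: the waypoint above sits ~`distance` (20 m) away from
    # the midpoint of the closest red/green pair; when the pair is less than
    # 20 m apart (a plausible gate) the heading is perpendicular to the
    # red->green line, otherwise it is the red->green direction skewed by
    # atan2(10, 30).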
def distance_from_boat(self, target):
return math.sqrt((target[0]-self.x0)**2+(target[1]-self.y0)**2)
def euclid_distance(self, target1, target2):
return math.sqrt((target1[0]-target2[0])**2+(target1[1]-target2[1])**2)
def is_complete(self):
pass
def marker_callback(self, msg):
if len(msg.markers)>0:
for i in range(len(msg.markers)):
if msg.markers[i].type == 3:
#may append more than 1 markers
if msg.markers[i].id == 0:
self.red_totem[self.red_counter%self.MAX_DATA]=[msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]
self.red_counter+=1
elif msg.markers[i].id == 1:
self.green_totem[self.green_counter%self.MAX_DATA]=[msg.markers[i].pose.position.x, msg.markers[i].pose.position.y]
self.green_counter+=1
else:
pass
# list is full
if (self.red_counter>self.MAX_DATA):
red_kmeans = KMeans(n_clusters=2).fit(self.red_totem)
self.red_centers=red_kmeans.cluster_centers_
if(self.green_counter>self.MAX_DATA):
green_kmeans = KMeans(n_clusters=2).fit(self.green_totem)
self.green_centers=green_kmeans.cluster_centers_
#visualize markers in rviz
for i in range(len(msg.markers)):
self.marker_pub.publish(msg.markers[i])
def matrix_reorder(self):
if self.red_centers[0].dot(self.red_centers[0].T)< self.red_centers[1].dot(self.red_centers[1].T):
self.red_position=self.red_centers
else:
self.red_position[0]=self.red_centers[1]
self.red_position[1]=self.red_centers[0]
if self.green_centers[0].dot(self.green_centers[0].T)< self.green_centers[1].dot(self.green_centers[1].T):
self.green_position=self.green_centers
else:
self.green_position[0]=self.green_centers[1]
self.green_position[1]=self.green_centers[0]
def get_tf(self, fixed_frame, base_frame):
""" transform from base_link to map """
trans_received = False
while not trans_received:
try:
(trans, rot) = self.tf_listener.lookupTransform(fixed_frame,
base_frame,
rospy.Time(0))
trans_received = True
return (Point(*trans), Quaternion(*rot))
except (tf.LookupException,
tf.ConnectivityException,
tf.ExtrapolationException):
pass
def odom_callback(self, msg):
trans, rot = self.get_tf("map", "base_link")
self.x0 = trans.x
self.y0 = trans.y
_, _, self.yaw0 = euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))
self.odom_received = True
if __name__ == '__main__':
try:
PassGates()
# stage 1: gps
except rospy.ROSInterruptException:
rospy.loginfo("Task 1 Finished")
|
gpl-3.0
|
anaderi/lhcb_trigger_ml
|
hep_ml/experiments/gradient_boosting.py
|
1
|
20009
|
from __future__ import division, print_function
import copy
import numbers
import numpy
import pandas
from scipy.special import expit, logit
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.ensemble._gradient_boosting import _random_sample_mask
from sklearn.ensemble.gradient_boosting import LossFunction
from sklearn.tree.tree import DecisionTreeRegressor, DTYPE
from sklearn.utils.random import check_random_state
from sklearn.utils.validation import check_arrays, column_or_1d, array2d
from hep_ml.commonutils import check_sample_weight, generate_sample, map_on_cluster, indices_of_values
from hep_ml.losses import AbstractLossFunction
from transformations import enhance_data, Shuffler
real_s = 691.988607712
real_b = 410999.847322
#region Functions for measurements
def get_higgs_data(train_file = '/Users/axelr/ipython/datasets/higgs/training.csv'):
data = pandas.read_csv(train_file, index_col='EventId')
answers_bs = numpy.ravel(data.Label)
weights = numpy.ravel(data.Weight)
data = data.drop(['Label', 'Weight'], axis=1)
answers = numpy.zeros(len(answers_bs), dtype=numpy.int)
answers[answers_bs == 's'] = 1
return data, answers, weights
def AMS(answers, predictions, sample_weight):
""" Predictions are classes """
assert len(answers) == len(predictions) == len(sample_weight)
predictions = column_or_1d(predictions)
total_s = numpy.sum(sample_weight[answers > 0.5])
total_b = numpy.sum(sample_weight[answers < 0.5])
s = numpy.sum(sample_weight[answers * predictions > 0.5])
b = numpy.sum(sample_weight[(1 - answers) * predictions > 0.5])
s *= real_s / total_s
b *= real_b / total_b
br = 10.
radicand = 2 * ( (s+b+br) * numpy.log(1.0 + s/(b+br)) - s)
if radicand < 0:
raise ValueError('Radicand is negative')
else:
return numpy.sqrt(radicand)
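# Background note: the quantity above is the approximate median significance
# used in the HiggsML challenge,
#   AMS = sqrt( 2*((s + b + b_r)*ln(1 + s/(b + b_r)) - s) ),  with b_r = 10,
# where s and b are first rescaled to the full expected signal/background
# yields (real_s, real_b).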
def compute_ams_on_cuts(answers, predictions, sample_weight):
""" Prediction is probabilities"""
assert len(answers) == len(predictions) == len(sample_weight)
answers = column_or_1d(answers)
predictions = column_or_1d(predictions)
sample_weight = column_or_1d(sample_weight)
order = numpy.argsort(predictions)[::-1]
reordered_answers = answers[order]
reordered_weights = sample_weight[order]
s_cumulative = numpy.cumsum(reordered_answers * reordered_weights)
b_cumulative = numpy.cumsum((1 - reordered_answers) * reordered_weights)
b_cumulative *= real_b / b_cumulative[-1]
s_cumulative *= real_s / s_cumulative[-1]
br = 10.
s = s_cumulative
b = b_cumulative
radicands = 2 * ((s + b + br) * numpy.log(1.0 + s/(b + br)) - s)
return predictions[order], radicands
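# How the scan above works: events are sorted by predicted score in descending
# order, so the cumulative sums at position k give the signal/background kept
# when the threshold is placed just below the k-th highest score; the returned
# radicands are therefore AMS**2 for every possible cut in a single pass.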
def optimal_AMS(answers, predictions, sample_weight):
""" Prediction is probabilities """
cuts, radicands = compute_ams_on_cuts(answers, predictions, sample_weight)
return numpy.sqrt(numpy.max(radicands))
def plot_ams_report(answers, predictions, sample_weight=None):
import pylab
cuts, radicands = compute_ams_on_cuts(answers, predictions, sample_weight)
pylab.figure(figsize=(18, 9))
pylab.subplot(131)
pylab.title('On cuts')
pylab.plot(cuts, numpy.sqrt(numpy.clip(radicands, 0, 100)))
pylab.subplot(132)
pylab.title('On signal order')
order = numpy.argsort(predictions)[::-1]
pylab.plot( numpy.sqrt(numpy.clip(radicands[answers[order] > 0.5], 0, 100)) )
pylab.subplot(133)
pylab.title('On common order')
pylab.plot( numpy.sqrt(radicands) )
def plot_AMS_on_cuts(answers, predictions, sample_weight):
""" Prediction is probabilities """
import pylab
cuts, radicands = compute_ams_on_cuts(answers, predictions, sample_weight)
pylab.plot(cuts, numpy.sqrt(numpy.clip(radicands, 0, 100)))
def plot_AMS_on_signal_order(answers, predictions, sample_weight):
""" Prediction is probabilities """
import pylab
cuts, radicands = compute_ams_on_cuts(answers, predictions, sample_weight)
order = numpy.argsort(predictions)[::-1]
pylab.plot( numpy.sqrt(numpy.clip(radicands[answers[order] > 0.5], 0, 100)) )
#endregion
#region Losses
class MyLossFunction(BaseEstimator):
def fit(self, X, y, sample_weight=None):
pass
def negative_gradient(self, y, y_pred, sample_weight=None):
raise NotImplementedError()
def update_terminal_regions(self, tree, X, y, residual, pred, sample_mask, sample_weight):
assert y.ndim == 1 and residual.ndim == 1 and \
pred.ndim == 1 and sample_mask.ndim == 1 and sample_weight.ndim == 1
# residual is negative gradient
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
for leaf, leaf_indices in indices_of_values(masked_terminal_regions):
if leaf == -1:
continue
self._update_terminal_region(tree, terminal_regions=masked_terminal_regions,
leaf=leaf, X=X, y=y, residual=residual, pred=pred,
sample_weight=sample_weight, leaf_indices=leaf_indices)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight, leaf_indices):
"""This function should select a better values for leaves"""
pass
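    # Added note (not in the original file): update_terminal_regions() above groups the
    # masked-in samples by the leaf they fall into (tree.apply) and hands each group to
    # _update_terminal_region(), which subclasses override to recompute that leaf's value.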
class LogitLossFunction(MyLossFunction):
def __init__(self, shift=0.):
MyLossFunction.__init__(self)
self.shift = shift
def __call__(self, y, y_pred, sample_weight=None):
y_signed = 2. * y - 1
sample_weight = check_sample_weight(y, sample_weight=sample_weight)
return numpy.sum(sample_weight * numpy.log(1 + numpy.exp(- y_signed * y_pred - self.shift)))
def negative_gradient(self, y, y_pred, sample_weight=None):
y_signed = 2. * y - 1
sample_weight = check_sample_weight(y, sample_weight=sample_weight)
return sample_weight * y_signed * expit(-y_signed * y_pred - self.shift)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight, leaf_indices):
"""Making one Newton step"""
# terminal_region = numpy.where(terminal_regions == leaf)[0]
terminal_region = leaf_indices
y = y.take(terminal_region, axis=0)
y_signed = 2. * y - 1
pred = pred.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region)
argument = -y_signed * pred - self.shift
n_gradient = numpy.sum(sample_weight * y_signed * expit(argument))
laplacian = numpy.sum(sample_weight / numpy.logaddexp(0., argument) / numpy.logaddexp(0., -argument))
tree.value[leaf, 0, 0] = n_gradient / laplacian
class AdaLossFunction(MyLossFunction):
def __init__(self, signal_curvature=1.):
self.signal_curvature = signal_curvature
# we need only one variable
MyLossFunction.__init__(self)
def fit(self, X, y, sample_weight=None):
pass
def _signed_multiplier(self, y):
result = numpy.ones(len(y), dtype=float)
result[y > 0.5] = - self.signal_curvature
return result
def _weight_multiplier(self, y):
result = numpy.ones(len(y), dtype=float)
result[y > 0.5] = 1 / self.signal_curvature
return result
def __call__(self, y, y_pred, sample_weight=None):
signed_multiplier = self._signed_multiplier(y)
weight_multiplier = self._weight_multiplier(y)
sample_weight = check_sample_weight(y, sample_weight=sample_weight)
return numpy.sum(sample_weight * weight_multiplier * numpy.exp(y_pred * signed_multiplier))
def negative_gradient(self, y, y_pred, sample_weight=None, **kargs):
multiplier = self._signed_multiplier(y)
y_signed = 2. * y - 1
sample_weight = check_sample_weight(y, sample_weight=sample_weight)
return sample_weight * y_signed * numpy.exp(y_pred * multiplier)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight, leaf_indices):
terminal_region = leaf_indices
curv = self.signal_curvature
y = y.take(terminal_region, axis=0)
pred = pred.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region)
w_sig = numpy.sum(sample_weight[y > 0.5] * numpy.exp(- curv * pred[y > 0.5]))
w_bck = numpy.sum(sample_weight[y < 0.5] * numpy.exp(pred[y < 0.5]))
# minimizing w_sig * exp(-curv * x) / curv + w_bck * exp(x)
w_sum = w_sig + w_bck
w_sig += 1e-4 * w_sum
w_bck += 1e-4 * w_sum
tree.value[leaf, 0, 0] = 1 / (1. + curv) * numpy.log(w_sig / w_bck)
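        # Added derivation note (not in the original file): setting the derivative of
        # w_sig * exp(-curv * x) / curv + w_bck * exp(x) to zero gives
        # w_bck * exp(x) = w_sig * exp(-curv * x), i.e. exp((1 + curv) * x) = w_sig / w_bck,
        # hence the leaf value x = log(w_sig / w_bck) / (1 + curv) assigned above.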
#endregion
#region Interpolation
def interpolate(vals, step, steps, use_log=False):
if isinstance(vals, numbers.Number):
return vals
t = numpy.clip(step / float(steps), 0, 1)
assert len(vals) == 2, 'Not two values'
if use_log:
return numpy.exp(numpy.interp(t, [0., 1.], numpy.log(vals)))
else:
return numpy.interp(t, [0., 1.], vals)
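# Hedged usage sketch (added for illustration, not part of the original file): shows how
# interpolate() turns a two-value schedule into a per-iteration value; it is never called.
def _interpolate_example():
    # Linear schedule: halfway between 0.5 and 0.1 at step 50 of 100 gives 0.3.
    linear = interpolate([0.5, 0.1], step=50, steps=100)
    # Log schedule: geometric interpolation, exp(mean of logs) = sqrt(0.5 * 0.1) ~= 0.224.
    logarithmic = interpolate([0.5, 0.1], step=50, steps=100, use_log=True)
    return linear, logarithmic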
#endregion
#region GradientBoosting
class GradientBoosting(BaseEstimator, ClassifierMixin):
def __init__(self, loss,
n_estimators=10,
learning_rate=1.,
max_depth=15,
min_samples_leaf=5,
min_samples_split=2,
max_features='auto',
subsample=1.,
criterion='mse',
splitter='best',
weights_in_loss=True,
update_tree=True,
update_on='all',
smearing=0.0,
recount_step=1000,
random_state=None):
"""
        Supports only two classes.
        :type loss: LossFunction
        :type n_estimators: int
        :type learning_rate: float
        :type max_depth: int | NoneType
        :type min_samples_leaf: int
        :type min_samples_split: int
        :type max_features: int | 'auto'
        :type subsample: float
        :type splitter: str
        :type weights_in_loss: bool
        :type update_on: str, one of 'all', 'same', 'other', 'random'
        :type smearing: float
"""
self.loss = loss
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.max_depth = max_depth
self.min_samples_leaf = min_samples_leaf
self.min_samples_split = min_samples_split
self.max_features = max_features
self.subsample = subsample
self.splitter = splitter
self.criterion = criterion
self.weights_in_loss = weights_in_loss
self.random_state = random_state
self.update_tree = update_tree
self.update_on = update_on
self.smearing = smearing
self.recount_step = recount_step
def fit(self, X, y, sample_weight=None):
shuffler = Shuffler(X, random_state=self.random_state)
X, y = check_arrays(X, y, dtype=DTYPE, sparse_format="dense", check_ccontiguous=True)
y = column_or_1d(y, warn=True)
n_samples = len(X)
n_inbag = int(self.subsample * n_samples)
sample_weight = check_sample_weight(y, sample_weight=sample_weight).copy()
self.random_state = check_random_state(self.random_state)
# skipping all checks
assert self.update_on in ['all', 'same', 'other', 'random']
y_pred = numpy.zeros(len(y), dtype=float)
self.classifiers = []
self.learning_rates = []
self.loss_values = []
self.loss = copy.copy(self.loss)
self.loss.fit(X, y, sample_weight=sample_weight)
iter_X = shuffler.generate(0.)
prev_smearing = 1
for iteration in range(self.n_estimators):
if iteration % self.recount_step == 0:
if prev_smearing > 0:
iter_smearing = interpolate(self.smearing, iteration, self.n_estimators)
prev_smearing = iter_smearing
iter_X = shuffler.generate(iter_smearing)
iter_X, = check_arrays(iter_X, dtype=DTYPE, sparse_format="dense", check_ccontiguous=True)
y_pred = numpy.zeros(len(y))
y_pred += sum(cl.predict(X) * rate for rate, cl in zip(self.learning_rates, self.classifiers))
self.loss_values.append(self.loss(y, y_pred, sample_weight=sample_weight))
tree = DecisionTreeRegressor(
criterion=self.criterion,
splitter=self.splitter,
max_depth=interpolate(self.max_depth, iteration, self.n_estimators),
min_samples_split=self.min_samples_split,
min_samples_leaf=interpolate(self.min_samples_leaf, iteration, self.n_estimators, use_log=True),
max_features=self.max_features,
random_state=self.random_state)
sample_mask = _random_sample_mask(n_samples, n_inbag, self.random_state)
loss_weight = sample_weight if self.weights_in_loss else numpy.ones(len(sample_weight))
tree_weight = sample_weight if not self.weights_in_loss else numpy.ones(len(sample_weight))
residual = self.loss.negative_gradient(y, y_pred, sample_weight=loss_weight)
tree.fit(numpy.array(iter_X)[sample_mask, :],
residual[sample_mask],
sample_weight=tree_weight[sample_mask], check_input=False)
# update tree leaves
if self.update_tree:
if self.update_on == 'all':
update_mask = numpy.ones(len(sample_mask), dtype=bool)
elif self.update_on == 'same':
update_mask = sample_mask
elif self.update_on == 'other':
update_mask = ~sample_mask
else: # random
update_mask = _random_sample_mask(n_samples, n_inbag, self.random_state)
self.loss.update_terminal_regions(tree.tree_, X=iter_X, y=y, residual=residual, pred=y_pred,
sample_mask=update_mask, sample_weight=sample_weight)
iter_learning_rate = interpolate(self.learning_rate, iteration, self.n_estimators, use_log=True)
y_pred += iter_learning_rate * tree.predict(X)
self.classifiers.append(tree)
self.learning_rates.append(iter_learning_rate)
return self
def decision_function(self, X):
X = array2d(X, dtype=DTYPE)
result = numpy.zeros(len(X))
for rate, estimator in zip(self.learning_rates, self.classifiers):
result += rate * estimator.predict(X)
return result
def staged_decision_function(self, X):
X = array2d(X, dtype=DTYPE)
result = numpy.zeros(len(X))
for rate, classifier in zip(self.learning_rates, self.classifiers):
result += rate * classifier.predict(X)
yield result
@staticmethod
def _score_to_proba(score):
result = numpy.zeros([len(score), 2], dtype=float)
result[:, 1] = expit(score / 100.)
result[:, 0] = 1. - result[:, 1]
return result
def _proba_to_score(self, proba):
# for init_estimator
return numpy.clip(logit(proba[:, 1]), -5., 5.)
def predict(self, X):
return numpy.argmax(self.predict_proba(X), axis=1)
def predict_proba(self, X):
return self._score_to_proba(self.decision_function(X))
def staged_predict_proba(self, X):
for score in self.staged_decision_function(X):
yield self._score_to_proba(score)
def test_gradient_boosting(size=100, n_features=10):
trainX, trainY = generate_sample(size, n_features)
testX, testY = generate_sample(size, n_features)
for loss in [AdaLossFunction()]:
for update in ['all', 'same', 'other', 'random']:
gb = GradientBoosting(loss=loss, update_on=update, smearing=[0.1, -0.1])
score = gb.fit(trainX, trainY).score(testX, testY)
print(update, score)
test_gradient_boosting()
#endregion
#region Reweighters
def normalize_weight(y, weights, sig_weight=1., pow_sig=1., pow_bg=1.):
result = numpy.copy(weights)
assert numpy.all((y == 0) | (y == 1)), 'Supports only two classes'
result[y == 1] **= pow_sig
result[y == 0] **= pow_bg
result[y == 1] /= numpy.mean(result[y == 1]) / sig_weight
result[y == 0] /= numpy.mean(result[y == 0])
return result
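# Hedged usage sketch (added for illustration, not part of the original file): never called,
# it only shows what normalize_weight() does to a toy label/weight pair.
def _normalize_weight_example():
    y = numpy.array([1, 1, 0, 0])
    weights = numpy.array([2., 4., 1., 3.])
    # With sig_weight=0.5 the signal weights come out with mean 0.5 and the background
    # weights with mean 1.0, which is how ReweightingGB.fit below rescales its sample weights.
    return normalize_weight(y, weights, sig_weight=0.5)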
class ReweightingGB(GradientBoosting):
def __init__(self, loss,
sig_weight=1., pow_sig=1., pow_bg=1.,
n_estimators=10, learning_rate=1., max_depth=None, min_samples_leaf=5, min_samples_split=2,
max_features='auto', criterion='mse',
subsample=1., splitter='best', weights_in_loss=True, update_tree=True,
update_on='all', smearing=0.01,
init_estimator=None, init_smearing=0.05, recount_step=1000, random_state=None):
GradientBoosting.__init__(self, loss=loss, n_estimators=n_estimators, learning_rate=learning_rate,
max_depth=max_depth, min_samples_leaf=min_samples_leaf,
min_samples_split=min_samples_split, max_features=max_features, criterion=criterion,
subsample=subsample, splitter=splitter, weights_in_loss=weights_in_loss,
update_on=update_on, update_tree=update_tree, random_state=random_state,
recount_step=recount_step,
smearing=smearing)
# Everything should be set via set_params
self.sig_weight = sig_weight
self.pow_bg = pow_bg
self.pow_sig = pow_sig
def fit(self, X, y, sample_weight=None):
sample_weight = normalize_weight(y, sample_weight, sig_weight=self.sig_weight, pow_sig=self.pow_sig,
pow_bg=self.pow_bg)
return GradientBoosting.fit(self, X, y, sample_weight=sample_weight)
base_gb = ReweightingGB(loss=AdaLossFunction())
base_gb.set_params(loss__signal_curvature=0.7, learning_rate=0.03, min_samples_leaf=125, n_estimators=400,
smearing=0.01, max_features=13, update_tree=True, max_depth=16, subsample=0.5,
sig_weight=0.1, weights_in_loss=False, update_on='all')
base_gb_short = ReweightingGB(loss=AdaLossFunction())
base_gb_short.set_params(loss__signal_curvature=0.7, learning_rate=0.03, min_samples_leaf=150, n_estimators=500,
smearing=0.0, max_features=16, update_tree=True, max_depth=14, subsample=0.4,
sig_weight=0.1, weights_in_loss=False, update_on='all')
base_gb_no_shuffle = ReweightingGB(loss=AdaLossFunction())
base_gb_no_shuffle.set_params(loss__signal_curvature=0.7, learning_rate=0.03, min_samples_leaf=125, n_estimators=250,
smearing=0., max_features=13, update_tree=True, max_depth=16, subsample=0.5,
sig_weight=0.1, weights_in_loss=False, update_on='all')
base_gb_test = ReweightingGB(loss=AdaLossFunction())
base_gb_test.set_params(loss__signal_curvature=0.7, learning_rate=0.03, min_samples_leaf=125, n_estimators=1,
smearing=0.01, max_features=15, update_tree=True, max_depth=16, subsample=0.5,
sig_weight=0.1, weights_in_loss=False, update_on='all')
#endregion
"""
import gradient_boosting as gb
data, y, w = gb.get_higgs_data()
voter = gb.base_gb
voter.set_params(n_estimators=10)
voter.fit(gb.enhance_data(data), y, w)
"""
|
mit
|
JackKelly/neuralnilm_prototype
|
scripts/e385.py
|
4
|
6024
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
"""
e370
longer seq
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 500
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.9,
one_target_per_seq=False,
n_seq_per_batch=128,
# subsample_target=4,
include_diff=False,
include_power=True,
clip_appliance_power=True,
target_is_prediction=False,
independently_center_inputs=True,
standardise_input=True,
unit_variance_targets=True,
# input_padding=2,
lag=0
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=0.01,
learning_rate_changes_by_iteration={
# 500: 1e-5,
# 1500: 1e-6
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
# auto_reshape=False,
# plotter=CentralOutputPlotter
# plotter=MDNPlotter
layers_config=[
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': rectify,
'W_hid_to_hid': Identity(scale=0.1),
'W_in_to_hid': Normal(std=1)
},
{
'type': BidirectionalRecurrentLayer,
'num_units': 40,
'gradient_steps': GRADIENT_STEPS,
'nonlinearity': rectify,
'W_hid_to_hid': Identity(scale=0.1),
'W_in_to_hid': Normal(std=1/sqrt(40))
}
]
)
def exp_a(name):
global source
# source_dict_copy = deepcopy(source_dict)
# source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'].extend([
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': T.nnet.softplus,
'W': Normal(std=1/sqrt(40))
}
])
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
|
mit
|
ChanChiChoi/scikit-learn
|
examples/neighbors/plot_classification.py
|
287
|
1790
|
"""
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
|
bsd-3-clause
|
pprett/statsmodels
|
statsmodels/examples/tsa/arma_plots.py
|
4
|
2478
|
'''Plot acf and pacf for some ARMA(1,1)
'''
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.tsa.arima_process as tsp
from statsmodels.sandbox.tsa.fftarma import ArmaFft as FftArmaProcess
import statsmodels.tsa.stattools as tss
from statsmodels.graphics.tsaplots import plotacf
np.set_printoptions(precision=2)
arcoefs = [0.9, 0., -0.5] #[0.9, 0.5, 0.1, 0., -0.5]
macoefs = [0.9, 0., -0.5] #[0.9, 0.5, 0.1, 0., -0.5]
nsample = 1000
nburnin = 1000
sig = 1
fig = plt.figure(figsize=(8, 13))
fig.suptitle('ARMA: Autocorrelation (left) and Partial Autocorrelation (right)')
subplotcount = 1
nrows = 4
for arcoef in arcoefs[:-1]:
for macoef in macoefs[:-1]:
ar = np.r_[1., -arcoef]
ma = np.r_[1., macoef]
#y = tsp.arma_generate_sample(ar,ma,nsample, sig, burnin)
#armaprocess = FftArmaProcess(ar, ma, nsample) #TODO: make n optional
#armaprocess.plot4()
armaprocess = tsp.ArmaProcess(ar, ma)
acf = armaprocess.acf(20)[:20]
pacf = armaprocess.pacf(20)[:20]
ax = fig.add_subplot(nrows, 2, subplotcount)
plotacf(acf, ax=ax)
## ax.set_title('Autocorrelation \nar=%s, ma=%rs' % (ar, ma),
## size='xx-small')
ax.text(0.7, 0.6, 'ar =%s \nma=%s' % (ar, ma),
transform=ax.transAxes,
horizontalalignment='left', #'right',
size='xx-small')
ax.set_xlim(-1,20)
subplotcount +=1
ax = fig.add_subplot(nrows, 2, subplotcount)
plotacf(pacf, ax=ax)
## ax.set_title('Partial Autocorrelation \nar=%s, ma=%rs' % (ar, ma),
## size='xx-small')
ax.text(0.7, 0.6, 'ar =%s \nma=%s' % (ar, ma),
transform=ax.transAxes,
horizontalalignment='left', #'right',
size='xx-small')
ax.set_xlim(-1,20)
subplotcount +=1
axs = fig.axes
### turn off the 2nd column y tick labels
##for ax in axs[1::2]:#[:,1].flat:
## for label in ax.get_yticklabels(): label.set_visible(False)
# turn off all but the bottom xtick labels
for ax in axs[:-2]:#[:-1,:].flat:
for label in ax.get_xticklabels(): label.set_visible(False)
# use a MaxNLocator on the first column y axis if you have a bunch of
# rows to avoid bunching; example below uses at most 3 ticks
import matplotlib.ticker as mticker
for ax in axs: #[::2]:#[:,1].flat:
ax.yaxis.set_major_locator( mticker.MaxNLocator(3 ))
plt.show()
|
bsd-3-clause
|
Lawrence-Liu/scikit-learn
|
sklearn/tests/test_metaestimators.py
|
226
|
4954
|
"""Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
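    # Added note (not in the original test): the property raises AttributeError exactly when
    # the wrapped method is the one marked as hidden, so hasattr(obj, method) returns False;
    # that is the mechanism the assertions below rely on to hide a single method at a time.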
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
|
bsd-3-clause
|
bmazin/ARCONS-pipeline
|
examples/Pal2014-J0337/plotLightCurve.py
|
1
|
1690
|
import numpy as np
import matplotlib.pyplot as plt
import figureHeader
def plotPulseProfile(phaseBinEdges,pulseProfile,profileErrors=None,plotDoublePulse=True,ax=None,**kwargs):
label = kwargs.pop('label','')
if plotDoublePulse:
doublePhaseBinEdges = np.concatenate([phaseBinEdges,phaseBinEdges[1:]+1.])
doubleSteppedPulseProfile = np.concatenate([pulseProfile,pulseProfile,[pulseProfile[-1]]])
ax.plot(doublePhaseBinEdges,doubleSteppedPulseProfile,drawstyle='steps-post',label=label,**kwargs)
if not (profileErrors is None):
doublePulseProfile = np.concatenate([pulseProfile,pulseProfile])
doubleProfileErrors = np.concatenate([profileErrors,profileErrors])
doubleBinCenters = doublePhaseBinEdges[0:-1]+np.diff(doublePhaseBinEdges)/2.
ax.errorbar(doubleBinCenters,doublePulseProfile,yerr=doubleProfileErrors,linestyle='',**kwargs)
else:
steppedPulseProfile = np.concatenate([pulseProfile,[pulseProfile[-1]]])
ax.plot(phaseBinEdges,steppedPulseProfile,drawstyle='steps-post',label=label,**kwargs)
if not (profileErrors is None):
binCenters = phaseBinEdges[0:-1]+np.diff(phaseBinEdges)/2.
ax.errorbar(binCenters,pulseProfile,yerr=profileErrors,linestyle='',**kwargs)
lcData = np.load('lightcurvePlot.npz')
phaseBinEdges = lcData['phaseBinEdges']
phaseProfile = lcData['phaseProfile']
profileErrors = lcData['profileErrors']
fig,ax = plt.subplots()
plotPulseProfile(phaseBinEdges,phaseProfile,profileErrors,color='k',plotDoublePulse=False,ax=ax,linewidth=1.2)
ax.set_xlabel('phase')
ax.set_ylabel('counts')
fig.savefig('lightcurve.eps')
plt.show()
|
gpl-2.0
|
muku42/bokeh
|
bokeh/charts/builder/tests/test_step_builder.py
|
4
|
2479
|
""" This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Step
from bokeh.util.testing import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestStep(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
xyvalues['python'] = [2, 3, 7, 5, 26]
xyvalues['pypy'] = [12, 33, 47, 15, 126]
xyvalues['jython'] = [22, 43, 10, 25, 26]
xyvaluesdf = pd.DataFrame(xyvalues)
y_python = [ 2., 2., 3., 3., 7., 7., 5., 5., 26.]
y_jython = [ 22., 22.,43., 43., 10., 10., 25., 25., 26.]
y_pypy = [ 12., 12., 33., 33., 47., 47., 15., 15., 126.]
x = [0, 1, 1, 2, 2, 3, 3, 4, 4]
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Step, _xy)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['x'], x)
assert_array_equal(builder._data['y_python'], y_python)
assert_array_equal(builder._data['y_jython'], y_jython)
assert_array_equal(builder._data['y_pypy'], y_pypy)
lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(Step, _xy)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['y_0'], y_python)
assert_array_equal(builder._data['y_1'], y_pypy)
assert_array_equal(builder._data['y_2'], y_jython)
|
bsd-3-clause
|
plablo09/geo_context
|
roc_curve.py
|
1
|
3990
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.decomposition import PCA as sklearnPCA
from sklearn import svm
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
import matplotlib.pyplot as plt
from scipy import interp
#set random state for comparability
random_state = np.random.RandomState(0)
#this function performs stratified k-folds and plots roc curves
def plot_roc(predictor, target):
cv = StratifiedKFold(target, n_folds=6)
classifier = svm.SVC(probability=True,random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
#all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(predictor[train],
target[train]).predict_proba(predictor[test])
        # Compute ROC curve and area under the curve
fpr, tpr, thresholds = roc_curve(target[test],
probas_[:, 1],pos_label=3.0)
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic geo_context')
plt.legend(loc="lower right")
plt.show()
#read data
context = pd.read_csv('context_nuevo.csv')
#select variable columns
cols_select = context.columns[6:]
variables = context.ix[:,cols_select]
for c in ['no_se','uname','cont','lat','lon','geom','cve_mza']:
del variables[c]
#reclass intervalo as numerical
def intervalo_to_numbers(x):
    equiv = {'mon': 1, 'tue': 2, 'wed': 3, 'thu': 4, 'fri': 5, 'sat': 6, 'sun': 7}  # duplicate 'sun' key removed; Python kept the later value (7)
interval = 0.16666*int(x.split('.')[1])
day = x.split('.')[0]
valor = equiv[day] + interval
return valor
reclass = variables['intervalo'].apply(intervalo_to_numbers)
#drop old 'intervalo' column and replace it with numerical values
del variables['intervalo']
variables = variables.join(reclass,how='inner')
#Get dataframe as matrix and scale it:
data = variables.as_matrix()
Y = data[:,0]
X = data[:,1:]
scaled_X = preprocessing.scale(X)
#Perform PCA analysis
pca = sklearnPCA(n_components=0.80,whiten=True)
pca_transform = pca.fit_transform(scaled_X)
pca_transform.shape
#Stratified k-fold
#Get only positive and negative classes, first with original data
X_bin, Y_bin = scaled_X[Y != 2], Y[Y != 2]
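# Hedged note (added, not in the original script): plot_roc() defined above is never invoked
# here; a plausible call on the binary subset prepared above would be
#   plot_roc(X_bin, Y_bin)
# which runs the stratified 6-fold loop and draws the per-fold and mean ROC curves.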
#Same with PCA reduced data:
#data = variables.as_matrix()
#Y_pca = pca_transform[:,0]
#X_pca = pca_transform[:,1:]
#X_pca_bin, Y_pca_bin = X_pca[Y != 2], Y[Y != 2]
#cv_pca = StratifiedKFold(Y_pca_bin, n_folds=6)
#for i, (train, test) in enumerate(cv_pca):
# probas_ = classifier.fit(X_pca_bin[train], Y_bin[train]).predict_proba(X_pca_bin[test])
# # Compute ROC curve and area the curve
# fpr, tpr, thresholds = roc_curve(Y_bin[test], probas_[:, 1],pos_label=3.0)
# mean_tpr += interp(mean_fpr, fpr, tpr)
# mean_tpr[0] = 0.0
# roc_auc = auc(fpr, tpr)
# plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
#
#plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
#
#mean_tpr /= len(cv_pca)
#mean_tpr[-1] = 1.0
#mean_auc = auc(mean_fpr, mean_tpr)
#plt.plot(mean_fpr, mean_tpr, 'k--',
# label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
#
#plt.xlim([-0.05, 1.05])
#plt.ylim([-0.05, 1.05])
#plt.xlabel('False Positive Rate')
#plt.ylabel('True Positive Rate')
#plt.title('Receiver operating characteristic geo_context')
#plt.legend(loc="lower right")
#plt.show()
|
apache-2.0
|
kevin-intel/scikit-learn
|
sklearn/gaussian_process/tests/test_gpc.py
|
3
|
10065
|
"""Testing for Gaussian process classification """
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy.optimize import approx_fprime
import pytest
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels \
import RBF, ConstantKernel as C, WhiteKernel
from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils._testing \
import assert_almost_equal, assert_array_equal
def f(x):
return np.sin(x)
X = np.atleast_2d(np.linspace(0, 10, 30)).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = np.array(f(X).ravel() > 0, dtype=int)
fX = f(X).ravel()
y_mc = np.empty(y.shape, dtype=int) # multi-class
y_mc[fX < -0.35] = 0
y_mc[(fX >= -0.35) & (fX < 0.35)] = 1
y_mc[fX > 0.35] = 2
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=0.1), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))]
non_fixed_kernels = [kernel for kernel in kernels
if kernel != fixed_kernel]
@pytest.mark.parametrize('kernel', kernels)
def test_predict_consistent(kernel):
# Check binary predict decision has also predicted probability above 0.5.
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_array_equal(gpc.predict(X),
gpc.predict_proba(X)[:, 1] >= 0.5)
def test_predict_consistent_structured():
# Check binary predict decision has also predicted probability above 0.5.
X = ['A', 'AB', 'B']
y = np.array([True, False, True])
kernel = MiniSeqKernel(baseline_similarity_bounds='fixed')
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_array_equal(gpc.predict(X),
gpc.predict_proba(X)[:, 1] >= 0.5)
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_lml_improving(kernel):
# Test that hyperparameter-tuning improves log-marginal likelihood.
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert (gpc.log_marginal_likelihood(gpc.kernel_.theta) >
gpc.log_marginal_likelihood(kernel.theta))
@pytest.mark.parametrize('kernel', kernels)
def test_lml_precomputed(kernel):
# Test that lml of optimized kernel is stored correctly.
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_almost_equal(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(), 7)
@pytest.mark.parametrize('kernel', kernels)
def test_lml_without_cloning_kernel(kernel):
    # Test that clone_kernel=False has side-effects on kernel.theta.
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
input_theta = np.ones(gpc.kernel_.theta.shape, dtype=np.float64)
gpc.log_marginal_likelihood(input_theta, clone_kernel=False)
assert_almost_equal(gpc.kernel_.theta, input_theta, 7)
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_converged_to_local_maximum(kernel):
# Test that we are in local maximum after hyperparameter-optimization.
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpc.log_marginal_likelihood(gpc.kernel_.theta, True)
assert np.all((np.abs(lml_gradient) < 1e-4) |
(gpc.kernel_.theta == gpc.kernel_.bounds[:, 0]) |
(gpc.kernel_.theta == gpc.kernel_.bounds[:, 1]))
@pytest.mark.parametrize('kernel', kernels)
def test_lml_gradient(kernel):
# Compare analytic and numeric gradient of log marginal likelihood.
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpc.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the log marginal likelihood of the chosen theta.
n_samples, n_features = 25, 2
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1e-3] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features)
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessClassifier(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert lml > last_lml - np.finfo(np.float32).eps
last_lml = lml
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_custom_optimizer(kernel):
# Test that GPC can use externally defined optimizers.
# Define a dummy optimizer that simply tests 10 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(10):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer)
gpc.fit(X, y_mc)
# Checks that optimizer improved marginal likelihood
assert (gpc.log_marginal_likelihood(gpc.kernel_.theta) >
gpc.log_marginal_likelihood(kernel.theta))
@pytest.mark.parametrize('kernel', kernels)
def test_multi_class(kernel):
# Test GPC for multi-class classification problems.
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
assert_almost_equal(y_prob.sum(1), 1)
y_pred = gpc.predict(X2)
assert_array_equal(np.argmax(y_prob, 1), y_pred)
@pytest.mark.parametrize('kernel', kernels)
def test_multi_class_n_jobs(kernel):
# Test that multi-class GPC produces identical results with n_jobs>1.
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2)
gpc_2.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
y_prob_2 = gpc_2.predict_proba(X2)
assert_almost_equal(y_prob, y_prob_2)
def test_warning_bounds():
kernel = RBF(length_scale_bounds=[1e-5, 1e-3])
gpc = GaussianProcessClassifier(kernel=kernel)
warning_message = (
"The optimal value found for dimension 0 of parameter "
"length_scale is close to the specified upper bound "
"0.001. Increasing the bound and calling fit again may "
"find a better value."
)
with pytest.warns(ConvergenceWarning, match=warning_message):
gpc.fit(X, y)
kernel_sum = (WhiteKernel(noise_level_bounds=[1e-5, 1e-3]) +
RBF(length_scale_bounds=[1e3, 1e5]))
gpc_sum = GaussianProcessClassifier(kernel=kernel_sum)
with pytest.warns(None) as record:
with warnings.catch_warnings():
# scipy 1.3.0 uses tostring which is deprecated in numpy
warnings.filterwarnings("ignore", "tostring", DeprecationWarning)
gpc_sum.fit(X, y)
assert len(record) == 2
assert record[0].message.args[0] == ("The optimal value found for "
"dimension 0 of parameter "
"k1__noise_level is close to the "
"specified upper bound 0.001. "
"Increasing the bound and calling "
"fit again may find a better value.")
assert record[1].message.args[0] == ("The optimal value found for "
"dimension 0 of parameter "
"k2__length_scale is close to the "
"specified lower bound 1000.0. "
"Decreasing the bound and calling "
"fit again may find a better value.")
X_tile = np.tile(X, 2)
kernel_dims = RBF(length_scale=[1., 2.],
length_scale_bounds=[1e1, 1e2])
gpc_dims = GaussianProcessClassifier(kernel=kernel_dims)
with pytest.warns(None) as record:
with warnings.catch_warnings():
# scipy 1.3.0 uses tostring which is deprecated in numpy
warnings.filterwarnings("ignore", "tostring", DeprecationWarning)
gpc_dims.fit(X_tile, y)
assert len(record) == 2
assert record[0].message.args[0] == ("The optimal value found for "
"dimension 0 of parameter "
"length_scale is close to the "
"specified upper bound 100.0. "
"Increasing the bound and calling "
"fit again may find a better value.")
assert record[1].message.args[0] == ("The optimal value found for "
"dimension 1 of parameter "
"length_scale is close to the "
"specified upper bound 100.0. "
"Increasing the bound and calling "
"fit again may find a better value.")
|
bsd-3-clause
|
jpo/healthcareai-py
|
healthcareai/common/transformers.py
|
1
|
9289
|
"""Transformers
This module contains transformers for preprocessing data. Most operate on DataFrames and are named appropriately.
"""
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from sklearn.preprocessing import StandardScaler
class DataFrameImputer(TransformerMixin):
"""
Impute missing values in a dataframe.
Columns of dtype object or category (assumed categorical) are imputed with the mode (most frequent value in column).
    Columns of other types (assumed continuous) are imputed with the mean of the column.
"""
def __init__(self, impute=True, verbose=True):
self.impute = impute
self.object_columns = None
self.fill = None
self.verbose = verbose
def fit(self, X, y=None):
# Return if not imputing
if self.impute is False:
return self
# Grab list of object column names before doing imputation
self.object_columns = X.select_dtypes(include=['object']).columns.values
self.fill = pd.Series([X[c].value_counts().index[0]
if X[c].dtype == np.dtype('O')
or pd.core.common.is_categorical_dtype(X[c])
else X[c].mean() for c in X], index=X.columns)
if self.verbose:
num_nans = sum(X.select_dtypes(include=[np.number]).isnull().sum())
num_total = sum(X.select_dtypes(include=[np.number]).count())
percentage_imputed = num_nans / num_total * 100
print("Percentage Imputed: %.2f%%" % percentage_imputed)
print("Note: Impute will always happen on prediction dataframe, otherwise rows are dropped, and will lead "
"to missing predictions")
# return self for scikit compatibility
return self
def transform(self, X, y=None):
# Return if not imputing
if self.impute is False:
return X
result = X.fillna(self.fill)
for i in self.object_columns:
if result[i].dtype not in ['object', 'category']:
result[i] = result[i].astype('object')
return result
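# Hedged usage sketch (added for illustration, not part of the original module): a throwaway
# helper showing the imputation behaviour on a toy frame; it is never called anywhere.
def _data_frame_imputer_example():
    toy = pd.DataFrame({'color': ['red', None, 'red'], 'age': [1.0, 2.0, None]})
    # 'color' (object dtype) is filled with its mode 'red'; 'age' with its mean 1.5.
    return DataFrameImputer(verbose=False).fit_transform(toy)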
class DataFrameConvertTargetToBinary(TransformerMixin):
# TODO Note that this makes healthcareai only handle N/Y in pred column
"""
    Convert a classification model's predicted column to 0/1 (otherwise it won't work with GridSearchCV). Passes data
    through unchanged for regression models. Handling both cases here keeps the data pipeline logic simple, even though
    the pipeline itself may be a more appropriate home for this logic.
    Note that this makes healthcareai only handle N/Y in the predicted column.
"""
def __init__(self, model_type, target_column):
self.model_type = model_type
self.target_column = target_column
def fit(self, X, y=None):
# return self for scikit compatibility
return self
def transform(self, X, y=None):
# TODO: put try/catch here when type = class and predictor is numeric
# TODO this makes healthcareai only handle N/Y in pred column
if self.model_type == 'classification':
# Turn off warning around replace
pd.options.mode.chained_assignment = None # default='warn'
# Replace 'Y'/'N' with 1/0
X[self.target_column].replace(['Y', 'N'], [1, 0], inplace=True)
return X
class DataFrameCreateDummyVariables(TransformerMixin):
"""Convert all categorical columns into dummy/indicator variables. Exclude given columns."""
def __init__(self, excluded_columns=None):
self.excluded_columns = excluded_columns
def fit(self, X, y=None):
# return self for scikit compatibility
return self
def transform(self, X, y=None):
        columns_to_dummify = list(X.select_dtypes(include=[object, 'category']).columns)
        # remove excluded columns (if they are still in the list); guard against None
        if self.excluded_columns is not None:
            columns_to_dummify = [c for c in columns_to_dummify if c not in self.excluded_columns]
# Create dummy variables
X = pd.get_dummies(X, columns=columns_to_dummify, drop_first=True, prefix_sep='.')
return X
class DataFrameConvertColumnToNumeric(TransformerMixin):
"""Convert a column into numeric variables."""
def __init__(self, column_name):
self.column_name = column_name
def fit(self, X, y=None):
# return self for scikit compatibility
return self
def transform(self, X, y=None):
X[self.column_name] = pd.to_numeric(arg=X[self.column_name], errors='raise')
return X
class DataFrameUnderSampling(TransformerMixin):
"""
Performs undersampling on a dataframe.
    Must be done BEFORE the train/test split so that both the training and test sets are drawn from the under/over sampled dataset.
Must be done AFTER imputation, since under/over sampling will not work with missing values (imblearn requires target
column to be converted to numerical values)
"""
def __init__(self, predicted_column, random_seed=0):
self.random_seed = random_seed
self.predicted_column = predicted_column
def fit(self, X, y=None):
# return self for scikit compatibility
return self
def transform(self, X, y=None):
# TODO how do we validate this happens before train/test split? Or do we need to? Can we implement it in the
# TODO simple trainer in the correct order and leave this to advanced users?
# Extract predicted column
y = np.squeeze(X[[self.predicted_column]])
# Copy the dataframe without the predicted column
temp_dataframe = X.drop([self.predicted_column], axis=1)
# Initialize and fit the under sampler
under_sampler = RandomUnderSampler(random_state=self.random_seed)
x_under_sampled, y_under_sampled = under_sampler.fit_sample(temp_dataframe, y)
# Build the resulting under sampled dataframe
result = pd.DataFrame(x_under_sampled)
# Restore the column names
result.columns = temp_dataframe.columns
# Restore the y values
y_under_sampled = pd.Series(y_under_sampled)
result[self.predicted_column] = y_under_sampled
return result
class DataFrameOverSampling(TransformerMixin):
"""
Performs oversampling on a dataframe.
    Must be done BEFORE the train/test split so that both the training and test sets are drawn from the under/over sampled dataset.
Must be done AFTER imputation, since under/over sampling will not work with missing values (imblearn requires target
column to be converted to numerical values)
"""
def __init__(self, predicted_column, random_seed=0):
self.random_seed = random_seed
self.predicted_column = predicted_column
def fit(self, X, y=None):
# return self for scikit compatibility
return self
def transform(self, X, y=None):
# TODO how do we validate this happens before train/test split? Or do we need to? Can we implement it in the
# TODO simple trainer in the correct order and leave this to advanced users?
# Extract predicted column
y = np.squeeze(X[[self.predicted_column]])
# Copy the dataframe without the predicted column
temp_dataframe = X.drop([self.predicted_column], axis=1)
        # Initialize and fit the over sampler
over_sampler = RandomOverSampler(random_state=self.random_seed)
x_over_sampled, y_over_sampled = over_sampler.fit_sample(temp_dataframe, y)
        # Build the resulting over sampled dataframe
result = pd.DataFrame(x_over_sampled)
# Restore the column names
result.columns = temp_dataframe.columns
# Restore the y values
y_over_sampled = pd.Series(y_over_sampled)
result[self.predicted_column] = y_over_sampled
return result
class DataFrameDropNaN(TransformerMixin):
"""Remove NaN values. Columns that are NaN or None are removed."""
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
# Uses pandas.DataFrame.dropna function where axis=1 is column action, and
# how='all' requires all the values to be NaN or None to be removed.
return X.dropna(axis=1, how='all')
class DataFrameFeatureScaling(TransformerMixin):
"""Scales numeric features. Columns that are numerics are scaled, or otherwise specified."""
def __init__(self, columns_to_scale=None, reuse=None):
self.columns_to_scale = columns_to_scale
self.reuse = reuse
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
        # If another DataFrameFeatureScaling was passed in for reuse, delegate to its fit_transform
if self.reuse:
return self.reuse.fit_transform(X, y)
# Check if we know what columns to scale, if not, then get all the numeric columns' names
if not self.columns_to_scale:
self.columns_to_scale = list(X.select_dtypes(include=[np.number]).columns)
X[self.columns_to_scale] = StandardScaler().fit_transform(X[self.columns_to_scale])
return X
|
mit
|
BlueBrain/NEST
|
testsuite/manualtests/stdp_check.py
|
13
|
4713
|
# -*- coding: utf-8 -*-
#
# stdp_check.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from matplotlib.pylab import *
# Test script to reproduce changes in weight of a STDP synapse in an event-driven way.
# Pre- and post-synaptic spike trains are read in from spike_detector-0-0-3.gdf
# (output of test_stdp_poiss.sli).
# output: pre/post \t spike time \t weight
#
# Synaptic dynamics for STDP synapses according to Abigail Morrison's
# STDP model (see stdp_rec.pdf).
#
# first version: Moritz Helias, april 2006
# adapted to python MH, SK, May 2008
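# Added summary (not in the original file) of the pair-based rule implemented below: every
# spike first decays the traces K_plus and K_minus by exp(-dt/tau_plus) and exp(-dt/tau_minus);
# a presynaptic spike depresses w/w_max by lmbd * alpha * (w/w_max)**mu_minus * K_minus and then
# increments K_plus, while a postsynaptic spike (shifted by the dendritic delay) facilitates
# w/w_max by lmbd * (1 - w/w_max)**mu_plus * K_plus and then increments K_minus.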
def stdp(w_init, w_max, pre_spikes, post_spikes, alpha, mu_plus, mu_minus, lmbd, tau_plus, tau_minus, delay):
w = w_init # initial weight
i = 0 # index of next presynaptic spike
j = 0 # index of next postsynaptic spike
K_plus = 0.
K_minus = 0.
last_t = 0.
advance = True
while advance:
advance = False
# next spike is presynaptic
if pre_spikes[i] < post_spikes[j]:
dt = pre_spikes[i] - last_t
# evolve exponential filters
K_plus *= exp(-dt/tau_plus)
K_minus *= exp(-dt/tau_minus)
# depression
w = w/w_max - lmbd * alpha * (w/w_max)**mu_minus * K_minus
if w > 0.:
w *= w_max
else:
w = 0.
print "pre\t%.16f\t%.16f" % (pre_spikes[i],w)
K_plus += 1.
last_t = pre_spikes[i] # time evolved until here
if i < len(pre_spikes) - 1:
i += 1
advance = True
# same timing of next pre- and postsynaptic spike
elif pre_spikes[i] == post_spikes[j]:
dt = pre_spikes[i] - last_t
# evolve exponential filters
K_plus *= exp(-dt/tau_plus)
K_minus *= exp(-dt/tau_minus)
# facilitation
w = w/w_max + lmbd * (1.-w/w_max)**mu_plus * K_plus
if w < 1.:
w *= w_max
else:
w = w_max
print "post\t%.16f\t%.16f" % (post_spikes[j]-delay,w)
# depression
w = w/w_max - lmbd * alpha * (w/w_max)**mu_minus * K_minus
if w > 0.:
w *= w_max
else:
w = 0.
print "pre\t%.16f\t%.16f" % (pre_spikes[i],w)
K_plus += 1.
K_minus += 1.
last_t = pre_spikes[i] # time evolved until here
if i < len(pre_spikes) - 1:
i += 1
advance = True
if j < len(post_spikes) - 1:
j += 1
advance = True
# next spike is postsynaptic
else:
dt = post_spikes[j] - last_t
# evolve exponential filters
K_plus *= exp(-dt / tau_plus)
K_minus *= exp(-dt / tau_minus)
# facilitation
w = w/w_max + lmbd * (1.-w/w_max)**mu_plus * K_plus
if w < 1.:
w *= w_max
else:
w = w_max
print "post\t%.16f\t%.16f" % (post_spikes[j]-delay,w)
K_minus += 1.
last_t = post_spikes[j] # time evolved until here
if j < len(post_spikes) - 1:
j += 1
advance = True
return w
# stdp parameters
w_init = 35.
w_max = 70.
alpha = .95
mu_plus = .05
mu_minus = .05
lmbd = .025
tau_plus = 20.
tau_minus = 20.
# dendritic delay
delay = 1.
# load spikes from simulation with test_stdp_poiss.sli
spikes = load("spike_detector-0-0-3.gdf")
pre_spikes = spikes[find(spikes[:,0] == 5), 1]
# delay is purely dendritic
# postsynaptic spike arrives at sp_j + delay at the synapse
post_spikes = spikes[find(spikes[:,0] == 6), 1] + delay
# calculate development of stdp weight
stdp(w_init, w_max, pre_spikes, post_spikes, alpha, mu_plus, mu_minus, lmbd, tau_plus, tau_minus, delay)
|
gpl-2.0
|
harshaneelhg/scikit-learn
|
sklearn/metrics/pairwise.py
|
104
|
42995
|
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
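# Added note (not in the original module): for example, check_pairwise_arrays(X, None) returns
# (X, X) with Y pointing at the same validated array, while inputs whose second dimensions
# differ, e.g. shapes (3, 2) and (5, 4), raise the ValueError above.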
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the shapes
of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
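# Editor-added usage sketch (hypothetical helper, not part of the upstream
# scikit-learn module): it checks the quadratic expansion used above against a
# direct pairwise computation, relying only on the module-level numpy import.
def _demo_euclidean_distances():
    X = np.array([[0., 1.], [1., 1.], [2., 0.]])
    Y = np.array([[0., 0.], [1., 0.]])
    D = euclidean_distances(X, Y)
    # Direct formula: sqrt(sum((x - y) ** 2)) for every pair of rows.
    D_ref = np.sqrt(((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(axis=-1))
    assert np.allclose(D, D_ref)
    # squared=True simply skips the final square root.
    assert np.allclose(euclidean_distances(X, Y, squared=True), D_ref ** 2)
    return D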
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
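# Editor-added usage sketch (hypothetical helper, not part of the upstream
# scikit-learn module): a nearest-"center" lookup, showing that the returned
# index/distance pairs match what a full distance matrix would give.
def _demo_pairwise_distances_argmin_min():
    X = np.array([[0., 0.], [3., 3.], [10., 0.]])
    centers = np.array([[0., 1.], [9., 0.]])
    idx, dist = pairwise_distances_argmin_min(X, centers)
    # idx[i] is the row of `centers` closest to X[i]; dist[i] is that distance.
    assert list(idx) == [0, 0, 1]
    assert np.allclose(dist, [1., np.sqrt(13.), 1.])
    return idx, dist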
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Array containing points, with shape (n_samples1, n_features).
Y : array-like
Array containing points, with shape (n_samples2, n_features).
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
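# Editor-added usage sketch (hypothetical helper, not part of the upstream
# scikit-learn module): parallel vectors are at cosine distance 0, orthogonal
# vectors at distance 1, and the result is exactly 1 - cosine_similarity.
def _demo_cosine_distances():
    X = np.array([[1., 0.], [0., 1.], [2., 0.]])
    D = cosine_distances(X)
    assert np.allclose(D[0, 2], 0.)  # same direction
    assert np.allclose(D[0, 1], 1.)  # orthogonal
    assert np.allclose(D, 1. - cosine_similarity(X))
    return D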
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
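# Editor-added usage sketch (hypothetical helper, not part of the upstream
# scikit-learn module), checking the note above: on unit-normalized rows the
# paired cosine distance equals half the squared euclidean distance.
def _demo_paired_cosine_distances():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(4, 3), rng.rand(4, 3)
    d_cos = paired_cosine_distances(X, Y)
    d_euc = paired_euclidean_distances(normalize(X), normalize(Y))
    assert np.allclose(d_cos, .5 * d_euc ** 2)
    return d_cos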
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
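# Editor-added usage sketch (hypothetical helper, not part of the upstream
# scikit-learn module): verify K = (gamma * <x, y> + coef0) ** degree against
# an explicit dot-product computation.
def _demo_polynomial_kernel():
    X = np.array([[1., 2.], [0., 1.]])
    Y = np.array([[1., 1.], [2., 0.]])
    gamma, coef0, degree = 0.5, 1., 3
    K = polynomial_kernel(X, Y, degree=degree, gamma=gamma, coef0=coef0)
    assert np.allclose(K, (gamma * np.dot(X, Y.T) + coef0) ** degree)
    return K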
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
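# Editor-added usage sketch (hypothetical helper, not part of the upstream
# scikit-learn module): K(x, y) = exp(-gamma * ||x - y||^2), checked against an
# explicit broadcast computation of the squared distances.
def _demo_rbf_kernel():
    X = np.array([[0., 0.], [1., 0.]])
    Y = np.array([[0., 1.], [1., 1.]])
    gamma = 0.25
    K = rbf_kernel(X, Y, gamma=gamma)
    sq_dists = ((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(axis=-1)
    assert np.allclose(K, np.exp(-gamma * sq_dists))
    return K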
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
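# Editor-added usage sketch (hypothetical helper, not part of the upstream
# scikit-learn module), illustrating the note above: on L2-normalized rows,
# cosine similarity coincides with the linear kernel.
def _demo_cosine_similarity():
    rng = np.random.RandomState(0)
    X = rng.rand(3, 4)
    Xn = normalize(X)
    assert np.allclose(cosine_similarity(X), linear_kernel(Xn, Xn))
    return cosine_similarity(X)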
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
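# Editor-added usage sketch (hypothetical helper, not part of the upstream
# scikit-learn module): the exponentiated chi-squared kernel on two small
# non-negative "histograms", checked against the closed-form expression.
def _demo_chi2_kernel():
    X = np.array([[.2, .8], [.7, .3]])
    gamma = 1.
    K = chi2_kernel(X, gamma=gamma)
    x, y = X
    assert np.allclose(K[0, 1], np.exp(-gamma * (((x - y) ** 2) / (x + y)).sum()))
    assert np.allclose(np.diag(K), 1.)  # k(x, x) = exp(0) = 1
    return K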
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.metrics.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
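# Editor-added usage sketch (hypothetical helper, not part of the upstream
# scikit-learn module): the three main ways to choose a metric -- a
# scikit-learn string, a scipy string, and a plain Python callable.
def _demo_pairwise_distances():
    X = np.array([[0., 0.], [3., 4.]])
    D_sk = pairwise_distances(X, metric="euclidean")
    D_scipy = pairwise_distances(X, metric="chebyshev")
    D_call = pairwise_distances(X, metric=lambda a, b: np.abs(a - b).sum())
    assert np.allclose(D_sk[0, 1], 5.)     # sqrt(3**2 + 4**2)
    assert np.allclose(D_scipy[0, 1], 4.)  # max(|3|, |4|)
    assert np.allclose(D_call[0, 1], 7.)   # manhattan, via the callable
    return D_sk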
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
# If updating this dictionary, update the doc in both kernel_metrics()
# and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise distance metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
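# Editor-added usage sketch (hypothetical helper, not part of the upstream
# scikit-learn module): string metrics dispatch to the kernel functions above,
# and filter_params drops keyword arguments the chosen kernel cannot accept.
def _demo_pairwise_kernels():
    X = np.array([[0., 1.], [1., 1.]])
    K_lin = pairwise_kernels(X, metric="linear")
    assert np.allclose(K_lin, np.dot(X, X.T))
    # gamma is meaningless for the linear kernel; filter_params removes it.
    K_filtered = pairwise_kernels(X, metric="linear", filter_params=True, gamma=10.)
    assert np.allclose(K_filtered, K_lin)
    return K_lin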
|
bsd-3-clause
|
edhuckle/statsmodels
|
statsmodels/datasets/cpunish/data.py
|
25
|
2597
|
"""US Capital Punishment dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = __doc__
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """Number of state executions in 1997"""
DESCRLONG = """This data describes the number of times capital punishment is implemented
at the state level for the year 1997. The outcome variable is the number of
executions. There were executions in 17 states.
Included in the data are explanatory variables for median per capita income
in dollars, the percent of the population classified as living in poverty,
the percent of Black citizens in the population, the rate of violent
crimes per 100,000 residents for 1996, a dummy variable indicating
whether the state is in the South, and (an estimate of) the proportion
of the population with a college degree of some kind.
"""
NOTE = """::
Number of Observations - 17
Number of Variables - 7
Variable name definitions::
EXECUTIONS - Executions in 1997
INCOME - Median per capita income in 1996 dollars
PERPOVERTY - Percent of the population classified as living in poverty
PERBLACK - Percent of black citizens in the population
VC100k96 - Rate of violent crimes per 100,000 residents for 1996
SOUTH - SOUTH == 1 indicates a state in the South
DEGREE - An estimate of the proportion of the state population with a
college degree of some kind
State names are included in the data file, though not returned by load.
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the cpunish data and return a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
"""
Load the cpunish data and return a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/cpunish.csv', 'rb'), delimiter=",",
names=True, dtype=float, usecols=(1,2,3,4,5,6,7))
return data
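# Editor-added usage sketch (hypothetical helper, not part of the original
# statsmodels dataset module): fit the Poisson regression from Gill's book to
# these data; the GLM call below reflects the editor's reading of the public
# statsmodels API and is offered as an illustration only.
def _example_poisson_fit():
    import statsmodels.api as sm
    data = load_pandas()
    exog = sm.add_constant(data.exog, prepend=False)
    return sm.GLM(data.endog, exog, family=sm.families.Poisson()).fit()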
|
bsd-3-clause
|
bavardage/statsmodels
|
statsmodels/sandbox/tsa/examples/example_var.py
|
37
|
1218
|
"""
Look at some macro plots, then do some VARs and IRFs.
"""
import numpy as np
import statsmodels.api as sm
import scikits.timeseries as ts
import scikits.timeseries.lib.plotlib as tplt
from matplotlib import pyplot as plt
data = sm.datasets.macrodata.load()
data = data.data
### Create Timeseries Representations of a few vars
dates = ts.date_array(start_date=ts.Date('Q', year=1959, quarter=1),
end_date=ts.Date('Q', year=2009, quarter=3))
ts_data = data[['realgdp','realcons','cpi']].view(float).reshape(-1,3)
ts_data = np.column_stack((ts_data, (1 - data['unemp']/100) * data['pop']))
ts_series = ts.time_series(ts_data, dates)
fig = tplt.tsfigure()
fsp = fig.add_tsplot(221)
fsp.tsplot(ts_series[:,0],'-')
fsp.set_title("Real GDP")
fsp = fig.add_tsplot(222)
fsp.tsplot(ts_series[:,1],'r-')
fsp.set_title("Real Consumption")
fsp = fig.add_tsplot(223)
fsp.tsplot(ts_series[:,2],'g-')
fsp.set_title("CPI")
fsp = fig.add_tsplot(224)
fsp.tsplot(ts_series[:,3],'y-')
fsp.set_title("Employment")
# Plot real GDP
#plt.subplot(221)
#plt.plot(data['realgdp'])
#plt.title("Real GDP")
# Plot employment
#plt.subplot(222)
# Plot cpi
#plt.subplot(223)
# Plot real consumption
#plt.subplot(224)
#plt.show()
|
bsd-3-clause
|
aiguofer/bokeh
|
examples/app/gapminder/main.py
|
3
|
2668
|
# -*- coding: utf-8 -*-
import pandas as pd
from bokeh.core.properties import field
from bokeh.io import curdoc
from bokeh.layouts import layout
from bokeh.models import (
ColumnDataSource, HoverTool, SingleIntervalTicker, Slider, Button, Label,
CategoricalColorMapper,
)
from bokeh.palettes import Spectral6
from bokeh.plotting import figure
from data import process_data
fertility_df, life_expectancy_df, population_df_size, regions_df, years, regions_list = process_data()
sources = {}
region_name = regions_df.Group
region_name.name = 'region'
for year in years:
fertility = fertility_df[year]
fertility.name = 'fertility'
life = life_expectancy_df[year]
life.name = 'life'
population = population_df_size[year]
population.name = 'population'
df = pd.concat([fertility, life, population, region_name], axis=1)
df = df.fillna('NaN')
sources[year] = ColumnDataSource(df)
source = sources[years[0]]
plot = figure(x_range=(1, 9), y_range=(20, 100), title='Gapminder Data', plot_height=300)
plot.xaxis.ticker = SingleIntervalTicker(interval=1)
plot.xaxis.axis_label = "Children per woman (total fertility)"
plot.yaxis.ticker = SingleIntervalTicker(interval=20)
plot.yaxis.axis_label = "Life expectancy at birth (years)"
label = Label(x=1.1, y=18, text=str(years[0]), text_font_size='70pt', text_color='#eeeeee')
plot.add_layout(label)
color_mapper = CategoricalColorMapper(palette=Spectral6, factors=regions_list)
plot.circle(
x='fertility',
y='life',
size='population',
source=source,
fill_color={'field': 'region', 'transform': color_mapper},
fill_alpha=0.8,
line_color='#7c7e71',
line_width=0.5,
line_alpha=0.5,
legend=field('region'),
)
plot.add_tools(HoverTool(tooltips="@index", show_arrow=False, point_policy='follow_mouse'))
def animate_update():
year = slider.value + 1
if year > years[-1]:
year = years[0]
slider.value = year
def slider_update(attrname, old, new):
year = slider.value
label.text = str(year)
source.data = sources[year].data
slider = Slider(start=years[0], end=years[-1], value=years[0], step=1, title="Year")
slider.on_change('value', slider_update)
def animate():
if button.label == '► Play':
button.label = '❚❚ Pause'
curdoc().add_periodic_callback(animate_update, 200)
else:
button.label = '► Play'
curdoc().remove_periodic_callback(animate_update)
button = Button(label='► Play', width=60)
button.on_click(animate)
layout = layout([
[plot],
[slider, button],
], sizing_mode='scale_width')
curdoc().add_root(layout)
curdoc().title = "Gapminder"
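# Editor's note (hedged, not part of the original example): Bokeh server apps
# like this one are typically launched from the parent directory of the app
# folder, e.g.
#
#     bokeh serve --show gapminder
#
# which starts a Bokeh server, opens the app in a browser, and lets the slider
# and play-button callbacks above run server-side.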
|
bsd-3-clause
|
lorenzo-desantis/mne-python
|
examples/inverse/plot_make_inverse_operator.py
|
21
|
3232
|
"""
===============================================================
Assemble inverse operator and compute MNE-dSPM inverse solution
===============================================================
Assemble M/EEG, MEG, and EEG inverse operators and compute dSPM
inverse solution on an MNE evoked dataset and store the solution
in stc files for visualisation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import (make_inverse_operator, apply_inverse,
write_inverse_operator)
print(__doc__)
data_path = sample.data_path()
fname_fwd_meeg = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_fwd_eeg = data_path + '/MEG/sample/sample_audvis-eeg-oct-6-fwd.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
# Load data
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
forward_meeg = mne.read_forward_solution(fname_fwd_meeg, surf_ori=True)
noise_cov = mne.read_cov(fname_cov)
# Restrict forward solution as necessary for MEG
forward_meg = mne.pick_types_forward(forward_meeg, meg=True, eeg=False)
# Alternatively, you can just load a forward solution that is restricted
forward_eeg = mne.read_forward_solution(fname_fwd_eeg, surf_ori=True)
# make an M/EEG, MEG-only, and EEG-only inverse operators
info = evoked.info
inverse_operator_meeg = make_inverse_operator(info, forward_meeg, noise_cov,
loose=0.2, depth=0.8)
inverse_operator_meg = make_inverse_operator(info, forward_meg, noise_cov,
loose=0.2, depth=0.8)
inverse_operator_eeg = make_inverse_operator(info, forward_eeg, noise_cov,
loose=0.2, depth=0.8)
write_inverse_operator('sample_audvis-meeg-oct-6-inv.fif',
inverse_operator_meeg)
write_inverse_operator('sample_audvis-meg-oct-6-inv.fif',
inverse_operator_meg)
write_inverse_operator('sample_audvis-eeg-oct-6-inv.fif',
inverse_operator_eeg)
# Compute inverse solution
stcs = dict()
stcs['meeg'] = apply_inverse(evoked, inverse_operator_meeg, lambda2, "dSPM",
pick_ori=None)
stcs['meg'] = apply_inverse(evoked, inverse_operator_meg, lambda2, "dSPM",
pick_ori=None)
stcs['eeg'] = apply_inverse(evoked, inverse_operator_eeg, lambda2, "dSPM",
pick_ori=None)
# Save result in stc files
names = ['meeg', 'meg', 'eeg']
for name in names:
stcs[name].save('mne_dSPM_inverse-%s' % name)
###############################################################################
# View activation time-series
plt.close('all')
plt.figure(figsize=(8, 6))
for ii in range(len(stcs)):
name = names[ii]
stc = stcs[name]
plt.subplot(len(stcs), 1, ii + 1)
plt.plot(1e3 * stc.times, stc.data[::150, :].T)
plt.ylabel('%s\ndSPM value' % str.upper(name))
plt.xlabel('time (ms)')
plt.show()
|
bsd-3-clause
|
CforED/Machine-Learning
|
examples/semi_supervised/plot_label_propagation_structure.py
|
45
|
2433
|
"""
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plt.scatter(X[labels == outer, 0], X[labels == outer, 1], color='navy',
marker='s', lw=0, label="outer labeled", s=10)
plt.scatter(X[labels == inner, 0], X[labels == inner, 1], color='c',
marker='s', lw=0, label='inner labeled', s=10)
plt.scatter(X[labels == -1, 0], X[labels == -1, 1], color='darkorange',
marker='.', label='unlabeled')
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Raw data (2 classes=outer and inner)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plt.scatter(X[outer_numbers, 0], X[outer_numbers, 1], color='navy',
marker='s', lw=0, s=10, label="outer learned")
plt.scatter(X[inner_numbers, 0], X[inner_numbers, 1], color='c',
marker='s', lw=0, s=10, label="inner learned")
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
|
bsd-3-clause
|
Fireblend/scikit-learn
|
sklearn/feature_extraction/tests/test_dict_vectorizer.py
|
276
|
3790
|
# Authors: Lars Buitinck <L.J.Buitinck@uva.nl>
# Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
|
bsd-3-clause
|
liam2/larray
|
larray/tests/test_array.py
|
2
|
174871
|
# -*- coding: utf8 -*-
from __future__ import absolute_import, division, print_function
import os
import re
import sys
import pytest
import numpy as np
import pandas as pd
from collections import OrderedDict
from larray.tests.common import (inputpath, tmp_path, meta,
assert_array_equal, assert_array_nan_equal, assert_larray_equiv, assert_larray_equal,
needs_xlwings, needs_pytables, needs_xlsxwriter, needs_xlrd,
needs_python35, needs_python36, needs_python37)
from larray import (Array, LArray, Axis, LGroup, union, zeros, zeros_like, ndtest, empty, ones, eye, diag, stack,
clip, exp, where, X, mean, isnan, round, read_hdf, read_csv, read_eurostat, read_excel,
from_lists, from_string, open_excel, from_frame, sequence, nan, IGroup)
from larray.inout.pandas import from_series
from larray.core.axis import _to_ticks, _to_key
from larray.util.misc import LHDFStore
from larray.util.compat import StringIO
from larray.core.metadata import Metadata
# ================== #
# Test Value Strings #
# ================== #
def test_value_string_split():
assert_array_equal(_to_ticks('M,F'), np.asarray(['M', 'F']))
assert_array_equal(_to_ticks('M, F'), np.asarray(['M', 'F']))
def test_value_string_union():
assert union('A11,A22', 'A12,A22') == ['A11', 'A22', 'A12']
def test_value_string_range():
assert_array_equal(_to_ticks('0..115'), np.asarray(range(116)))
assert_array_equal(_to_ticks('..115'), np.asarray(range(116)))
with pytest.raises(ValueError):
_to_ticks('10..')
with pytest.raises(ValueError):
_to_ticks('..')
# ================ #
# Test Key Strings #
# ================ #
def test_key_string_nonstring():
assert _to_key(('M', 'F')) == ['M', 'F']
assert _to_key(['M', 'F']) == ['M', 'F']
def test_key_string_split():
assert _to_key('M,F') == ['M', 'F']
assert _to_key('M, F') == ['M', 'F']
assert _to_key('M,') == ['M']
assert _to_key('M') == 'M'
def test_key_string_slice_strings():
# these two examples have different results and this is fine because numeric axes do not necessarily start at 0
assert _to_key('0:115') == slice(0, 115)
assert _to_key(':115') == slice(115)
assert _to_key('10:') == slice(10, None)
assert _to_key(':') == slice(None)
# =================== #
# Test Metadata #
# =================== #
def test_read_set_update_delete_metadata(meta, tmpdir):
# __eq__
meta2 = meta.copy()
assert meta2 == meta
# set/get metadata to/from an array
arr = ndtest((3, 3))
arr.meta = meta
assert arr.meta == meta
# access item
assert arr.meta.date == meta.date
# add new item
arr.meta.city = 'London'
assert arr.meta.city == 'London'
# update item
arr.meta.city = 'Berlin'
assert arr.meta.city == 'Berlin'
# __contains__
assert 'city' in arr.meta
# delete item
del arr.meta.city
assert arr.meta == meta
# __reduce__ and __reduce_ex__
import pickle
fname = os.path.join(tmpdir.strpath, 'test_metadata.pkl')
with open(fname, 'wb') as f:
pickle.dump(meta, f)
with open(fname, 'rb') as f:
meta2 = Metadata(pickle.load(f))
assert meta2 == meta
@needs_pytables
def test_metadata_hdf(meta, tmpdir):
key = 'meta'
fname = os.path.join(tmpdir.strpath, 'test_metadata.hdf')
with LHDFStore(fname) as store:
ndtest(3).to_hdf(store, key)
meta.to_hdf(store, key)
meta2 = Metadata.from_hdf(store, key)
assert meta2 == meta
def test_meta_arg_array_creation(array):
meta_list = [('title', 'test array'), ('description', 'Array used for testing'),
('author', 'John Cleese')]
meta = Metadata(meta_list)
# meta as list
arr = Array(array.data, array.axes, meta=meta_list)
assert arr.meta == meta
# meta as OrderedDict
arr = Array(array.data, array.axes, meta=OrderedDict(meta_list))
assert arr.meta == meta
# ================ #
# Test Array #
# ================ #
# AXES
lipro = Axis(['P%02d' % i for i in range(1, 16)], 'lipro')
age = Axis('age=0..115')
sex = Axis('sex=M,F')
vla = 'A11,A12,A13,A23,A24,A31,A32,A33,A34,A35,A36,A37,A38,A41,A42,A43,A44,A45,A46,A71,A72,A73'
wal = 'A25,A51,A52,A53,A54,A55,A56,A57,A61,A62,A63,A64,A65,A81,A82,A83,A84,A85,A91,A92,A93'
bru = 'A21'
vla_str = vla
wal_str = wal
bru_str = bru
belgium = union(vla, wal, bru)
geo = Axis(belgium, 'geo')
# ARRAYS
@pytest.fixture()
def array():
data = np.arange(116 * 44 * 2 * 15).reshape(116, 44, 2, 15).astype(float)
return Array(data, axes=(age, geo, sex, lipro))
@pytest.fixture()
def small_array():
small_data = np.arange(30).reshape(2, 15)
return Array(small_data, axes=(sex, lipro))
io_1d = ndtest(3)
io_2d = ndtest("a=1..3; b=b0,b1")
io_3d = ndtest("a=1..3; b=b0,b1; c=c0..c2")
io_int_labels = ndtest("a=0..2; b=0..2; c=0..2")
io_unsorted = ndtest("a=3..1; b=b1,b0; c=c2..c0")
io_missing_values = ndtest("a=1..3; b=b0,b1; c=c0..c2", dtype=float)
io_missing_values[2, 'b0'] = nan
io_missing_values[3, 'b1'] = nan
io_narrow_missing_values = io_missing_values.copy()
io_narrow_missing_values[2, 'b1', 'c1'] = nan
def test_larray_renamed_as_array():
with pytest.warns(FutureWarning) as caught_warnings:
arr = LArray([0, 1, 2, 3], 'a=a0..a3')
assert len(caught_warnings) == 1
assert caught_warnings[0].message.args[0] == "LArray has been renamed as Array."
assert caught_warnings[0].filename == __file__
def test_ndtest():
arr = ndtest('a=a0..a2')
assert arr.shape == (3,)
assert arr.axes.names == ['a']
assert_array_equal(arr.data, np.arange(3))
# using an explicit Axis object
a = Axis('a=a0..a2')
arr = ndtest(a)
assert arr.shape == (3,)
assert arr.axes.names == ['a']
assert_array_equal(arr.data, np.arange(3))
# using a group as an axis
arr = ndtest(a[:'a1'])
assert arr.shape == (2,)
assert arr.axes.names == ['a']
assert_array_equal(arr.data, np.arange(2))
def test_getattr(array):
assert type(array.geo) == Axis
assert array.geo is geo
with pytest.raises(AttributeError):
array.geom
def test_zeros():
la = zeros((geo, age))
assert la.shape == (44, 116)
assert_array_equal(la, np.zeros((44, 116)))
def test_zeros_like(array):
la = zeros_like(array)
assert la.shape == (116, 44, 2, 15)
assert_array_equal(la, np.zeros((116, 44, 2, 15)))
def test_bool():
a = ones([2])
# ValueError: The truth value of an array with more than one element
# is ambiguous. Use a.any() or a.all()
with pytest.raises(ValueError):
bool(a)
a = ones([1])
assert bool(a)
a = zeros([1])
assert not bool(a)
a = Array(np.array(2), [])
assert bool(a)
a = Array(np.array(0), [])
assert not bool(a)
def test_iter(small_array):
l = list(small_array)
assert_array_equal(l[0], small_array['M'])
assert_array_equal(l[1], small_array['F'])
def test_keys():
arr = ndtest((2, 2))
a, b = arr.axes
keys = arr.keys()
assert list(keys) == [(a.i[0], b.i[0]), (a.i[0], b.i[1]), (a.i[1], b.i[0]), (a.i[1], b.i[1])]
assert keys[0] == (a.i[0], b.i[0])
assert keys[-1] == (a.i[1], b.i[1])
keys = arr.keys(ascending=False)
assert list(keys) == [(a.i[1], b.i[1]), (a.i[1], b.i[0]), (a.i[0], b.i[1]), (a.i[0], b.i[0])]
assert keys[0] == (a.i[1], b.i[1])
assert keys[-1] == (a.i[0], b.i[0])
keys = arr.keys(('b', 'a'))
assert list(keys) == [(b.i[0], a.i[0]), (b.i[0], a.i[1]), (b.i[1], a.i[0]), (b.i[1], a.i[1])]
assert keys[1] == (b.i[0], a.i[1])
assert keys[2] == (b.i[1], a.i[0])
keys = arr.keys(('b', 'a'), ascending=False)
assert list(keys) == [(b.i[1], a.i[1]), (b.i[1], a.i[0]), (b.i[0], a.i[1]), (b.i[0], a.i[0])]
assert keys[1] == (b.i[1], a.i[0])
assert keys[2] == (b.i[0], a.i[1])
keys = arr.keys('b')
assert list(keys) == [(b.i[0],), (b.i[1],)]
assert keys[0] == (b.i[0],)
assert keys[-1] == (b.i[1],)
keys = arr.keys('b', ascending=False)
assert list(keys) == [(b.i[1],), (b.i[0],)]
assert keys[0] == (b.i[1],)
assert keys[-1] == (b.i[0],)
def test_values():
arr = ndtest((2, 2))
a, b = arr.axes
values = arr.values()
assert list(values) == [0, 1, 2, 3]
assert values[0] == 0
assert values[-1] == 3
values = arr.values(ascending=False)
assert list(values) == [3, 2, 1, 0]
assert values[0] == 3
assert values[-1] == 0
values = arr.values(('b', 'a'))
assert list(values) == [0, 2, 1, 3]
assert values[1] == 2
assert values[2] == 1
values = arr.values(('b', 'a'), ascending=False)
assert list(values) == [3, 1, 2, 0]
assert values[1] == 1
assert values[2] == 2
values = arr.values('b')
res = list(values)
assert_larray_equal(res[0], arr['b0'])
assert_larray_equal(res[1], arr['b1'])
assert_larray_equal(values[0], arr['b0'])
assert_larray_equal(values[-1], arr['b1'])
values = arr.values('b', ascending=False)
res = list(values)
assert_larray_equal(res[0], arr['b1'])
assert_larray_equal(res[1], arr['b0'])
assert_larray_equal(values[0], arr['b1'])
assert_larray_equal(values[-1], arr['b0'])
def test_items():
arr = ndtest((2, 2))
a, b = arr.axes
items = arr.items()
assert list(items) == [((a.i[0], b.i[0]), 0), ((a.i[0], b.i[1]), 1), ((a.i[1], b.i[0]), 2), ((a.i[1], b.i[1]), 3)]
assert items[0] == ((a.i[0], b.i[0]), 0)
assert items[-1] == ((a.i[1], b.i[1]), 3)
items = arr.items(ascending=False)
assert list(items) == [((a.i[1], b.i[1]), 3), ((a.i[1], b.i[0]), 2), ((a.i[0], b.i[1]), 1), ((a.i[0], b.i[0]), 0)]
assert items[0] == ((a.i[1], b.i[1]), 3)
assert items[-1] == ((a.i[0], b.i[0]), 0)
items = arr.items(('b', 'a'))
assert list(items) == [((b.i[0], a.i[0]), 0), ((b.i[0], a.i[1]), 2), ((b.i[1], a.i[0]), 1), ((b.i[1], a.i[1]), 3)]
assert items[1] == ((b.i[0], a.i[1]), 2)
assert items[2] == ((b.i[1], a.i[0]), 1)
items = arr.items(('b', 'a'), ascending=False)
assert list(items) == [((b.i[1], a.i[1]), 3), ((b.i[1], a.i[0]), 1), ((b.i[0], a.i[1]), 2), ((b.i[0], a.i[0]), 0)]
assert items[1] == ((b.i[1], a.i[0]), 1)
assert items[2] == ((b.i[0], a.i[1]), 2)
items = arr.items('b')
items_list = list(items)
key, value = items[0]
assert key == (b.i[0],)
assert_larray_equal(value, arr['b0'])
key, value = items_list[0]
assert key == (b.i[0],)
assert_larray_equal(value, arr['b0'])
key, value = items[-1]
assert key == (b.i[1],)
assert_larray_equal(value, arr['b1'])
key, value = items_list[-1]
assert key == (b.i[1],)
assert_larray_equal(value, arr['b1'])
items = arr.items('b', ascending=False)
items_list = list(items)
key, value = items[0]
assert key == (b.i[1],)
assert_larray_equal(value, arr['b1'])
key, value = items_list[0]
assert key == (b.i[1],)
assert_larray_equal(value, arr['b1'])
key, value = items[-1]
assert key == (b.i[0],)
assert_larray_equal(value, arr['b0'])
key, value = items_list[-1]
assert key == (b.i[0],)
assert_larray_equal(value, arr['b0'])
def test_rename(array):
new_array = array.rename('sex', 'gender')
# old array axes names not modified
assert array.axes.names == ['age', 'geo', 'sex', 'lipro']
assert new_array.axes.names == ['age', 'geo', 'gender', 'lipro']
new_array = array.rename(sex, 'gender')
# old array axes names not modified
assert array.axes.names == ['age', 'geo', 'sex', 'lipro']
assert new_array.axes.names == ['age', 'geo', 'gender', 'lipro']
def test_info(array, meta):
array.meta = meta
expected = """\
title: test array
description: Array used for testing
author: John Cleese
location: Ministry of Silly Walks
office_number: 42
score: 9.7
date: 1970-03-21 00:00:00
116 x 44 x 2 x 15
age [116]: 0 1 2 ... 113 114 115
geo [44]: 'A11' 'A12' 'A13' ... 'A92' 'A93' 'A21'
sex [2]: 'M' 'F'
lipro [15]: 'P01' 'P02' 'P03' ... 'P13' 'P14' 'P15'
dtype: float64
memory used: 1.17 Mb"""
assert array.info == expected
def test_str(small_array, array):
lipro3 = lipro['P01:P03']
# zero dimension / scalar
assert str(small_array[lipro['P01'], sex['F']]) == "15"
# empty / len 0 first dimension
assert str(small_array[sex[[]]]) == "Array([])"
# one dimension
assert str(small_array[lipro3, sex['M']]) == """\
lipro P01 P02 P03
0 1 2"""
# two dimensions
assert str(small_array.filter(lipro=lipro3)) == """\
sex\\lipro P01 P02 P03
M 0 1 2
F 15 16 17"""
# four dimensions (too many rows)
assert str(array.filter(lipro=lipro3)) == """\
age geo sex\\lipro P01 P02 P03
0 A11 M 0.0 1.0 2.0
0 A11 F 15.0 16.0 17.0
0 A12 M 30.0 31.0 32.0
0 A12 F 45.0 46.0 47.0
0 A13 M 60.0 61.0 62.0
... ... ... ... ... ...
115 A92 F 153045.0 153046.0 153047.0
115 A93 M 153060.0 153061.0 153062.0
115 A93 F 153075.0 153076.0 153077.0
115 A21 M 153090.0 153091.0 153092.0
115 A21 F 153105.0 153106.0 153107.0"""
# too many columns
assert str(array['P01', 'A11', 'M']) == """\
age 0 1 2 ... 112 113 114 115
0.0 1320.0 2640.0 ... 147840.0 149160.0 150480.0 151800.0"""
arr = Array([0, ''], Axis(['a0', ''], 'a'))
assert str(arr) == "a a0 \n 0 "
def test_getitem(array):
raw = array.data
age, geo, sex, lipro = array.axes
age159 = age[[1, 5, 9]]
lipro159 = lipro['P01,P05,P09']
# LGroup at "correct" place
subset = array[age159]
assert subset.axes[1:] == (geo, sex, lipro)
assert subset.axes[0].equals(Axis([1, 5, 9], 'age'))
assert_array_equal(subset, raw[[1, 5, 9]])
# LGroup at "incorrect" place
assert_array_equal(array[lipro159], raw[..., [0, 4, 8]])
# multiple LGroup key (in "incorrect" order)
res = array[lipro159, age159]
assert res.axes.names == ['age', 'geo', 'sex', 'lipro']
assert_array_equal(res, raw[[1, 5, 9]][..., [0, 4, 8]])
# LGroup key and scalar
res = array[lipro159, 5]
assert res.axes.names == ['geo', 'sex', 'lipro']
assert_array_equal(res, raw[..., [0, 4, 8]][5])
# mixed LGroup/positional key
assert_array_equal(array[[1, 5, 9], lipro159],
raw[[1, 5, 9]][..., [0, 4, 8]])
# single None slice
assert_array_equal(array[:], raw)
# only Ellipsis
assert_array_equal(array[...], raw)
# Ellipsis and LGroup
assert_array_equal(array[..., lipro159], raw[..., [0, 4, 8]])
# string 'int..int'
assert_array_equal(array['10..13'], array['10,11,12,13'])
assert_array_equal(array['8, 10..13, 15'], array['8,10,11,12,13,15'])
# ambiguous label
arr = ndtest("a=l0,l1;b=l1,l2")
res = arr[arr.b['l1']]
assert_array_equal(res, arr.data[:, 0])
# scalar group on another axis
arr = ndtest((3, 2))
alt_a = Axis("alt_a=a1..a2")
lgroup = alt_a['a1']
assert_array_equal(arr[lgroup], arr['a1'])
pgroup = alt_a.i[0]
assert_array_equal(arr[pgroup], arr['a1'])
# key with duplicate axes
with pytest.raises(ValueError):
array[age[1, 2], age[3, 4]]
# key with lgroup from another axis leading to duplicate axis
bad = Axis(3, 'bad')
with pytest.raises(ValueError):
array[bad[1, 2], age[3, 4]]
def test_getitem_abstract_axes(array):
raw = array.data
age, geo, sex, lipro = array.axes
age159 = X.age[1, 5, 9]
lipro159 = X.lipro['P01,P05,P09']
# LGroup at "correct" place
subset = array[age159]
assert subset.axes[1:] == (geo, sex, lipro)
assert subset.axes[0].equals(Axis([1, 5, 9], 'age'))
assert_array_equal(subset, raw[[1, 5, 9]])
# LGroup at "incorrect" place
assert_array_equal(array[lipro159], raw[..., [0, 4, 8]])
# multiple LGroup key (in "incorrect" order)
assert_array_equal(array[lipro159, age159], raw[[1, 5, 9]][..., [0, 4, 8]])
# mixed LGroup/positional key
assert_array_equal(array[[1, 5, 9], lipro159], raw[[1, 5, 9]][..., [0, 4, 8]])
# single None slice
assert_array_equal(array[:], raw)
# only Ellipsis
assert_array_equal(array[...], raw)
# Ellipsis and LGroup
assert_array_equal(array[..., lipro159], raw[..., [0, 4, 8]])
# key with duplicate axes
with pytest.raises(ValueError):
array[X.age[1, 2], X.age[3]]
# key with invalid axis
with pytest.raises(ValueError):
array[X.bad[1, 2], X.age[3, 4]]
def test_getitem_anonymous_axes():
arr = ndtest([Axis(3), Axis(4)])
raw = arr.data
assert_array_equal(arr[X[0][1:]], raw[1:])
assert_array_equal(arr[X[1][2:]], raw[:, 2:])
assert_array_equal(arr[X[0][2:], X[1][1:]], raw[2:, 1:])
assert_array_equal(arr.i[2:, 1:], raw[2:, 1:])
def test_getitem_guess_axis(array):
raw = array.data
age, geo, sex, lipro = array.axes
# key at "correct" place
assert_array_equal(array[[1, 5, 9]], raw[[1, 5, 9]])
subset = array[[1, 5, 9]]
assert subset.axes[1:] == (geo, sex, lipro)
assert subset.axes[0].equals(Axis([1, 5, 9], 'age'))
assert_array_equal(subset, raw[[1, 5, 9]])
# key at "incorrect" place
assert_array_equal(array['P01,P05,P09'], raw[..., [0, 4, 8]])
assert_array_equal(array[['P01', 'P05', 'P09']], raw[..., [0, 4, 8]])
# multiple keys (in "incorrect" order)
assert_array_equal(array['P01,P05,P09', [1, 5, 9]],
raw[[1, 5, 9]][..., [0, 4, 8]])
# mixed LGroup/key
assert_array_equal(array[lipro['P01,P05,P09'], [1, 5, 9]],
raw[[1, 5, 9]][..., [0, 4, 8]])
# single None slice
assert_array_equal(array[:], raw)
# only Ellipsis
assert_array_equal(array[...], raw)
# Ellipsis and LGroup
assert_array_equal(array[..., 'P01,P05,P09'], raw[..., [0, 4, 8]])
assert_array_equal(array[..., ['P01', 'P05', 'P09']], raw[..., [0, 4, 8]])
# LGroup without axis (which also needs to be guessed)
g = LGroup(['P01', 'P05', 'P09'])
assert_array_equal(array[g], raw[..., [0, 4, 8]])
# key with duplicate axes
with pytest.raises(ValueError, match="key has several values for axis: age"):
array[[1, 2], [3, 4]]
# key with invalid label (ie label not found on any axis)
with pytest.raises(ValueError, match="999 is not a valid label for any axis"):
array[[1, 2], 999]
# key with invalid label list (ie list of labels not found on any axis)
with pytest.raises(ValueError, match=r"\[998, 999\] is not a valid label for any axis"):
array[[1, 2], [998, 999]]
# key with partial invalid list (ie list containing a label not found
# on any axis)
# FIXME: the message should be the same as for 999, 4 (ie it should NOT mention age).
with pytest.raises(ValueError, match=r"age\[3, 999\] is not a valid label for any axis"):
array[[1, 2], [3, 999]]
with pytest.raises(ValueError, match=r"\[999, 4\] is not a valid label for any axis"):
array[[1, 2], [999, 4]]
# ambiguous key
arr = ndtest("a=l0,l1;b=l1,l2")
with pytest.raises(ValueError, match=r"l1 is ambiguous \(valid in a, b\)"):
arr['l1']
# ambiguous key disambiguated via string
res = arr['b[l1]']
assert_array_equal(res, arr.data[:, 0])
def test_getitem_positional_group(array):
raw = array.data
age, geo, sex, lipro = array.axes
age159 = age.i[1, 5, 9]
lipro159 = lipro.i[0, 4, 8]
# LGroup at "correct" place
subset = array[age159]
assert subset.axes[1:] == (geo, sex, lipro)
assert subset.axes[0].equals(Axis([1, 5, 9], 'age'))
assert_array_equal(subset, raw[[1, 5, 9]])
# LGroup at "incorrect" place
assert_array_equal(array[lipro159], raw[..., [0, 4, 8]])
# multiple LGroup key (in "incorrect" order)
assert_array_equal(array[lipro159, age159],
raw[[1, 5, 9]][..., [0, 4, 8]])
# mixed LGroup/positional key
assert_array_equal(array[[1, 5, 9], lipro159],
raw[[1, 5, 9]][..., [0, 4, 8]])
# single None slice
assert_array_equal(array[:], raw)
# only Ellipsis
assert_array_equal(array[...], raw)
# Ellipsis and LGroup
assert_array_equal(array[..., lipro159], raw[..., [0, 4, 8]])
# key with duplicate axes
with pytest.raises(ValueError, match="key has several values for axis: age"):
array[age.i[1, 2], age.i[3, 4]]
def test_getitem_str_positional_group():
arr = ndtest('a=l0..l2;b=l0..l2')
a, b = arr.axes
res = arr['b.i[1]']
expected = Array([1, 4, 7], 'a=l0..l2')
assert_array_equal(res, expected)
def test_getitem_abstract_positional(array):
raw = array.data
age, geo, sex, lipro = array.axes
age159 = X.age.i[1, 5, 9]
lipro159 = X.lipro.i[0, 4, 8]
# LGroup at "correct" place
subset = array[age159]
assert subset.axes[1:] == (geo, sex, lipro)
assert subset.axes[0].equals(Axis([1, 5, 9], 'age'))
assert_array_equal(subset, raw[[1, 5, 9]])
# LGroup at "incorrect" place
assert_array_equal(array[lipro159], raw[..., [0, 4, 8]])
# multiple LGroup key (in "incorrect" order)
assert_array_equal(array[lipro159, age159],
raw[[1, 5, 9]][..., [0, 4, 8]])
# mixed LGroup/positional key
assert_array_equal(array[[1, 5, 9], lipro159],
raw[[1, 5, 9]][..., [0, 4, 8]])
# single None slice
assert_array_equal(array[:], raw)
# only Ellipsis
assert_array_equal(array[...], raw)
# Ellipsis and LGroup
assert_array_equal(array[..., lipro159], raw[..., [0, 4, 8]])
# key with duplicate axes
with pytest.raises(ValueError, match="key has several values for axis: age"):
array[X.age.i[2, 3], X.age.i[1, 5]]
def test_getitem_bool_larray_key_arr_whout_bool_axis():
arr = ndtest((3, 2, 4))
raw = arr.data
# all dimensions
res = arr[arr < 5]
assert isinstance(res, Array)
assert res.ndim == 1
assert_array_equal(res, raw[raw < 5])
# missing dimension
filter_ = arr['b1'] % 5 == 0
res = arr[filter_]
assert isinstance(res, Array)
assert res.ndim == 2
assert res.shape == (3, 2)
raw_key = raw[:, 1, :] % 5 == 0
raw_d1, raw_d3 = raw_key.nonzero()
assert_array_equal(res, raw[raw_d1, :, raw_d3])
# using an Axis object
arr = ndtest('a=a0,a1;b=0..3')
raw = arr.data
res = arr[arr.b < 2]
assert_array_equal(res, raw[:, :2])
# using an AxisReference (ExprNode)
res = arr[X.b < 2]
assert_array_equal(res, raw[:, :2])
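# Illustrative sketch (assumption: this helper is not part of the original test suite;
# its name is hypothetical and the leading underscore keeps pytest from collecting it).
# It restates the "all dimensions" case above: a boolean Array key selects the matching
# cells and flattens them into a 1-d result.
def _sketch_boolean_filtering():
    arr = ndtest((2, 3))
    res = arr[arr > 2]
    assert res.ndim == 1
    assert_array_equal(res, arr.data[arr.data > 2])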
def test_getitem_bool_larray_key_arr_wh_bool_axis():
gender = Axis([False, True], 'gender')
arr = Array([0.1, 0.2], gender)
id_axis = Axis('id=0..3')
key = Array([True, False, True, True], id_axis)
expected = Array([0.2, 0.1, 0.2, 0.2], id_axis)
# LGroup using the real axis
assert_larray_equal(arr[gender[key]], expected)
# LGroup using an AxisReference
assert_larray_equal(arr[X.gender[key]], expected)
# this test checks that the current behavior does not change unintentionally...
# ... but I am unsure the current behavior is what we actually want
msg = re.escape("boolean subset key contains more axes ({id}) than array ({gender})")
with pytest.raises(ValueError, match=msg):
arr[key]
def test_getitem_bool_larray_and_group_key():
arr = ndtest((3, 6, 4)).set_labels('b', '0..5')
# using axis
res = arr['a0,a2', arr.b < 3, 'c0:c3']
assert isinstance(res, Array)
assert res.ndim == 3
expected = arr['a0,a2', '0:2', 'c0:c3']
assert_array_equal(res, expected)
# using axis reference
res = arr['a0,a2', X.b < 3, 'c0:c3']
assert isinstance(res, Array)
assert res.ndim == 3
assert_array_equal(res, expected)
def test_getitem_bool_ndarray_key_arr_whout_bool_axis(array):
raw = array.data
res = array[raw < 5]
assert isinstance(res, Array)
assert res.ndim == 1
assert_array_equal(res, raw[raw < 5])
def test_getitem_bool_ndarray_key_arr_wh_bool_axis():
gender = Axis([False, True], 'gender')
arr = Array([0.1, 0.2], gender)
key = np.array([True, False, True, True])
expected = arr.i[[1, 0, 1, 1]]
# LGroup using the real axis
assert_larray_equal(arr[gender[key]], expected)
# LGroup using an AxisReference
assert_larray_equal(arr[X.gender[key]], expected)
# raw key => ???
# this test checks that the current behavior does not change unintentionally...
# ... but I am unsure the current behavior is what we actually want
# L? is to account for Python2 where shape can be 'long' integers
msg = r"boolean key with a different shape \(\(4L?,\)\) than array \(\(2,\)\)"
with pytest.raises(ValueError, match=msg):
arr[key]
def test_getitem_bool_anonymous_axes():
a = ndtest([Axis(2), Axis(3), Axis(4), Axis(5)])
mask = ones(a.axes[1, 3], dtype=bool)
res = a[mask]
assert res.ndim == 3
assert res.shape == (15, 2, 4)
# XXX: we might want to transpose the result to always move combined axes to the front
a = ndtest([Axis(2), Axis(3), Axis(4), Axis(5)])
mask = ones(a.axes[1, 2], dtype=bool)
res = a[mask]
assert res.ndim == 3
assert res.shape == (2, 12, 5)
def test_getitem_igroup_on_int_axis():
a = Axis('a=1..3')
arr = ndtest(a)
assert arr[a.i[1]] == 1
def test_getitem_integer_string_axes():
arr = ndtest((5, 5))
a, b = arr.axes
assert_array_equal(arr['0[a0, a2]'], arr[a['a0', 'a2']])
assert_array_equal(arr['0[a0:a2]'], arr[a['a0:a2']])
with pytest.raises(ValueError):
arr['1[a0, a2]']
assert_array_equal(arr['0.i[0, 2]'], arr[a.i[0, 2]])
assert_array_equal(arr['0.i[0:2]'], arr[a.i[0:2]])
with pytest.raises(ValueError):
arr['3.i[0, 2]']
def test_getitem_int_larray_lgroup_key():
# 'e' axis labels go from 0 to 3
arr = ndtest("c=0,1; d=0,1; e=0..3")
# key values go from 0 to 3
key = ndtest("a=0,1; b=0,1")
# this replaces 'e' axis by 'a' and 'b' axes
res = arr[X.e[key]]
assert res.shape == (2, 2, 2, 2)
assert res.axes.names == ['c', 'd', 'a', 'b']
def test_getitem_structured_key_with_groups():
arr = ndtest((3, 2))
expected = arr['a1':]
a, b = arr.axes
alt_a = Axis('a=a1..a3')
# a) slice with lgroup
# a.1) LGroup.axis from array.axes
assert_array_equal(arr[a['a1']:a['a2']], expected)
# a.2) LGroup.axis not from array.axes
assert_array_equal((arr[alt_a['a1']:alt_a['a2']]), expected)
# b) slice with igroup
# b.1) IGroup.axis from array.axes
assert_array_equal((arr[a.i[1]:a.i[2]]), expected)
# b.2) IGroup.axis not from array.axes
assert_array_equal((arr[alt_a.i[0]:alt_a.i[1]]), expected)
# c) list with LGroup
# c.1) LGroup.axis from array.axes
assert_array_equal((arr[[a['a1'], a['a2']]]), expected)
# c.2) LGroup.axis not from array.axes
assert_array_equal((arr[[alt_a['a1'], alt_a['a2']]]), expected)
# d) list with IGroup
# d.1) IGroup.axis from array.axes
assert_array_equal((arr[[a.i[1], a.i[2]]]), expected)
# d.2) IGroup.axis not from array.axes
assert_array_equal((arr[[alt_a.i[0], alt_a.i[1]]]), expected)
def test_getitem_single_larray_key_guess():
# TODO: we really need another way to get test axes, e.g. testaxes(2, 3, 4) or testaxes((2, 3, 4))
a, b, c = ndtest((2, 3, 4)).axes
arr = ndtest((a, b))
# >>> arr
# a\b b0 b1 b2
# a0 0 1 2
# a1 3 4 5
# 1) key with extra axis
key = Array(['a0', 'a1', 'a1', 'a0'], c)
# replace the target axis by the extra axis
expected = from_string(r"""
c\b b0 b1 b2
c0 0 1 2
c1 3 4 5
c2 3 4 5
c3 0 1 2""")
assert_array_equal(arr[key], expected)
# 2) key with the target axis (the one being replaced)
key = Array(['b1', 'b0', 'b2'], b)
# axis stays the same but data should be flipped/shuffled
expected = from_string(r"""
a\b b0 b1 b2
a0 1 0 2
a1 4 3 5""")
assert_array_equal(arr[key], expected)
# 2bis) key with part of the target axis (the one being replaced)
key = Array(['b2', 'b1'], 'b=b0,b1')
expected = from_string(r"""
a\b b0 b1
a0 2 1
a1 5 4""")
assert_array_equal(arr[key], expected)
# 3) key with another existing axis (not the target axis)
key = Array(['a0', 'a1', 'a0'], b)
expected = from_string("""
b b0 b1 b2
\t 0 4 2""")
assert_array_equal(arr[key], expected)
# TODO: this does not work yet but should be much easier to implement with "align" in make_np_broadcastable
# 3bis) key with *part* of another existing axis (not the target axis)
# key = Array(['a1', 'a0'], 'b=b0,b1')
# expected = from_string("""
# b b0 b1
# \t 3 1""")
# assert_array_equal(arr[key], expected)
# 4) key has both the target axis and another existing axis
# TODO: maybe we should make this work without requiring astype!
key = from_string(r"""
a\b b0 b1 b2
a0 a0 a1 a0
a1 a1 a0 a1""").astype(str)
expected = from_string(r"""
a\b b0 b1 b2
a0 0 4 2
a1 3 1 5""")
assert_array_equal(arr[key], expected)
# 5) key has both the target axis and an extra axis
key = from_string(r"""
a\c c0 c1 c2 c3
a0 a0 a1 a1 a0
a1 a1 a0 a0 a1""").astype(str)
expected = from_string(r"""
a c\b b0 b1 b2
a0 c0 0 1 2
a0 c1 3 4 5
a0 c2 3 4 5
a0 c3 0 1 2
a1 c0 3 4 5
a1 c1 0 1 2
a1 c2 0 1 2
a1 c3 3 4 5""")
assert_array_equal(arr[key], expected)
# 6) key has both another existing axis (not target) and an extra axis
key = from_string(r"""
a\c c0 c1 c2 c3
a0 b0 b1 b0 b1
a1 b2 b1 b2 b1""").astype(str)
expected = from_string(r"""
a\c c0 c1 c2 c3
a0 0 1 0 1
a1 5 4 5 4""")
assert_array_equal(arr[key], expected)
# 7) key has the target axis, another existing axis and an extra axis
key = from_string(r"""
a b\c c0 c1 c2 c3
a0 b0 a0 a1 a0 a1
a0 b1 a1 a0 a1 a0
a0 b2 a0 a1 a0 a1
a1 b0 a0 a1 a1 a0
a1 b1 a1 a1 a1 a1
a1 b2 a0 a1 a1 a0""").astype(str)
expected = from_string(r"""
a b\c c0 c1 c2 c3
a0 b0 0 3 0 3
a0 b1 4 1 4 1
a0 b2 2 5 2 5
a1 b0 0 3 3 0
a1 b1 4 4 4 4
a1 b2 2 5 5 2""")
assert_array_equal(arr[key], expected)
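# Illustrative sketch (assumption: this helper is not part of the original test suite;
# its name is hypothetical and the leading underscore keeps pytest from collecting it).
# It restates case 1) above in isolation: indexing with an Array key replaces the
# target axis ('a') by the axes of the key itself ('c').
def _sketch_array_key_replaces_axis():
    arr = ndtest("a=a0,a1;b=b0..b2")
    key = Array(['a0', 'a1', 'a1', 'a0'], Axis('c=c0..c3'))
    res = arr[key]
    # the 'a' axis is gone, replaced by the key's 'c' axis
    assert res.axes.names == ['c', 'b']
    assert_array_equal(res['c2'], arr['a1'])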
def test_getitem_multiple_larray_key_guess():
a, b, c, d, e = ndtest((2, 3, 2, 3, 2)).axes
arr = ndtest((a, b))
# >>> arr
# a\b b0 b1 b2
# a0 0 1 2
# a1 3 4 5
# 1) keys with each a different existing axis
k1 = from_string(""" a a1 a0
\t b2 b0""")
k2 = from_string(""" b b1 b2 b3
\t a0 a1 a0""")
expected = from_string(r"""b\a a1 a0
b1 2 0
b2 5 3
b3 2 0""")
assert_array_equal(arr[k1, k2], expected)
# 2) keys with a common existing axis
k1 = from_string(""" b b0 b1 b2
\t a1 a0 a1""")
k2 = from_string(""" b b0 b1 b2
\t b1 b2 b0""")
expected = from_string(""" b b0 b1 b2
\t 4 2 3""")
assert_array_equal(arr[k1, k2], expected)
# 3) keys with each a different extra axis
k1 = from_string(""" c c0 c1
\t a1 a0""")
k2 = from_string(""" d d0 d1 d2
\t b1 b2 b0""")
expected = from_string(r"""c\d d0 d1 d2
c0 4 5 3
c1 1 2 0""")
assert_array_equal(arr[k1, k2], expected)
# 4) keys with a common extra axis
k1 = from_string(r"""c\d d0 d1 d2
c0 a1 a0 a1
c1 a0 a1 a0""").astype(str)
k2 = from_string(r"""c\e e0 e1
c0 b1 b2
c1 b0 b1""").astype(str)
expected = from_string(r""" c d\e e0 e1
c0 d0 4 5
c0 d1 1 2
c0 d2 4 5
c1 d0 0 1
c1 d1 3 4
c1 d2 0 1""")
assert_array_equal(arr[k1, k2], expected)
def test_getitem_ndarray_key_guess(array):
raw = array.data
keys = ['P04', 'P01', 'P03', 'P02']
key = np.array(keys)
res = array[key]
assert isinstance(res, Array)
assert res.axes == array.axes.replace(X.lipro, Axis(keys, 'lipro'))
assert_array_equal(res, raw[:, :, :, [3, 0, 2, 1]])
def test_getitem_int_larray_key_guess():
a = Axis([0, 1], 'a')
b = Axis([2, 3], 'b')
c = Axis([4, 5], 'c')
d = Axis([6, 7], 'd')
e = Axis([8, 9, 10, 11], 'e')
arr = ndtest([c, d, e])
key = Array([[8, 9], [10, 11]], [a, b])
assert arr[key].axes == [c, d, a, b]
def test_getitem_int_ndarray_key_guess():
c = Axis([4, 5], 'c')
d = Axis([6, 7], 'd')
e = Axis([8, 9, 10, 11], 'e')
arr = ndtest([c, d, e])
# ND keys do not work yet
# key = np.array([[8, 11], [10, 9]])
key = np.array([8, 11, 10])
res = arr[key]
assert res.axes == [c, d, Axis([8, 11, 10], 'e')]
def test_getitem_axis_object():
arr = ndtest((2, 3))
a, b = arr.axes
assert_array_equal(arr[a], arr)
assert_array_equal(arr[b], arr)
b2 = Axis('b=b0,b2')
assert_array_equal(arr[b2], from_string("""a\\b b0 b2
a0 0 2
a1 3 5"""))
def test_getitem_empty_tuple():
# an empty tuple should return a view on the original array
arr = ndtest((2, 3))
res = arr[()]
assert_array_equal(res, arr)
assert res is not arr
z = Array(0)
res = z[()]
assert res == z
assert res is not z
def test_positional_indexer_getitem(array):
raw = array.data
for key in [0, (0, 5, 1, 2), (slice(None), 5, 1), (0, 5), [1, 0], ([1, 0], 5)]:
assert_array_equal(array.i[key], raw[key])
assert_array_equal(array.i[[1, 0], [5, 4]], raw[np.ix_([1, 0], [5, 4])])
with pytest.raises(IndexError):
array.i[0, 0, 0, 0, 0]
def test_positional_indexer_setitem(array):
for key in [0, (0, 2, 1, 2), (slice(None), 2, 1), (0, 2), [1, 0], ([1, 0], 2)]:
arr = array.copy()
raw = array.data.copy()
arr.i[key] = 42
raw[key] = 42
assert_array_equal(arr, raw)
raw = array.data
array.i[[1, 0], [5, 4]] = 42
raw[np.ix_([1, 0], [5, 4])] = 42
assert_array_equal(array, raw)
def test_points_indexer_getitem():
arr = ndtest((2, 3, 3))
raw = arr.data
keys = [
('a0',
0),
(('a0', 'c2'),
(0, slice(None), 2)),
(('a0', 'b1', 'c2'),
(0, 1, 2)),
# key in the "correct" order
((['a1', 'a0', 'a1', 'a0'], 'b1', ['c1', 'c0', 'c1', 'c0']),
([1, 0, 1, 0], 1, [1, 0, 1, 0])),
# key in the "wrong" order
((['a1', 'a0', 'a1', 'a0'], ['c1', 'c0', 'c1', 'c0'], 'b1'),
([1, 0, 1, 0], 1, [1, 0, 1, 0])),
# advanced key with a missing dimension
((['a1', 'a0', 'a1', 'a0'], ['c1', 'c0', 'c1', 'c0']),
([1, 0, 1, 0], slice(None), [1, 0, 1, 0])),
]
for label_key, index_key in keys:
assert_array_equal(arr.points[label_key], raw[index_key])
# XXX: we might want to raise KeyError or IndexError instead?
with pytest.raises(ValueError):
arr.points['a0', 'b1', 'c2', 'd0']
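# Illustrative sketch (assumption: this helper is not part of the original test suite;
# its name is hypothetical and the leading underscore keeps pytest from collecting it).
# It contrasts plain [] (cross product of list keys) with .points (pointwise selection,
# like numpy fancy indexing), which is what the keys above exercise.
def _sketch_points_vs_cross_product():
    arr = ndtest((3, 3))
    # plain []: cross product of the two keys -> 2 x 2 subarray
    assert arr['a0,a1', 'b0,b1'].shape == (2, 2)
    # .points: one value per (a, b) pair -> 1-d result of length 2
    assert arr.points['a0,a1', 'b0,b1'].shape == (2,)
    assert_array_equal(arr.points['a0,a1', 'b0,b1'], arr.data[[0, 1], [0, 1]])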
def test_points_indexer_setitem():
keys = [
('a0',
0),
(('a0', 'c2'),
(0, slice(None), 2)),
(('a0', 'b1', 'c2'),
(0, 1, 2)),
# key in the "correct" order
((['a1', 'a0', 'a1', 'a0'], 'b1', ['c1', 'c0', 'c1', 'c0']),
([1, 0, 1, 0], 1, [1, 0, 1, 0])),
# key in the "wrong" order
((['a1', 'a0', 'a1', 'a0'], ['c1', 'c0', 'c1', 'c0'], 'b1'),
([1, 0, 1, 0], 1, [1, 0, 1, 0])),
# advanced key with a missing dimension
((['a1', 'a0', 'a1', 'a0'], ['c1', 'c0', 'c1', 'c0']),
([1, 0, 1, 0], slice(None), [1, 0, 1, 0])),
]
for label_key, index_key in keys:
arr = ndtest((2, 3, 3))
raw = arr.data.copy()
arr.points[label_key] = 42
raw[index_key] = 42
assert_array_equal(arr, raw)
arr = ndtest(2)
# XXX: we might want to raise KeyError or IndexError instead?
with pytest.raises(ValueError):
arr.points['a0', 'b1'] = 42
# test when broadcasting is involved
arr = ndtest((2, 3, 4))
raw = arr.data.copy()
raw_value = raw[:, 0, 0].reshape(2, 1)
raw[:, [0, 1, 2], [0, 1, 2]] = raw_value
arr.points['b0,b1,b2', 'c0,c1,c2'] = arr['b0', 'c0']
assert_array_equal(arr, raw)
def test_setitem_larray(array, small_array):
"""
tests Array.__setitem__(key, value) where value is an Array
"""
age, geo, sex, lipro = array.axes
# 1) using a LGroup key
ages1_5_9 = age[[1, 5, 9]]
# a) value has exactly the same shape as the target slice
arr = array.copy()
raw = array.data.copy()
arr[ages1_5_9] = arr[ages1_5_9] + 25.0
raw[[1, 5, 9]] = raw[[1, 5, 9]] + 25.0
assert_array_equal(arr, raw)
# b) value has exactly the same shape but the LGroup is at a "wrong" position
arr = array.copy()
arr[geo[:], ages1_5_9] = arr[ages1_5_9] + 25.0
# same raw as previous test
assert_array_equal(arr, raw)
# c) value has an extra length-1 axis
arr = array.copy()
raw = array.data.copy()
raw_value = raw[[1, 5, 9], np.newaxis] + 26.0
fake_axis = Axis(['label'], 'fake')
age_axis = arr[ages1_5_9].axes.age
value = Array(raw_value, axes=(age_axis, fake_axis, geo, sex, lipro))
arr[ages1_5_9] = value
raw[[1, 5, 9]] = raw[[1, 5, 9]] + 26.0
assert_array_equal(arr, raw)
# d) value has the same axes as the target but one has length 1
# arr = array.copy()
# raw = array.data.copy()
# raw[[1, 5, 9]] = np.sum(raw[[1, 5, 9]], axis=1, keepdims=True)
# arr[ages1_5_9] = arr[ages1_5_9].sum(geo=(geo.all(),))
# assert_array_equal(arr, raw)
# e) value has a missing dimension
arr = array.copy()
raw = array.data.copy()
arr[ages1_5_9] = arr[ages1_5_9].sum(geo)
raw[[1, 5, 9]] = np.sum(raw[[1, 5, 9]], axis=1, keepdims=True)
assert_array_equal(arr, raw)
# 2) using a LGroup and scalar key (triggers advanced indexing/cross)
# a) value has exactly the same shape as the target slice
arr = array.copy()
raw = array.data.copy()
# using 1, 5, 8 and not 9 so that the list is not collapsed to a slice
value = arr[age[1, 5, 8], sex['M']] + 25.0
arr[age[1, 5, 8], sex['M']] = value
raw[[1, 5, 8], :, 0] = raw[[1, 5, 8], :, 0] + 25.0
assert_array_equal(arr, raw)
# 3) using a string key
arr = array.copy()
raw = array.data.copy()
arr['1, 5, 9'] = arr['1, 5, 9'] + 27.0
raw[[1, 5, 9]] = raw[[1, 5, 9]] + 27.0
assert_array_equal(arr, raw)
# 4) using ellipsis keys
# only Ellipsis
arr = array.copy()
arr[...] = 0
assert_array_equal(arr, np.zeros_like(raw))
# Ellipsis and LGroup
arr = array.copy()
raw = array.data.copy()
arr[..., lipro['P01,P05,P09']] = 0
raw[..., [0, 4, 8]] = 0
assert_array_equal(arr, raw)
# 5) using a single slice(None) key
arr = array.copy()
arr[:] = 0
assert_array_equal(arr, np.zeros_like(raw))
# 6) incompatible axes
arr = small_array.copy()
la2 = small_array.copy()
with pytest.raises(ValueError, match="Value {!s} axis is not present in target subset {!s}. "
"A value can only have the same axes or fewer axes than the subset "
"being targeted".format(la2.axes - arr['P01'].axes, arr['P01'].axes)):
arr['P01'] = la2
la2 = arr.rename('sex', 'gender')
with pytest.raises(ValueError, match="Value {!s} axis is not present in target subset {!s}. "
"A value can only have the same axes or fewer axes than the subset "
"being targeted".format(la2['P01'].axes - arr['P01'].axes, arr['P01'].axes)):
arr['P01'] = la2['P01']
# 7) incompatible labels
sex2 = Axis('sex=F,M')
la2 = Array(small_array.data, axes=(sex2, lipro))
with pytest.raises(ValueError, match="incompatible axes:"):
arr[:] = la2
# key has multiple Arrays (this is used within .points indexing)
# ==============================================================
# first some setup
a = Axis(['a0', 'a1'], None)
b = Axis(['b0', 'b1', 'b2'], None)
expected = ndtest((a, b))
value = expected.combine_axes()
# a) with anonymous axes
combined_axis = value.axes[0]
a_key = Array([0, 0, 0, 1, 1, 1], combined_axis)
b_key = Array([0, 1, 2, 0, 1, 2], combined_axis)
key = (a.i[a_key], b.i[b_key])
array = empty((a, b))
array[key] = value
assert_array_equal(array, expected)
# b) with wildcard combined_axis
wild_combined_axis = combined_axis.ignore_labels()
wild_a_key = Array([0, 0, 0, 1, 1, 1], wild_combined_axis)
wild_b_key = Array([0, 1, 2, 0, 1, 2], wild_combined_axis)
wild_key = (a.i[wild_a_key], b.i[wild_b_key])
array = empty((a, b))
array[wild_key] = value
assert_array_equal(array, expected)
# c) with a wildcard value
wild_value = value.ignore_labels()
array = empty((a, b))
array[key] = wild_value
assert_array_equal(array, expected)
# d) with a wildcard combined axis and wildcard value
array = empty((a, b))
array[wild_key] = wild_value
assert_array_equal(array, expected)
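# Illustrative sketch (assumption: this helper is not part of the original test suite;
# its name is hypothetical and the leading underscore keeps pytest from collecting it).
# It restates case 1e) above ("value has a missing dimension"): axes missing from the
# value are broadcast over when assigning.
def _sketch_setitem_broadcast_missing_axis():
    arr = ndtest((2, 3))
    # the value only has the 'a' axis; it is broadcast over the missing 'b' axis
    arr[:] = arr.sum('b')
    assert_array_equal(arr['a0'], Array([3, 3, 3], arr.b))
    assert_array_equal(arr['a1'], Array([12, 12, 12], arr.b))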
def test_setitem_ndarray(array):
"""
tests Array.__setitem__(key, value) where value is a raw ndarray.
In that case, value.shape is more restricted as we rely on numpy broadcasting.
"""
# a) value has exactly the same shape as the target slice
arr = array.copy()
raw = array.data.copy()
value = raw[[1, 5, 9]] + 25.0
arr[[1, 5, 9]] = value
raw[[1, 5, 9]] = value
assert_array_equal(arr, raw)
# b) value has the same axes as the target but one has length 1
arr = array.copy()
raw = array.data.copy()
value = np.sum(raw[[1, 5, 9]], axis=1, keepdims=True)
arr[[1, 5, 9]] = value
raw[[1, 5, 9]] = value
assert_array_equal(arr, raw)
def test_setitem_scalar(array):
"""
tests Array.__setitem__(key, value) where value is a scalar
"""
# a) list key (one dimension)
arr = array.copy()
raw = array.data.copy()
arr[[1, 5, 9]] = 42
raw[[1, 5, 9]] = 42
assert_array_equal(arr, raw)
# b) full scalar key (ie set one cell)
arr = array.copy()
raw = array.data.copy()
arr[0, 'P02', 'A12', 'M'] = 42
raw[0, 1, 0, 1] = 42
assert_array_equal(arr, raw)
def test_setitem_bool_array_key(array):
# XXX: this test is awfully slow (more than 1s)
age, geo, sex, lipro = array.axes
# Array key
# a1) same shape, same order
arr = array.copy()
raw = array.data.copy()
arr[arr < 5] = 0
raw[raw < 5] = 0
assert_array_equal(arr, raw)
# a2) same shape, different order
arr = array.copy()
raw = array.data.copy()
key = (arr < 5).T
arr[key] = 0
raw[raw < 5] = 0
assert_array_equal(arr, raw)
# b) numpy-broadcastable shape
# arr = array.copy()
# raw = array.data.copy()
# key = arr[sex['F,']] < 5
# self.assertEqual(key.ndim, 4)
# arr[key] = 0
# raw[raw[:, :, [1]] < 5] = 0
# assert_array_equal(arr, raw)
# c) Array-broadcastable shape (missing axis)
arr = array.copy()
raw = array.data.copy()
key = arr[sex['M']] < 5
assert key.ndim == 3
arr[key] = 0
raw_key = raw[:, :, 0, :] < 5
raw_d1, raw_d2, raw_d4 = raw_key.nonzero()
raw[raw_d1, raw_d2, :, raw_d4] = 0
assert_array_equal(arr, raw)
# ndarray key
arr = array.copy()
raw = array.data.copy()
arr[raw < 5] = 0
raw[raw < 5] = 0
assert_array_equal(arr, raw)
# d) Array with extra axes
arr = array.copy()
key = (arr < 5).expand([Axis(2, 'extra')])
assert key.ndim == 5
# TODO: make this work
with pytest.raises(ValueError):
arr[key] = 0
def test_set(array):
age, geo, sex, lipro = array.axes
# 1) using a LGroup key
ages1_5_9 = age[[1, 5, 9]]
# a) value has exactly the same shape as the target slice
arr = array.copy()
raw = array.data.copy()
arr.set(arr[ages1_5_9] + 25.0, age=ages1_5_9)
raw[[1, 5, 9]] = raw[[1, 5, 9]] + 25.0
assert_array_equal(arr, raw)
# b) same size but a different shape (extra length-1 axis)
arr = array.copy()
raw = array.data.copy()
raw_value = raw[[1, 5, 9], np.newaxis] + 26.0
fake_axis = Axis(['label'], 'fake')
age_axis = arr[ages1_5_9].axes.age
value = Array(raw_value, axes=(age_axis, fake_axis, geo, sex, lipro))
arr.set(value, age=ages1_5_9)
raw[[1, 5, 9]] = raw[[1, 5, 9]] + 26.0
assert_array_equal(arr, raw)
# dimension of length 1
# arr = array.copy()
# raw = array.data.copy()
# raw[[1, 5, 9]] = np.sum(raw[[1, 5, 9]], axis=1, keepdims=True)
# arr.set(arr[ages1_5_9].sum(geo=(geo.all(),)), age=ages1_5_9)
# assert_array_equal(arr, raw)
# c) missing dimension
arr = array.copy()
raw = array.data.copy()
arr.set(arr[ages1_5_9].sum(geo), age=ages1_5_9)
raw[[1, 5, 9]] = np.sum(raw[[1, 5, 9]], axis=1, keepdims=True)
assert_array_equal(arr, raw)
# 2) using a raw key
arr = array.copy()
raw = array.data.copy()
arr.set(arr[[1, 5, 9]] + 27.0, age=[1, 5, 9])
raw[[1, 5, 9]] = raw[[1, 5, 9]] + 27.0
assert_array_equal(arr, raw)
def test_filter(array):
age, geo, sex, lipro = array.axes
ages1_5_9 = age[(1, 5, 9)]
ages11 = age[11]
# with LGroup
assert array.filter(age=ages1_5_9).shape == (3, 44, 2, 15)
# FIXME: this should raise a comprehensible error!
# self.assertEqual(array.filter(age=[ages1_5_9]).shape, (3, 44, 2, 15))
# LGroup with 1 value => collapse
assert array.filter(age=ages11).shape == (44, 2, 15)
# LGroup with a list of 1 value => do not collapse
assert array.filter(age=age[[11]]).shape == (1, 44, 2, 15)
# LGroup with a list of 1 value defined as a string => do not collapse
assert array.filter(lipro=lipro['P01,']).shape == (116, 44, 2, 1)
# LGroup with 1 value
# XXX: this does not work. Do we want to make this work?
# filtered = array.filter(age=(ages11,))
# self.assertEqual(filtered.shape, (1, 44, 2, 15))
# list
assert array.filter(age=[1, 5, 9]).shape == (3, 44, 2, 15)
# string
assert array.filter(lipro='P01,P02').shape == (116, 44, 2, 2)
# multiple axes at once
assert array.filter(age=[1, 5, 9], lipro='P01,P02').shape == (3, 44, 2, 2)
# multiple axes one after the other
assert array.filter(age=[1, 5, 9]).filter(lipro='P01,P02').shape == (3, 44, 2, 2)
# a single value for one dimension => collapse the dimension
assert array.filter(sex='M').shape == (116, 44, 15)
# but a list with a single value for one dimension => do not collapse
assert array.filter(sex=['M']).shape == (116, 44, 1, 15)
assert array.filter(sex='M,').shape == (116, 44, 1, 15)
# with duplicate keys
# XXX: do we want to support this? I don't see any value in that but I might be short-sighted.
# filtered = array.filter(lipro='P01,P02,P01')
# XXX: we could abuse python to allow naming groups via Axis.__getitem__
# (but I doubt it is a good idea).
# child = age[':17', 'child']
# slices
# ------
# LGroup slice
assert array.filter(age=age[:17]).shape == (18, 44, 2, 15)
# string slice
assert array.filter(lipro=':P03').shape == (116, 44, 2, 3)
# raw slice
assert array.filter(age=slice(17)).shape == (18, 44, 2, 15)
# filter chain with a slice
assert array.filter(age=slice(17)).filter(geo='A12,A13').shape == (18, 2, 2, 15)
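# Illustrative sketch (assumption: this helper is not part of the original test suite;
# its name is hypothetical and the leading underscore keeps pytest from collecting it).
# It restates the collapse rule exercised above on a small array: a scalar label drops
# the dimension, while a list (even of length 1) keeps it.
def _sketch_filter_collapse_rule():
    arr = ndtest((2, 3))
    assert arr.filter(a='a0').shape == (3,)       # scalar label -> axis 'a' is dropped
    assert arr.filter(a=['a0']).shape == (1, 3)   # list of one  -> axis 'a' is kept
    assert arr.filter(a='a0,').shape == (1, 3)    # trailing comma == list of one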
def test_filter_multiple_axes(array):
# multiple values in each group
assert array.filter(age=[1, 5, 9], lipro='P01,P02').shape == (3, 44, 2, 2)
# with a group of one value
assert array.filter(age=[1, 5, 9], sex='M,').shape == (3, 44, 1, 15)
# with a discarded axis (there is a scalar in the key)
assert array.filter(age=[1, 5, 9], sex='M').shape == (3, 44, 15)
# with a discarded axis that is not adjacent to the ix_array axis, ie with a sliced axis between the scalar axis
# and the ix_array axis. Since our array has axes age, geo, sex and lipro, any of the following should be tested:
# age+sex / age+lipro / geo+lipro
# additionally, if the ix_array axis was first (ie ix_array on age), it worked even before the issue was fixed,
# since the "indexing" subspace is tacked on to the beginning (as the first dimension)
assert array.filter(age=57, sex='M,F').shape == (44, 2, 15)
assert array.filter(age=57, lipro='P01,P05').shape == (44, 2, 2)
assert array.filter(geo='A57', lipro='P01,P05').shape == (116, 2, 2)
def test_nonzero():
arr = ndtest((2, 3))
a, b = arr.axes
cond = arr > 1
assert_array_equal(cond, from_string(r"""a\b b0 b1 b2
a0 False False True
a1 True True True"""))
a_group, b_group = cond.nonzero()
assert isinstance(a_group, IGroup)
assert a_group.axis is a
assert a_group.key.equals(from_string("""a_b a0_b2 a1_b0 a1_b1 a1_b2
\t 0 1 1 1"""))
assert isinstance(b_group, IGroup)
assert b_group.axis is b
assert b_group.key.equals(from_string("""a_b a0_b2 a1_b0 a1_b1 a1_b2
\t 2 0 1 2"""))
expected = from_string("""a_b a0_b2 a1_b0 a1_b1 a1_b2
\t 2 3 4 5""")
assert_array_equal(arr[a_group, b_group], expected)
assert_array_equal(arr.points[a_group, b_group], expected)
assert_array_equal(arr[cond], expected)
def test_contains():
arr = ndtest('a=0..2;b=b0..b2;c=2..4')
# string label
assert 'b1' in arr
assert 'b4' not in arr
# int label
assert 1 in arr
assert 5 not in arr
# duplicate label
assert 2 in arr
# slice
assert not slice('b0', 'b2') in arr
def test_sum_full_axes(array):
age, geo, sex, lipro = array.axes
# everything
assert array.sum() == np.asarray(array).sum()
# using axes numbers
assert array.sum(axis=2).shape == (116, 44, 15)
assert array.sum(axis=(0, 2)).shape == (44, 15)
# using Axis objects
assert array.sum(age).shape == (44, 2, 15)
assert array.sum(age, sex).shape == (44, 15)
# using axes names
assert array.sum('age', 'sex').shape == (44, 15)
# chained sum
assert array.sum(age, sex).sum(geo).shape == (15,)
assert array.sum(age, sex).sum(lipro, geo) == array.sum()
# getitem on aggregated
aggregated = array.sum(age, sex)
assert aggregated[vla_str].shape == (22, 15)
# filter on aggregated
assert aggregated.filter(geo=vla_str).shape == (22, 15)
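# Illustrative sketch (assumption: this helper is not part of the original test suite;
# its name is hypothetical and the leading underscore keeps pytest from collecting it).
# It shows that the same full-axis aggregate can be spelled with an axis number, an
# Axis object or an axis name, as the assertions above do on the bigger fixture.
def _sketch_sum_full_axes_spellings():
    arr = ndtest((2, 3))
    a = arr.a
    assert_array_equal(arr.sum(axis=0), arr.sum(a))
    assert_array_equal(arr.sum(a), arr.sum('a'))
    assert arr.sum('a').shape == (3,)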
def test_sum_full_axes_with_nan(array):
array['M', 'P02', 'A12', 0] = nan
raw = array.data
# everything
assert array.sum() == np.nansum(raw)
assert isnan(array.sum(skipna=False))
# using Axis objects
assert_array_nan_equal(array.sum(X.age), np.nansum(raw, 0))
assert_array_nan_equal(array.sum(X.age, skipna=False), raw.sum(0))
assert_array_nan_equal(array.sum(X.age, X.sex), np.nansum(raw, (0, 2)))
assert_array_nan_equal(array.sum(X.age, X.sex, skipna=False), raw.sum((0, 2)))
def test_sum_full_axes_keep_axes(array):
agg = array.sum(keepaxes=True)
assert agg.shape == (1, 1, 1, 1)
for axis in agg.axes:
assert axis.labels == ['sum']
agg = array.sum(keepaxes='total')
assert agg.shape == (1, 1, 1, 1)
for axis in agg.axes:
assert axis.labels == ['total']
def test_mean_full_axes(array):
raw = array.data
assert array.mean() == np.mean(raw)
assert_array_nan_equal(array.mean(X.age), np.mean(raw, 0))
assert_array_nan_equal(array.mean(X.age, X.sex), np.mean(raw, (0, 2)))
def test_mean_groups(array):
# using int type to test that we get a float in return
arr = array.astype(int)
raw = array.data
res = arr.mean(X.geo['A11', 'A13', 'A24', 'A31'])
assert_array_nan_equal(res, np.mean(raw[:, [0, 2, 4, 5]], 1))
def test_median_full_axes(array):
raw = array.data
assert array.median() == np.median(raw)
assert_array_nan_equal(array.median(X.age), np.median(raw, 0))
assert_array_nan_equal(array.median(X.age, X.sex), np.median(raw, (0, 2)))
def test_median_groups(array):
raw = array.data
res = array.median(X.geo['A11', 'A13', 'A24'])
assert res.shape == (116, 2, 15)
assert_array_nan_equal(res, np.median(raw[:, [0, 2, 4]], 1))
def test_percentile_full_axes():
arr = ndtest((2, 3, 4))
raw = arr.data
assert arr.percentile(10) == np.percentile(raw, 10)
assert_array_nan_equal(arr.percentile(10, X.a), np.percentile(raw, 10, 0))
assert_array_nan_equal(arr.percentile(10, X.c, X.a), np.percentile(raw, 10, (2, 0)))
def test_percentile_groups():
arr = ndtest((2, 5, 3))
raw = arr.data
res = arr.percentile(10, X.b['b0', 'b2', 'b4'])
assert_array_nan_equal(res, np.percentile(raw[:, [0, 2, 4]], 10, 1))
def test_cumsum(array):
raw = array.data
# using Axis objects
assert_array_equal(array.cumsum(X.age), raw.cumsum(0))
assert_array_equal(array.cumsum(X.lipro), raw.cumsum(3))
# using axes numbers
assert_array_equal(array.cumsum(1), raw.cumsum(1))
# using axes names
assert_array_equal(array.cumsum('sex'), raw.cumsum(2))
def test_group_agg_kwargs(array):
age, geo, sex, lipro = array.axes
vla, wal, bru = vla_str, wal_str, bru_str
# a) group aggregate on a fresh array
# a.1) one group => collapse dimension
assert array.sum(sex='M').shape == (116, 44, 15)
assert array.sum(sex='M,F').shape == (116, 44, 15)
assert array.sum(sex=sex['M']).shape == (116, 44, 15)
assert array.sum(geo='A11,A21,A25').shape == (116, 2, 15)
assert array.sum(geo=['A11', 'A21', 'A25']).shape == (116, 2, 15)
assert array.sum(geo=geo['A11,A21,A25']).shape == (116, 2, 15)
assert array.sum(geo=':').shape == (116, 2, 15)
assert array.sum(geo=geo[:]).shape == (116, 2, 15)
assert array.sum(geo=geo[':']).shape == (116, 2, 15)
# Include everything between two labels. Since A11 is the first label
# and A21 is the last one, this should be equivalent to the previous
# tests.
assert array.sum(geo='A11:A21').shape == (116, 2, 15)
assert_array_equal(array.sum(geo='A11:A21'), array.sum(geo=':'))
assert_array_equal(array.sum(geo=geo['A11:A21']), array.sum(geo=':'))
# a.2) a tuple of one group => do not collapse dimension
assert array.sum(geo=(geo[:],)).shape == (116, 1, 2, 15)
# a.3) several groups
# string groups
assert array.sum(geo=(vla, wal, bru)).shape == (116, 3, 2, 15)
# with one label in several groups
assert array.sum(sex=(['M'], ['M', 'F'])).shape == (116, 44, 2, 15)
assert array.sum(sex=('M', 'M,F')).shape == (116, 44, 2, 15)
assert array.sum(sex='M;M,F').shape == (116, 44, 2, 15)
res = array.sum(geo=(vla, wal, bru, belgium))
assert res.shape == (116, 4, 2, 15)
# a.4) several dimensions at the same time
res = array.sum(lipro='P01,P03;P02,P05;:', geo=(vla, wal, bru, belgium))
assert res.shape == (116, 4, 2, 3)
# b) both axis aggregate and group aggregate at the same time
# Note that you must list "full axes" aggregates first (Python does not allow non-keyword arguments after keyword arguments).
res = array.sum(age, sex, geo=(vla, wal, bru, belgium))
assert res.shape == (4, 15)
# c) chain group aggregate after axis aggregate
res = array.sum(age, sex).sum(geo=(vla, wal, bru, belgium))
assert res.shape == (4, 15)
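# Illustrative sketch (assumption: this helper is not part of the original test suite;
# its name is hypothetical and the leading underscore keeps pytest from collecting it).
# It restates a.1) and a.2) above: a single group passed as keyword collapses the axis,
# while wrapping it in a tuple keeps a length-1 axis.
def _sketch_group_agg_collapse_rule():
    arr = ndtest((2, 3))
    assert arr.sum(b='b0,b1').shape == (2,)
    assert arr.sum(b=('b0,b1',)).shape == (2, 1)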
def test_group_agg_guess_axis(array):
raw = array.data.copy()
age, geo, sex, lipro = array.axes
vla, wal, bru = vla_str, wal_str, bru_str
# a) group aggregate on a fresh array
# a.1) one group => collapse dimension
# not sure I should support groups with a single item in an aggregate
assert array.sum('M').shape == (116, 44, 15)
assert array.sum('M,').shape == (116, 44, 15)
assert array.sum('M,F').shape == (116, 44, 15)
assert array.sum('A11,A21,A25').shape == (116, 2, 15)
# with a name
assert array.sum('A11,A21,A25 >> g1').shape == (116, 2, 15)
assert array.sum(['A11', 'A21', 'A25']).shape == (116, 2, 15)
# Include everything between two labels. Since A11 is the first label
# and A21 is the last one, this should be equivalent to taking the
# full axis.
assert array.sum('A11:A21').shape == (116, 2, 15)
assert_array_equal(array.sum('A11:A21'), array.sum(geo=':'))
assert_array_equal(array.sum('A11:A21'), array.sum(geo))
# a.2) a tuple of one group => do not collapse dimension
assert array.sum((geo[:],)).shape == (116, 1, 2, 15)
# a.3) several groups
# string groups
assert array.sum((vla, wal, bru)).shape == (116, 3, 2, 15)
# XXX: do we also want to support this? I do not really like it because it gets tricky when other axes come
# into play. For now the error message is unclear because it first aggregates on "vla", then tries to
# aggregate on "wal", but there is no "geo" dimension anymore.
# self.assertEqual(array.sum(vla, wal, bru).shape, (116, 3, 2, 15))
# with one label in several groups
assert array.sum((['M'], ['M', 'F'])).shape == (116, 44, 2, 15)
assert array.sum(('M', 'M,F')).shape == (116, 44, 2, 15)
assert array.sum('M;M,F').shape == (116, 44, 2, 15)
# with group names
res = array.sum('M >> men;M,F >> all')
assert res.shape == (116, 44, 2, 15)
assert 'sex' in res.axes
men = sex['M'].named('men')
all_ = sex['M,F'].named('all')
assert_array_equal(res.axes.sex.labels, ['men', 'all'])
assert_array_equal(res['men'], raw[:, :, 0, :])
assert_array_equal(res['all'], raw.sum(2))
res = array.sum(('M >> men', 'M,F >> all'))
assert res.shape == (116, 44, 2, 15)
assert 'sex' in res.axes
assert_array_equal(res.axes.sex.labels, ['men', 'all'])
assert_array_equal(res['men'], raw[:, :, 0, :])
assert_array_equal(res['all'], raw.sum(2))
res = array.sum((vla, wal, bru, belgium))
assert res.shape == (116, 4, 2, 15)
# a.4) several dimensions at the same time
res = array.sum('P01,P03;P02,P05;P01:', (vla, wal, bru, belgium))
assert res.shape == (116, 4, 2, 3)
# b) both axis aggregate and group aggregate at the same time
res = array.sum(age, sex, (vla, wal, bru, belgium))
assert res.shape == (4, 15)
# c) chain group aggregate after axis aggregate
res = array.sum(age, sex).sum((vla, wal, bru, belgium))
assert res.shape == (4, 15)
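# Illustrative sketch (assumption: this helper is not part of the original test suite;
# its name is hypothetical and the leading underscore keeps pytest from collecting it).
# It restates the "with group names" block above: groups can be named with '>>' and
# several groups are separated with ';'.
def _sketch_named_groups():
    arr = ndtest((2, 3))
    res = arr.sum('b0,b1 >> b01;b2 >> b2only')
    assert res.shape == (2, 2)
    assert_array_equal(res.axes.b.labels, ['b01', 'b2only'])
    assert_array_equal(res['b01'], arr['b0'] + arr['b1'])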
def test_group_agg_label_group(array):
age, geo, sex, lipro = array.axes
vla, wal, bru = geo[vla_str], geo[wal_str], geo[bru_str]
lg_belgium = geo[belgium]
# a) group aggregate on a fresh array
# a.1) one group => collapse dimension
# not sure I should support groups with a single item in an aggregate
men = sex.i[[0]]
assert array.sum(men).shape == (116, 44, 15)
assert array.sum(sex['M']).shape == (116, 44, 15)
assert array.sum(sex['M,']).shape == (116, 44, 15)
assert array.sum(sex['M,F']).shape == (116, 44, 15)
assert array.sum(geo['A11,A21,A25']).shape == (116, 2, 15)
assert array.sum(geo[['A11', 'A21', 'A25']]).shape == (116, 2, 15)
assert array.sum(geo['A11', 'A21', 'A25']).shape == (116, 2, 15)
assert array.sum(geo['A11,A21,A25']).shape == (116, 2, 15)
assert array.sum(geo[:]).shape == (116, 2, 15)
assert array.sum(geo[':']).shape == (116, 2, 15)
assert array.sum(geo[:]).shape == (116, 2, 15)
# Include everything between two labels. Since A11 is the first label and A21 is the last one, this should be
# equivalent to the previous tests.
assert array.sum(geo['A11:A21']).shape == (116, 2, 15)
assert_array_equal(array.sum(geo['A11:A21']), array.sum(geo))
assert_array_equal(array.sum(geo['A11':'A21']), array.sum(geo))
# a.2) a tuple of one group => do not collapse dimension
assert array.sum((geo[:],)).shape == (116, 1, 2, 15)
# a.3) several groups
# string groups
assert array.sum((vla, wal, bru)).shape == (116, 3, 2, 15)
# XXX: do we also want to support this? I do not really like it because it gets tricky when other axes come
# into play. For now the error message is unclear because it first aggregates on "vla", then tries to
# aggregate on "wal", but there is no "geo" dimension anymore.
# self.assertEqual(array.sum(vla, wal, bru).shape, (116, 3, 2, 15))
# with one label in several groups
assert array.sum((sex['M'], sex[['M', 'F']])).shape == (116, 44, 2, 15)
assert array.sum((sex['M'], sex['M', 'F'])).shape == (116, 44, 2, 15)
assert array.sum((sex['M'], sex['M,F'])).shape == (116, 44, 2, 15)
# XXX: do we want to support this?
# self.assertEqual(array.sum(sex['M;M,F']).shape, (116, 44, 2, 15))
res = array.sum((vla, wal, bru, lg_belgium))
assert res.shape == (116, 4, 2, 15)
# a.4) several dimensions at the same time
# self.assertEqual(array.sum(lipro['P01,P03;P02,P05;P01:'], (vla, wal, bru, lg_belgium)).shape,
# (116, 4, 2, 3))
res = array.sum((lipro['P01,P03'], lipro['P02,P05'], lipro[:]), (vla, wal, bru, lg_belgium))
assert res.shape == (116, 4, 2, 3)
# b) both axis aggregate and group aggregate at the same time
res = array.sum(age, sex, (vla, wal, bru, lg_belgium))
assert res.shape == (4, 15)
# c) chain group aggregate after axis aggregate
res = array.sum(age, sex).sum((vla, wal, bru, lg_belgium))
assert res.shape == (4, 15)
def test_group_agg_label_group_no_axis(array):
age, geo, sex, lipro = array.axes
vla, wal, bru = LGroup(vla_str), LGroup(wal_str), LGroup(bru_str)
lg_belgium = LGroup(belgium)
# a) group aggregate on a fresh array
# a.1) one group => collapse dimension
# not sure I should support groups with a single item in an aggregate
assert array.sum(LGroup('M')).shape == (116, 44, 15)
assert array.sum(LGroup('M,')).shape == (116, 44, 15)
assert array.sum(LGroup('M,F')).shape == (116, 44, 15)
assert array.sum(LGroup('A11,A21,A25')).shape == (116, 2, 15)
assert array.sum(LGroup(['A11', 'A21', 'A25'])).shape == (116, 2, 15)
# Include everything between two labels. Since A11 is the first label
# and A21 is the last one, this should be equivalent to the full axis.
assert array.sum(LGroup('A11:A21')).shape == (116, 2, 15)
assert_array_equal(array.sum(LGroup('A11:A21')), array.sum(geo))
assert_array_equal(array.sum(LGroup(slice('A11', 'A21'))), array.sum(geo))
# a.3) several groups
# string groups
assert array.sum((vla, wal, bru)).shape == (116, 3, 2, 15)
# XXX: do we also want to support this? I do not really like it because it gets tricky when other axes come
# into play. For now the error message is unclear because it first aggregates on "vla", then tries to
# aggregate on "wal", but there is no "geo" dimension anymore.
# self.assertEqual(array.sum(vla, wal, bru).shape, (116, 3, 2, 15))
# with one label in several groups
assert array.sum((LGroup('M'), LGroup(['M', 'F']))).shape == (116, 44, 2, 15)
assert array.sum((LGroup('M'), LGroup('M,F'))).shape == (116, 44, 2, 15)
# XXX: do we want to support this?
# self.assertEqual(array.sum(sex['M;M,F']).shape, (116, 44, 2, 15))
res = array.sum((vla, wal, bru, lg_belgium))
assert res.shape == (116, 4, 2, 15)
# a.4) several dimensions at the same time
# self.assertEqual(array.sum(lipro['P01,P03;P02,P05;P01:'], (vla, wal, bru, lg_belgium)).shape,
# (116, 4, 2, 3))
res = array.sum((LGroup('P01,P03'), LGroup('P02,P05')), (vla, wal, bru, lg_belgium))
assert res.shape == (116, 4, 2, 2)
# b) both axis aggregate and group aggregate at the same time
res = array.sum(age, sex, (vla, wal, bru, lg_belgium))
assert res.shape == (4, 15)
# c) chain group aggregate after axis aggregate
res = array.sum(age, sex).sum((vla, wal, bru, lg_belgium))
assert res.shape == (4, 15)
def test_group_agg_axis_ref_label_group(array):
age, geo, sex, lipro = X.age, X.geo, X.sex, X.lipro
vla, wal, bru = geo[vla_str], geo[wal_str], geo[bru_str]
lg_belgium = geo[belgium]
# a) group aggregate on a fresh array
# a.1) one group => collapse dimension
# not sure I should support groups with a single item in an aggregate
men = sex.i[[0]]
assert array.sum(men).shape == (116, 44, 15)
assert array.sum(sex['M']).shape == (116, 44, 15)
assert array.sum(sex['M,']).shape == (116, 44, 15)
assert array.sum(sex['M,F']).shape == (116, 44, 15)
assert array.sum(geo['A11,A21,A25']).shape == (116, 2, 15)
assert array.sum(geo[['A11', 'A21', 'A25']]).shape == (116, 2, 15)
assert array.sum(geo['A11', 'A21', 'A25']).shape == (116, 2, 15)
assert array.sum(geo['A11,A21,A25']).shape == (116, 2, 15)
assert array.sum(geo[:]).shape == (116, 2, 15)
assert array.sum(geo[':']).shape == (116, 2, 15)
assert array.sum(geo[:]).shape == (116, 2, 15)
# Include everything between two labels. Since A11 is the first label
# and A21 is the last one, this should be equivalent to the previous
# tests.
assert array.sum(geo['A11:A21']).shape == (116, 2, 15)
assert_array_equal(array.sum(geo['A11:A21']), array.sum(geo))
assert_array_equal(array.sum(geo['A11':'A21']), array.sum(geo))
# a.2) a tuple of one group => do not collapse dimension
assert array.sum((geo[:],)).shape == (116, 1, 2, 15)
# a.3) several groups
# string groups
assert array.sum((vla, wal, bru)).shape == (116, 3, 2, 15)
# XXX: do we also want to support this? I do not really like it because
# it gets tricky when other axes come into play. For now the
# error message is unclear because it first aggregates on "vla", then
# tries to aggregate on "wal", but there is no "geo" dimension anymore.
# self.assertEqual(array.sum(vla, wal, bru).shape, (116, 3, 2, 15))
# with one label in several groups
assert array.sum((sex['M'], sex[['M', 'F']])).shape == (116, 44, 2, 15)
assert array.sum((sex['M'], sex['M', 'F'])).shape == (116, 44, 2, 15)
assert array.sum((sex['M'], sex['M,F'])).shape == (116, 44, 2, 15)
# XXX: do we want to support this?
# self.assertEqual(array.sum(sex['M;M,F']).shape, (116, 44, 2, 15))
res = array.sum((vla, wal, bru, lg_belgium))
assert res.shape == (116, 4, 2, 15)
# a.4) several dimensions at the same time
# self.assertEqual(array.sum(lipro['P01,P03;P02,P05;P01:'],
# (vla, wal, bru, belgium)).shape,
# (116, 4, 2, 3))
res = array.sum((lipro['P01,P03'], lipro['P02,P05'], lipro[:]), (vla, wal, bru, lg_belgium))
assert res.shape == (116, 4, 2, 3)
# b) both axis aggregate and group aggregate at the same time
res = array.sum(age, sex, (vla, wal, bru, lg_belgium))
assert res.shape == (4, 15)
# c) chain group aggregate after axis aggregate
res = array.sum(age, sex).sum((vla, wal, bru, lg_belgium))
assert res.shape == (4, 15)
def test_group_agg_one_axis():
a = Axis(range(3), 'a')
la = ndtest(a)
raw = np.asarray(la)
assert_array_equal(la.sum(a[0, 2]), raw[[0, 2]].sum())
def test_group_agg_anonymous_axis():
la = ndtest([Axis(2), Axis(3)])
a1, a2 = la.axes
raw = np.asarray(la)
assert_array_equal(la.sum(a2[0, 2]), raw[:, [0, 2]].sum(1))
def test_group_agg_zero_padded_label():
arr = ndtest("a=01,02,03,10,11; b=b0..b2")
expected = Array([36, 30, 39], "a=01_03,10,11")
assert_array_equal(arr.sum("01,02,03 >> 01_03; 10; 11", "b"), expected)
def test_group_agg_on_int_array():
# issue 193
arr = ndtest('year=2014..2018')
group = arr.year[:2016]
assert arr.mean(group) == 1.0
assert arr.median(group) == 1.0
assert arr.percentile(90, group) == 1.8
assert arr.std(group) == 1.0
assert arr.var(group) == 1.0
def test_group_agg_on_bool_array():
# issue 194
a = ndtest((2, 3))
b = a > 1
expected = from_string("""a,a0,a1
, 1, 2""", sep=',')
assert_array_equal(b.sum('b1:'), expected)
# TODO: fix this (and add other tests for references (X.) to anonymous axes)
# def test_group_agg_anonymous_axis_ref():
# la = ndtest([Axis(2), Axis(3)])
# raw = np.asarray(la)
# # this does not work because x[1] refers to an axis with name 1,
# # which does not exist. We might want to change this.
# assert_array_equal(la.sum(x[1][0, 2]), raw[:, [0, 2]].sum(1))
# group aggregates on a group-aggregated array
def test_group_agg_on_group_agg(array):
age, geo, sex, lipro = array.axes
vla, wal, bru = vla_str, wal_str, bru_str
reg = array.sum(age, sex).sum(geo=(vla, wal, bru, belgium))
# 1) one group => collapse dimension
assert reg.sum(lipro='P01,P02').shape == (4,)
# 2) a tuple of one group => do not collapse dimension
assert reg.sum(lipro=('P01,P02',)).shape == (4, 1)
# 3) several groups
assert reg.sum(lipro='P01;P02;:').shape == (4, 3)
# this is INVALID
# TODO: raise a nice exception
# regsum = reg.sum(lipro='P01,P02,:')
# this is currently allowed even though it can be confusing:
# P01 and P02 are both groups with one element each.
assert reg.sum(lipro=('P01', 'P02', ':')).shape == (4, 3)
assert reg.sum(lipro=('P01', 'P02', lipro[:])).shape == (4, 3)
# explicit groups are better
assert reg.sum(lipro=('P01,', 'P02,', ':')).shape == (4, 3)
assert reg.sum(lipro=(['P01'], ['P02'], ':')).shape == (4, 3)
# 4) groups on the aggregated dimension
# self.assertEqual(reg.sum(geo=([vla, bru], [wal, bru])).shape, (2, 3))
# vla, wal, bru
# group aggregates on a group-aggregated array
def test_group_agg_on_group_agg_nokw(array):
age, geo, sex, lipro = array.axes
vla, wal, bru = vla_str, wal_str, bru_str
reg = array.sum(age, sex).sum((vla, wal, bru, belgium))
# XXX: should this be supported too? (it currently fails)
# reg = array.sum(age, sex).sum(vla, wal, bru, belgium)
# 1) one group => collapse dimension
assert reg.sum('P01,P02').shape == (4,)
# 2) a tuple of one group => do not collapse dimension
assert reg.sum(('P01,P02',)).shape == (4, 1)
# 3) several groups
# : is ambiguous
# self.assertEqual(reg.sum('P01;P02;:').shape, (4, 3))
assert reg.sum('P01;P02;P01:').shape == (4, 3)
# this is INVALID
# TODO: raise a nice exception
# regsum = reg.sum(lipro='P01,P02,:')
# this is currently allowed even though it can be confusing:
# P01 and P02 are both groups with one element each.
assert reg.sum(('P01', 'P02', 'P01:')).shape == (4, 3)
assert reg.sum(('P01', 'P02', lipro[:])).shape == (4, 3)
# explicit groups are better
assert reg.sum(('P01,', 'P02,', 'P01:')).shape == (4, 3)
assert reg.sum((['P01'], ['P02'], 'P01:')).shape == (4, 3)
# 4) groups on the aggregated dimension
# self.assertEqual(reg.sum(geo=([vla, bru], [wal, bru])).shape, (2, 3))
# vla, wal, bru
def test_getitem_on_group_agg(array):
age, geo, sex, lipro = array.axes
vla, wal, bru = vla_str, wal_str, bru_str
# using a string
vla = vla_str
reg = array.sum(age, sex).sum(geo=(vla, wal, bru, belgium))
# the following are all equivalent
assert reg[vla].shape == (15,)
assert reg[(vla,)].shape == (15,)
assert reg[(vla, slice(None))].shape == (15,)
assert reg[vla, slice(None)].shape == (15,)
assert reg[vla, :].shape == (15,)
# one more level...
assert reg[vla]['P03'] == 389049848.0
# using an anonymous LGroup
vla = geo[vla_str]
reg = array.sum(age, sex).sum(geo=(vla, wal, bru, belgium))
# the following are all equivalent
assert reg[vla].shape == (15,)
assert reg[(vla,)].shape == (15,)
assert reg[(vla, slice(None))].shape == (15,)
assert reg[vla, slice(None)].shape == (15,)
assert reg[vla, :].shape == (15,)
# using a named LGroup
vla = geo[vla_str] >> 'Vlaanderen'
reg = array.sum(age, sex).sum(geo=(vla, wal, bru, belgium))
# the following are all equivalent
assert reg[vla].shape == (15,)
assert reg[(vla,)].shape == (15,)
assert reg[(vla, slice(None))].shape == (15,)
assert reg[vla, slice(None)].shape == (15,)
assert reg[vla, :].shape == (15,)
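# Illustrative sketch (assumption: this helper is not part of the original test suite;
# its name is hypothetical and the leading underscore keeps pytest from collecting it).
# It restates the "all equivalent" blocks above: the axis produced by a group aggregate
# can be indexed back either with the (named) group itself or with its name.
def _sketch_getitem_on_group_agg():
    arr = ndtest((2, 3))
    both = arr.b['b0,b1'] >> 'b01'
    res = arr.sum(b=(both, 'b2'))
    assert res.shape == (2, 2)
    assert_array_equal(res[both], arr['b0'] + arr['b1'])
    assert_array_equal(res['b01'], arr['b0'] + arr['b1'])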
def test_getitem_on_group_agg_nokw(array):
age, geo, sex, lipro = array.axes
vla, wal, bru = vla_str, wal_str, bru_str
# using a string
vla = vla_str
reg = array.sum(age, sex).sum((vla, wal, bru, belgium))
# the following are all equivalent
assert reg[vla].shape == (15,)
assert reg[(vla,)].shape == (15,)
assert reg[(vla, slice(None))].shape == (15,)
assert reg[vla, slice(None)].shape == (15,)
assert reg[vla, :].shape == (15,)
# one more level...
assert reg[vla]['P03'] == 389049848.0
# using an anonymous LGroup
vla = geo[vla_str]
reg = array.sum(age, sex).sum((vla, wal, bru, belgium))
# the following are all equivalent
assert reg[vla].shape == (15,)
assert reg[(vla,)].shape == (15,)
assert reg[(vla, slice(None))].shape == (15,)
assert reg[vla, slice(None)].shape == (15,)
assert reg[vla, :].shape == (15,)
# using a named LGroup
vla = geo[vla_str] >> 'Vlaanderen'
reg = array.sum(age, sex).sum((vla, wal, bru, belgium))
# the following are all equivalent
assert reg[vla].shape == (15,)
assert reg[(vla,)].shape == (15,)
assert reg[(vla, slice(None))].shape == (15,)
assert reg[vla, slice(None)].shape == (15,)
assert reg[vla, :].shape == (15,)
def test_filter_on_group_agg(array):
age, geo, sex, lipro = array.axes
vla, wal, bru = vla_str, wal_str, bru_str
# using a string
# vla = vla_str
# reg = array.sum(age, sex).sum(geo=(vla, wal, bru, belgium))
# assert reg.filter(geo=vla).shape == (15,)
# using a named LGroup
vla = geo[vla_str] >> 'Vlaanderen'
reg = array.sum(age, sex).sum(geo=(vla, wal, bru, belgium))
assert reg.filter(geo=vla).shape == (15,)
# Note that reg.filter(geo=(vla,)) does NOT work. It might be a
# little confusing for users, because reg[(vla,)] works but it is
# normal because reg.filter(geo=(vla,)) is equivalent to:
# reg[((vla,),)] or reg[(vla,), :]
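# A short sketch of the difference (assuming the equivalences above):
# reg[vla] and reg[(vla,)] both select the 'Vlaanderen' row (shape (15,)),
# while reg.filter(geo=(vla,)) wraps the key once more, i.e. reg[((vla,),)], which fails.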
# mixed LGroup/string slices
child = age[:17]
child_named = age[:17] >> 'child'
working = age[18:64]
retired = age[65:]
byage = array.sum(age=(child, 5, working, retired))
assert byage.shape == (4, 44, 2, 15)
byage = array.sum(age=(child, slice(5, 10), working, retired))
assert byage.shape == (4, 44, 2, 15)
# filter on an aggregated larray created with mixed groups
# assert byage.filter(age=':17').shape == (44, 2, 15)
byage = array.sum(age=(child_named, 5, working, retired))
assert byage.filter(age=child_named).shape == (44, 2, 15)
def test_sum_several_lg_groups(array):
# 1) aggregated array created using LGroups
# -----------------------------------------
fla = geo[vla_str] >> 'Flanders'
wal = geo[wal_str] >> 'Wallonia'
bru = geo[bru_str] >> 'Brussels'
reg = array.sum(geo=(fla, wal, bru))
assert reg.shape == (116, 3, 2, 15)
# the result is indexable
# 1.a) by LGroup
assert reg.filter(geo=fla).shape == (116, 2, 15)
assert reg.filter(geo=(fla, wal)).shape == (116, 2, 2, 15)
# 1.b) by string (name of groups)
assert reg.filter(geo='Flanders').shape == (116, 2, 15)
assert reg.filter(geo='Flanders,Wallonia').shape == (116, 2, 2, 15)
# 2) aggregated array created using string groups
# -----------------------------------------------
reg = array.sum(geo=(vla_str, wal_str, bru_str))
assert reg.shape == (116, 3, 2, 15)
# the result is indexable
# 2.a) by string (def)
# assert reg.filter(geo=vla_str).shape == (116, 2, 15)
assert reg.filter(geo=(vla_str, wal_str)).shape == (116, 2, 2, 15)
# 2.b) by LGroup
# assert reg.filter(geo=fla).shape == (116, 2, 15)
# assert reg.filter(geo=(fla, wal)).shape == (116, 2, 2, 15)
def test_sum_with_groups_from_other_axis(small_array):
# use a group from another *compatible* axis
lipro2 = Axis('lipro=P01..P15')
assert small_array.sum(lipro=lipro2['P01,P03']).shape == (2,)
# use (compatible) group from another *incompatible* axis
# XXX: I am unsure whether or not this should be allowed. Maybe we
# should simply check that the group is valid in axis, but that
# will trigger a pretty meaningful error anyway
lipro3 = Axis('lipro=P01,P03,P05')
assert small_array.sum(lipro3['P01,P03']).shape == (2,)
# use a group (from another axis) which is incompatible with the axis of
# the same name in the array
lipro4 = Axis('lipro=P01,P03,P16')
with pytest.raises(ValueError, match=r"lipro\['P01', 'P16'\] is not a valid label for any axis"):
small_array.sum(lipro4['P01,P16'])
def test_agg_kwargs(array):
raw = array.data
# dtype
assert array.sum(dtype=int) == raw.sum(dtype=int)
# ddof
assert array.std(ddof=0) == raw.std(ddof=0)
# out
res = array.std(X.sex)
out = zeros_like(res)
array.std(X.sex, out=out)
assert_array_equal(res, out)
def test_agg_by(array):
age, geo, sex, lipro = array.axes
vla, wal, bru = vla_str, wal_str, bru_str
# no group or axis
assert array.sum_by().shape == ()
assert array.sum_by() == array.sum()
# a) group aggregate on a fresh array
# a.1) one group
res = array.sum_by(geo='A11,A21,A25')
assert res.shape == ()
assert res == array.sum(geo='A11,A21,A25').sum()
# a.2) a tuple of one group
res = array.sum_by(geo=(geo[:],))
assert res.shape == (1,)
assert_array_equal(res, array.sum(age, sex, lipro, geo=(geo[:],)))
# a.3) several groups
# string groups
res = array.sum_by(geo=(vla, wal, bru))
assert res.shape == (3,)
assert_array_equal(res, array.sum(age, sex, lipro, geo=(vla, wal, bru)))
# with one label in several groups
assert array.sum_by(sex=(['M'], ['M', 'F'])).shape == (2,)
assert array.sum_by(sex=('M', 'M,F')).shape == (2,)
res = array.sum_by(sex='M;M,F')
assert res.shape == (2,)
assert_array_equal(res, array.sum(age, geo, lipro, sex='M;M,F'))
# a.4) several dimensions at the same time
res = array.sum_by(geo=(vla, wal, bru, belgium), lipro='P01,P03;P02,P05;:')
assert res.shape == (4, 3)
assert_array_equal(res, array.sum(age, sex, geo=(vla, wal, bru, belgium), lipro='P01,P03;P02,P05;:'))
# b) both axis aggregate and group aggregate at the same time
# Note that you must list "full axes" aggregates first (Python does not allow positional (non-keyword) args after kwargs).
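# e.g. array.sum_by(geo=(vla, wal, bru, belgium), sex) would be a SyntaxError
# (positional argument after keyword argument), hence sex comes first below.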
res = array.sum_by(sex, geo=(vla, wal, bru, belgium))
assert res.shape == (4, 2)
assert_array_equal(res, array.sum(age, lipro, geo=(vla, wal, bru, belgium)))
# c) chain group aggregate after axis aggregate
res = array.sum_by(geo, sex)
assert res.shape == (44, 2)
assert_array_equal(res, array.sum(age, lipro))
res2 = res.sum_by(geo=(vla, wal, bru, belgium))
assert res2.shape == (4,)
assert_array_equal(res2, res.sum(sex, geo=(vla, wal, bru, belgium)))
def test_agg_igroup():
arr = ndtest(3)
res = arr.sum((X.a.i[:2], X.a.i[1:]))
assert_array_equal(res.a.labels, [':a1', 'a1:'])
def test_ratio(array):
age, geo, sex, lipro = array.axes
regions = (vla_str, wal_str, bru_str, belgium)
reg = array.sum(age, sex, regions)
assert reg.shape == (4, 15)
fla = geo[vla_str] >> 'Flanders'
wal = geo[wal_str] >> 'Wallonia'
bru = geo[bru_str] >> 'Brussels'
regions = (fla, wal, bru)
reg = array.sum(age, sex, regions)
ratio = reg.ratio()
assert_array_equal(ratio, reg / reg.sum(geo, lipro))
assert ratio.shape == (3, 15)
ratio = reg.ratio(geo)
assert_array_equal(ratio, reg / reg.sum(geo))
assert ratio.shape == (3, 15)
ratio = reg.ratio(geo, lipro)
assert_array_equal(ratio, reg / reg.sum(geo, lipro))
assert ratio.shape == (3, 15)
assert ratio.sum() == 1.0
def test_percent(array):
age, geo, sex, lipro = array.axes
regions = (vla_str, wal_str, bru_str, belgium)
reg = array.sum(age, sex, regions)
assert reg.shape == (4, 15)
fla = geo[vla_str] >> 'Flanders'
wal = geo[wal_str] >> 'Wallonia'
bru = geo[bru_str] >> 'Brussels'
regions = (fla, wal, bru)
reg = array.sum(age, sex, regions)
percent = reg.percent()
assert_array_equal(percent, (reg * 100.0 / reg.sum(geo, lipro)))
assert percent.shape == (3, 15)
percent = reg.percent(geo)
assert_array_equal(percent, (reg * 100.0 / reg.sum(geo)))
assert percent.shape == (3, 15)
percent = reg.percent(geo, lipro)
assert_array_equal(percent, (reg * 100.0 / reg.sum(geo, lipro)))
assert percent.shape == (3, 15)
assert round(abs(percent.sum() - 100.0), 7) == 0
def test_total(array):
age, geo, sex, lipro = array.axes
# array = small_array
# sex, lipro = array.axes
assert array.with_total().shape == (117, 45, 3, 16)
assert array.with_total(sex).shape == (116, 44, 3, 15)
assert array.with_total(lipro).shape == (116, 44, 2, 16)
assert array.with_total(sex, lipro).shape == (116, 44, 3, 16)
fla = geo[vla_str] >> 'Flanders'
wal = geo[wal_str] >> 'Wallonia'
bru = geo[bru_str] >> 'Brussels'
bel = geo[:] >> 'Belgium'
assert array.with_total(geo=(fla, wal, bru), op=mean).shape == (116, 47, 2, 15)
assert array.with_total((fla, wal, bru), op=mean).shape == (116, 47, 2, 15)
# works but "wrong" for X.geo (double what is expected because it includes fla wal & bru)
# TODO: we probably want to display a warning (or even an error?) in that case.
# If we really want that behavior, we can still split the operation:
# .with_total((fla, wal, bru)).with_total(X.geo)
# OR we might want to only sum the axis as it was before the op (but that does not play well when working with
# multiple axes).
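# (Sketch of why the X.geo total is "wrong" here: by the time it is computed in a1 below,
# the geo axis already contains the fla/wal/bru aggregate rows, so the grand total counts
# each region roughly twice.)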
a1 = array.with_total(X.sex, (fla, wal, bru), X.geo, X.lipro)
assert a1.shape == (116, 48, 3, 16)
# correct total but the order is not very nice
a2 = array.with_total(X.sex, X.geo, (fla, wal, bru), X.lipro)
assert a2.shape == (116, 48, 3, 16)
# the correct way to do it
a3 = array.with_total(X.sex, (fla, wal, bru, bel), X.lipro)
assert a3.shape == (116, 48, 3, 16)
# a4 = array.with_total((lipro[':P05'], lipro['P05:']), op=mean)
a4 = array.with_total((':P05', 'P05:'), op=mean)
assert a4.shape == (116, 44, 2, 17)
def test_transpose():
arr = ndtest((2, 3, 4))
a, b, c = arr.axes
res = arr.transpose()
assert res.axes == [c, b, a]
res = arr.transpose('b', 'c', 'a')
assert res.axes == [b, c, a]
res = arr.transpose('b')
assert res.axes == [b, a, c]
# using Ellipsis instead of ... to avoid a syntax error on Python 2 (where ... is only available within [])
res = arr.transpose(Ellipsis, 'a')
assert res.axes == [b, c, a]
res = arr.transpose('c', Ellipsis, 'a')
assert res.axes == [c, b, a]
def test_transpose_anonymous():
a = ndtest([Axis(2), Axis(3), Axis(4)])
# reordered = a.transpose(0, 2, 1)
# self.assertEqual(reordered.shape, (2, 4, 3))
# axes = [1, 2]
# => union(axes, )
# => axes.extend([[0]])
# => breaks because [0] not compatible with axes[0]
# => breaks because [0] not compatible with [1]
# a real union should not care and should return
# [1, 2, 0] but will this break other stuff? My gut feeling is yes
# when doing a binop between anonymous axes, we use union too (that might be the problem) and we need *that*
# union to match axes by position
reordered = a.transpose(1, 2)
assert reordered.shape == (3, 4, 2)
reordered = a.transpose(2, 0)
assert reordered.shape == (4, 2, 3)
reordered = a.transpose()
assert reordered.shape == (4, 3, 2)
def test_binary_ops(small_array):
raw = small_array.data
assert_array_equal(small_array + small_array, raw + raw)
assert_array_equal(small_array + 1, raw + 1)
assert_array_equal(1 + small_array, 1 + raw)
assert_array_equal(small_array - small_array, raw - raw)
assert_array_equal(small_array - 1, raw - 1)
assert_array_equal(1 - small_array, 1 - raw)
assert_array_equal(small_array * small_array, raw * raw)
assert_array_equal(small_array * 2, raw * 2)
assert_array_equal(2 * small_array, 2 * raw)
with np.errstate(invalid='ignore'):
raw_res = raw / raw
with pytest.warns(RuntimeWarning) as caught_warnings:
res = small_array / small_array
assert_array_nan_equal(res, raw_res)
assert len(caught_warnings) == 1
warn_msg = "invalid value (NaN) encountered during operation (this is typically caused by a 0 / 0)"
assert caught_warnings[0].message.args[0] == warn_msg
assert caught_warnings[0].filename == __file__
assert_array_equal(small_array / 2, raw / 2)
with np.errstate(divide='ignore'):
raw_res = 30 / raw
with pytest.warns(RuntimeWarning) as caught_warnings:
res = 30 / small_array
assert_array_equal(res, raw_res)
assert len(caught_warnings) == 1
assert caught_warnings[0].message.args[0] == "divide by zero encountered during operation"
assert caught_warnings[0].filename == __file__
assert_array_equal(30 / (small_array + 1), 30 / (raw + 1))
raw_int = raw.astype(int)
la_int = Array(raw_int, axes=(sex, lipro))
assert_array_equal(la_int / 2, raw_int / 2)
assert_array_equal(la_int // 2, raw_int // 2)
# test adding two larrays with different axes order
assert_array_equal(small_array + small_array.transpose(), raw * 2)
# mixed operations
raw2 = raw / 2
la_raw2 = small_array - raw2
assert la_raw2.axes == small_array.axes
assert_array_equal(la_raw2, raw - raw2)
raw2_la = raw2 - small_array
assert raw2_la.axes == small_array.axes
assert_array_equal(raw2_la, raw2 - raw)
la_ge_raw2 = small_array >= raw2
assert la_ge_raw2.axes == small_array.axes
assert_array_equal(la_ge_raw2, raw >= raw2)
raw2_ge_la = raw2 >= small_array
assert raw2_ge_la.axes == small_array.axes
assert_array_equal(raw2_ge_la, raw2 >= raw)
def test_binary_ops_no_name_axes(small_array):
raw = small_array.data
raw2 = small_array.data + 1
la = ndtest([Axis(l) for l in small_array.shape])
la2 = ndtest([Axis(l) for l in small_array.shape]) + 1
assert_array_equal(la + la2, raw + raw2)
assert_array_equal(la + 1, raw + 1)
assert_array_equal(1 + la, 1 + raw)
assert_array_equal(la - la2, raw - raw2)
assert_array_equal(la - 1, raw - 1)
assert_array_equal(1 - la, 1 - raw)
assert_array_equal(la * la2, raw * raw2)
assert_array_equal(la * 2, raw * 2)
assert_array_equal(2 * la, 2 * raw)
assert_array_nan_equal(la / la2, raw / raw2)
assert_array_equal(la / 2, raw / 2)
with np.errstate(divide='ignore'):
raw_res = 30 / raw
with pytest.warns(RuntimeWarning) as caught_warnings:
res = 30 / la
assert_array_equal(res, raw_res)
assert len(caught_warnings) == 1
assert caught_warnings[0].message.args[0] == "divide by zero encountered during operation"
assert caught_warnings[0].filename == __file__
assert_array_equal(30 / (la + 1), 30 / (raw + 1))
raw_int = raw.astype(int)
la_int = Array(raw_int)
assert_array_equal(la_int / 2, raw_int / 2)
assert_array_equal(la_int // 2, raw_int // 2)
# adding two larrays with different axes order cannot work with unnamed axes
# assert_array_equal(la + la.transpose(), raw * 2)
# mixed operations
raw2 = raw / 2
la_raw2 = la - raw2
assert la_raw2.axes == la.axes
assert_array_equal(la_raw2, raw - raw2)
raw2_la = raw2 - la
assert raw2_la.axes == la.axes
assert_array_equal(raw2_la, raw2 - raw)
la_ge_raw2 = la >= raw2
assert la_ge_raw2.axes == la.axes
assert_array_equal(la_ge_raw2, raw >= raw2)
raw2_ge_la = raw2 >= la
assert raw2_ge_la.axes == la.axes
assert_array_equal(raw2_ge_la, raw2 >= raw)
def test_broadcasting_no_name():
a = ndtest([Axis(2), Axis(3)])
b = ndtest(Axis(3))
c = ndtest(Axis(2))
with pytest.raises(ValueError):
# ValueError: incompatible axes:
# Axis(None, [0, 1, 2])
# vs
# Axis(None, [0, 1])
a * b
d = a * c
assert d.shape == (2, 3)
# {0}*\{1}* 0 1 2
# 0 0 0 0
# 1 3 4 5
assert np.array_equal(d, [[0, 0, 0],
[3, 4, 5]])
# it is unfortunate that the behavior is different from numpy (even though I find our behavior more intuitive)
d = np.asarray(a) * np.asarray(b)
assert d.shape == (2, 3)
assert np.array_equal(d, [[0, 1, 4],
[0, 4, 10]])
with pytest.raises(ValueError):
# ValueError: operands could not be broadcast together with shapes (2,3) (2,)
np.asarray(a) * np.asarray(c)
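# Sketch of the difference, as far as these examples show: larray appears to align the
# anonymous axis of the 1D operand with the *first* axis of the 2D one (hence (2, 3) * (2,)
# works and (2, 3) * (3,) fails), whereas numpy broadcasts by aligning *trailing* dimensions,
# so the opposite combination is the valid one there.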
def test_binary_ops_with_scalar_group():
time = Axis('time=2015..2019')
arr = ndtest(3)
expected = arr + 2015
assert_larray_equal(time.i[0] + arr, expected)
assert_larray_equal(arr + time.i[0], expected)
def test_unary_ops(small_array):
raw = small_array.data
# using numpy functions
assert_array_equal(np.abs(small_array - 10), np.abs(raw - 10))
assert_array_equal(np.negative(small_array), np.negative(raw))
assert_array_equal(np.invert(small_array), np.invert(raw))
# using python builtin ops
assert_array_equal(abs(small_array - 10), abs(raw - 10))
assert_array_equal(-small_array, -raw)
assert_array_equal(+small_array, +raw)
assert_array_equal(~small_array, ~raw)
def test_mean(small_array):
raw = small_array.data
sex, lipro = small_array.axes
assert_array_equal(small_array.mean(lipro), raw.mean(1))
def test_sequence():
res = sequence('b=b0..b2', ndtest(3) * 3, 1.0)
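# a sketch of what is checked, assuming sequence(axis, initial, inc) semantics:
# starting from initial = ndtest(3) * 3 = [0, 3, 6] and adding inc=1.0 for each
# successive 'b' label gives [[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]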
assert_array_equal(ndtest((3, 3), dtype=float), res)
def test_sort_values():
# 1D arrays
arr = Array([0, 1, 6, 3, -1], "a=a0..a4")
res = arr.sort_values()
expected = Array([-1, 0, 1, 3, 6], "a=a4,a0,a1,a3,a2")
assert_array_equal(res, expected)
# ascending arg
res = arr.sort_values(ascending=False)
expected = Array([6, 3, 1, 0, -1], "a=a2,a3,a1,a0,a4")
assert_array_equal(res, expected)
# 3D arrays
arr = Array([[[10, 2, 4], [3, 7, 1]], [[5, 1, 6], [2, 8, 9]]],
'a=a0,a1; b=b0,b1; c=c0..c2')
res = arr.sort_values(axis='c')
expected = Array([[[2, 4, 10], [1, 3, 7]], [[1, 5, 6], [2, 8, 9]]],
[Axis('a=a0,a1'), Axis('b=b0,b1'), Axis(3, 'c')])
assert_array_equal(res, expected)
def test_set_labels(small_array):
small_array.set_labels(X.sex, ['Man', 'Woman'], inplace=True)
expected = small_array.set_labels(X.sex, ['Man', 'Woman'])
assert_array_equal(small_array, expected)
def test_set_axes(small_array):
lipro2 = Axis([l.replace('P', 'Q') for l in lipro.labels], 'lipro2')
sex2 = Axis(['Man', 'Woman'], 'sex2')
la = Array(small_array.data, axes=(sex, lipro2))
# replace one axis
la2 = small_array.set_axes(X.lipro, lipro2)
assert_array_equal(la, la2)
la = Array(small_array.data, axes=(sex2, lipro2))
# all at once
la2 = small_array.set_axes([sex2, lipro2])
assert_array_equal(la, la2)
# using keyword args
la2 = small_array.set_axes(sex=sex2, lipro=lipro2)
assert_array_equal(la, la2)
# using dict
la2 = small_array.set_axes({X.sex: sex2, X.lipro: lipro2})
assert_array_equal(la, la2)
# using list of pairs (axis_to_replace, new_axis)
la2 = small_array.set_axes([(X.sex, sex2), (X.lipro, lipro2)])
assert_array_equal(la, la2)
def test_reindex():
arr = ndtest((2, 2))
res = arr.reindex(X.b, ['b1', 'b2', 'b0'], fill_value=-1)
assert_array_equal(res, from_string("""a\\b b1 b2 b0
a0 1 -1 0
a1 3 -1 2"""))
arr2 = ndtest((2, 2))
arr2.reindex(X.b, ['b1', 'b2', 'b0'], fill_value=-1, inplace=True)
assert_array_equal(arr2, from_string("""a\\b b1 b2 b0
a0 1 -1 0
a1 3 -1 2"""))
# Array fill value
filler = ndtest(arr.a)
res = arr.reindex(X.b, ['b1', 'b2', 'b0'], fill_value=filler)
assert_array_equal(res, from_string("""a\\b b1 b2 b0
a0 1 0 0
a1 3 1 2"""))
# using labels from another array
arr = ndtest('a=v0..v2;b=v0,v2,v1,v3')
res = arr.reindex('a', arr.b.labels, fill_value=-1)
assert_array_equal(res, from_string("""a\\b v0 v2 v1 v3
v0 0 1 2 3
v2 8 9 10 11
v1 4 5 6 7
v3 -1 -1 -1 -1"""))
res = arr.reindex('a', arr.b, fill_value=-1)
assert_array_equal(res, from_string("""a\\b v0 v2 v1 v3
v0 0 1 2 3
v2 8 9 10 11
v1 4 5 6 7
v3 -1 -1 -1 -1"""))
# passing a list of Axis
arr = ndtest((2, 2))
res = arr.reindex([Axis("a=a0,a1"), Axis("c=c0"), Axis("b=b1,b2")], fill_value=-1)
assert_array_equal(res, from_string(""" a b\\c c0
a0 b1 1
a0 b2 -1
a1 b1 3
a1 b2 -1"""))
def test_expand():
country = Axis("country=BE,FR,DE")
arr = ndtest(country)
out1 = empty((sex, country))
arr.expand(out=out1)
out2 = empty((sex, country))
out2[:] = arr
assert_array_equal(out1, out2)
def test_append(small_array):
sex, lipro = small_array.axes
small_array = small_array.append(lipro, small_array.sum(lipro), label='sum')
assert small_array.shape == (2, 16)
small_array = small_array.append(sex, small_array.sum(sex), label='sum')
assert small_array.shape == (3, 16)
# crap the sex axis is different !!!! we don't have this problem with
# the kwargs syntax below
# small_array = small_array.append(small_array.mean(sex), axis=sex, label='mean')
# self.assertEqual(small_array.shape, (4, 16))
# another syntax (which implies we could not have an axis named "label")
# small_array = small_array.append(lipro=small_array.sum(lipro), label='sum')
# self.assertEqual(small_array.shape, (117, 44, 2, 15))
def test_insert():
# simple tests are in the docstring
arr1 = ndtest((2, 3))
# insert at multiple places at once
# we cannot use from_string in these tests because it deduplicates ambiguous (column) labels automatically
res = arr1.insert([42, 43], before='b1', label='new')
assert_array_equal(res, from_lists([
['a\\b', 'b0', 'new', 'new', 'b1', 'b2'],
['a0', 0, 42, 43, 1, 2],
['a1', 3, 42, 43, 4, 5]]))
res = arr1.insert(42, before=['b1', 'b2'], label='new')
assert_array_equal(res, from_lists([
['a\\b', 'b0', 'new', 'b1', 'new', 'b2'],
['a0', 0, 42, 1, 42, 2],
['a1', 3, 42, 4, 42, 5]]))
res = arr1.insert(42, before='b1', label=['b0.1', 'b0.2'])
assert_array_equal(res, from_string(r"""
a\b b0 b0.1 b0.2 b1 b2
a0 0 42 42 1 2
a1 3 42 42 4 5"""))
res = arr1.insert(42, before=['b1', 'b2'], label=['b0.5', 'b1.5'])
assert_array_equal(res, from_string(r"""
a\b b0 b0.5 b1 b1.5 b2
a0 0 42 1 42 2
a1 3 42 4 42 5"""))
res = arr1.insert([42, 43], before='b1', label=['b0.1', 'b0.2'])
assert_array_equal(res, from_string(r"""
a\b b0 b0.1 b0.2 b1 b2
a0 0 42 43 1 2
a1 3 42 43 4 5"""))
res = arr1.insert([42, 43], before=['b1', 'b2'], label='new')
assert_array_equal(res, from_lists([
['a\\b', 'b0', 'new', 'b1', 'new', 'b2'],
[ 'a0', 0, 42, 1, 43, 2],
[ 'a1', 3, 42, 4, 43, 5]]))
res = arr1.insert([42, 43], before=['b1', 'b2'], label=['b0.5', 'b1.5'])
assert_array_equal(res, from_string(r"""
a\b b0 b0.5 b1 b1.5 b2
a0 0 42 1 43 2
a1 3 42 4 43 5"""))
res = arr1.insert([42, 43], before='b1,b2', label=['b0.5', 'b1.5'])
assert_array_equal(res, from_string(r"""
a\b b0 b0.5 b1 b1.5 b2
a0 0 42 1 43 2
a1 3 42 4 43 5"""))
arr2 = ndtest(2)
res = arr1.insert([arr2 + 42, arr2 + 43], before=['b1', 'b2'], label=['b0.5', 'b1.5'])
assert_array_equal(res, from_string(r"""
a\b b0 b0.5 b1 b1.5 b2
a0 0 42 1 43 2
a1 3 43 4 44 5"""))
arr3 = ndtest('a=a0,a1;b=b0.1,b0.2') + 42
res = arr1.insert(arr3, before='b1,b2')
assert_array_equal(res, from_string(r"""
a\b b0 b0.1 b1 b0.2 b2
a0 0 42 1 43 2
a1 3 44 4 45 5"""))
# with ambiguous labels
arr4 = ndtest('a=v0,v1;b=v0,v1')
expected = from_string(r"""
a\b v0 v0.5 v1
v0 0 42 1
v1 2 42 3""")
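# the three calls below are equivalent ways to refer to label 'v1' of axis 'b'
# specifically (plain 'v1' would be ambiguous since it also exists on axis 'a')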
res = arr4.insert(42, before='b[v1]', label='v0.5')
assert_array_equal(res, expected)
res = arr4.insert(42, before=X.b['v1'], label='v0.5')
assert_array_equal(res, expected)
res = arr4.insert(42, before=arr4.b['v1'], label='v0.5')
assert_array_equal(res, expected)
def test_drop():
arr1 = ndtest(3)
expected = Array([0, 2], 'a=a0,a2')
# indices
res = arr1.drop('a.i[1]')
assert_array_equal(res, expected)
res = arr1.drop(X.a.i[1])
assert_array_equal(res, expected)
# labels
res = arr1.drop(X.a['a1'])
assert_array_equal(res, expected)
res = arr1.drop('a[a1]')
assert_array_equal(res, expected)
# 2D array
arr2 = ndtest((2, 4))
expected = from_string(r"""
a\b b0 b2
a0 0 2
a1 4 6""")
res = arr2.drop(['b1', 'b3'])
assert_array_equal(res, expected)
res = arr2.drop(X.b['b1', 'b3'])
assert_array_equal(res, expected)
res = arr2.drop('b.i[1, 3]')
assert_array_equal(res, expected)
res = arr2.drop(X.b.i[1, 3])
assert_array_equal(res, expected)
a = Axis('a=label0..label2')
b = Axis('b=label0..label2')
arr3 = ndtest((a, b))
res = arr3.drop('a[label1]')
assert_array_equal(res, from_string(r"""
a\b label0 label1 label2
label0 0 1 2
label2 6 7 8"""))
# XXX: implement the following (#671)?
# res = arr3.drop('0[label1]')
res = arr3.drop(X[0]['label1'])
assert_array_equal(res, from_string(r"""
a\b label0 label1 label2
label0 0 1 2
label2 6 7 8"""))
res = arr3.drop(a['label1'])
assert_array_equal(res, from_string(r"""
a\b label0 label1 label2
label0 0 1 2
label2 6 7 8"""))
# the aim of this test is to drop the last value of an axis, but instead
# of dropping the last axis tick/label, drop the first one.
def test_shift_axis(small_array):
sex, lipro = small_array.axes
# TODO: check how awful the syntax is with an axis that is not last
# or first
l2 = Array(small_array[:, :'P14'], axes=[sex, Axis(lipro.labels[1:], 'lipro')])
l2 = Array(small_array[:, :'P14'], axes=[sex, lipro.subaxis(slice(1, None))])
# We can also modify the axis in-place (dangerous!)
# lipro.labels = np.append(lipro.labels[1:], lipro.labels[0])
l2 = small_array[:, 'P02':]
l2.axes.lipro.labels = lipro.labels[1:]
def test_unique():
arr = Array([[[0, 2, 0, 0],
[1, 1, 1, 0]],
[[0, 2, 0, 0],
[2, 1, 2, 0]]], 'a=a0,a1;b=b0,b1;c=c0..c3')
assert_array_equal(arr.unique('a'), arr)
assert_array_equal(arr.unique('b'), arr)
assert_array_equal(arr.unique('c'), arr['c0,c1,c3'])
expected = from_string("""\
a_b\\c c0 c1 c2 c3
a0_b0 0 2 0 0
a0_b1 1 1 1 0
a1_b1 2 1 2 0""")
assert_array_equal(arr.unique(('a', 'b')), expected)
def test_extend(small_array):
sex, lipro = small_array.axes
all_lipro = lipro[:]
tail = small_array.sum(lipro=(all_lipro,))
small_array = small_array.extend(lipro, tail)
assert small_array.shape == (2, 16)
# test with a string axis
small_array = small_array.extend('sex', small_array.sum(sex=(sex[:],)))
assert small_array.shape == (3, 16)
@needs_pytables
def test_hdf_roundtrip(tmpdir, meta):
a = ndtest((2, 3), meta=meta)
fpath = tmp_path(tmpdir, 'test.h5')
a.to_hdf(fpath, 'a')
res = read_hdf(fpath, 'a')
assert a.ndim == 2
assert a.shape == (2, 3)
assert a.axes.names == ['a', 'b']
assert_array_equal(res, a)
assert res.meta == a.meta
# issue 72: int-like strings should not be parsed (should round-trip correctly)
fpath = tmp_path(tmpdir, 'issue72.h5')
a = from_lists([['axis', '10', '20'],
['', 0, 1]])
a.to_hdf(fpath, 'a')
res = read_hdf(fpath, 'a')
assert res.ndim == 1
axis = res.axes[0]
assert axis.name == 'axis'
assert_array_equal(axis.labels, ['10', '20'])
# passing group as key to to_hdf
a3 = ndtest((4, 3, 4))
fpath = tmp_path(tmpdir, 'test.h5')
os.remove(fpath)
# single element group
for label in a3.a:
a3[label].to_hdf(fpath, label)
# unnamed group
group = a3.c['c0,c2']
a3[group].to_hdf(fpath, group)
# unnamed group + slice
group = a3.c['c0::2']
a3[group].to_hdf(fpath, group)
# named group
group = a3.c['c0,c2'] >> 'even'
a3[group].to_hdf(fpath, group)
# group with name containing special characters (replaced by _)
group = a3.c['c0,c2'] >> r':name?with*special/\[characters]'
a3[group].to_hdf(fpath, group)
# passing group as key to read_hdf
for label in a3.a:
subset = read_hdf(fpath, label)
assert_array_equal(subset, a3[label])
# load Session
from larray.core.session import Session
s = Session(fpath)
assert s.names == sorted(['a0', 'a1', 'a2', 'a3', 'c0,c2', 'c0::2', 'even', ':name?with*special__[characters]'])
def test_from_string():
expected = ndtest("sex=M,F")
res = from_string('''sex M F
\t 0 1''')
assert_array_equal(res, expected)
res = from_string('''sex M F
nan 0 1''')
assert_array_equal(res, expected)
res = from_string('''sex M F
NaN 0 1''')
assert_array_equal(res, expected)
def test_read_csv():
res = read_csv(inputpath('test1d.csv'))
assert_array_equal(res, io_1d)
res = read_csv(inputpath('test2d.csv'))
assert_array_equal(res, io_2d)
res = read_csv(inputpath('test3d.csv'))
assert_array_equal(res, io_3d)
res = read_csv(inputpath('testint_labels.csv'))
assert_array_equal(res, io_int_labels)
res = read_csv(inputpath('test2d_classic.csv'))
assert_array_equal(res, ndtest("a=a0..a2; b0..b2"))
la = read_csv(inputpath('test1d_liam2.csv'), dialect='liam2')
assert la.ndim == 1
assert la.shape == (3,)
assert la.axes.names == ['time']
assert_array_equal(la, [3722, 3395, 3347])
la = read_csv(inputpath('test5d_liam2.csv'), dialect='liam2')
assert la.ndim == 5
assert la.shape == (2, 5, 2, 2, 3)
assert la.axes.names == ['arr', 'age', 'sex', 'nat', 'time']
assert_array_equal(la[X.arr[1], 0, 'F', X.nat[1], :], [3722, 3395, 3347])
# missing values
res = read_csv(inputpath('testmissing_values.csv'))
assert_array_nan_equal(res, io_missing_values)
# test StringIO
res = read_csv(StringIO('a,1,2\n,0,1\n'))
assert_array_equal(res, ndtest('a=1,2'))
# sort_columns=True
res = read_csv(StringIO('a,a2,a0,a1\n,2,0,1\n'), sort_columns=True)
assert_array_equal(res, ndtest(3))
#################
# narrow format #
#################
res = read_csv(inputpath('test1d_narrow.csv'), wide=False)
assert_array_equal(res, io_1d)
res = read_csv(inputpath('test2d_narrow.csv'), wide=False)
assert_array_equal(res, io_2d)
res = read_csv(inputpath('test3d_narrow.csv'), wide=False)
assert_array_equal(res, io_3d)
# missing values
res = read_csv(inputpath('testmissing_values_narrow.csv'), wide=False)
assert_array_nan_equal(res, io_narrow_missing_values)
# unsorted values
res = read_csv(inputpath('testunsorted_narrow.csv'), wide=False)
assert_array_equal(res, io_unsorted)
def test_read_eurostat():
la = read_eurostat(inputpath('test5d_eurostat.csv'))
assert la.ndim == 5
assert la.shape == (2, 5, 2, 2, 3)
assert la.axes.names == ['arr', 'age', 'sex', 'nat', 'time']
# FIXME: integer labels should be parsed as such
assert_array_equal(la[X.arr['1'], '0', 'F', X.nat['1'], :],
[3722, 3395, 3347])
@needs_xlwings
def test_read_excel_xlwings():
arr = read_excel(inputpath('test.xlsx'), '1d')
assert_array_equal(arr, io_1d)
arr = read_excel(inputpath('test.xlsx'), '2d')
assert_array_equal(arr, io_2d)
arr = read_excel(inputpath('test.xlsx'), '2d_classic')
assert_array_equal(arr, ndtest("a=a0..a2; b0..b2"))
arr = read_excel(inputpath('test.xlsx'), '2d_classic', nb_axes=2)
assert_array_equal(arr, ndtest("a=a0..a2; b0..b2"))
arr = read_excel(inputpath('test.xlsx'), '3d')
assert_array_equal(arr, io_3d)
# for > 2d, specifying nb_axes is required if there is no name for the horizontal axis
arr = read_excel(inputpath('test.xlsx'), '3d_classic', nb_axes=3)
assert_array_equal(arr, ndtest("a=1..3; b=b0,b1; c0..c2"))
arr = read_excel(inputpath('test.xlsx'), 'int_labels')
assert_array_equal(arr, io_int_labels)
# passing a Group as sheet arg
axis = Axis('dim=1d,2d,3d,5d')
arr = read_excel(inputpath('test.xlsx'), axis['1d'])
assert_array_equal(arr, io_1d)
# missing rows, default fill_value
arr = read_excel(inputpath('test.xlsx'), 'missing_values')
expected = ndtest("a=1..3; b=b0,b1; c=c0..c2", dtype=float)
expected[2, 'b0'] = nan
expected[3, 'b1'] = nan
assert_array_nan_equal(arr, expected)
# missing rows + fill_value argument
arr = read_excel(inputpath('test.xlsx'), 'missing_values', fill_value=42)
expected = ndtest("a=1..3; b=b0,b1; c=c0..c2", dtype=float)
expected[2, 'b0'] = 42
expected[3, 'b1'] = 42
assert_array_equal(arr, expected)
# range
arr = read_excel(inputpath('test.xlsx'), 'position', range='D3:H9')
assert_array_equal(arr, io_3d)
#################
# narrow format #
#################
arr = read_excel(inputpath('test_narrow.xlsx'), '1d', wide=False)
assert_array_equal(arr, io_1d)
arr = read_excel(inputpath('test_narrow.xlsx'), '2d', wide=False)
assert_array_equal(arr, io_2d)
arr = read_excel(inputpath('test_narrow.xlsx'), '3d', wide=False)
assert_array_equal(arr, io_3d)
# missing rows + fill_value argument
arr = read_excel(inputpath('test_narrow.xlsx'), 'missing_values', fill_value=42, wide=False)
expected = io_narrow_missing_values.copy()
expected[isnan(expected)] = 42
assert_array_equal(arr, expected)
# unsorted values
arr = read_excel(inputpath('test_narrow.xlsx'), 'unsorted', wide=False)
assert_array_equal(arr, io_unsorted)
# range
arr = read_excel(inputpath('test_narrow.xlsx'), 'position', range='D3:G21', wide=False)
assert_array_equal(arr, io_3d)
##############################
# invalid keyword argument #
##############################
with pytest.raises(TypeError, match="'dtype' is an invalid keyword argument for this function "
"when using the xlwings backend"):
read_excel(inputpath('test.xlsx'), engine='xlwings', dtype=float)
#################
# blank cells #
#################
# Excel sheet with blank cells on right/bottom border of the array to read
fpath = inputpath('test_blank_cells.xlsx')
good = read_excel(fpath, 'good')
bad1 = read_excel(fpath, 'blanksafter_morerowsthancols')
bad2 = read_excel(fpath, 'blanksafter_morecolsthanrows')
assert_array_equal(bad1, good)
assert_array_equal(bad2, good)
# with additional empty column in the middle of the array to read
good2 = ndtest('a=a0,a1;b=2003..2006').astype(object)
good2[2005] = None
good2 = good2.set_axes('b', Axis([2003, 2004, None, 2006], 'b'))
bad3 = read_excel(fpath, 'middleblankcol')
bad4 = read_excel(fpath, '16384col')
assert_array_equal(bad3, good2)
assert_array_equal(bad4, good2)
@needs_xlrd
def test_read_excel_pandas():
arr = read_excel(inputpath('test.xlsx'), '1d', engine='xlrd')
assert_array_equal(arr, io_1d)
arr = read_excel(inputpath('test.xlsx'), '2d', engine='xlrd')
assert_array_equal(arr, io_2d)
arr = read_excel(inputpath('test.xlsx'), '2d', nb_axes=2, engine='xlrd')
assert_array_equal(arr, io_2d)
arr = read_excel(inputpath('test.xlsx'), '2d_classic', engine='xlrd')
assert_array_equal(arr, ndtest("a=a0..a2; b0..b2"))
arr = read_excel(inputpath('test.xlsx'), '2d_classic', nb_axes=2, engine='xlrd')
assert_array_equal(arr, ndtest("a=a0..a2; b0..b2"))
arr = read_excel(inputpath('test.xlsx'), '3d', index_col=[0, 1], engine='xlrd')
assert_array_equal(arr, io_3d)
arr = read_excel(inputpath('test.xlsx'), '3d', engine='xlrd')
assert_array_equal(arr, io_3d)
# for > 2d, specifying nb_axes is required if there is no name for the horizontal axis
arr = read_excel(inputpath('test.xlsx'), '3d_classic', nb_axes=3, engine='xlrd')
assert_array_equal(arr, ndtest("a=1..3; b=b0,b1; c0..c2"))
arr = read_excel(inputpath('test.xlsx'), 'int_labels', engine='xlrd')
assert_array_equal(arr, io_int_labels)
# passing a Group as sheet arg
axis = Axis('dim=1d,2d,3d,5d')
arr = read_excel(inputpath('test.xlsx'), axis['1d'], engine='xlrd')
assert_array_equal(arr, io_1d)
# missing rows + fill_value argument
arr = read_excel(inputpath('test.xlsx'), 'missing_values', fill_value=42, engine='xlrd')
expected = io_missing_values.copy()
expected[isnan(expected)] = 42
assert_array_equal(arr, expected)
#################
# narrow format #
#################
arr = read_excel(inputpath('test_narrow.xlsx'), '1d', wide=False, engine='xlrd')
assert_array_equal(arr, io_1d)
arr = read_excel(inputpath('test_narrow.xlsx'), '2d', wide=False, engine='xlrd')
assert_array_equal(arr, io_2d)
arr = read_excel(inputpath('test_narrow.xlsx'), '3d', wide=False, engine='xlrd')
assert_array_equal(arr, io_3d)
# missing rows + fill_value argument
arr = read_excel(inputpath('test_narrow.xlsx'), 'missing_values',
fill_value=42, wide=False, engine='xlrd')
expected = io_narrow_missing_values.copy()
expected[isnan(expected)] = 42
assert_array_equal(arr, expected)
# unsorted values
arr = read_excel(inputpath('test_narrow.xlsx'), 'unsorted', wide=False, engine='xlrd')
assert_array_equal(arr, io_unsorted)
#################
# blank cells #
#################
# Excel sheet with blank cells on right/bottom border of the array to read
fpath = inputpath('test_blank_cells.xlsx')
good1 = read_excel(fpath, 'good', engine='xlrd')
bad1 = read_excel(fpath, 'blanksafter_morerowsthancols', engine='xlrd')
bad2 = read_excel(fpath, 'blanksafter_morecolsthanrows', engine='xlrd')
assert_array_equal(bad1, good1)
assert_array_equal(bad2, good1)
# with additional empty column in the middle of the array to read
good2 = ndtest('a=a0,a1;b=2003..2006').astype(float)
good2[2005] = nan
good2 = good2.set_axes('b', Axis([2003, 2004, 'Unnamed: 3', 2006], 'b'))
bad3 = read_excel(fpath, 'middleblankcol', engine='xlrd')
bad4 = read_excel(fpath, '16384col', engine='xlrd')
assert_array_nan_equal(bad3, good2)
assert_array_nan_equal(bad4, good2)
def test_from_lists():
simple_arr = ndtest((2, 2, 3))
# simple
arr_list = [['a', 'b\\c', 'c0', 'c1', 'c2'],
['a0', 'b0', 0, 1, 2],
['a0', 'b1', 3, 4, 5],
['a1', 'b0', 6, 7, 8],
['a1', 'b1', 9, 10, 11]]
res = from_lists(arr_list)
assert_array_equal(res, simple_arr)
# simple (using dump). This should be the same test as above.
# We just make sure dump() and from_lists() round-trip correctly.
arr_list = simple_arr.dump()
res = from_lists(arr_list)
assert_array_equal(res, simple_arr)
# with anonymous axes
arr_anon = simple_arr.rename({0: None, 1: None, 2: None})
arr_list = arr_anon.dump()
assert arr_list == [[None, None, 'c0', 'c1', 'c2'],
['a0', 'b0', 0, 1, 2],
['a0', 'b1', 3, 4, 5],
['a1', 'b0', 6, 7, 8],
['a1', 'b1', 9, 10, 11]]
res = from_lists(arr_list, nb_axes=3)
assert_array_equal(res, arr_anon)
# with empty ('') axes names
arr_empty_names = simple_arr.rename({0: '', 1: '', 2: ''})
arr_list = arr_empty_names.dump()
assert arr_list == [[ '', '', 'c0', 'c1', 'c2'],
['a0', 'b0', 0, 1, 2],
['a0', 'b1', 3, 4, 5],
['a1', 'b0', 6, 7, 8],
['a1', 'b1', 9, 10, 11]]
res = from_lists(arr_list, nb_axes=3)
# this is purposefully NOT arr_empty_names because from_lists (via df_asarray) transforms '' axes to None
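# (i.e. res.axes.names should end up as [None, None, None] rather than ['', '', ''])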
assert_array_equal(res, arr_anon)
# sort_rows
arr = from_lists([['sex', 'nat\\year', 1991, 1992, 1993],
['F', 'BE', 0, 0, 1],
['F', 'FO', 0, 0, 2],
['M', 'BE', 1, 0, 0],
['M', 'FO', 2, 0, 0]])
sorted_arr = from_lists([['sex', 'nat\\year', 1991, 1992, 1993],
['M', 'BE', 1, 0, 0],
['M', 'FO', 2, 0, 0],
['F', 'BE', 0, 0, 1],
['F', 'FO', 0, 0, 2]], sort_rows=True)
assert_array_equal(sorted_arr, arr)
# sort_columns
arr = from_lists([['sex', 'nat\\year', 1991, 1992, 1993],
['M', 'BE', 1, 0, 0],
['M', 'FO', 2, 0, 0],
['F', 'BE', 0, 0, 1],
['F', 'FO', 0, 0, 2]])
sorted_arr = from_lists([['sex', 'nat\\year', 1992, 1991, 1993],
['M', 'BE', 0, 1, 0],
['M', 'FO', 0, 2, 0],
['F', 'BE', 0, 0, 1],
['F', 'FO', 0, 0, 2]], sort_columns=True)
assert_array_equal(sorted_arr, arr)
def test_from_series():
# Series with Index as index
expected = ndtest(3)
s = pd.Series([0, 1, 2], index=pd.Index(['a0', 'a1', 'a2'], name='a'))
assert_array_equal(from_series(s), expected)
s = pd.Series([2, 0, 1], index=pd.Index(['a2', 'a0', 'a1'], name='a'))
assert_array_equal(from_series(s, sort_rows=True), expected)
expected = ndtest(3)[['a2', 'a0', 'a1']]
assert_array_equal(from_series(s), expected)
# Series with MultiIndex as index
age = Axis('age=0..3')
gender = Axis('gender=M,F')
time = Axis('time=2015..2017')
expected = ndtest((age, gender, time))
index = pd.MultiIndex.from_product(expected.axes.labels, names=expected.axes.names)
data = expected.data.flatten()
s = pd.Series(data, index)
res = from_series(s)
assert_array_equal(res, expected)
res = from_series(s, sort_rows=True)
assert_array_equal(res, expected.sort_axes())
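# fill_value: drop the (age=0, gender='F') rows (positions 3-5 of the product index)
# and check that they come back as -1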
expected[0, 'F'] = -1
s = s.reset_index().drop([3, 4, 5]).set_index(['age', 'gender', 'time'])[0]
res = from_series(s, fill_value=-1)
assert_array_equal(res, expected)
def test_from_frame():
# 1) data = scalar
# ================
# Dataframe becomes 1D Array
data = np.array([10])
index = ['i0']
columns = ['c0']
axis_index, axis_columns = Axis(index), Axis(columns)
df = pd.DataFrame(data, index=index, columns=columns)
assert df.index.name is None
assert df.columns.name is None
assert list(df.index.values) == index
assert list(df.columns.values) == columns
# anonymous indexes/columns
# input dataframe:
# ----------------
# c0
# i0 10
# output Array:
# -------------
# {0}\{1} c0
# i0 10
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (1, 1)
assert la.axes.names == [None, None]
assert list(la.axes.labels[0]) == index
assert list(la.axes.labels[1]) == columns
expected_la = Array(data.reshape((1, 1)), [axis_index, axis_columns])
assert_array_equal(la, expected_la)
# anonymous columns
# input dataframe:
# ----------------
# c0
# index
# i0 10
# output Array:
# -------------
# index\{1} c0
# i0 10
df.index.name, df.columns.name = 'index', None
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (1, 1)
assert la.axes.names == ['index', None]
assert list(la.axes.labels[0]) == index
assert list(la.axes.labels[1]) == columns
expected_la = Array(data.reshape((1, 1)), [axis_index.rename('index'), axis_columns])
assert_array_equal(la, expected_la)
# anonymous columns/non string row axis name
# input dataframe:
# ----------------
# c0
# 0
# i0 10
# output Array:
# -------------
# 0\{1} c0
# i0 10
df = pd.DataFrame([10], index=pd.Index(['i0'], name=0), columns=['c0'])
res = from_frame(df)
expected = Array([[10]], [Axis(['i0'], name=0), Axis(['c0'])])
assert res.ndim == 2
assert res.shape == (1, 1)
assert res.axes.names == [0, None]
assert_array_equal(res, expected)
# anonymous index
# input dataframe:
# ----------------
# columns c0
# i0 10
# output Array:
# -------------
# {0}\columns c0
# i0 10
df.index.name, df.columns.name = None, 'columns'
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (1, 1)
assert la.axes.names == [None, 'columns']
assert list(la.axes.labels[0]) == index
assert list(la.axes.labels[1]) == columns
expected_la = Array(data.reshape((1, 1)), [axis_index, axis_columns.rename('columns')])
assert_array_equal(la, expected_la)
# index and columns with name
# input dataframe:
# ----------------
# columns c0
# index
# i0 10
# output Array:
# -------------
# index\columns c0
# i0 10
df.index.name, df.columns.name = 'index', 'columns'
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (1, 1)
assert la.axes.names == ['index', 'columns']
assert list(la.axes.labels[0]) == index
assert list(la.axes.labels[1]) == columns
expected_la = Array(data.reshape((1, 1)), [axis_index.rename('index'), axis_columns.rename('columns')])
assert_array_equal(la, expected_la)
# 2) data = vector
# ================
size = 3
# 2A) data = horizontal vector (1 x N)
# ====================================
# Dataframe becomes 1D Array
data = np.arange(size)
indexes = ['i0']
columns = ['c{}'.format(i) for i in range(size)]
axis_index, axis_columns = Axis(indexes), Axis(columns)
df = pd.DataFrame(data.reshape(1, size), index=indexes, columns=columns)
assert df.index.name is None
assert df.columns.name is None
assert list(df.index.values) == indexes
assert list(df.columns.values) == columns
# anonymous indexes/columns
# input dataframe:
# ----------------
# c0 c1 c2
# i0 0 1 2
# output Array:
# -------------
# {0}\{1} c0 c1 c2
# i0 0 1 2
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (1, size)
assert la.axes.names == [None, None]
assert list(la.axes.labels[0]) == index
assert list(la.axes.labels[1]) == columns
expected_la = Array(data.reshape((1, size)), [axis_index, axis_columns])
assert_array_equal(la, expected_la)
# anonymous columns
# input dataframe:
# ----------------
# c0 c1 c2
# index
# i0 0 1 2
# output Array:
# -------------
# index\{1} c0 c1 c2
# i0 0 1 2
df.index.name, df.columns.name = 'index', None
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (1, size)
assert la.axes.names == ['index', None]
assert list(la.axes.labels[0]) == index
assert list(la.axes.labels[1]) == columns
expected_la = Array(data.reshape((1, size)), [axis_index.rename('index'), axis_columns])
assert_array_equal(la, expected_la)
# anonymous index
# input dataframe:
# ----------------
# columns c0 c1 c2
# i0 0 1 2
# output Array:
# -------------
# {0}\columns c0 c1 c2
# i0 0 1 2
df.index.name, df.columns.name = None, 'columns'
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (1, size)
assert la.axes.names == [None, 'columns']
assert list(la.axes.labels[0]) == index
assert list(la.axes.labels[1]) == columns
expected_la = Array(data.reshape((1, size)), [axis_index, axis_columns.rename('columns')])
assert_array_equal(la, expected_la)
# index and columns with name
# input dataframe:
# ----------------
# columns c0 c1 c2
# index
# i0 0 1 2
# output Array:
# -------------
# index\columns c0 c1 c2
# i0 0 1 2
df.index.name, df.columns.name = 'index', 'columns'
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (1, size)
assert la.axes.names == ['index', 'columns']
assert list(la.axes.labels[0]) == index
assert list(la.axes.labels[1]) == columns
expected_la = Array(data.reshape((1, size)), [axis_index.rename('index'), axis_columns.rename('columns')])
assert_array_equal(la, expected_la)
# 2B) data = vertical vector (N x 1)
# ==================================
# Dataframe becomes 2D Array
data = data.reshape(size, 1)
indexes = ['i{}'.format(i) for i in range(size)]
columns = ['c0']
axis_index, axis_columns = Axis(indexes), Axis(columns)
df = pd.DataFrame(data, index=indexes, columns=columns)
assert df.index.name is None
assert df.columns.name is None
assert list(df.index.values) == indexes
assert list(df.columns.values) == columns
# anonymous indexes/columns
# input dataframe:
# ----------------
# c0
# i0 0
# i1 1
# i2 2
# output Array:
# -------------
# {0}\{1} c0
# i0 0
# i1 1
# i2 2
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (size, 1)
assert la.axes.names == [None, None]
assert list(la.axes.labels[0]) == indexes
assert list(la.axes.labels[1]) == columns
expected_la = Array(data, [axis_index, axis_columns])
assert_array_equal(la, expected_la)
# anonymous columns
# input dataframe:
# ----------------
# c0
# index
# i0 0
# i1 1
# i2 2
# output Array:
# -------------
# index\{1} c0
# i0 0
# i1 1
# i2 2
df.index.name, df.columns.name = 'index', None
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (size, 1)
assert la.axes.names == ['index', None]
assert list(la.axes.labels[0]) == indexes
assert list(la.axes.labels[1]) == columns
expected_la = Array(data, [axis_index.rename('index'), axis_columns])
assert_array_equal(la, expected_la)
# anonymous index
# input dataframe:
# ----------------
# columns c0
# i0 0
# i1 1
# i2 2
# output Array:
# -------------
# {0}\columns c0
# i0 0
# i1 1
# i2 2
df.index.name, df.columns.name = None, 'columns'
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (size, 1)
assert la.axes.names == [None, 'columns']
assert list(la.axes.labels[0]) == indexes
assert list(la.axes.labels[1]) == columns
expected_la = Array(data, [axis_index, axis_columns.rename('columns')])
assert_array_equal(la, expected_la)
# index and columns with name
# input dataframe:
# ----------------
# columns c0
# index
# i0 0
# i1 1
# i2 2
# output Array:
# -------------
# index\columns c0
# i0 0
# i1 1
# i2 2
df.index.name, df.columns.name = 'index', 'columns'
la = from_frame(df)
assert la.ndim == 2
assert la.shape == (size, 1)
assert la.axes.names == ['index', 'columns']
assert list(la.axes.labels[0]) == indexes
assert list(la.axes.labels[1]) == columns
expected_la = Array(data, [axis_index.rename('index'), axis_columns.rename('columns')])
assert_array_equal(la, expected_la)
# 3) 3D array
# ===========
# 3A) Dataframe with 2 index columns
# ==================================
dt = [('age', int), ('sex', 'U1'),
('2007', int), ('2010', int), ('2013', int)]
data = np.array([
(0, 'F', 3722, 3395, 3347),
(0, 'M', 338, 316, 323),
(1, 'F', 2878, 2791, 2822),
(1, 'M', 1121, 1037, 976),
(2, 'F', 4073, 4161, 4429),
(2, 'M', 1561, 1463, 1467),
(3, 'F', 3507, 3741, 3366),
(3, 'M', 2052, 2052, 2118),
], dtype=dt)
df = pd.DataFrame(data)
df.set_index(['age', 'sex'], inplace=True)
df.columns.name = 'time'
la = from_frame(df)
assert la.ndim == 3
assert la.shape == (4, 2, 3)
assert la.axes.names == ['age', 'sex', 'time']
assert_array_equal(la[0, 'F', :], [3722, 3395, 3347])
# 3B) Dataframe with columns.name containing \\
# =============================================
dt = [('age', int), ('sex\\time', 'U1'),
('2007', int), ('2010', int), ('2013', int)]
data = np.array([
(0, 'F', 3722, 3395, 3347),
(0, 'M', 338, 316, 323),
(1, 'F', 2878, 2791, 2822),
(1, 'M', 1121, 1037, 976),
(2, 'F', 4073, 4161, 4429),
(2, 'M', 1561, 1463, 1467),
(3, 'F', 3507, 3741, 3366),
(3, 'M', 2052, 2052, 2118),
], dtype=dt)
df = pd.DataFrame(data)
df.set_index(['age', 'sex\\time'], inplace=True)
la = from_frame(df, unfold_last_axis_name=True)
assert la.ndim == 3
assert la.shape == (4, 2, 3)
assert la.axes.names == ['age', 'sex', 'time']
assert_array_equal(la[0, 'F', :], [3722, 3395, 3347])
# 3C) Dataframe with no axis names (names are None)
# ===============================
arr_no_names = ndtest("a0,a1;b0..b2;c0..c3")
df_no_names = arr_no_names.df
res = from_frame(df_no_names)
assert_array_equal(res, arr_no_names)
# 3D) Dataframe with empty axis names (names are '')
# ==================================
arr_empty_names = ndtest("=a0,a1;=b0..b2;=c0..c3")
assert arr_empty_names.axes.names == ['', '', '']
df_no_names = arr_empty_names.df
res = from_frame(df_no_names)
assert_array_equal(res, arr_empty_names)
# 4) test sort_rows and sort_columns arguments
# ============================================
age = Axis('age=2,0,1,3')
gender = Axis('gender=M,F')
time = Axis('time=2016,2015,2017')
columns = pd.Index(time.labels, name=time.name)
# df.index is an Index instance
expected = ndtest((gender, time))
index = pd.Index(gender.labels, name=gender.name)
data = expected.data
df = pd.DataFrame(data, index=index, columns=columns)
expected = expected.sort_axes()
res = from_frame(df, sort_rows=True, sort_columns=True)
assert_array_equal(res, expected)
# df.index is a MultiIndex instance
expected = ndtest((age, gender, time))
index = pd.MultiIndex.from_product(expected.axes[:-1].labels, names=expected.axes[:-1].names)
data = expected.data.reshape(len(age) * len(gender), len(time))
df = pd.DataFrame(data, index=index, columns=columns)
res = from_frame(df, sort_rows=True, sort_columns=True)
assert_array_equal(res, expected.sort_axes())
# 5) test fill_value
# ==================
expected[0, 'F'] = -1
df = df.reset_index().drop([3]).set_index(['age', 'gender'])
res = from_frame(df, fill_value=-1)
assert_array_equal(res, expected)
def test_to_csv(tmpdir):
arr = io_3d.copy()
arr.to_csv(tmp_path(tmpdir, 'out.csv'))
result = ['a,b\\c,c0,c1,c2\n',
'1,b0,0,1,2\n',
'1,b1,3,4,5\n']
with open(tmp_path(tmpdir, 'out.csv')) as f:
assert f.readlines()[:3] == result
# stacked data (one column containing all the values and another column listing the context of the value)
arr.to_csv(tmp_path(tmpdir, 'out.csv'), wide=False)
result = ['a,b,c,value\n',
'1,b0,c0,0\n',
'1,b0,c1,1\n']
with open(tmp_path(tmpdir, 'out.csv')) as f:
assert f.readlines()[:3] == result
arr = io_1d.copy()
arr.to_csv(tmp_path(tmpdir, 'test_out1d.csv'))
result = ['a,a0,a1,a2\n',
',0,1,2\n']
with open(tmp_path(tmpdir, 'test_out1d.csv')) as f:
assert f.readlines() == result
@needs_xlsxwriter
def test_to_excel_xlsxwriter(tmpdir):
fpath = tmp_path(tmpdir, 'test_to_excel_xlsxwriter.xlsx')
# 1D
a1 = ndtest(3)
# fpath/Sheet1/A1
a1.to_excel(fpath, overwrite_file=True, engine='xlsxwriter')
res = read_excel(fpath, engine='xlrd')
assert_array_equal(res, a1)
# fpath/Sheet1/A1(transposed)
a1.to_excel(fpath, transpose=True, engine='xlsxwriter')
res = read_excel(fpath, engine='xlrd')
assert_array_equal(res, a1)
# fpath/Sheet1/A1
# stacked data (one column containing all the values and another column listing the context of the value)
a1.to_excel(fpath, wide=False, engine='xlsxwriter')
res = read_excel(fpath, engine='xlrd')
stacked_a1 = a1.reshape([a1.a, Axis(['value'])])
assert_array_equal(res, stacked_a1)
# 2D
a2 = ndtest((2, 3))
# fpath/Sheet1/A1
a2.to_excel(fpath, overwrite_file=True, engine='xlsxwriter')
res = read_excel(fpath, engine='xlrd')
assert_array_equal(res, a2)
# fpath/Sheet1/A10
# TODO: this is currently not supported (though we would only need to translate A10 to startrow=0 and startcol=0)
# a2.to_excel('fpath', 'Sheet1', 'A10', engine='xlsxwriter')
# res = read_excel('fpath', 'Sheet1', engine='xlrd', skiprows=9)
# assert_array_equal(res, a2)
# fpath/other/A1
a2.to_excel(fpath, 'other', engine='xlsxwriter')
res = read_excel(fpath, 'other', engine='xlrd')
assert_array_equal(res, a2)
# 3D
a3 = ndtest((2, 3, 4))
# fpath/Sheet1/A1
# FIXME: merge_cells=False should be the default (until Pandas is fixed to read its format)
a3.to_excel(fpath, overwrite_file=True, engine='xlsxwriter', merge_cells=False)
# a3.to_excel('fpath', overwrite_file=True, engine='openpyxl')
res = read_excel(fpath, engine='xlrd')
assert_array_equal(res, a3)
# fpath/Sheet1/A20
# TODO: implement position (see above)
# a3.to_excel('fpath', 'Sheet1', 'A20', engine='xlsxwriter', merge_cells=False)
# res = read_excel('fpath', 'Sheet1', engine='xlrd', skiprows=19)
# assert_array_equal(res, a3)
# fpath/other/A1
a3.to_excel(fpath, 'other', engine='xlsxwriter', merge_cells=False)
res = read_excel(fpath, 'other', engine='xlrd')
assert_array_equal(res, a3)
# passing group as sheet_name
a3 = ndtest((4, 3, 4))
os.remove(fpath)
# single element group
for label in a3.a:
a3[label].to_excel(fpath, label, engine='xlsxwriter')
# unnamed group
group = a3.c['c0,c2']
a3[group].to_excel(fpath, group, engine='xlsxwriter')
# unnamed group + slice
group = a3.c['c0::2']
a3[group].to_excel(fpath, group, engine='xlsxwriter')
# named group
group = a3.c['c0,c2'] >> 'even'
a3[group].to_excel(fpath, group, engine='xlsxwriter')
# group with name containing special characters (replaced by _)
group = a3.c['c0,c2'] >> r':name?with*special/\[char]'
a3[group].to_excel(fpath, group, engine='xlsxwriter')
@needs_xlwings
def test_to_excel_xlwings(tmpdir):
fpath = tmp_path(tmpdir, 'test_to_excel_xlwings.xlsx')
# 1D
a1 = ndtest(3)
# live book/Sheet1/A1
# a1.to_excel()
# fpath/Sheet1/A1 (create a new file if it does not exist)
if os.path.isfile(fpath):
os.remove(fpath)
a1.to_excel(fpath, engine='xlwings')
# we use xlrd to read back instead of xlwings even if that should work, to make the test faster
res = read_excel(fpath, engine='xlrd')
assert_array_equal(res, a1)
# fpath/Sheet1/A1(transposed)
a1.to_excel(fpath, transpose=True, engine='xlwings')
res = read_excel(fpath, engine='xlrd')
assert_array_equal(res, a1)
# fpath/Sheet1/A1
# stacked data (one column containing all the values and another column listing the context of the value)
a1.to_excel(fpath, wide=False, engine='xlwings')
res = read_excel(fpath, engine='xlrd')
assert_array_equal(res, a1)
# 2D
a2 = ndtest((2, 3))
# fpath/Sheet1/A1
a2.to_excel(fpath, overwrite_file=True, engine='xlwings')
res = read_excel(fpath, engine='xlrd')
assert_array_equal(res, a2)
# fpath/Sheet1/A10
a2.to_excel(fpath, 'Sheet1', 'A10', engine='xlwings')
res = read_excel(fpath, 'Sheet1', engine='xlrd', skiprows=9)
assert_array_equal(res, a2)
# fpath/other/A1
a2.to_excel(fpath, 'other', engine='xlwings')
res = read_excel(fpath, 'other', engine='xlrd')
assert_array_equal(res, a2)
# transpose
a2.to_excel(fpath, 'transpose', transpose=True, engine='xlwings')
res = read_excel(fpath, 'transpose', engine='xlrd')
assert_array_equal(res, a2.T)
# 3D
a3 = ndtest((2, 3, 4))
# fpath/Sheet1/A1
a3.to_excel(fpath, overwrite_file=True, engine='xlwings')
res = read_excel(fpath, engine='xlrd')
assert_array_equal(res, a3)
# fpath/Sheet1/A20
a3.to_excel(fpath, 'Sheet1', 'A20', engine='xlwings')
res = read_excel(fpath, 'Sheet1', engine='xlrd', skiprows=19)
assert_array_equal(res, a3)
# fpath/other/A1
a3.to_excel(fpath, 'other', engine='xlwings')
res = read_excel(fpath, 'other', engine='xlrd')
assert_array_equal(res, a3)
# passing group as sheet_name
a3 = ndtest((4, 3, 4))
os.remove(fpath)
# single element group
for label in a3.a:
a3[label].to_excel(fpath, label, engine='xlwings')
# unnamed group
group = a3.c['c0,c2']
a3[group].to_excel(fpath, group, engine='xlwings')
# unnamed group + slice
group = a3.c['c0::2']
a3[group].to_excel(fpath, group, engine='xlwings')
# named group
group = a3.c['c0,c2'] >> 'even'
a3[group].to_excel(fpath, group, engine='xlwings')
# group with name containing special characters (replaced by _)
group = a3.c['c0,c2'] >> r':name?with*special/\[char]'
a3[group].to_excel(fpath, group, engine='xlwings')
# checks sheet names
sheet_names = sorted(open_excel(fpath).sheet_names())
assert sheet_names == sorted(['a0', 'a1', 'a2', 'a3', 'c0,c2', 'c0__2', 'even',
'_name_with_special___char_'])
# sheet name of 31 characters (= maximum authorized length)
a3.to_excel(fpath, "sheetname_of_exactly_31_chars__", engine='xlwings')
# sheet name longer than 31 characters
with pytest.raises(ValueError, match="Sheet names cannot exceed 31 characters"):
a3.to_excel(fpath, "sheetname_longer_than_31_characters", engine='xlwings')
def test_dump():
# narrow format
res = list(ndtest(3).dump(wide=False, value_name='data'))
assert res == [['a', 'data'],
['a0', 0],
['a1', 1],
['a2', 2]]
# array with an anonymous axis and a wildcard axis
arr = ndtest((Axis('a0,a1'), Axis(2, 'b')))
res = arr.dump()
assert res == [['\\b', 0, 1],
['a0', 0, 1],
['a1', 2, 3]]
res = arr.dump(_axes_display_names=True)
assert res == [['{0}\\b*', 0, 1],
['a0', 0, 1],
['a1', 2, 3]]
@needs_xlwings
def test_open_excel(tmpdir):
# 1) Create new file
# ==================
fpath = inputpath('should_not_exist.xlsx')
# overwrite_file must be set to True to create a new file
with pytest.raises(ValueError):
open_excel(fpath)
# 2) with headers
# ===============
with open_excel(visible=False) as wb:
# 1D
a1 = ndtest(3)
# Sheet1/A1
wb['Sheet1'] = a1.dump()
res = wb['Sheet1'].load()
assert_array_equal(res, a1)
wb[0] = a1.dump()
res = wb[0].load()
assert_array_equal(res, a1)
# Sheet1/A1(transposed)
# TODO: implement .options on Sheet so that one can write:
# wb[0].options(transpose=True).value = a1.dump()
wb[0]['A1'].options(transpose=True).value = a1.dump()
# TODO: implement .options on Range so that you can write:
# res = wb[0]['A1:B4'].options(transpose=True).load()
# res = from_lists(wb[0]['A1:B4'].options(transpose=True).value)
# assert_array_equal(res, a1)
# 2D
a2 = ndtest((2, 3))
# Sheet1/A1
wb[0] = a2.dump()
res = wb[0].load()
assert_array_equal(res, a2)
# Sheet1/A10
wb[0]['A10'] = a2.dump()
res = wb[0]['A10:D12'].load()
assert_array_equal(res, a2)
# other/A1
wb['other'] = a2.dump()
res = wb['other'].load()
assert_array_equal(res, a2)
# new/A10
# we need to create the sheet first
wb['new'] = ''
wb['new']['A10'] = a2.dump()
res = wb['new']['A10:D12'].load()
assert_array_equal(res, a2)
# new2/A10
# cannot store the return value of "add" because that's a raw xlwings Sheet
wb.sheets.add('new2')
wb['new2']['A10'] = a2.dump()
res = wb['new2']['A10:D12'].load()
assert_array_equal(res, a2)
# 3D
a3 = ndtest((2, 3, 4))
# 3D/A1
wb['3D'] = a3.dump()
res = wb['3D'].load()
assert_array_equal(res, a3)
# 3D/A20
wb['3D']['A20'] = a3.dump()
res = wb['3D']['A20:F26'].load()
assert_array_equal(res, a3)
# 3D/A20 without name for columns
wb['3D']['A20'] = a3.dump()
# assume we have no name for the columns axis (ie change b\c to b)
wb['3D']['B20'] = 'b'
res = wb['3D']['A20:F26'].load(nb_axes=3)
assert_array_equal(res, a3.data)
# the two first axes should be the same
assert res.axes[:2] == a3.axes[:2]
# the third axis should have the same labels (but not the same name obviously)
assert_array_equal(res.axes[2].labels, a3.axes[2].labels)
with open_excel(inputpath('test.xlsx')) as wb:
expected = ndtest("a=a0..a2; b0..b2")
res = wb['2d_classic'].load()
assert_array_equal(res, expected)
# 3) without headers
# ==================
with open_excel(visible=False) as wb:
# 1D
a1 = ndtest(3)
# Sheet1/A1
wb['Sheet1'] = a1
res = wb['Sheet1'].load(header=False)
assert_array_equal(res, a1.data)
wb[0] = a1
res = wb[0].load(header=False)
assert_array_equal(res, a1.data)
# Sheet1/A1(transposed)
# FIXME: we need to .dump(header=False) explicitly because otherwise we go via ArrayConverter which
# includes labels. for consistency's sake we should either change ArrayConverter to not include
# labels, or change wb[0] = a1 to include them (and use wb[0] = a1.data to avoid them?) but that
# would be heavily backward incompatible and how would I load them back?
# wb[0]['A1'].options(transpose=True).value = a1
wb[0]['A1'].options(transpose=True).value = a1.dump(header=False)
res = wb[0]['A1:A3'].load(header=False)
assert_array_equal(res, a1.data)
# 2D
a2 = ndtest((2, 3))
# Sheet1/A1
wb[0] = a2
res = wb[0].load(header=False)
assert_array_equal(res, a2.data)
# Sheet1/A10
wb[0]['A10'] = a2
res = wb[0]['A10:C11'].load(header=False)
assert_array_equal(res, a2.data)
# other/A1
wb['other'] = a2
res = wb['other'].load(header=False)
assert_array_equal(res, a2.data)
# new/A10
# we need to create the sheet first
wb['new'] = ''
wb['new']['A10'] = a2
res = wb['new']['A10:C11'].load(header=False)
assert_array_equal(res, a2.data)
# 3D
a3 = ndtest((2, 3, 4))
# 3D/A1
wb['3D'] = a3
res = wb['3D'].load(header=False)
assert_array_equal(res, a3.data.reshape((6, 4)))
# 3D/A20
wb['3D']['A20'] = a3
res = wb['3D']['A20:D25'].load(header=False)
assert_array_equal(res, a3.data.reshape((6, 4)))
# 4) Blank cells
# ==============
# Excel sheet with blank cells on right/bottom border of the array to read
fpath = inputpath('test_blank_cells.xlsx')
with open_excel(fpath) as wb:
good = wb['good'].load()
bad1 = wb['blanksafter_morerowsthancols'].load()
bad2 = wb['blanksafter_morecolsthanrows'].load()
# with additional empty column in the middle of the array to read
good2 = wb['middleblankcol']['A1:E3'].load()
bad3 = wb['middleblankcol'].load()
bad4 = wb['16384col'].load()
assert_array_equal(bad1, good)
assert_array_equal(bad2, good)
assert_array_equal(bad3, good2)
assert_array_equal(bad4, good2)
# 5) anonymous and wildcard axes
# =============================
arr = ndtest((Axis('a0,a1'), Axis(2, 'b')))
fpath = tmp_path(tmpdir, 'anonymous_and_wildcard_axes.xlsx')
with open_excel(fpath, overwrite_file=True) as wb:
wb[0] = arr.dump()
res = wb[0].load()
# the result should be identical to the original array except we lost the information about
# the wildcard axis being a wildcard axis
expected = arr.set_axes('b', Axis([0, 1], 'b'))
assert_array_equal(res, expected)
# 6) crash test
# =============
arr = ndtest((2, 2))
fpath = tmp_path(tmpdir, 'temporary_test_file.xlsx')
# create and save a test file
with open_excel(fpath, overwrite_file=True) as wb:
wb['arr'] = arr.dump()
wb.save()
# raise exception when the file is open
try:
with open_excel(fpath, overwrite_file=True) as wb:
raise ValueError("")
except ValueError:
pass
# check if file is still available
with open_excel(fpath) as wb:
assert wb.sheet_names() == ['arr']
assert_array_equal(wb['arr'].load(), arr)
# remove file
if os.path.exists(fpath):
os.remove(fpath)
def test_ufuncs(small_array):
raw = small_array.data
# simple one-argument ufunc
assert_array_equal(exp(small_array), np.exp(raw))
# with out=
la_out = zeros(small_array.axes)
raw_out = np.zeros(raw.shape)
la_out2 = exp(small_array, la_out)
raw_out2 = np.exp(raw, raw_out)
# FIXME: this is not the case currently
# self.assertIs(la_out2, la_out)
assert_array_equal(la_out2, la_out)
assert raw_out2 is raw_out
assert_array_equal(la_out, raw_out)
# with out= and broadcasting
# we need to put the 'a' axis first because numpy broadcasting only supports adding new axes at the front
la_out = zeros([Axis([0, 1, 2], 'a')] + list(small_array.axes))
raw_out = np.zeros((3,) + raw.shape)
la_out2 = exp(small_array, la_out)
raw_out2 = np.exp(raw, raw_out)
# self.assertIs(la_out2, la_out)
# XXX: why is la_out2 transposed?
assert_array_equal(la_out2.transpose(X.a), la_out)
assert raw_out2 is raw_out
assert_array_equal(la_out, raw_out)
sex, lipro = small_array.axes
low = small_array.sum(sex) // 4 + 3
raw_low = raw.sum(0) // 4 + 3
high = small_array.sum(sex) // 4 + 13
raw_high = raw.sum(0) // 4 + 13
# LA + scalars
assert_array_equal(small_array.clip(0, 10), raw.clip(0, 10))
assert_array_equal(clip(small_array, 0, 10), np.clip(raw, 0, 10))
# LA + LA (no broadcasting)
assert_array_equal(clip(small_array, 21 - small_array, 9 + small_array // 2),
np.clip(raw, 21 - raw, 9 + raw // 2))
# LA + LA (with broadcasting)
assert_array_equal(clip(small_array, low, high),
np.clip(raw, raw_low, raw_high))
# where (no broadcasting)
assert_array_equal(where(small_array < 5, -5, small_array),
np.where(raw < 5, -5, raw))
# where (transposed no broadcasting)
assert_array_equal(where(small_array < 5, -5, small_array.T),
np.where(raw < 5, -5, raw))
# where (with broadcasting)
result = where(small_array['P01'] < 5, -5, small_array)
assert result.axes.names == ['sex', 'lipro']
assert_array_equal(result, np.where(raw[:, [0]] < 5, -5, raw))
# round
small_float = small_array + 0.6
rounded = round(small_float)
assert_array_equal(rounded, np.round(raw + 0.6))
def test_diag():
# 2D -> 1D
a = ndtest((3, 3))
d = diag(a)
assert d.ndim == 1
assert d.i[0] == a.i[0, 0]
assert d.i[1] == a.i[1, 1]
assert d.i[2] == a.i[2, 2]
# 1D -> 2D
a2 = diag(d)
assert a2.ndim == 2
assert a2.i[0, 0] == a.i[0, 0]
assert a2.i[1, 1] == a.i[1, 1]
assert a2.i[2, 2] == a.i[2, 2]
# 3D -> 2D
a = ndtest((3, 3, 3))
d = diag(a)
assert d.ndim == 2
assert d.i[0, 0] == a.i[0, 0, 0]
assert d.i[1, 1] == a.i[1, 1, 1]
assert d.i[2, 2] == a.i[2, 2, 2]
# 3D -> 1D
d = diag(a, axes=(0, 1, 2))
assert d.ndim == 1
assert d.i[0] == a.i[0, 0, 0]
assert d.i[1] == a.i[1, 1, 1]
assert d.i[2] == a.i[2, 2, 2]
# 1D (anon) -> 2D
d_anon = d.rename(0, None).ignore_labels()
a2 = diag(d_anon)
assert a2.ndim == 2
# 1D (anon) -> 3D
a3 = diag(d_anon, ndim=3)
assert a3.ndim == 3
assert a3.i[0, 0, 0] == a.i[0, 0, 0]
assert a3.i[1, 1, 1] == a.i[1, 1, 1]
assert a3.i[2, 2, 2] == a.i[2, 2, 2]
# using Axis object
sex = Axis('sex=M,F')
a = eye(sex)
d = diag(a)
assert d.ndim == 1
assert d.axes.names == ['sex_sex']
assert_array_equal(d.axes.labels, [['M_M', 'F_F']])
assert d.i[0] == 1.0
assert d.i[1] == 1.0
@needs_python35
def test_matmul():
# 2D / anonymous axes
a1 = ndtest([Axis(3), Axis(3)])
a2 = eye(3, 3) * 2
# Note that we cannot use @ because that is an invalid syntax in Python 2
# Array value
assert_array_equal(a1.__matmul__(a2), ndtest([Axis(3), Axis(3)]) * 2)
# ndarray value
assert_array_equal(a1.__matmul__(a2.data), ndtest([Axis(3), Axis(3)]) * 2)
# non anonymous axes (N <= 2)
arr1d = ndtest(3)
arr2d = ndtest((3, 3))
# 1D @ 1D
res = arr1d.__matmul__(arr1d)
assert isinstance(res, np.integer)
assert res == 5
# 1D @ 2D
assert_array_equal(arr1d.__matmul__(arr2d),
Array([15, 18, 21], 'b=b0..b2'))
# 2D @ 1D
assert_array_equal(arr2d.__matmul__(arr1d),
Array([5, 14, 23], 'a=a0..a2'))
# 2D(a,b) @ 2D(a,b) -> 2D(a,b)
res = from_lists([['a\\b', 'b0', 'b1', 'b2'],
['a0', 15, 18, 21],
['a1', 42, 54, 66],
['a2', 69, 90, 111]])
assert_array_equal(arr2d.__matmul__(arr2d), res)
# 2D(a,b) @ 2D(b,a) -> 2D(a,a)
res = from_lists([['a\\a', 'a0', 'a1', 'a2'],
['a0', 5, 14, 23],
['a1', 14, 50, 86],
['a2', 23, 86, 149]])
assert_array_equal(arr2d.__matmul__(arr2d.T), res)
# ndarray value
assert_array_equal(arr1d.__matmul__(arr2d.data),
Array([15, 18, 21]))
assert_array_equal(arr2d.data.__matmul__(arr2d.T.data),
res.data)
# different axes
a1 = ndtest('a=a0..a1;b=b0..b2')
a2 = ndtest('b=b0..b2;c=c0..c3')
res = from_lists([[r'a\c', 'c0', 'c1', 'c2', 'c3'],
['a0', 20, 23, 26, 29],
['a1', 56, 68, 80, 92]])
assert_array_equal(a1.__matmul__(a2), res)
# non anonymous axes (N >= 2)
arr2d = ndtest((2, 2))
arr3d = ndtest((2, 2, 2))
arr4d = ndtest((2, 2, 2, 2))
a, b, c, d = arr4d.axes
e = Axis('e=e0,e1')
f = Axis('f=f0,f1')
# 4D(a, b, c, d) @ 3D(e, d, f) -> 5D(a, b, e, c, f)
arr3d = arr3d.set_axes([e, d, f])
res = from_lists([['a', 'b', 'e', 'c\\f', 'f0', 'f1'],
['a0', 'b0', 'e0', 'c0', 2, 3],
['a0', 'b0', 'e0', 'c1', 6, 11],
['a0', 'b0', 'e1', 'c0', 6, 7],
['a0', 'b0', 'e1', 'c1', 26, 31],
['a0', 'b1', 'e0', 'c0', 10, 19],
['a0', 'b1', 'e0', 'c1', 14, 27],
['a0', 'b1', 'e1', 'c0', 46, 55],
['a0', 'b1', 'e1', 'c1', 66, 79],
['a1', 'b0', 'e0', 'c0', 18, 35],
['a1', 'b0', 'e0', 'c1', 22, 43],
['a1', 'b0', 'e1', 'c0', 86, 103],
['a1', 'b0', 'e1', 'c1', 106, 127],
['a1', 'b1', 'e0', 'c0', 26, 51],
['a1', 'b1', 'e0', 'c1', 30, 59],
['a1', 'b1', 'e1', 'c0', 126, 151],
['a1', 'b1', 'e1', 'c1', 146, 175]])
assert_array_equal(arr4d.__matmul__(arr3d), res)
# 3D(e, d, f) @ 4D(a, b, c, d) -> 5D(e, a, b, d, d)
res = from_lists([['e', 'a', 'b', 'd\\d', 'd0', 'd1'],
['e0', 'a0', 'b0', 'd0', 2, 3],
['e0', 'a0', 'b0', 'd1', 6, 11],
['e0', 'a0', 'b1', 'd0', 6, 7],
['e0', 'a0', 'b1', 'd1', 26, 31],
['e0', 'a1', 'b0', 'd0', 10, 11],
['e0', 'a1', 'b0', 'd1', 46, 51],
['e0', 'a1', 'b1', 'd0', 14, 15],
['e0', 'a1', 'b1', 'd1', 66, 71],
['e1', 'a0', 'b0', 'd0', 10, 19],
['e1', 'a0', 'b0', 'd1', 14, 27],
['e1', 'a0', 'b1', 'd0', 46, 55],
['e1', 'a0', 'b1', 'd1', 66, 79],
['e1', 'a1', 'b0', 'd0', 82, 91],
['e1', 'a1', 'b0', 'd1', 118, 131],
['e1', 'a1', 'b1', 'd0', 118, 127],
['e1', 'a1', 'b1', 'd1', 170, 183]])
assert_array_equal(arr3d.__matmul__(arr4d), res)
# 4D(a, b, c, d) @ 3D(b, d, f) -> 4D(a, b, c, f)
arr3d = arr3d.set_axes([b, d, f])
res = from_lists([['a', 'b', 'c\\f', 'f0', 'f1'],
['a0', 'b0', 'c0', 2, 3],
['a0', 'b0', 'c1', 6, 11],
['a0', 'b1', 'c0', 46, 55],
['a0', 'b1', 'c1', 66, 79],
['a1', 'b0', 'c0', 18, 35],
['a1', 'b0', 'c1', 22, 43],
['a1', 'b1', 'c0', 126, 151],
['a1', 'b1', 'c1', 146, 175]])
assert_array_equal(arr4d.__matmul__(arr3d), res)
# 3D(b, d, f) @ 4D(a, b, c, d) -> 4D(b, a, d, d)
res = from_lists([['b', 'a', 'd\\d', 'd0', 'd1'],
['b0', 'a0', 'd0', 2, 3],
['b0', 'a0', 'd1', 6, 11],
['b0', 'a1', 'd0', 10, 11],
['b0', 'a1', 'd1', 46, 51],
['b1', 'a0', 'd0', 46, 55],
['b1', 'a0', 'd1', 66, 79],
['b1', 'a1', 'd0', 118, 127],
['b1', 'a1', 'd1', 170, 183]])
assert_array_equal(arr3d.__matmul__(arr4d), res)
# 4D(a, b, c, d) @ 2D(d, f) -> 4D(a, b, c, f)
arr2d = arr2d.set_axes([d, f])
res = from_lists([['a', 'b', 'c\\f', 'f0', 'f1'],
['a0', 'b0', 'c0', 2, 3],
['a0', 'b0', 'c1', 6, 11],
['a0', 'b1', 'c0', 10, 19],
['a0', 'b1', 'c1', 14, 27],
['a1', 'b0', 'c0', 18, 35],
['a1', 'b0', 'c1', 22, 43],
['a1', 'b1', 'c0', 26, 51],
['a1', 'b1', 'c1', 30, 59]])
assert_array_equal(arr4d.__matmul__(arr2d), res)
# 2D(d, f) @ 4D(a, b, c, d) -> 4D(a, b, d, d)
res = from_lists([['a', 'b', 'd\\d', 'd0', 'd1'],
['a0', 'b0', 'd0', 2, 3],
['a0', 'b0', 'd1', 6, 11],
['a0', 'b1', 'd0', 6, 7],
['a0', 'b1', 'd1', 26, 31],
['a1', 'b0', 'd0', 10, 11],
['a1', 'b0', 'd1', 46, 51],
['a1', 'b1', 'd0', 14, 15],
['a1', 'b1', 'd1', 66, 71]])
assert_array_equal(arr2d.__matmul__(arr4d), res)
@needs_python35
def test_rmatmul():
a1 = eye(3) * 2
a2 = ndtest([Axis(3), Axis(3)])
# equivalent to a1.data @ a2
res = a2.__rmatmul__(a1.data)
assert isinstance(res, Array)
assert_array_equal(res, ndtest([Axis(3), Axis(3)]) * 2)
def test_broadcast_with():
a1 = ndtest((3, 2))
a2 = ndtest(3)
b = a2.broadcast_with(a1)
assert b.ndim == a1.ndim
assert b.shape == (3, 1)
assert_array_equal(b.i[:, 0], a2)
# anonymous axes
a1 = ndtest([Axis(3), Axis(2)])
a2 = ndtest(Axis(3))
b = a2.broadcast_with(a1)
assert b.ndim == a1.ndim
assert b.shape == (3, 1)
assert_array_equal(b.i[:, 0], a2)
a1 = ndtest([Axis(1), Axis(3)])
a2 = ndtest([Axis(3), Axis(1)])
b = a2.broadcast_with(a1)
assert b.ndim == 2
# common axes are reordered according to target (a1 in this case)
assert b.shape == (1, 3)
assert_larray_equiv(b, a2)
a1 = ndtest([Axis(2), Axis(3)])
a2 = ndtest([Axis(3), Axis(2)])
b = a2.broadcast_with(a1)
assert b.ndim == 2
assert b.shape == (2, 3)
assert_larray_equiv(b, a2)
def test_plot():
pass
# small_h = small['M']
# small_h.plot(kind='bar')
# small_h.plot()
# small_h.hist()
# large_data = np.random.randn(1000)
# tick_v = np.random.randint(ord('a'), ord('z'), size=1000)
# ticks = [chr(c) for c in tick_v]
# large_axis = Axis('large', ticks)
# large = Array(large_data, axes=[large_axis])
# large.plot()
# large.hist()
def test_combine_axes():
# combine N axes into 1
# =====================
arr = ndtest((2, 3, 4, 5))
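# note: combine_axes merges the listed axes into a single axis whose labels are
# the original label combinations joined by '_' (e.g. 'a0_b0'), leaving the
# other axes untouched, as the assertions below check.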
res = arr.combine_axes((X.a, X.b))
assert res.axes.names == ['a_b', 'c', 'd']
assert res.size == arr.size
assert res.shape == (2 * 3, 4, 5)
assert_array_equal(res.axes.a_b.labels[:2], ['a0_b0', 'a0_b1'])
assert_array_equal(res['a1_b0'], arr['a1', 'b0'])
res = arr.combine_axes((X.a, X.c))
assert res.axes.names == ['a_c', 'b', 'd']
assert res.size == arr.size
assert res.shape == (2 * 4, 3, 5)
assert_array_equal(res.axes.a_c.labels[:2], ['a0_c0', 'a0_c1'])
assert_array_equal(res['a1_c0'], arr['a1', 'c0'])
res = arr.combine_axes((X.b, X.d))
assert res.axes.names == ['a', 'b_d', 'c']
assert res.size == arr.size
assert res.shape == (2, 3 * 5, 4)
assert_array_equal(res.axes.b_d.labels[:2], ['b0_d0', 'b0_d1'])
assert_array_equal(res['b1_d0'], arr['b1', 'd0'])
# combine M axes into N
# =====================
arr = ndtest((2, 3, 4, 4, 3, 2))
# using a list of tuples
res = arr.combine_axes([('a', 'c'), ('b', 'f'), ('d', 'e')])
assert res.axes.names == ['a_c', 'b_f', 'd_e']
assert res.size == arr.size
assert res.shape == (2 * 4, 3 * 2, 4 * 3)
assert list(res.axes.a_c.labels[:2]) == ['a0_c0', 'a0_c1']
assert list(res.axes.b_f.labels[:2]) == ['b0_f0', 'b0_f1']
assert list(res.axes.d_e.labels[:2]) == ['d0_e0', 'd0_e1']
assert res['a0_c2', 'b1_f1', 'd3_e2'] == arr['a0', 'b1', 'c2', 'd3', 'e2', 'f1']
res = arr.combine_axes([('a', 'c'), ('b', 'e', 'f')])
assert res.axes.names == ['a_c', 'b_e_f', 'd']
assert res.size == arr.size
assert res.shape == (2 * 4, 3 * 3 * 2, 4)
assert list(res.axes.b_e_f.labels[:4]) == ['b0_e0_f0', 'b0_e0_f1', 'b0_e1_f0', 'b0_e1_f1']
assert_array_equal(res['a0_c2', 'b1_e2_f1'], arr['a0', 'b1', 'c2', 'e2', 'f1'])
# using a dict (-> user defined axes names)
res = arr.combine_axes({('a', 'c'): 'AC', ('b', 'f'): 'BF', ('d', 'e'): 'DE'})
assert res.axes.names == ['AC', 'BF', 'DE']
assert res.size == arr.size
assert res.shape == (2 * 4, 3 * 2, 4 * 3)
res = arr.combine_axes({('a', 'c'): 'AC', ('b', 'e', 'f'): 'BEF'})
assert res.axes.names == ['AC', 'BEF', 'd']
assert res.size == arr.size
assert res.shape == (2 * 4, 3 * 3 * 2, 4)
# combine with wildcard=True
arr = ndtest((2, 3))
res = arr.combine_axes(wildcard=True)
assert res.axes.names == ['a_b']
assert res.size == arr.size
assert res.shape == (6,)
assert_array_equal(res.axes[0].labels, np.arange(6))
def test_split_axes():
# split one axis
# ==============
# default sep
arr = ndtest((2, 3, 4, 5))
combined = arr.combine_axes(('b', 'd'))
assert combined.axes.names == ['a', 'b_d', 'c']
res = combined.split_axes('b_d')
assert res.axes.names == ['a', 'b', 'd', 'c']
assert res.shape == (2, 3, 5, 4)
assert_array_equal(res.transpose('a', 'b', 'c', 'd'), arr)
# with specified names
res = combined.rename(b_d='bd').split_axes('bd', names=('b', 'd'))
assert res.axes.names == ['a', 'b', 'd', 'c']
assert res.shape == (2, 3, 5, 4)
assert_array_equal(res.transpose('a', 'b', 'c', 'd'), arr)
# regex
res = combined.split_axes('b_d', names=['b', 'd'], regex=r'(\w+)_(\w+)')
assert res.axes.names == ['a', 'b', 'd', 'c']
assert res.shape == (2, 3, 5, 4)
assert_array_equal(res.transpose('a', 'b', 'c', 'd'), arr)
# custom sep
combined = ndtest('a|b=a0|b0,a0|b1')
res = combined.split_axes(sep='|')
assert_array_equal(res, ndtest('a=a0;b=b0,b1'))
# split several axes at once
# ==========================
arr = ndtest('a_b=a0_b0..a1_b2; c=c0..c3; d=d0..d3; e_f=e0_f0..e2_f1')
# using a list of tuples
res = arr.split_axes(['a_b', 'e_f'])
assert res.axes.names == ['a', 'b', 'c', 'd', 'e', 'f']
assert res.size == arr.size
assert res.shape == (2, 3, 4, 4, 3, 2)
assert list(res.axes.a.labels) == ['a0', 'a1']
assert list(res.axes.b.labels) == ['b0', 'b1', 'b2']
assert list(res.axes.e.labels) == ['e0', 'e1', 'e2']
assert list(res.axes.f.labels) == ['f0', 'f1']
assert res['a0', 'b1', 'c2', 'd3', 'e2', 'f1'] == arr['a0_b1', 'c2', 'd3', 'e2_f1']
# default to all axes with name containing the delimiter _
assert_array_equal(arr.split_axes(), res)
# using a dict (-> user defined axes names)
res = arr.split_axes({'a_b': ('A', 'B'), 'e_f': ('E', 'F')})
assert res.axes.names == ['A', 'B', 'c', 'd', 'E', 'F']
assert res.size == arr.size
assert res.shape == (2, 3, 4, 4, 3, 2)
# split an axis in more than 2 axes
arr = ndtest('a_b_c=a0_b0_c0..a1_b2_c3; d=d0..d3; e_f=e0_f0..e2_f1')
res = arr.split_axes(['a_b_c', 'e_f'])
assert res.axes.names == ['a', 'b', 'c', 'd', 'e', 'f']
assert res.size == arr.size
assert res.shape == (2, 3, 4, 4, 3, 2)
assert list(res.axes.a.labels) == ['a0', 'a1']
assert list(res.axes.b.labels) == ['b0', 'b1', 'b2']
assert list(res.axes.e.labels) == ['e0', 'e1', 'e2']
assert list(res.axes.f.labels) == ['f0', 'f1']
assert res['a0', 'b1', 'c2', 'd3', 'e2', 'f1'] == arr['a0_b1_c2', 'd3', 'e2_f1']
# split an axis in more than 2 axes + passing a dict
res = arr.split_axes({'a_b_c': ('A', 'B', 'C'), 'e_f': ('E', 'F')})
assert res.axes.names == ['A', 'B', 'C', 'd', 'E', 'F']
assert res.size == arr.size
assert res.shape == (2, 3, 4, 4, 3, 2)
# using regex
arr = ndtest('ab=a0b0..a1b2; c=c0..c3; d=d0..d3; ef=e0f0..e2f1')
res = arr.split_axes({'ab': ('a', 'b'), 'ef': ('e', 'f')}, regex=r'(\w{2})(\w{2})')
assert res.axes.names == ['a', 'b', 'c', 'd', 'e', 'f']
assert res.size == arr.size
assert res.shape == (2, 3, 4, 4, 3, 2)
assert list(res.axes.a.labels) == ['a0', 'a1']
assert list(res.axes.b.labels) == ['b0', 'b1', 'b2']
assert list(res.axes.e.labels) == ['e0', 'e1', 'e2']
assert list(res.axes.f.labels) == ['f0', 'f1']
assert res['a0', 'b1', 'c2', 'd3', 'e2', 'f1'] == arr['a0b1', 'c2', 'd3', 'e2f1']
# labels with object dtype
arr = ndtest((2, 2, 2)).combine_axes(('a', 'b'))
arr = arr.set_axes([Axis(a.labels.astype(object), a.name) for a in arr.axes])
res = arr.split_axes()
expected_kind = 'U' if sys.version_info[0] >= 3 else 'S'
assert res.a.labels.dtype.kind == expected_kind
assert res.b.labels.dtype.kind == expected_kind
assert res.c.labels.dtype.kind == 'O'
assert_array_equal(res, ndtest((2, 2, 2)))
# not sorted by first part then second part (issue #364)
arr = ndtest((2, 3))
combined = arr.combine_axes()['a0_b0, a1_b0, a0_b1, a1_b1, a0_b2, a1_b2']
assert_array_equal(combined.split_axes('a_b'), arr)
# another weirdly sorted test
combined = arr.combine_axes()['a0_b1, a0_b0, a0_b2, a1_b1, a1_b0, a1_b2']
assert_array_equal(combined.split_axes('a_b'), arr['b1,b0,b2'])
# combined does not contain all combinations of labels (issue #369)
combined_partial = combined[['a0_b0', 'a0_b1', 'a1_b1', 'a0_b2', 'a1_b2']]
expected = arr.astype(float)
expected['a1', 'b0'] = nan
assert_array_nan_equal(combined_partial.split_axes('a_b'), expected)
# split labels are ambiguous (issue #485)
combined = ndtest('a_b=a0_b0..a1_b1;c_d=a0_b0..a1_b1')
expected = ndtest('a=a0,a1;b=b0,b1;c=a0,a1;d=b0,b1')
assert_array_equal(combined.split_axes(('a_b', 'c_d')), expected)
# anonymous axes
combined = ndtest('a0_b0,a0_b1,a0_b2,a1_b0,a1_b1,a1_b2')
expected = ndtest('a0,a1;b0,b1,b2')
assert_array_equal(combined.split_axes(0), expected)
# when no axis is specified and no axis contains the sep, split_axes is a no-op.
assert_array_equal(combined.split_axes(), combined)
def test_stack():
# stack along a single axis
# =========================
# simple
a = Axis('a=a0,a1,a2')
b = Axis('b=b0,b1')
arr0 = ndtest(a)
arr1 = ndtest(a, start=-1)
res = stack((arr0, arr1), b)
expected = Array([[0, -1],
[1, 0],
[2, 1]], [a, b])
assert_array_equal(res, expected)
# same but using a group as the stacking axis
larger_b = Axis('b=b0..b3')
res = stack((arr0, arr1), larger_b[:'b1'])
assert_array_equal(res, expected)
# simple with anonymous axis
axis0 = Axis(3)
arr0 = ndtest(axis0)
arr1 = ndtest(axis0, start=-1)
res = stack((arr0, arr1), b)
expected = Array([[0, -1],
[1, 0],
[2, 1]], [axis0, b])
assert_array_equal(res, expected)
# using res_axes
res = stack({'b0': 0, 'b1': 1}, axes=b, res_axes=(a, b))
expected = Array([[0, 1],
[0, 1],
[0, 1]], [a, b])
assert_array_equal(res, expected)
# giving elements as an Array containing Arrays
sex = Axis('sex=M,F')
# not using the same length for nat and type, otherwise numpy gets confused :(
arr1 = ones('nat=BE, FO')
arr2 = zeros('type=1..3')
array_of_arrays = Array([arr1, arr2], sex)
res = stack(array_of_arrays, sex)
expected = from_string(r"""nat type\sex M F
BE 1 1.0 0.0
BE 2 1.0 0.0
BE 3 1.0 0.0
FO 1 1.0 0.0
FO 2 1.0 0.0
FO 3 1.0 0.0""")
assert_array_equal(res, expected)
# non scalar/non Array
res = stack(([1, 2, 3], [4, 5, 6]))
expected = Array([[1, 4],
[2, 5],
[3, 6]])
assert_array_equal(res, expected)
# stack along multiple axes
# =========================
# a) simple
res = stack({('a0', 'b0'): 0,
('a0', 'b1'): 1,
('a1', 'b0'): 2,
('a1', 'b1'): 3,
('a2', 'b0'): 4,
('a2', 'b1'): 5},
(a, b))
expected = ndtest((a, b))
assert_array_equal(res, expected)
# b) keys not given in axes iteration order
res = stack({('a0', 'b0'): 0,
('a1', 'b0'): 2,
('a2', 'b0'): 4,
('a0', 'b1'): 1,
('a1', 'b1'): 3,
('a2', 'b1'): 5},
(a, b))
expected = ndtest((a, b))
assert_array_equal(res, expected)
# c) key parts not given in the order of axes (ie key part for b before key part for a)
res = stack({('a0', 'b0'): 0,
('a1', 'b0'): 1,
('a2', 'b0'): 2,
('a0', 'b1'): 3,
('a1', 'b1'): 4,
('a2', 'b1'): 5},
(b, a))
expected = ndtest((b, a))
assert_array_equal(res, expected)
# d) same as c) but with a key-value sequence
res = stack([(('a0', 'b0'), 0),
(('a1', 'b0'), 1),
(('a2', 'b0'), 2),
(('a0', 'b1'), 3),
(('a1', 'b1'), 4),
(('a2', 'b1'), 5)],
(b, a))
expected = ndtest((b, a))
assert_array_equal(res, expected)
@needs_python36
def test_stack_kwargs_no_axis_labels():
# these tests rely on kwargs ordering, hence python 3.6
# 1) using scalars
# ----------------
# a) with an axis name
res = stack(a0=0, a1=1, axes='a')
expected = Array([0, 1], 'a=a0,a1')
assert_array_equal(res, expected)
# b) without an axis name
res = stack(a0=0, a1=1)
expected = Array([0, 1], 'a0,a1')
assert_array_equal(res, expected)
# 2) dict of arrays
# -----------------
a = Axis('a=a0,a1,a2')
arr0 = ndtest(a)
arr1 = ndtest(a, start=-1)
# a) with an axis name
res = stack(b0=arr0, b1=arr1, axes='b')
expected = Array([[0, -1],
[1, 0],
[2, 1]], [a, 'b=b0,b1'])
assert_array_equal(res, expected)
# b) without an axis name
res = stack(b0=arr0, b1=arr1)
expected = Array([[0, -1],
[1, 0],
[2, 1]], [a, 'b0,b1'])
assert_array_equal(res, expected)
@needs_python37
def test_stack_dict_no_axis_labels():
# these tests rely on dict ordering, hence python 3.7
# 1) dict of scalars
# ------------------
# a) with an axis name
res = stack({'a0': 0, 'a1': 1}, 'a')
expected = Array([0, 1], 'a=a0,a1')
assert_array_equal(res, expected)
# b) without an axis name
res = stack({'a0': 0, 'a1': 1})
expected = Array([0, 1], 'a0,a1')
assert_array_equal(res, expected)
# 2) dict of arrays
# -----------------
a = Axis('a=a0,a1,a2')
arr0 = ndtest(a)
arr1 = ndtest(a, start=-1)
# a) with an axis name
res = stack({'b0': arr0, 'b1': arr1}, 'b')
expected = Array([[0, -1],
[1, 0],
[2, 1]], [a, 'b=b0,b1'])
assert_array_equal(res, expected)
# b) without an axis name
res = stack({'b0': arr0, 'b1': arr1})
expected = Array([[0, -1],
[1, 0],
[2, 1]], [a, 'b0,b1'])
assert_array_equal(res, expected)
def test_0darray_convert():
int_arr = Array(1)
assert int(int_arr) == 1
assert float(int_arr) == 1.0
assert int_arr.__index__() == 1
float_arr = Array(1.0)
assert int(float_arr) == 1
assert float(float_arr) == 1.0
with pytest.raises(TypeError) as e_info:
float_arr.__index__()
msg = e_info.value.args[0]
expected_np11 = "only integer arrays with one element can be converted to an index"
expected_np12 = "only integer scalar arrays can be converted to a scalar index"
assert msg in {expected_np11, expected_np12}
def test_deprecated_methods():
with pytest.warns(FutureWarning) as caught_warnings:
ndtest((2, 2)).with_axes('a', 'd=d0,d1')
assert len(caught_warnings) == 1
assert caught_warnings[0].message.args[0] == "with_axes() is deprecated. Use set_axes() instead."
assert caught_warnings[0].filename == __file__
with pytest.warns(FutureWarning) as caught_warnings:
ndtest((2, 2)).combine_axes().split_axis()
assert len(caught_warnings) == 1
assert caught_warnings[0].message.args[0] == "split_axis() is deprecated. Use split_axes() instead."
assert caught_warnings[0].filename == __file__
def test_eq():
a = ndtest((2, 3, 4))
ao = a.astype(object)
assert_array_equal(ao.eq(ao['c0'], nans_equal=True), a == a['c0'])
if __name__ == "__main__":
# import doctest
# import unittest
# from larray import core
# doctest.testmod(core)
# unittest.main()
pytest.main()
|
gpl-3.0
|
Fireblend/scikit-learn
|
examples/linear_model/plot_sgd_penalties.py
|
249
|
1563
|
"""
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
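# l1 and l2 return, for each x in [0, 1], the positive-quadrant boundary of the
# unit ball of the corresponding penalty: |w0| + |w1| = 1 for L1 and
# w0**2 + w1**2 = 1 for L2. el is assumed to do the same for the elastic-net
# penalty, with z controlling the mix between the L1 and L2 terms.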
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # alpha = 0.5 would cause a division by zero in el()
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
|
bsd-3-clause
|
rokuz/omim
|
tools/python/booking_hotels_quality.py
|
20
|
2632
|
#!/usr/bin/env python
# coding: utf8
from __future__ import print_function
from collections import namedtuple, defaultdict
from datetime import datetime
from sklearn import metrics
import argparse
import base64
import json
import logging
import matplotlib.pyplot as plt
import os
import pickle
import time
import urllib2
import re
# init logging
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] %(levelname)s: %(message)s')
def load_binary_list(path):
"""Loads reference binary classifier output. """
bits = []
with open(path, 'r') as fd:
for line in fd:
if (not line.strip()) or line.startswith('#'):
continue
bits.append(1 if line.startswith('y') else 0)
return bits
def load_score_list(path):
"""Loads list of matching scores. """
scores = []
with open(path, 'r') as fd:
for line in fd:
if (not line.strip()) or line.startswith('#'):
continue
scores.append(float(re.search(r'result score: (\d*\.\d+)', line).group(1)))
return scores
def process_options():
# TODO(mgsergio): Fix description.
parser = argparse.ArgumentParser(description="Download and process booking hotels.")
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose")
parser.add_argument("-q", "--quiet", action="store_false", dest="verbose")
parser.add_argument("--reference_list", dest="reference_list", help="Path to data files")
parser.add_argument("--sample_list", dest="sample_list", help="Name and destination for output file")
parser.add_argument("--show", dest="show", default=False, action="store_true",
help="Show graph for precision and recall")
options = parser.parse_args()
if not options.reference_list or not options.sample_list:
parser.print_help()
exit()
return options
def main():
options = process_options()
reference = load_binary_list(options.reference_list)
sample = load_score_list(options.sample_list)
precision, recall, threshold = metrics.precision_recall_curve(reference, sample)
aa = zip(precision, recall, threshold)
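# p*r/(p+r) is half the harmonic mean of precision and recall (i.e. F1/2),
# so the threshold chosen below is the one maximizing the F1 score.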
max_by_hmean = max(aa, key=lambda (p, r, t): p*r/(p+r))
print("Optimal threshold: {2} for precision: {0} and recall: {1}".format(*max_by_hmean))
print("AUC: {0}".format(metrics.roc_auc_score(reference, sample)))
if options.show:
plt.plot(recall, precision)
plt.title("Precision/Recall")
plt.ylabel("Precision")
plt.xlabel("Recall")
plt.show()
if __name__ == "__main__":
main()
|
apache-2.0
|
Sentient07/scikit-learn
|
examples/decomposition/plot_image_denoising.py
|
70
|
6249
|
"""
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of a raccoon face image, first fitting an online :ref:`DictionaryLearning`
dictionary and then applying various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is in addition closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version is earlier than 0.12.0 and "
               "thus does not include the scipy.misc.face() image.")
###############################################################################
try:
from scipy import misc
face = misc.face(gray=True)
except AttributeError:
# Old versions of scipy have face in the top level package
face = sp.face(gray=True)
# Convert from uint8 representation with values between 0 and 255 to
# a floating point representation with values between 0 and 1.
face = face / 255
# downsample for higher speed
face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
face /= 4.0
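# (each output pixel is the average of a 2x2 block of the original image)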
height, width = face.shape
# Distort the right half of the image
print('Distorting image...')
distorted = face.copy()
distorted[:, width // 2:] += 0.075 * np.random.randn(height, width // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :width // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
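# n_components is the number of dictionary atoms, alpha the sparsity
# controlling parameter and n_iter the number of mini-batch iterations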
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from face patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, face, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, width // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
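# the per-feature mean is kept so it can be added back to the reconstructed
# patches further below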
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = face.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, width // 2:] = reconstruct_from_patches_2d(
patches, (height, width // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], face,
title + ' (time: %.1fs)' % dt)
plt.show()
|
bsd-3-clause
|
drummonds/pySage50
|
setup.py
|
1
|
4088
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import imp
import subprocess
import platform
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
# self.test_args = []
# self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
# Add the current directory to the module search path.
sys.path.append('.')
# # Constants
CODE_DIRECTORY = 'pysage50'
# DOCS_DIRECTORY = 'docs'
TESTS_DIRECTORY = 'tests'
#DATA_DIRECTORY = 'gnucash_books'
PYTEST_FLAGS = ['--doctest-modules']
# define install_requires for specific Python versions
python_version_specific_requires = []
def read(filename):
"""Return the contents of a file.
:param filename: file path
:type filename: :class:`str`
:return: the file's content
:rtype: :class:`str`
"""
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return f.read()
# Import metadata. Normally this would just be:
#
# from luca import metadata
#
# However, when we do this, we also import `luca/__init__.py'. If this
# imports names from some other modules and these modules have third-party
# dependencies that need installing (which happens after this file is run), the
# script will crash. What we do instead is to load the metadata module by path
# instead, effectively side-stepping the dependency problem. Please make sure
# metadata has no dependencies, otherwise they will need to be added to
# the setup_requires keyword.
metadata = imp.load_source(
'metadata', os.path.join(CODE_DIRECTORY, 'metadata.py'))
# as of Python >= 2.7 and >= 3.2, the argparse module is maintained within
# the Python standard library, otherwise we install it as a separate package
# if sys.version_info < (2, 7) or (3, 0) <= sys.version_info < (3, 3):
# python_version_specific_requires.append('argparse')
# See here for more options:
# <http://pythonhosted.org/setuptools/setuptools.html>
setup_dict = dict(
name=metadata.package,
version=metadata.version,
author=metadata.authors[0],
author_email=metadata.emails[0],
maintainer=metadata.authors[0],
maintainer_email=metadata.emails[0],
url=metadata.url,
description=metadata.description,
long_description=read('README.md'),
keywords=['Sage', 'python', 'binding', 'interface', ],
license='MIT',
platforms='any',
# Find a list of classifiers here:
# <http://pypi.python.org/pypi?%3Aaction=list_classifiers>
classifiers=[
'Development Status :: 3 - pre Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Financial and Insurance Industry',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
'Topic :: Office/Business',
'Topic :: Office/Business :: Financial',
'Topic :: Office/Business :: Financial :: Accounting',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages=find_packages(exclude=(TESTS_DIRECTORY, )),
install_requires=[
# 'sqlite3',
# 'pandas',
] + python_version_specific_requires,
# Allow tests to be run with `python setup.py test'.
tests_require=[
'pytest',
'py',
],
# console=['scripts/piecash_ledger.py','scripts/piecash_toqif.py'],
scripts=[],
cmdclass = {'test': PyTest},
test_suite="tests",
zip_safe=False, # don't use eggs
)
def main():
setup(**setup_dict)
if __name__ == '__main__':
main()
|
mit
|
klim-/pyplane
|
core/Toolbar.py
|
1
|
1723
|
# -*- coding: utf-8 -*-
# Copyright (C) 2013
# by Klemens Fritzsche, pyplane@leckstrom.de
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = 'Klemens Fritzsche'
from PyQt4 import QtGui
from matplotlib.backend_bases import NavigationToolbar2 as NavigationToolbar
from matplotlib.backends.backend_qt4 import cursord
class Toolbar(NavigationToolbar):
"""
This class hides the functionality of NavigationToolbar, and only
provides the necessary functions (only zooming at the moment)
"""
def _init_toolbar(self):
pass
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
self.canvas.drawRectangle(rect)
def set_cursor(self, cursor):
QtGui.QApplication.restoreOverrideCursor()
QtGui.QApplication.setOverrideCursor(QtGui.QCursor(cursord[cursor]))
if __package__ is None:
__package__ = "core.toolbar"
|
gpl-3.0
|
kevin-intel/scikit-learn
|
sklearn/preprocessing/tests/test_discretization.py
|
3
|
12009
|
import pytest
import numpy as np
import scipy.sparse as sp
import warnings
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils._testing import (
assert_array_almost_equal,
assert_array_equal,
assert_allclose_dense_sparse
)
X = [[-2, 1.5, -4, -1],
[-1, 2.5, -3, -0.5],
[0, 3.5, -2, 0.5],
[1, 4.5, -1, 2]]
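# reminder: 'uniform' uses equal-width bins, 'quantile' uses equal-frequency
# bins and 'kmeans' assigns values to the bin of their nearest 1D k-means
# cluster center.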
@pytest.mark.parametrize(
'strategy, expected',
[('uniform', [[0, 0, 0, 0], [1, 1, 1, 0], [2, 2, 2, 1], [2, 2, 2, 2]]),
('kmeans', [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]]),
('quantile', [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [2, 2, 2, 2]])])
def test_fit_transform(strategy, expected):
est = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy=strategy)
est.fit(X)
assert_array_equal(expected, est.transform(X))
def test_valid_n_bins():
KBinsDiscretizer(n_bins=2).fit_transform(X)
KBinsDiscretizer(n_bins=np.array([2])[0]).fit_transform(X)
assert KBinsDiscretizer(n_bins=2).fit(X).n_bins_.dtype == np.dtype(int)
def test_invalid_n_bins():
est = KBinsDiscretizer(n_bins=1)
err_msg = ("KBinsDiscretizer received an invalid "
"number of bins. Received 1, expected at least 2.")
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
est = KBinsDiscretizer(n_bins=1.1)
err_msg = ("KBinsDiscretizer received an invalid "
"n_bins type. Received float, expected int.")
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
def test_invalid_n_bins_array():
# Bad shape
n_bins = np.full((2, 4), 2.)
est = KBinsDiscretizer(n_bins=n_bins)
err_msg = r"n_bins must be a scalar or array of shape \(n_features,\)."
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
# Incorrect number of features
n_bins = [1, 2, 2]
est = KBinsDiscretizer(n_bins=n_bins)
err_msg = r"n_bins must be a scalar or array of shape \(n_features,\)."
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
# Bad bin values
n_bins = [1, 2, 2, 1]
est = KBinsDiscretizer(n_bins=n_bins)
err_msg = ("KBinsDiscretizer received an invalid number of bins "
"at indices 0, 3. Number of bins must be at least 2, "
"and must be an int.")
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
# Float bin values
n_bins = [2.1, 2, 2.1, 2]
est = KBinsDiscretizer(n_bins=n_bins)
err_msg = ("KBinsDiscretizer received an invalid number of bins "
"at indices 0, 2. Number of bins must be at least 2, "
"and must be an int.")
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
@pytest.mark.parametrize(
'strategy, expected',
[('uniform', [[0, 0, 0, 0], [0, 1, 1, 0], [1, 2, 2, 1], [1, 2, 2, 2]]),
('kmeans', [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 2, 2, 2]]),
('quantile', [[0, 0, 0, 0], [0, 1, 1, 1], [1, 2, 2, 2], [1, 2, 2, 2]])])
def test_fit_transform_n_bins_array(strategy, expected):
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], encode='ordinal',
strategy=strategy).fit(X)
assert_array_equal(expected, est.transform(X))
# test the shape of bin_edges_
n_features = np.array(X).shape[1]
assert est.bin_edges_.shape == (n_features, )
for bin_edges, n_bins in zip(est.bin_edges_, est.n_bins_):
assert bin_edges.shape == (n_bins + 1, )
@pytest.mark.parametrize('strategy', ['uniform', 'kmeans', 'quantile'])
def test_same_min_max(strategy):
warnings.simplefilter("always")
X = np.array([[1, -2],
[1, -1],
[1, 0],
[1, 1]])
est = KBinsDiscretizer(strategy=strategy, n_bins=3, encode='ordinal')
warning_message = ("Feature 0 is constant and will be replaced "
"with 0.")
with pytest.warns(UserWarning, match=warning_message):
est.fit(X)
assert est.n_bins_[0] == 1
# replace the feature with zeros
Xt = est.transform(X)
assert_array_equal(Xt[:, 0], np.zeros(X.shape[0]))
def test_transform_1d_behavior():
X = np.arange(4)
est = KBinsDiscretizer(n_bins=2)
with pytest.raises(ValueError):
est.fit(X)
est = KBinsDiscretizer(n_bins=2)
est.fit(X.reshape(-1, 1))
with pytest.raises(ValueError):
est.transform(X)
@pytest.mark.parametrize('i', range(1, 9))
def test_numeric_stability(i):
X_init = np.array([2., 4., 6., 8., 10.]).reshape(-1, 1)
Xt_expected = np.array([0, 0, 1, 1, 1]).reshape(-1, 1)
# Test up to discretizing nano units
X = X_init / 10**i
Xt = KBinsDiscretizer(n_bins=2, encode='ordinal').fit_transform(X)
assert_array_equal(Xt_expected, Xt)
def test_invalid_encode_option():
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], encode='invalid-encode')
err_msg = (r"Valid options for 'encode' are "
r"\('onehot', 'onehot-dense', 'ordinal'\). "
r"Got encode='invalid-encode' instead.")
with pytest.raises(ValueError, match=err_msg):
est.fit(X)
def test_encode_options():
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3],
encode='ordinal').fit(X)
Xt_1 = est.transform(X)
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3],
encode='onehot-dense').fit(X)
Xt_2 = est.transform(X)
assert not sp.issparse(Xt_2)
assert_array_equal(OneHotEncoder(
categories=[np.arange(i) for i in [2, 3, 3, 3]],
sparse=False)
.fit_transform(Xt_1), Xt_2)
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3],
encode='onehot').fit(X)
Xt_3 = est.transform(X)
assert sp.issparse(Xt_3)
assert_array_equal(OneHotEncoder(
categories=[np.arange(i) for i in [2, 3, 3, 3]],
sparse=True)
.fit_transform(Xt_1).toarray(),
Xt_3.toarray())
def test_invalid_strategy_option():
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], strategy='invalid-strategy')
err_msg = (r"Valid options for 'strategy' are "
r"\('uniform', 'quantile', 'kmeans'\). "
r"Got strategy='invalid-strategy' instead.")
with pytest.raises(ValueError, match=err_msg):
est.fit(X)
@pytest.mark.parametrize(
'strategy, expected_2bins, expected_3bins, expected_5bins',
[('uniform', [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 2, 2], [0, 0, 1, 1, 4, 4]),
('kmeans', [0, 0, 0, 0, 1, 1], [0, 0, 1, 1, 2, 2], [0, 0, 1, 2, 3, 4]),
('quantile', [0, 0, 0, 1, 1, 1], [0, 0, 1, 1, 2, 2], [0, 1, 2, 3, 4, 4])])
def test_nonuniform_strategies(
strategy, expected_2bins, expected_3bins, expected_5bins):
X = np.array([0, 0.5, 2, 3, 9, 10]).reshape(-1, 1)
# with 2 bins
est = KBinsDiscretizer(n_bins=2, strategy=strategy, encode='ordinal')
Xt = est.fit_transform(X)
assert_array_equal(expected_2bins, Xt.ravel())
# with 3 bins
est = KBinsDiscretizer(n_bins=3, strategy=strategy, encode='ordinal')
Xt = est.fit_transform(X)
assert_array_equal(expected_3bins, Xt.ravel())
# with 5 bins
est = KBinsDiscretizer(n_bins=5, strategy=strategy, encode='ordinal')
Xt = est.fit_transform(X)
assert_array_equal(expected_5bins, Xt.ravel())
@pytest.mark.parametrize(
'strategy, expected_inv',
[('uniform', [[-1.5, 2., -3.5, -0.5], [-0.5, 3., -2.5, -0.5],
[0.5, 4., -1.5, 0.5], [0.5, 4., -1.5, 1.5]]),
('kmeans', [[-1.375, 2.125, -3.375, -0.5625],
[-1.375, 2.125, -3.375, -0.5625],
[-0.125, 3.375, -2.125, 0.5625],
[0.75, 4.25, -1.25, 1.625]]),
('quantile', [[-1.5, 2., -3.5, -0.75], [-0.5, 3., -2.5, 0.],
[0.5, 4., -1.5, 1.25], [0.5, 4., -1.5, 1.25]])])
@pytest.mark.parametrize('encode', ['ordinal', 'onehot', 'onehot-dense'])
def test_inverse_transform(strategy, encode, expected_inv):
kbd = KBinsDiscretizer(n_bins=3, strategy=strategy, encode=encode)
Xt = kbd.fit_transform(X)
Xinv = kbd.inverse_transform(Xt)
assert_array_almost_equal(expected_inv, Xinv)
@pytest.mark.parametrize('strategy', ['uniform', 'kmeans', 'quantile'])
def test_transform_outside_fit_range(strategy):
X = np.array([0, 1, 2, 3])[:, None]
kbd = KBinsDiscretizer(n_bins=4, strategy=strategy, encode='ordinal')
kbd.fit(X)
X2 = np.array([-2, 5])[:, None]
X2t = kbd.transform(X2)
assert_array_equal(X2t.max(axis=0) + 1, kbd.n_bins_)
assert_array_equal(X2t.min(axis=0), [0])
def test_overwrite():
X = np.array([0, 1, 2, 3])[:, None]
X_before = X.copy()
est = KBinsDiscretizer(n_bins=3, encode="ordinal")
Xt = est.fit_transform(X)
assert_array_equal(X, X_before)
Xt_before = Xt.copy()
Xinv = est.inverse_transform(Xt)
assert_array_equal(Xt, Xt_before)
assert_array_equal(Xinv, np.array([[0.5], [1.5], [2.5], [2.5]]))
@pytest.mark.parametrize(
'strategy, expected_bin_edges',
[('quantile', [0, 1, 3]), ('kmeans', [0, 1.5, 3])])
def test_redundant_bins(strategy, expected_bin_edges):
X = [[0], [0], [0], [0], [3], [3]]
kbd = KBinsDiscretizer(n_bins=3, strategy=strategy)
warning_message = ("Consider decreasing the number of bins.")
with pytest.warns(UserWarning, match=warning_message):
kbd.fit(X)
assert_array_almost_equal(kbd.bin_edges_[0], expected_bin_edges)
def test_percentile_numeric_stability():
X = np.array([0.05, 0.05, 0.95]).reshape(-1, 1)
bin_edges = np.array([0.05, 0.23, 0.41, 0.59, 0.77, 0.95])
Xt = np.array([0, 0, 4]).reshape(-1, 1)
kbd = KBinsDiscretizer(n_bins=10, encode='ordinal',
strategy='quantile')
warning_message = ("Consider decreasing the number of bins.")
with pytest.warns(UserWarning, match=warning_message):
kbd.fit(X)
assert_array_almost_equal(kbd.bin_edges_[0], bin_edges)
assert_array_almost_equal(kbd.transform(X), Xt)
@pytest.mark.parametrize("in_dtype", [np.float16, np.float32, np.float64])
@pytest.mark.parametrize("out_dtype", [None, np.float16, np.float32,
np.float64])
@pytest.mark.parametrize('encode', ['ordinal', 'onehot', 'onehot-dense'])
def test_consistent_dtype(in_dtype, out_dtype, encode):
X_input = np.array(X, dtype=in_dtype)
kbd = KBinsDiscretizer(n_bins=3, encode=encode, dtype=out_dtype)
# an error is raised if a wrong dtype is defined for the model
if out_dtype not in [None, np.float32, np.float64]:
with pytest.raises(ValueError, match="Valid options for 'dtype' are"):
kbd.fit(X_input)
else:
kbd.fit(X_input)
# test output dtype
if out_dtype is not None:
expected_dtype = out_dtype
elif out_dtype is None and X_input.dtype == np.float16:
# wrong numeric input dtypes are cast to np.float64
expected_dtype = np.float64
else:
expected_dtype = X_input.dtype
Xt = kbd.transform(X_input)
assert Xt.dtype == expected_dtype
@pytest.mark.parametrize('input_dtype', [np.float16, np.float32, np.float64])
@pytest.mark.parametrize('encode', ['ordinal', 'onehot', 'onehot-dense'])
def test_32_equal_64(input_dtype, encode):
# TODO this check is redundant with common checks and can be removed
# once #16290 is merged
X_input = np.array(X, dtype=input_dtype)
# 32 bit output
kbd_32 = KBinsDiscretizer(n_bins=3, encode=encode, dtype=np.float32)
kbd_32.fit(X_input)
Xt_32 = kbd_32.transform(X_input)
# 64 bit output
kbd_64 = KBinsDiscretizer(n_bins=3, encode=encode, dtype=np.float64)
kbd_64.fit(X_input)
Xt_64 = kbd_64.transform(X_input)
assert_allclose_dense_sparse(Xt_32, Xt_64)
|
bsd-3-clause
|
robin-lai/scikit-learn
|
sklearn/neighbors/unsupervised.py
|
117
|
4755
|
"""Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Affects only :meth:`k_neighbors` and :meth:`kneighbors_graph` methods.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> nbrs = neigh.radius_neighbors([[0, 0, 1.3]], 0.4, return_distance=False)
>>> np.asarray(nbrs[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
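# ---------------------------------------------------------------------------
# Editorial usage sketch (hedged addition, not part of scikit-learn): a small
# helper showing the three query modes of the estimator above -- k-nearest
# point queries, radius queries and the sparse connectivity graph. The helper
# name `_example_unsupervised_queries` is hypothetical; only documented
# NearestNeighbors methods are used.
def _example_unsupervised_queries():
    samples = [[0., 0.], [0., 1.], [1., 0.], [5., 5.]]
    nn = NearestNeighbors(n_neighbors=2, radius=1.5, metric='euclidean').fit(samples)
    dist, ind = nn.kneighbors([[0., 0.5]])                      # two nearest points
    radius_ind = nn.radius_neighbors([[0., 0.5]], return_distance=False)
    graph = nn.kneighbors_graph(samples, mode='connectivity')   # scipy CSR matrix
    return dist, ind, radius_ind, graph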
|
bsd-3-clause
|
russel1237/scikit-learn
|
sklearn/cluster/setup.py
|
263
|
1449
|
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
omerwe/PCGCs
|
deprecated/pcgcs_direct.py
|
1
|
39643
|
from __future__ import division
import numpy as np
import scipy.stats as stats
import scipy.linalg as la
import sys
import random
import time
import os
import os.path
import pandas as pd
import itertools
np.set_printoptions(precision=4, linewidth=200)
from sklearn.linear_model import LinearRegression, LogisticRegression
import pcgcs_utils
from pcgcs_utils import print_memory_usage
# def print_memory():
# import psutil
# process = psutil.Process(os.getpid())
# print 'memory usage:', process.memory_info().rss
def print_sumstats(cov1, u1_0, u1_1, var_t1, cov2, u2_0=None, u2_1=None, var_t2=None, s1=None, sum_s1=None, sum_s1_sqr=None, s2=None, sum_s2=None, sum_s2_sqr=None):
if (cov1 is None and cov2 is None and sum_s1 is None and sum_s2 is None): return
print
print
print 'summary statistics for subsequent estimation:'
print '-----------------------------------'
if (cov1 is not None):
print 'mean Q for study 1 (mean_Q1): %0.4f'%(np.mean((u1_0 + u1_1)**2))
print 'liability variance explained by covariates (var_t1): %0.4f'%(var_t1)
if (cov2 is not None):
print 'mean Q for study 2 (mean_Q2): %0.4f'%(np.mean((u2_0 + u2_1)**2))
print 'liability variance explained by covariates (var_t2): %0.4f'%(var_t2)
if (sum_s1 is not None):
print 'study 1 genotypes deflation factor (geno1_factor): %0.6f'%((sum_s1 - s1.sum()) / sum_s1)
print 'study 1 squared genotypes deflation factor (sqr_geno1_factor): %0.6f'%((sum_s1_sqr - np.sum(s1**2)) / sum_s1_sqr)
if (sum_s2 is not None):
print 'study 2 genotypes deflation factor (geno2_factor): %0.6f'%((sum_s2 - s2.sum()) / sum_s2)
print 'study 2 squared genotypes deflation factor (sqr_geno2_factor): %0.6f'%((sum_s2_sqr - np.sum(s2**2)) / sum_s2_sqr)
print
print
def pcgc_jackknife_sig2g(X, y, numer_sig2g, denom_sig2g, pcgc_coeff=1.0, u0=None, u1=None, window_size=1000):
if (u0 is not None):
u_sqr = (u0 + u1)**2
qy = y * (u0+u1)
else:
qy = y
if (window_size is None or window_size<0): window_size = X.shape[0]
estimators_arr = np.empty(X.shape[0])
for i in xrange(0, X.shape[0], window_size):
X_i = X[i:i+window_size]
G_i = X_i.dot(X.T) / X.shape[1]
indices0 = np.arange(G_i.shape[0])
G_i[indices0, i+indices0]=0
for j in xrange(G_i.shape[0]):
numer_sig2g_i = numer_sig2g - 2*G_i[j].dot(qy[i+j]*qy)
if (u0 is None): denom_sig2g_i = denom_sig2g - 2*G_i[j].dot(G_i[j])
else: denom_sig2g_i = denom_sig2g - 2*G_i[j].dot(G_i[j] * u_sqr[i+j]*u_sqr)
estimators_arr[i+j] = numer_sig2g_i / denom_sig2g_i
estimators_arr /= pcgc_coeff
sig2g_var = (X.shape[0]-1)/float(X.shape[0]) * np.sum((estimators_arr - estimators_arr.mean())**2)
return np.sqrt(sig2g_var)
def pcgc_jackknife_corr(X1, X2, y1, y2,
numer_sig2g1, denom_sig2g1, numer_sig2g2, denom_sig2g2, numer_rho, denom_rho,
pcgc_coeff1=1.0, pcgc_coeff2=1.0, pcgc_coeff12=1.0,
u1_0=None, u1_1=None, u2_0=None, u2_1=None,
is_same=None, window_size=1000):
if (window_size is None or window_size<0): window_size = X1.shape[0]
if (u1_0 is not None):
u1_sqr = (u1_0 + u1_1)**2
u2_sqr = (u2_0 + u2_1)**2
qy1 = y1 * (u1_0+u1_1)
qy2 = y2 * (u2_0+u2_1)
else:
qy1 = y1
qy2 = y2
sig2g1_estimators_arr = np.empty(X1.shape[0])
sig2g2_estimators_arr = np.empty(X2.shape[0])
rho_estimators_arr = np.empty(X1.shape[0] + X2.shape[0])
#exclude individuals from study 1
for i in xrange(0, X1.shape[0], window_size):
X1_i = X1[i:i+window_size]
G_i = X1_i.dot(X1.T) / X1.shape[1]
indices0 = np.arange(G_i.shape[0])
G_i[indices0, i+indices0]=0
for j in xrange(G_i.shape[0]):
numer_sig2g1_i = numer_sig2g1 - 2*G_i[j].dot(qy1[i+j]*qy1)
if (u1_0 is None): denom_sig2g1_i = denom_sig2g1 - 2*G_i[j].dot(G_i[j])
else: denom_sig2g1_i = denom_sig2g1 - 2*G_i[j].dot(G_i[j] * u1_sqr[i+j]*u1_sqr)
sig2g1_estimators_arr[i+j] = numer_sig2g1_i / denom_sig2g1_i
G_i = X1_i.dot(X2.T) / X1.shape[1]
G_i[is_same[i:i+window_size]]=0
for j in xrange(G_i.shape[0]):
numer_rho_i = numer_rho - G_i[j].dot(qy1[i+j]*qy2)
if (u1_0 is None): denom_rho_i = denom_rho - G_i[j].dot(G_i[j])
else: denom_rho_i = denom_rho - G_i[j].dot(G_i[j] * u1_sqr[i+j]*u2_sqr)
rho_estimators_arr[i+j] = numer_rho_i / denom_rho_i
#exclude individuals from study 2
for i in xrange(0, X2.shape[0], window_size):
X2_i = X2[i:i+window_size]
G_i = X2_i.dot(X2.T) / X1.shape[1]
indices0 = np.arange(G_i.shape[0])
G_i[indices0, i+indices0]=0
for j in xrange(G_i.shape[0]):
numer_sig2g2_i = numer_sig2g2 - 2*G_i[j].dot(qy2[i+j]*qy2)  #factor of 2 matches the study-1 jackknife above (both ordered pairs are removed)
if (u2_0 is None): denom_sig2g2_i = denom_sig2g2 - 2*G_i[j].dot(G_i[j])
else: denom_sig2g2_i = denom_sig2g2 - 2*G_i[j].dot(G_i[j] * u2_sqr[i+j]*u2_sqr)
sig2g2_estimators_arr[i+j] = numer_sig2g2_i / denom_sig2g2_i
G_i = X2_i.dot(X1.T) / X1.shape[1]
G_i[is_same.T[i:i+window_size]]=0
for j in xrange(G_i.shape[0]):
numer_rho_i = numer_rho - G_i[j].dot(qy1[i+j]*qy2)
if (u1_0 is None): denom_rho_i = denom_rho - G_i[j].dot(G_i[j])
else: denom_rho_i = denom_rho - G_i[j].dot(G_i[j] * u2_sqr[i+j]*u1_sqr)
rho_estimators_arr[X1.shape[0]+i+j] = numer_rho_i / denom_rho_i
sig2g1_estimators_arr /= pcgc_coeff1
sig2g2_estimators_arr /= pcgc_coeff2
rho_estimators_arr /= pcgc_coeff12
sig2g1_var = (X1.shape[0]-1)/float(X1.shape[0]) * np.sum((sig2g1_estimators_arr - sig2g1_estimators_arr.mean())**2)
sig2g2_var = (X2.shape[0]-1)/float(X2.shape[0]) * np.sum((sig2g2_estimators_arr - sig2g2_estimators_arr.mean())**2)
rho_var = (rho_estimators_arr.shape[0]-1)/float(rho_estimators_arr.shape[0]) * np.sum((rho_estimators_arr - rho_estimators_arr.mean())**2)
#compute genetic correlation pseudo-values
sig2g1 = numer_sig2g1 / denom_sig2g1 / pcgc_coeff1
sig2g2 = numer_sig2g2 / denom_sig2g2 / pcgc_coeff2
sig2g1_estimators_arr = np.concatenate((sig2g1_estimators_arr, np.ones(X2.shape[0])*sig2g1))
sig2g2_estimators_arr = np.concatenate((np.ones(X1.shape[0])*sig2g2, sig2g2_estimators_arr))
corr_estimators_arr = rho_estimators_arr / np.sqrt(sig2g1_estimators_arr * sig2g2_estimators_arr)
corr_var = (corr_estimators_arr.shape[0]-1)/float(corr_estimators_arr.shape[0]) * np.sum((corr_estimators_arr - corr_estimators_arr.mean())**2)
return np.sqrt(sig2g1_var), np.sqrt(sig2g2_var), np.sqrt(rho_var), np.sqrt(corr_var)
# # # def permutation_test(G, yyT, is_same, num_perms=10000):
# # # x = G.reshape(-1)
# # # y = yyT.reshape(-1)
# # # x = x[~(is_same.reshape(-1))]
# # # y = y[~(is_same.reshape(-1))]
# # # real_stat = x.dot(y)
# # # null_stats = np.empty(num_perms)
# # # for i in xrange(num_perms):
# # # if (i>0 and i % 100 == 0): print 'finished %d/%d permutations'%(i, num_perms)
# # # np.random.shuffle(y)
# # # null_stats[i] = x.dot(y)
# # # pvalue = np.mean(np.abs(null_stats) > np.abs(real_stat))
# # # if (pvalue < 1.0/num_perms): pvalue = 1.0/num_perms
# # # return pvalue
def permutation_test2(X1, y1, X2, y2, G12_issame, is_same1, is_same2, num_perms=10000):
has_same = (G12_issame.shape[0] > 0)
c = float(X1.shape[1])
y1 = y1.copy()
y2 = y2.copy()
null_stats = np.empty(num_perms)
z1 = y1.dot(X1)
z2 = y2.dot(X2)
real_stat = z1.dot(z2) / c
if has_same: real_stat -= G12_issame.dot(y1[is_same1] * y2[is_same2])
for i in xrange(num_perms):
if (i>0 and i % 100 == 0): print 'finished %d/%d permutations'%(i, num_perms)
np.random.shuffle(y1)
np.random.shuffle(y2)
z1 = y1.dot(X1)
z2 = y2.dot(X2)
null_stats[i] = z1.dot(z2) / c
if has_same: null_stats[i] -= G12_issame.dot(y1[is_same1] * y2[is_same2])
pvalue = np.mean(np.abs(null_stats) > np.abs(real_stat))
if (pvalue < 1.0/num_perms): pvalue = 1.0/num_perms
return pvalue
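#Editorial sketch (hedged addition, not part of the original script): a dense,
#quadratic-memory reference for the un-permuted statistic computed by
#permutation_test2 above, intended only for checks on small toy data. It takes
#the full 2-D overlap mask (the is_same matrix built in main below) and assumes
#overlapping individuals appear in the same relative order in both studies.
#The helper name is hypothetical.
def _perm_stat_naive(X1, y1, X2, y2, is_same):
    G = X1.dot(X2.T) / float(X1.shape[1])
    G[is_same] = 0                      #drop pairs that are the same individual
    return y1.dot(G).dot(y2)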
def permutation_test_heritability(X, y, G_diag, num_perms=10000):
c = float(X.shape[1])
y = y.copy()
null_stats = np.empty(num_perms)
z = y.dot(X)
real_stat = z.dot(z) / c
real_stat -= G_diag.dot(y**2)
for i in xrange(num_perms):
if (i>0 and i % 100 == 0): print 'finished %d/%d permutations'%(i, num_perms)
np.random.shuffle(y)
z = y.dot(X)
null_stats[i] = z.dot(z) / c
null_stats[i] -= G_diag.dot(y**2)
pvalue = np.mean(np.abs(null_stats) > np.abs(real_stat))
if (pvalue < 1.0/num_perms): pvalue = 1.0/num_perms
return pvalue
#compute the PCGC denominator with limited memory, by only storing matrices of size (window_size x sample_size)
def pcgc_denom_lowmem(X1, X2, u1_0, u1_1, u2_0, u2_1, is_same=None, window_size=1000):
print_memory_usage(7)
denom=0
if (window_size is None or window_size<0): window_size = X1.shape[0]
for i in xrange(0, X1.shape[0], window_size):
G_i = X1[i:i+window_size].dot(X2.T)
if (is_same is None):
indices0 = np.arange(G_i.shape[0])
G_i[indices0, i+indices0]=0
else: G_i[is_same[i:i+window_size]] = 0
u1_0_i = u1_0[i:i+window_size]
u1_1_i = u1_1[i:i+window_size]
denom += (
np.einsum('ij,ij,i,j', G_i, G_i, u1_0_i**2, u2_0**2)
+ np.einsum('ij,ij,i,j', G_i, G_i, u1_0_i**2, u2_1**2)
+ np.einsum('ij,ij,i,j', G_i, G_i, u1_1_i**2, u2_0**2)
+ np.einsum('ij,ij,i,j', G_i, G_i, u1_1_i**2, u2_1**2)
)
denom += 2 * (
np.einsum('ij,ij,i,j->', G_i, G_i, u1_0_i**2,u2_0*u2_1)
+ np.einsum('ij,ij,i,j->', G_i, G_i, u1_0_i*u1_1_i,u2_0**2)
+ np.einsum('ij,ij,i,j->', G_i, G_i, u1_0_i*u1_1_i,u2_0*u2_1)
+ np.einsum('ij,ij,i,j->', G_i, G_i, u1_0_i*u1_1_i,u2_1*u2_0)
+ np.einsum('ij,ij,i,j->', G_i, G_i, u1_0_i*u1_1_i,u2_1**2)
+ np.einsum('ij,ij,i,j->', G_i, G_i, u1_1_i**2, u2_1*u2_0)
)
denom /= X1.shape[1]**2
return denom
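#Editorial sketch (hedged addition, not part of the original script): a dense
#reference for pcgc_denom_lowmem above, for verification on small toy data only
#(it forms the full kinship matrix). It assumes the expansion above equals the
#sum over the retained pairs of G_ij^2 * (u1_0+u1_1)_i^2 * (u2_0+u2_1)_j^2,
#which is what the four squared terms plus the doubled cross terms amount to.
#The helper name is hypothetical.
def _pcgc_denom_naive(X1, X2, u1_0, u1_1, u2_0, u2_1, is_same=None):
    G = X1.dot(X2.T) / float(X1.shape[1])
    if (is_same is None):
        G[np.arange(G.shape[0]), np.arange(G.shape[0])] = 0
    else:
        G[is_same] = 0
    return np.einsum('ij,ij,i,j', G, G, (u1_0+u1_1)**2, (u2_0+u2_1)**2)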
#compute the PCGC denominator with limited memory, by only storing matrices of size (window_size x sample_size), without covariates
def pcgc_denom_lowmem_nocov(X1, X2, is_same=None, window_size=1000):
print_memory_usage(6)
denom=0
if (window_size is None or window_size<0): window_size = X1.shape[0]
for i in xrange(0, X1.shape[0], window_size):
G_i = X1[i:i+window_size].dot(X2.T)
if (is_same is None):
indices0 = np.arange(G_i.shape[0])
G_i[indices0, i+indices0]=0
else: G_i[is_same[i:i+window_size]] = 0
denom += np.einsum('ij,ij',G_i,G_i)
denom /= X1.shape[1]**2
return denom
def write_sumstats(z, n, snpNames, out_file, compute_p=True):
#Compute p-values
t = z / np.sqrt(n)
t[t>1.0] = 1.0
t[t<-1.0] = -1.0
degrees_f = n-2
TINY = 1.0e-20
stat = t * np.sqrt(degrees_f / ((1.0-t+TINY) * (1.0+t+TINY)))
pvals = stats.t.sf(np.abs(stat), degrees_f)*2
if not compute_p: pvals[:] = np.zeros(pvals.shape[0]) + np.nan
df = pd.DataFrame(snpNames, columns=['snpid'])
df['a1'] = ['1']*len(pvals)
df['a2'] = ['2']*len(pvals)
df['N'] = [n]*len(pvals)
df['P'] = pvals
df['Z'] = z
if (not out_file.endswith('.gzip')) and (not out_file.endswith('.gz')): out_file += '.gz'
df.to_csv(out_file, sep='\t', index=False, float_format='%0.6e', compression='gzip', na_rep='NA')
def print_preamble():
print '*********************************************************************'
print '* PCGC-direct for heritability and genetic correlation estimates'
print '* Version 1.0.0'
print '* (C) 2018 Omer Weissbrod'
print '* Technion - Israel Institute of Technology'
print '*********************************************************************'
print
#compute liability variance due to covariates
def varLiab_covar(prev, tau_i, phe):
var_E_t_given_y = prev * (1-prev) * (tau_i[phe>phe.mean()].mean() - tau_i[phe<phe.mean()].mean())**2
E_var_t_given_y = prev * np.var(tau_i[phe>phe.mean()]) + (1-prev) * np.var(tau_i[phe<phe.mean()])
var_t = var_E_t_given_y + E_var_t_given_y
return var_t
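#Editorial note (hedged): varLiab_covar above applies the law of total variance,
#Var(t) = Var(E[t|y]) + E[Var(t|y)], with the expectation taken over case/control status.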
def my_linreg(X,y):
R = X.T.dot(X)
XTy = X.T.dot(y)
L = la.cho_factor(R)
coef = la.cho_solve(L, XTy)
return coef
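#Editorial note (hedged): my_linreg solves the ordinary least-squares normal
#equations (X'X)*coef = X'y via a Cholesky factorization; for full-rank X this
#matches np.linalg.lstsq(X, y)[0] up to numerical precision.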
#initial computations required for PCGC
def regress_given_PCs(X, cov, PC_indices):
assert np.all(PC_indices <= cov.shape[1]), 'given PC number cannot be larger than %d'%(cov.shape[1])
assert np.all(PC_indices > 0)
assert np.all(~np.isnan(cov))
assert np.all(~np.isnan(X))
coef = my_linreg(cov[:, PC_indices-1], X)
X -= cov[:, PC_indices-1].dot(coef)
# linreg = LinearRegression(fit_intercept=False)
# linreg.fit(cov[:, PC_indices-1], X)
# X -= linreg.predict(cov[:, PC_indices-1])
return X
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
#parameters for exact computations
parser.add_argument('--sumstats_only', metavar='sumstats_only', type=int, default=0, help='If set to 1, PCGC-s will only compute summary statistics and print them to files, without estimating variance components (default 0)')
parser.add_argument('--bfile1', metavar='bfile1', required=True, help='plink file for study 1')
parser.add_argument('--bfile2', metavar='bfile2', default=None, help='plink file for study 2')
parser.add_argument('--pheno1', metavar='pheno1', required=True, help='phenotypes file for study 1')
parser.add_argument('--pheno2', metavar='pheno2', default=None, help='phenotypes file for study 2')
parser.add_argument('--covar1', metavar='covar1', default=None, help='covariates file for study 1')
parser.add_argument('--covar2', metavar='covar2', default=None, help='covariates file for study 2')
parser.add_argument('--prev1', metavar='prev1', type=float, required=True, help='population prevalence of study 1')
parser.add_argument('--prev2', metavar='prev2', type=float, default=None, help='population prevalence of study 2')
parser.add_argument('--extract', metavar='extract', default=None, help='file with list of SNPs to use')
parser.add_argument('--keep1', metavar='keep1', default=None, help='file with list of individuals to use in study 1')
parser.add_argument('--keep2', metavar='keep2', default=None, help='file with list of individuals to use in study 2')
parser.add_argument('--norm', metavar='norm', default=None, help='SNPs normalization method (see help file)')
parser.add_argument('--maf', metavar='maf', default=None, help='MAFs file (to be used with "--norm maf" option)')
parser.add_argument('--numPCs1', metavar='numPCs1', type=int, default=0, help='#PCs to regress out of dataset 1')
parser.add_argument('--numPCs2', metavar='numPCs2', type=int, default=0, help='#PCs to regress out of dataset 2')
parser.add_argument('--chr', metavar='chr', type=int, default=None, help='use only SNPs from a specific chromosome')
parser.add_argument('--missingPhenotype', metavar='missingPhenotype', default='-9', help='identifier for missing values (default: -9)')
parser.add_argument('--center', metavar='center', type=int, default=1, help='whether to center SNPs prior to computing kinship (0 or 1, default 1)')
parser.add_argument('--mem_size', metavar='mem_size', type=int, default=1000, help='The maximum number of rows in each kinship matrix to be computed. Larger values will improve run-time but use more memory')
parser.add_argument('--jackknife', metavar='jackknife', type=int, default=0, help='Whether jackknife-based standard errors will be computed (0 or 1, default 0)')
parser.add_argument('--num_perms', metavar='num_perms', type=int, default=0, help='number of permutation testing iterations')
parser.add_argument('--z1_nocov_out', metavar='z1_nocov_out', default=None, help='output file for Z-score statistics for study 1 without covariates')
parser.add_argument('--z2_nocov_out', metavar='z2_nocov_out', default=None, help='output file for Z-score statistics for study 2 without covariates')
parser.add_argument('--z1_cov_out', metavar='z1_cov_out', default=None, help='output file for Z-score statistics for study 1 with covariates')
parser.add_argument('--z2_cov_out', metavar='z2_cov_out', default=None, help='output file for Z-score statistics for study 2 with covariates')
parser.add_argument('--Gty1_nocov_out', metavar='Gty1_nocov_out', default=None, help='output file for covariate-less summary information for individuals in study 1')
parser.add_argument('--Gty2_nocov_out', metavar='Gty2_nocov_out', default=None, help='output file for covariate-less summary information for individuals in study 2')
parser.add_argument('--Gty1_cov_out', metavar='Gty1_cov_out', default=None, help='output file for covariates-summary information for individuals in study 1')
parser.add_argument('--Gty2_cov_out', metavar='Gty2_cov_out', default=None, help='output file for covariates-summary information for individuals in study 2')
parser.add_argument('--PC1', metavar='PC1', default=None, help='comma-separated indices of covariates that are PCs in covar1 (starting from 1)')
parser.add_argument('--PC2', metavar='PC2', default=None, help='comma-separated indices of covariates that are PCs in covar2 (starting from 1)')
parser.add_argument('--snp1', metavar='snp1', default=None, type=int, help='read only a subset of SNPs starting from snp1, starting from 1 (must be specified with snp2)')
parser.add_argument('--snp2', metavar='snp2', default=None, type=int, help='read only a subset of SNPs ending with snp2, starting from 1 (must be specified with snp1)')
parser.add_argument('--snp_weights', metavar='snp_weights', default=None, help='snp weights file (two columns: snp name, weight)')
args = parser.parse_args()
print_preamble()
#validate command line arguments
#####################################################################################
if (args.bfile2 is not None):
assert args.pheno2 is not None, '--pheno2 must be specified with --bfile2'
assert args.prev2 is not None, '--prev2 must be specified with --bfile2'
if (args.bfile2 is None):
assert args.keep2 is None, '--keep2 cannot be specified without --bfile2'
assert args.covar2 is None, '--covar2 cannot be specified without --bfile2'
assert args.prev2 is None, '--prev2 cannot be specified without --bfile2'
assert args.pheno2 is None, '--pheno2 cannot be specified without --bfile2'
assert args.z2_nocov_out is None, '--z2_nocov_out cannot be specified without --bfile2'
assert args.z2_cov_out is None, '--z2_cov_out cannot be specified without --bfile2'
assert args.numPCs2==0, '--numPCs2 cannot be specified without --bfile2'
assert args.PC2 is None, '--PC2 cannot be specified without --bfile2'
if (args.numPCs1>0): assert args.PC1 is None, 'PC1 cannot be specified with numPCs1'
if (args.numPCs2>0): assert args.PC2 is None, 'PC2 cannot be specified with numPCs2'
if (args.PC1 is not None):
assert args.covar1 is not None, '--PC1 cannot be specified without --covar1'
args.PC1 = np.array(args.PC1.split(','), dtype=np.int)
assert np.all(args.PC1 >= 1), '--PC1 numbers must be >=1'
if (args.PC2 is not None):
assert args.covar2 is not None, '--PC2 cannot be specified without --covar2'
args.PC2 = np.array(args.PC2.split(','), dtype=np.int)
assert np.all(args.PC2 >= 1), '--PC2 numbers must be >=1'
if (args.snp1 is not None):
assert args.snp1>=1, '--snp1 must be >=1'
assert args.snp2 is not None, '--snp1 must be specified with --snp2'
assert args.bfile2 is None, '--snp1 cannot be specified when two bfiles are provided'
assert args.Gty1_nocov_out is None, 'Gty1_nocov_out cannot be specified when --snp1 is set'
assert args.Gty1_cov_out is None, 'Gty1_cov_out cannot be specified when --snp1 is set'
if (args.snp2 is not None): assert args.snp1 is not None, '--snp2 must be specified with --snp1'
if (args.maf is not None): assert args.norm=='maf', '--maf option can only be used when "--norm maf" option is invoked'
if (args.norm == 'maf'): assert args.maf is not None, 'maf file must be provided to use "--norm maf"'
if (args.covar1 is None):
assert args.z1_cov_out is None, 'z1_cov_out cannot be specified without covar1'
assert args.Gty1_cov_out is None, 'Gty1_cov_out cannot be specified without covar1'
if (args.covar2 is None):
assert args.z2_cov_out is None, 'z2_cov_out cannot be specified without covar2'
assert args.Gty2_cov_out is None, 'Gty2_cov_out cannot be specified without covar2'
if (args.sumstats_only > 0):
assert args.z1_nocov_out is not None or args.z1_cov_out is not None, 'z1_nocov_out or z1_cov_out must be defined when sumstats_only=1'
assert args.num_perms==0, 'permutation testing cannot be used when sumstats_only=1'
#####################################################################################
#read and preprocess the data
X1, bed1, phe1, cov1, X2, bed2, phe2, cov2 = pcgcs_utils.read_SNPs(bfile1=args.bfile1, pheno1=args.pheno1, prev1=args.prev1, covar1=args.covar1, keep1=args.keep1, bfile2=args.bfile2, pheno2=args.pheno2, prev2=args.prev2, covar2=args.covar2, keep2=args.keep2, extract=args.extract, missingPhenotype=args.missingPhenotype, chr=args.chr, norm=args.norm, maf=args.maf, center=args.center>0, snp1=args.snp1, snp2=args.snp2)
assert np.all(~np.isnan(X1))
if (cov1 is not None): assert np.all(~np.isnan(cov1))
#regress out PCs
s1, sum_s1, sum_s1_sqr = None, None, None
if (args.PC1 is not None):
print 'regressing given PCs out of bfile1'
X1 = regress_given_PCs(X1, cov1, args.PC1)
elif (args.numPCs1>0):
print 'Regressing top %d PCs out of bfile 1'%(args.numPCs1)
X1, U1, s1, sum_s1, sum_s1_sqr = pcgcs_utils.regress_PCs(X1, args.numPCs1)
print 'done'
if (cov1 is None): cov1 = U1
else: cov1 = np.concatenate((cov1, U1), axis=1)
s2, sum_s2, sum_s2_sqr = None, None, None
if (args.PC2 is not None):
print 'regressing given PCs out of bfile2'
X2 = regress_given_PCs(X2, cov2, args.PC2)
elif (args.numPCs2>0):
print 'Regressing top %d PCs out of bfile 2'%(args.numPCs2)
X2, U2, s2, sum_s2, sum_s2_sqr = pcgcs_utils.regress_PCs(X2, args.numPCs2)
print 'done'
if (cov2 is None): cov2 = U2
else: cov2 = np.concatenate((cov2, U2), axis=1)
#apply weights
if (args.snp_weights is not None):
print 'weighting SNPs...'
df_weights = pd.read_csv(args.snp_weights, names=['snp', 'weight'], delim_whitespace=True, header=None, index_col='snp', squeeze=True)
###import ipdb; ipdb.set_trace()
assert np.all(np.isin(bed1.sid, df_weights.index)), 'not all SNPs have weights'
df_weights = df_weights.loc[bed1.sid]
assert df_weights.shape[0] == len(bed1.sid)
snp_weights = df_weights.values
assert np.all(snp_weights>=0)
X1 *= np.sqrt(snp_weights * X1.shape[1]/snp_weights.sum())
if (bed2 is not None):
X2 *= np.sqrt(snp_weights * X2.shape[1]/snp_weights.sum())
#print plink file sizes
print_memory_usage(3.1)
print 'bfile1: %d cases, %d controls, %d SNPs'%(np.sum(phe1>phe1.mean()), np.sum(phe1<=phe1.mean()), bed1.sid.shape[0])
print_memory_usage(3.2)
if (args.sumstats_only==0 or args.Gty1_nocov_out is not None or args.Gty1_cov_out is not None):
G1_diag = np.einsum('ij,ij->i', X1,X1) / float(X1.shape[1])
###print 'G1_diag:', G1_diag[:10]
print_memory_usage(3.3)
if (bed2 is not None):
if (args.sumstats_only==0 or args.Gty2_nocov_out is not None or args.Gty2_cov_out is not None):
G2_diag = np.einsum('ij,ij->i', X2,X2) / float(X2.shape[1])
print 'bfile2: %d cases, %d controls, %d SNPs'%(np.sum(phe2>phe2.mean()), np.sum(phe2<=phe2.mean()), bed2.sid.shape[0])
print_memory_usage(4)
#PCGC initial computations
y1_norm, tau_i_1, pcgc_coeff1, ty1, u1_0, u1_1 = pcgcs_utils.prepare_PCGC(phe1, args.prev1, cov1)
if (cov1 is not None): var_t1 = varLiab_covar(args.prev1, tau_i_1, phe1)
else: var_t1=0
if (bed2 is None): u2_0, u2_1, var_t2 = None, None, None
else:
y2_norm, tau_i_2, pcgc_coeff2, ty2, u2_0, u2_1 = pcgcs_utils.prepare_PCGC(phe2, args.prev2, cov2)
if (cov2 is not None): var_t2 = varLiab_covar(args.prev2, tau_i_2, phe2)
else: var_t2=0
pcgc_coeff12 = np.sqrt(pcgc_coeff1 * pcgc_coeff2)
#compute z-scores
z1_nocov = y1_norm.dot(X1) / np.sqrt(len(phe1))
z1_withcov = (ty1 * (u1_0+u1_1)).dot(X1)
if (bed2 is not None):
z2_nocov = y2_norm.dot(X2) / np.sqrt(len(phe2))
z2_withcov = (ty2 * (u2_0+u2_1)).dot(X2)
#write z-scores if required
if (args.z1_nocov_out is not None): write_sumstats(z1_nocov, len(phe1), bed1.sid, args.z1_nocov_out)
if (args.z1_cov_out is not None): write_sumstats(z1_withcov, len(phe1), bed1.sid, args.z1_cov_out, compute_p=False)
if (args.z2_nocov_out is not None): write_sumstats(z2_nocov, len(phe2), bed2.sid, args.z2_nocov_out)
if (args.z2_cov_out is not None): write_sumstats(z2_withcov, len(phe2), bed2.sid, args.z2_cov_out, compute_p=False)
print_memory_usage(5)
#write Gty files
if (args.Gty1_nocov_out is not None):
Gty1 = np.sqrt(G1_diag) * y1_norm
df = pd.DataFrame(bed1.iid, columns=['fid', 'iid'])
df['Gty1'] = Gty1
df.to_csv(args.Gty1_nocov_out, sep='\t', index=False, float_format='%0.6e', header=None)
if (args.Gty2_nocov_out is not None):
Gty2 = np.sqrt(G2_diag) * y2_norm
df = pd.DataFrame(bed2.iid, columns=['fid', 'iid'])
df['Gty2'] = Gty2
df.to_csv(args.Gty2_nocov_out, sep='\t', index=False, float_format='%0.6e', header=None)
if (args.Gty1_cov_out is not None):
Gty1 = np.sqrt(G1_diag) * ty1 * (u1_0 + u1_1)
df = pd.DataFrame(bed1.iid, columns=['fid', 'iid'])
df['Gty1'] = Gty1
df.to_csv(args.Gty1_cov_out, sep='\t', index=False, float_format='%0.6e', header=None)
if (args.Gty2_cov_out is not None):
Gty2 = np.sqrt(G2_diag) * ty2 * (u2_0 + u2_1)
df = pd.DataFrame(bed2.iid, columns=['fid', 'iid'])
df['Gty2'] = Gty2
df.to_csv(args.Gty2_cov_out, sep='\t', index=False, float_format='%0.6e', header=None)
if (args.sumstats_only > 0):
print_sumstats(cov1, u1_0, u1_1, var_t1, cov2, u2_0, u2_1, var_t2, s1, sum_s1, sum_s1_sqr, s2, sum_s2, sum_s2_sqr)
sys.exit(0)
#find overlapping individuals
if (bed2 is not None):
print 'marking correlations between overlapping individuals...'
is_same = np.zeros((X1.shape[0], X2.shape[0]), dtype=np.bool)
is_same1 = np.zeros(X1.shape[0], dtype=np.bool)
is_same2 = np.zeros(X2.shape[0], dtype=np.bool)
num_overlap=0
for i1, ind1 in enumerate(bed1.iid[:,1]):
for i2, ind2 in enumerate(bed2.iid[:,1]):
if (ind1 == ind2):
is_same[i1,i2] = True
is_same1[i1] = True
is_same2[i2] = True
num_overlap+=1
print 'found %d overlapping individuals'%(num_overlap)
#G12_issame = np.mean(X1[is_same1] * X2[is_same2], axis=1)
G12_issame = np.einsum('ij,ij->i', X1[is_same1], X2[is_same2]) / float(X1.shape[1])
#Compute PCGC estimates, ignore covariates
#sig2g_1_nocov_old = np.sum(np.outer(y1_norm, y1_norm) * G1) / np.sum(G1**2) / pcgc_coeff1
sig2g1_numer = z1_nocov.dot(z1_nocov) * len(phe1) / float(X1.shape[1]) - G1_diag.dot(y1_norm**2)
print 'computing PCGC denominator without covariates...'
t0 = time.time()
sig2g1_denom = pcgc_denom_lowmem_nocov(X1,X1, window_size=args.mem_size)
print 'done in %0.2f seconds'%(time.time() - t0)
sig2g_1_nocov = sig2g1_numer / sig2g1_denom / pcgc_coeff1
if (bed2 is not None):
#sig2g_2_nocov_old = np.sum(np.outer(y2_norm, y2_norm) * G2) / np.sum(G2**2) / pcgc_coeff2
sig2g2_numer = z2_nocov.dot(z2_nocov) * len(phe2) / float(X2.shape[1]) - G2_diag.dot(y2_norm**2)
sig2g2_denom = pcgc_denom_lowmem_nocov(X2,X2, window_size=args.mem_size)
sig2g_2_nocov = sig2g2_numer / sig2g2_denom / pcgc_coeff2
#rho_nocov_old = np.sum(np.outer(y1_norm, y2_norm) * G12) / np.sum(G12**2) / pcgc_coeff12
rho_numer = z1_nocov.dot(z2_nocov) * np.sqrt(len(phe1) * len(phe2)) / float(X2.shape[1]) - np.sum(G12_issame * y1_norm[is_same1] * y2_norm[is_same2])
rho_denom = pcgc_denom_lowmem_nocov(X1, X2, is_same=is_same, window_size=args.mem_size)
rho_nocov = rho_numer / rho_denom / pcgc_coeff12
#perform jackknife computations
if (args.jackknife > 0):
print 'Computing jackknife standard errors with omitted covariates...'
t0 = time.time()
if (bed2 is None):
sig2g1_se_nocov = pcgc_jackknife_sig2g(X1, y1_norm, sig2g1_numer, sig2g1_denom, pcgc_coeff1, window_size=args.mem_size)
else:
sig2g1_se_nocov, sig2g2_se_nocov, rho_se_nocov, corr_se_nocov = pcgc_jackknife_corr(X1, X2, y1_norm, y2_norm,
sig2g1_numer, sig2g1_denom, sig2g2_numer, sig2g2_denom, rho_numer, rho_denom,
pcgc_coeff1, pcgc_coeff2, pcgc_coeff12,
is_same=is_same, window_size=args.mem_size)
print 'done in %0.2f seconds'%(time.time() - t0)
print
print 'Results when excluding covariates'
print '---------------------------------'
print 'study 1 h2: %0.4f'%(sig2g_1_nocov),
if (args.jackknife>0): print '(%0.4f)'%(sig2g1_se_nocov),
print
if (bed2 is not None):
print 'study 2 h2: %0.4f'%(sig2g_2_nocov),
if (args.jackknife>0): print '(%0.4f)'%(sig2g2_se_nocov),
print
print 'genetic covariance: %0.4f'%(rho_nocov),
if (args.jackknife>0): print '(%0.4f)'%(rho_se_nocov),
print
print 'genetic correlation: %0.4f'%(rho_nocov / np.sqrt(sig2g_1_nocov * sig2g_2_nocov)),
if (args.jackknife>0): print '(%0.4f)'%(corr_se_nocov),
print
#permutation testing code
if (args.num_perms > 0):
print
print 'Performing covariate-less permutation testing for heritability of study 1 with %d permutations...'%(args.num_perms)
t0 = time.time()
rho_pvalue_nocov = permutation_test_heritability(X1, y1_norm, G1_diag, num_perms=args.num_perms)
print 'done in %0.2f seconds'%(time.time()-t0)
print 'study 1 h2 p-value (excluding covariates): %0.5e'%(rho_pvalue_nocov)
if (rho_pvalue_nocov < 100.0/args.num_perms):
print 'WARNING: p-value is close to the possible limit due to the number of permutations. Please increase the number of permutations to obtain a more accurate result'
if (bed2 is not None and args.num_perms > 0):
print
print 'Performing covariate-less permutation testing for heritability of study 2 with %d permutations...'%(args.num_perms)
t0 = time.time()
rho_pvalue_nocov = permutation_test_heritability(X2, y2_norm, G2_diag, num_perms=args.num_perms)
print 'done in %0.2f seconds'%(time.time()-t0)
print 'study 2 h2 p-value (excluding covariates): %0.5e'%(rho_pvalue_nocov)
if (rho_pvalue_nocov < 100.0/args.num_perms):
print 'WARNING: p-value is close to the possible limit due to the number of permutations. Please increase the number of permutations to obtain a more accurate result'
print
print 'Performing covariate-less permutation testing for genetic correlation with %d permutations...'%(args.num_perms)
t0 = time.time()
rho_pvalue_nocov = permutation_test2(X1, y1_norm, X2, y2_norm, G12_issame, is_same1, is_same2, num_perms=args.num_perms)
print 'done in %0.2f seconds'%(time.time()-t0)
print 'genetic correlation p-value (excluding covariates): %0.5e'%(rho_pvalue_nocov)
if (rho_pvalue_nocov < 100.0/args.num_perms):
print 'WARNING: p-value is close to the possible limit due to the number of permutations. Please increase the number of permutations to obtain a more accurate result'
print
print
if (cov1 is not None or cov2 is not None):
qty1 = ty1 * (u1_0 + u1_1)
if (bed2 is not None): qty2 = ty2 * (u2_0 + u2_1)
#Compute PCGC estimates, include covariates
#sig2g_1_withcov_old = np.sum(np.outer(ty1, ty1)*G1*Q1) / np.sum((G1*Q1)**2)
numer_sig2g1 = z1_withcov.dot(z1_withcov) / X1.shape[1] - G1_diag.dot(qty1**2)
denom_sig2g1 = pcgc_denom_lowmem(X1, X1, u1_0, u1_1, u1_0, u1_1, window_size=args.mem_size)
sig2g_1_withcov = numer_sig2g1 / denom_sig2g1
h2_1_withcov = sig2g_1_withcov / (1 + var_t1)
if (bed2 is not None):
#sig2g_2_withcov_old = np.sum(np.outer(ty2, ty2)*G2*Q2) / np.sum((G2*Q2)**2)
numer_sig2g2 = z2_withcov.dot(z2_withcov) / X2.shape[1] - G2_diag.dot(qty2**2)
denom_sig2g2 = pcgc_denom_lowmem(X2, X2, u2_0, u2_1, u2_0, u2_1, window_size=args.mem_size)
sig2g_2_withcov = numer_sig2g2 / denom_sig2g2
h2_2_withcov = sig2g_2_withcov / (1 + var_t2)
#rho_withcov_old = np.sum(np.outer(ty1, ty2)*G12*Q12) / np.sum((G12*Q12)**2)
numer_rho = z1_withcov.dot(z2_withcov) / X2.shape[1] - np.sum(G12_issame * qty1[is_same1] * qty2[is_same2])
denom_rho = pcgc_denom_lowmem(X1, X2, u1_0, u1_1, u2_0, u2_1, is_same, window_size=args.mem_size)
rho_withcov = numer_rho / denom_rho
if (args.jackknife > 0):
print 'Computing jackknife standard errors with covariates...'
t0 = time.time()
if (bed2 is None):
sig2g1_se_withcov = pcgc_jackknife_sig2g(X1, ty1, numer_sig2g1, denom_sig2g1, u0=u1_0, u1=u1_1, window_size=args.mem_size)
else:
sig2g1_se_withcov, sig2g2_se_withcov, rho_se_withcov, corr_se_withcov = pcgc_jackknife_corr(X1, X2, ty1, ty2,
numer_sig2g1, denom_sig2g1, numer_sig2g2, denom_sig2g2, numer_rho, denom_rho,
u1_0=u1_0, u1_1=u1_1, u2_0=u2_0, u2_1=u2_1,
is_same=is_same, window_size=args.mem_size)
print 'done in %0.2f seconds'%(time.time()-t0)
print
print 'Results when including covariates'
print '---------------------------------'
if (args.jackknife==0):
print 'study 1 h2: %0.4f (genetic variance: %0.4f)'%(h2_1_withcov, sig2g_1_withcov)
else:
print 'study 1 h2: %0.4f (%0.4f) (genetic variance: %0.4f (%0.4f))'%(h2_1_withcov, sig2g1_se_withcov/(1+var_t1), sig2g_1_withcov, sig2g1_se_withcov)
if (bed2 is not None):
if (args.jackknife==0):
print 'study 2 h2: %0.4f (genetic variance: %0.4f)'%(h2_2_withcov, sig2g_2_withcov)
print 'genetic covariance: %0.4f'%(rho_withcov)
print 'genetic correlation: %0.4f'%(rho_withcov / np.sqrt(sig2g_1_withcov * sig2g_2_withcov))
else:
print 'study 2 h2: %0.4f (%0.4f) (genetic variance: %0.4f (%0.4f))'%(h2_2_withcov, sig2g2_se_withcov/(1+var_t2), sig2g_2_withcov, sig2g2_se_withcov)
print 'genetic covariance: %0.4f (%0.4f)'%(rho_withcov, rho_se_withcov)
print 'genetic correlation: %0.4f (%0.4f)'%(rho_withcov / np.sqrt(sig2g_1_withcov * sig2g_2_withcov), corr_se_withcov)
#permutation testing code
if (args.num_perms > 0):
print
print 'Performing covariate-aware permutation testing for heritability of study 1 with %d permutations...'%(args.num_perms)
t0 = time.time()
rho_pvalue_cov = permutation_test_heritability(X1, qty1, G1_diag, num_perms=args.num_perms)
print 'done in %0.2f seconds'%(time.time()-t0)
print 'study 1 h2 p-value (including covariates): %0.5e'%(rho_pvalue_cov)
if (rho_pvalue_cov < 100.0/args.num_perms):
print 'WARNING: p-value is close to the possible limit due to the number of permutations. Please increase the number of permutations to obtain a more accurate result'
if (args.covar2 is not None and args.num_perms > 0):
print
print 'Performing covariate-aware permutation testing for heritability of study 2 with %d permutations...'%(args.num_perms)
t0 = time.time()
rho_pvalue_cov = permutation_test_heritability(X2, qty2, G2_diag, num_perms=args.num_perms)
print 'done in %0.2f seconds'%(time.time()-t0)
print 'study 2 h2 p-value (including covariates): %0.5e'%(rho_pvalue_cov)
if (rho_pvalue_cov < 100.0/args.num_perms):
print 'WARNING: p-value is close to the possible limit due to the number of permutations. Please increase the number of permutations to obtain a more accurate result'
print
print 'Performing covariate-aware permutation testing for genetic correlation with %d permutations...'%(args.num_perms)
t0 = time.time()
rho_pvalue_cov = permutation_test2(X1, qty1, X2, qty2, G12_issame, is_same1, is_same2, num_perms=args.num_perms)
print 'done in %0.2f seconds'%(time.time()-t0)
print 'genetic correlation p-value (including covariates): %0.5e'%(rho_pvalue_cov)
if (rho_pvalue_cov < 100.0/args.num_perms):
print 'WARNING: p-value is close to the possible limit due to the number of permutations. Please increase the number of permutations to obtain a more accurate result'
print_sumstats(cov1, u1_0, u1_1, var_t1, cov2, u2_0, u2_1, var_t2, s1, sum_s1, sum_s1_sqr, s2, sum_s2, sum_s2_sqr)
|
mit
|
joshloyal/scikit-learn
|
examples/plot_kernel_approximation.py
|
26
|
8069
|
"""
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings, and a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular, note that
a datapoint (represented as a dot) is not necessarily classified
into the region in which it appears to lie, since it does not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier to this data, we need to flatten the images, to
# turn the data into a (samples, feature) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = (data[:n_samples // 2],
digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
data_test, targets_test = (data[n_samples // 2:],
digits.target[n_samples // 2:])
# data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second subplot for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
Winand/pandas
|
pandas/tests/io/msgpack/test_case.py
|
13
|
2740
|
# coding: utf-8
from pandas.io.msgpack import packb, unpackb
def check(length, obj):
v = packb(obj)
assert len(v) == length, \
"%r length should be %r but get %r" % (obj, length, len(v))
assert unpackb(v, use_list=0) == obj
def test_1():
for o in [None, True, False, 0, 1, (1 << 6), (1 << 7) - 1, -1,
-((1 << 5) - 1), -(1 << 5)]:
check(1, o)
def test_2():
for o in [1 << 7, (1 << 8) - 1, -((1 << 5) + 1), -(1 << 7)]:
check(2, o)
def test_3():
for o in [1 << 8, (1 << 16) - 1, -((1 << 7) + 1), -(1 << 15)]:
check(3, o)
def test_5():
for o in [1 << 16, (1 << 32) - 1, -((1 << 15) + 1), -(1 << 31)]:
check(5, o)
def test_9():
for o in [1 << 32, (1 << 64) - 1, -((1 << 31) + 1), -(1 << 63), 1.0, 0.1,
-0.1, -1.0]:
check(9, o)
def check_raw(overhead, num):
check(num + overhead, b" " * num)
def test_fixraw():
check_raw(1, 0)
check_raw(1, (1 << 5) - 1)
def test_raw16():
check_raw(3, 1 << 5)
check_raw(3, (1 << 16) - 1)
def test_raw32():
check_raw(5, 1 << 16)
def check_array(overhead, num):
check(num + overhead, (None, ) * num)
def test_fixarray():
check_array(1, 0)
check_array(1, (1 << 4) - 1)
def test_array16():
check_array(3, 1 << 4)
check_array(3, (1 << 16) - 1)
def test_array32():
check_array(5, (1 << 16))
def match(obj, buf):
assert packb(obj) == buf
assert unpackb(buf, use_list=0) == obj
def test_match():
cases = [
(None, b'\xc0'),
(False, b'\xc2'),
(True, b'\xc3'),
(0, b'\x00'),
(127, b'\x7f'),
(128, b'\xcc\x80'),
(256, b'\xcd\x01\x00'),
(-1, b'\xff'),
(-33, b'\xd0\xdf'),
(-129, b'\xd1\xff\x7f'),
({1: 1}, b'\x81\x01\x01'),
(1.0, b"\xcb\x3f\xf0\x00\x00\x00\x00\x00\x00"),
((), b'\x90'),
(tuple(range(15)), (b"\x9f\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09"
b"\x0a\x0b\x0c\x0d\x0e")),
(tuple(range(16)), (b"\xdc\x00\x10\x00\x01\x02\x03\x04\x05\x06\x07"
b"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f")),
({}, b'\x80'),
(dict([(x, x) for x in range(15)]),
(b'\x8f\x00\x00\x01\x01\x02\x02\x03\x03\x04\x04\x05\x05\x06\x06\x07'
b'\x07\x08\x08\t\t\n\n\x0b\x0b\x0c\x0c\r\r\x0e\x0e')),
(dict([(x, x) for x in range(16)]),
(b'\xde\x00\x10\x00\x00\x01\x01\x02\x02\x03\x03\x04\x04\x05\x05\x06'
b'\x06\x07\x07\x08\x08\t\t\n\n\x0b\x0b\x0c\x0c\r\r\x0e\x0e'
b'\x0f\x0f')),
]
for v, p in cases:
match(v, p)
def test_unicode():
assert unpackb(packb('foobar'), use_list=1) == b'foobar'
|
bsd-3-clause
|
tongwang01/tensorflow
|
tensorflow/python/client/notebook.py
|
33
|
4608
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import socket
import sys
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
"password", None,
"Password to require. If set, the server will allow public access."
" Only used if notebook config file does not exist.")
flags.DEFINE_string("notebook_dir", "experimental/brain/notebooks",
"root location where to store notebooks")
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
sys.argv = ORIG_ARGV
if not IS_KERNEL:
# Drop all flags.
sys.argv = [sys.argv[0]]
# NOTE(sadovsky): For some reason, putting this import at the top level
# breaks inline plotting. It's probably a bug in the stone-age version of
# matplotlib.
from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top
notebookapp = NotebookApp.instance()
notebookapp.open_browser = True
# password functionality adopted from quality/ranklab/main/tools/notebook.py
# add options to run with "password"
if FLAGS.password:
from IPython.lib import passwd # pylint: disable=g-import-not-at-top
notebookapp.ip = "0.0.0.0"
notebookapp.password = passwd(FLAGS.password)
else:
print ("\nNo password specified; Notebook server will only be available"
" on the local machine.\n")
notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
if notebookapp.ip == "0.0.0.0":
proto = "https" if notebookapp.certfile else "http"
url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
notebookapp.base_project_url)
print("\nNotebook server will be publicly available at: %s\n" % url)
notebookapp.start()
return
# Drop the --flagfile flag so that notebook doesn't complain about an
# "unrecognized alias" when parsing sys.argv.
sys.argv = ([sys.argv[0]] +
[z for z in sys.argv[1:] if not z.startswith("--flagfile")])
from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top
kernelapp = IPKernelApp.instance()
kernelapp.initialize()
# Enable inline plotting. Equivalent to running "%matplotlib inline".
ipshell = kernelapp.shell
ipshell.enable_matplotlib("inline")
kernelapp.start()
if __name__ == "__main__":
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = ([sys.argv[0]] +
[x for x in sys.argv[1:] if x.startswith("--flagfile")])
app.run()
|
apache-2.0
|
mhogg/scipy
|
scipy/signal/waveforms.py
|
64
|
14818
|
# Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly']
def sawtooth(t, width=1):
"""
Return a periodic sawtooth or triangle waveform.
The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
Time.
width : array_like, optional
Width of the rising ramp as a proportion of the total cycle.
Default is 1, producing a rising ramp, while 0 produces a falling
ramp. `width` = 0.5 produces a triangle wave.
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500)
>>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
"""
t, w = asarray(t), asarray(width)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# take t modulo 2*pi
tmod = mod(t, 2 * pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
tsub = extract(mask2, tmod)
wsub = extract(mask2, w)
place(y, mask2, tsub / (pi * wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
tsub = extract(mask3, tmod)
wsub = extract(mask3, w)
place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
return y
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period ``2*pi``, has value +1 from 0 to
``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
the interval [0,1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
The input time array.
duty : array_like, optional
Duty cycle. Default is 0.5 (50% duty cycle).
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the square waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500, endpoint=False)
>>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
>>> plt.ylim(-2, 2)
A pulse-width modulated sine wave:
>>> plt.figure()
>>> sig = np.sin(2 * np.pi * t)
>>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
>>> plt.subplot(2, 1, 1)
>>> plt.plot(t, sig)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(t, pwm)
>>> plt.ylim(-1.5, 1.5)
"""
t, w = asarray(t), asarray(duty)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# on the interval 0 to duty*2*pi function is 1
tmod = mod(t, 2 * pi)
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
place(y, mask2, 1)
# on the interval duty*2*pi to 2*pi function is -1
mask3 = (1 - mask1) & (1 - mask2)
place(y, mask3, -1)
return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
retenv=False):
"""
Return a Gaussian modulated sinusoid:
``exp(-a t^2) exp(1j*2*pi*fc*t).``
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (e.g. Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (e.g. Hz).
Default is 0.5.
bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
Returns
-------
yI : ndarray
Real part of signal. Always returned.
yQ : ndarray
Imaginary part of signal. Only returned if `retquad` is True.
yenv : ndarray
Envelope of signal. Only returned if `retenv` is True.
See Also
--------
scipy.signal.morlet
Examples
--------
Plot real component, imaginary component, and envelope for a 5 Hz pulse,
sampled at 100 Hz for 2 seconds:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
>>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
>>> plt.plot(t, i, t, q, t, e, '--')
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
# pi^2/a * fc^2 * bw^2 / 4 = -log(ref)
a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref) / a)
yenv = exp(-a * t * t)
yI = yenv * cos(2 * pi * fc * t)
yQ = yenv * sin(2 * pi * fc * t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
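# Illustrative usage sketch (not part of the original module): passing the string
# 'cutoff' returns the time at which the envelope falls below `tpr` dB, which can
# then be used to size the evaluation window. Parameter values are arbitrary examples.
# >>> tc = gausspulse('cutoff', fc=1000, bw=0.5, tpr=-40)
# >>> t = np.linspace(-tc, tc, 201)
# >>> yI = gausspulse(t, fc=1000, bw=0.5)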
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per unit';
there is no requirement here that the unit is one second. The
important distinction is that the units of rotation are cycles, not
radians. Likewise, `t` could be a measurement of space instead of time.
Parameters
----------
t : array_like
Times at which to evaluate the waveform.
f0 : float
Frequency (e.g. Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (e.g. Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
y : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral
(from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below.
See Also
--------
sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f0 and f1 must be nonzero.
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
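# Illustrative usage sketch (not part of the original module): a linear sweep from
# 6 Hz down to 1 Hz over 10 seconds; the other `method` values are called the same way.
# >>> t = np.linspace(0, 10, 1001)
# >>> w = chirp(t, f0=6, t1=10, f1=1, method='linear')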
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp_phase` for a description of the arguments.
"""
t = asarray(t)
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f0 == 0 or f1 == 0:
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
"nonzero.")
if f0 == f1:
# Degenerate case: constant frequency.
phase = 2 * pi * f0 * t
else:
# Singular point: the instantaneous frequency blows up
# when t == sing.
sing = -f1 * t1 / (f0 - f1)
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given."
% method)
return phase
def sweep_poly(t, poly, phi=0):
"""
Frequency-swept cosine generator, with a time-dependent frequency.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1-D array_like or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
Phase offset, in degrees, Default: 0.
Returns
-------
sweep_poly : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral
(from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above.
See Also
--------
chirp
Notes
-----
.. versionadded:: 0.8.0
If `poly` is a list or ndarray of length `n`, then the elements of
`poly` are the coefficients of the polynomial, and the instantaneous
frequency is:
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of `numpy.poly1d`, then the instantaneous
frequency is:
``f(t) = poly(t)``
Finally, the output `s` is:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``,
``f(t)`` as defined above.
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
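# Illustrative usage sketch (not part of the original module): a cubic frequency
# profile f(t) = 0.025*t**3 - 0.36*t**2 + 1.25*t + 2 expressed via numpy.poly1d.
# >>> p = np.poly1d([0.025, -0.36, 1.25, 2.0])
# >>> t = np.linspace(0, 10, 5001)
# >>> w = sweep_poly(t, p)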
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
|
bsd-3-clause
|
renhaocui/adPlatform
|
caseStudy.py
|
2
|
26683
|
__author__ = 'rencui'
from afinn import Afinn
from sklearn.externals import joblib
import numpy
import json
from textstat.textstat import textstat
from nltk.stem.porter import *
from tokenizer import simpleTokenize
import logging
from scipy.sparse import hstack, csr_matrix
from sklearn import svm
stemmer = PorterStemmer()
logging.basicConfig()
def mapMention(inputFile):
mentionFile = open(inputFile, 'r')
outputMapper = {}
for line in mentionFile:
mention = json.loads(line.strip())
if mention['verified'] == 'true':
verify = 1
else:
verify = 0
outputMapper[mention['screen_name']] = (verify, mention['followers_count'])
mentionFile.close()
return outputMapper
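# Note (inferred from the fields read above): each line of the input file is expected
# to be a JSON object with at least "screen_name", "verified" and "followers_count";
# the mapper returns {screen_name: (is_verified_flag, followers_count)}.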
def stemContent(input):
words = simpleTokenize(input)
out = ''
for word in words:
temp = stemmer.stem(word)
out += temp + ' '
return out.strip()
def POSRatio(inputList):
out = []
temp = []
for item in inputList:
temp.append(float(item))
if sum(temp) == 0:
out = [0.0, 0.0, 0.0]
else:
for item in temp:
out.append(item / sum(temp))
return out
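# Illustrative example: POSRatio(['2', '1', '1']) -> [0.5, 0.25, 0.25]; an all-zero
# input maps to [0.0, 0.0, 0.0] to avoid division by zero.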
def longestLength(input):
outputLength = 0
for key, value in input.items():
length = 0
if value != '-1' and value != '_':
length += 1
if value == '0':
if length > outputLength:
outputLength = length
continue
nextNode = value
while nextNode != '-1' and nextNode != '_' and nextNode != '0':
length += 1
nextNode = input[nextNode]
if length > outputLength:
outputLength = length
return outputLength
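# Illustrative example (assuming the input maps token indices to head indices as
# strings, with '0' marking the root and '-1'/'_' marking unattached tokens):
# >>> longestLength({'1': '2', '2': '0', '3': '_'})
# 2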
def POSRatio(inputList):
out = []
temp = []
for item in inputList:
temp.append(float(item))
if sum(temp) == 0:
out = [0.0, 0.0, 0.0]
else:
for item in temp:
out.append(item / sum(temp))
return out
def outputHeads(input):
output = ''
for key, value in input.items():
if value[1] == 0:
output += value[0] + '/' + value[2] + ' '
return output.strip()
def run(group, groupTitle, outputFile='result.output'):
resultFile = open(outputFile, 'a')
mentionMapper = mapMention('adData/analysis/ranked/mention.json')
print groupTitle
resultFile.write(groupTitle + '\n')
print 'group: ' + str(group)
resultFile.write('group: ' + str(group) + '\n')
afinn = Afinn()
posFile = open('adData/analysis/groups/' + groupTitle + '/group' + str(group) + '.pos', 'r')
negFile = open('adData/analysis/groups/' + groupTitle + '/group' + str(group) + '.neg', 'r')
posParseLengthFile = open('adData/analysis/groups/' + groupTitle + '/parserLength' + str(group) + '.pos', 'r')
negParseLengthFile = open('adData/analysis/groups/' + groupTitle + '/parserLength' + str(group) + '.neg', 'r')
posHeadCountFile = open('adData/analysis/groups/' + groupTitle + '/parserHeadCount' + str(group) + '.pos', 'r')
negHeadCountFile = open('adData/analysis/groups/' + groupTitle + '/parserHeadCount' + str(group) + '.neg', 'r')
posPOSCountFile = open('adData/analysis/groups/' + groupTitle + '/parserPOSCount' + str(group) + '.pos', 'r')
negPOSCountFile = open('adData/analysis/groups/' + groupTitle + '/parserPOSCount' + str(group) + '.neg', 'r')
ids = []
contents = []
scores = []
labels = []
parseLength = []
headCount = []
usernames = []
POScounts = []
print 'loading...'
for line in posFile:
seg = line.strip().split(' :: ')
text = seg[3]
username = seg[7].split(';')
score = float(seg[0])
ids.append(seg[5])
usernames.append(username)
contents.append(text)
scores.append(score)
labels.append(1)
for line in negFile:
seg = line.strip().split(' :: ')
text = seg[3]
username = seg[7].split(';')
score = float(seg[0])
ids.append(seg[5])
usernames.append(username)
contents.append(text)
scores.append(score)
labels.append(0)
for line in posParseLengthFile:
parseLength.append(int(line.strip().split(' :: ')[0]))
for line in negParseLengthFile:
parseLength.append(int(line.strip().split(' :: ')[0]))
for line in posHeadCountFile:
headCount.append(int(line.strip().split(' :: ')[0]))
for line in negHeadCountFile:
headCount.append(int(line.strip().split(' :: ')[0]))
for line in posPOSCountFile:
POScounts.append(POSRatio(line.strip().split(' :: ')[0].split(' ')))
for line in negPOSCountFile:
POScounts.append(POSRatio(line.strip().split(' :: ')[0].split(' ')))
posHeadCountFile.close()
negHeadCountFile.close()
posParseLengthFile.close()
negParseLengthFile.close()
posPOSCountFile.close()
negPOSCountFile.close()
posFile.close()
negFile.close()
semanticFeatures_train = []
semanticFeatures_test = []
classes_train = []
classes_test = []
index_test = []
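# Feature vector per tweet (in order): token count; URRL/HHTTG/USSERNM flags, which
# appear to be placeholder tokens for URLs, hashtags and user mentions; AFINN sentiment
# per token; Coleman-Liau readability; parse length and head count per token; three
# POS-ratio values; '!' and '?' flags; verified-mention flag; mean follower count of
# recognized mentioned users.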
for index, content in enumerate(contents):
temp = []
words = simpleTokenize(content)
twLen = float(len(words))
sentiScore = afinn.score(stemContent(content))
readScore = textstat.coleman_liau_index(content)
temp.append(twLen)
if content.count('URRL') > 0:
temp.append(1)
else:
temp.append(0)
if content.count('HHTTG') > 0:
temp.append(1)
else:
temp.append(0)
if content.count('USSERNM') > 0:
temp.append(1)
else:
temp.append(0)
temp.append(sentiScore / twLen)
temp.append(readScore)
temp.append(parseLength[index] / twLen)
temp.append(headCount[index] / twLen)
temp += POScounts[index]
if content.count('!') > 0:
temp.append(1)
else:
temp.append(0)
if content.count('?') > 0:
temp.append(1)
else:
temp.append(0)
mentionFlag = 0
mentionFollowers = 0
userCount = 0.0
for user in usernames[index]:
if user in mentionMapper:
userCount += 1
if mentionMapper[user][0] == 1:
mentionFlag = 1
mentionFollowers += mentionMapper[user][1]
temp.append(mentionFlag)
if userCount == 0:
temp.append(0.0)
else:
temp.append(mentionFollowers / userCount)
if index % 5 == 0:
semanticFeatures_test.append(numpy.array(temp))
classes_test.append(labels[index])
index_test.append(index)
else:
semanticFeatures_train.append(numpy.array(temp))
classes_train.append(labels[index])
feature_train = csr_matrix(numpy.array(semanticFeatures_train))
feature_test = csr_matrix(numpy.array(semanticFeatures_test))
resultFile.flush()
model = svm.SVC()
model.fit(feature_train, classes_train)
predictions = model.predict(feature_test)
if len(predictions) != len(classes_test):
print 'inference error!'
for index, label in enumerate(predictions):
if label == 0 and classes_test[index] == 1:
print ids[index_test[index]]
print contents[index_test[index]]
resultFile.flush()
resultFile.write('\n')
resultFile.flush()
resultFile.close()
def run2(group, groupTitle, outputFile='result.output'):
resultFile = open(outputFile, 'a')
mentionMapper = mapMention('adData/analysis/ranked/mention.json')
tempListFile = open('results/temp.list', 'r')
excludeList = []
for line in tempListFile:
excludeList.append(line.strip())
print groupTitle
resultFile.write(groupTitle + '\n')
print 'group: ' + str(group)
resultFile.write('group: ' + str(group) + '\n')
afinn = Afinn()
posFile = open('adData/analysis/groups/' + groupTitle + '/group' + str(group) + '.pos', 'r')
negFile = open('adData/analysis/groups/' + groupTitle + '/group' + str(group) + '.neg', 'r')
posParseLengthFile = open('adData/analysis/groups/' + groupTitle + '/parserLength' + str(group) + '.pos', 'r')
negParseLengthFile = open('adData/analysis/groups/' + groupTitle + '/parserLength' + str(group) + '.neg', 'r')
posHeadCountFile = open('adData/analysis/groups/' + groupTitle + '/parserHeadCount' + str(group) + '.pos', 'r')
negHeadCountFile = open('adData/analysis/groups/' + groupTitle + '/parserHeadCount' + str(group) + '.neg', 'r')
posPOSCountFile = open('adData/analysis/groups/' + groupTitle + '/parserPOSCount' + str(group) + '.pos', 'r')
negPOSCountFile = open('adData/analysis/groups/' + groupTitle + '/parserPOSCount' + str(group) + '.neg', 'r')
ids = []
contents = []
scores = []
labels = []
parseLength = []
headCount = []
usernames = []
POScounts = []
print 'loading...'
for line in posFile:
seg = line.strip().split(' :: ')
text = seg[3]
username = seg[7].split(';')
score = float(seg[0])
ids.append(seg[5])
usernames.append(username)
contents.append(text)
scores.append(score)
labels.append(1)
for line in negFile:
seg = line.strip().split(' :: ')
text = seg[3]
username = seg[7].split(';')
score = float(seg[0])
ids.append(seg[5])
usernames.append(username)
contents.append(text)
scores.append(score)
labels.append(0)
for line in posParseLengthFile:
parseLength.append(int(line.strip().split(' :: ')[0]))
for line in negParseLengthFile:
parseLength.append(int(line.strip().split(' :: ')[0]))
for line in posHeadCountFile:
headCount.append(int(line.strip().split(' :: ')[0]))
for line in negHeadCountFile:
headCount.append(int(line.strip().split(' :: ')[0]))
for line in posPOSCountFile:
POScounts.append(POSRatio(line.strip().split(' :: ')[0].split(' ')))
for line in negPOSCountFile:
POScounts.append(POSRatio(line.strip().split(' :: ')[0].split(' ')))
posHeadCountFile.close()
negHeadCountFile.close()
posParseLengthFile.close()
negParseLengthFile.close()
posPOSCountFile.close()
negPOSCountFile.close()
posFile.close()
negFile.close()
semanticFeatures_train = []
semanticFeatures_test = []
classes_train = []
classes_test = []
index_test = []
for index, content in enumerate(contents):
temp = []
words = simpleTokenize(content)
twLen = float(len(words))
sentiScore = afinn.score(stemContent(content))
readScore = textstat.coleman_liau_index(content)
temp.append(twLen)
if content.count('URRL') > 0:
temp.append(1)
else:
temp.append(0)
if content.count('HHTTG') > 0:
temp.append(1)
else:
temp.append(0)
if content.count('USSERNM') > 0:
temp.append(1)
else:
temp.append(0)
temp.append(sentiScore / twLen)
temp.append(readScore)
temp.append(parseLength[index] / twLen)
temp.append(headCount[index] / twLen)
temp += POScounts[index]
if content.count('!') > 0:
temp.append(1)
else:
temp.append(0)
if content.count('?') > 0:
temp.append(1)
else:
temp.append(0)
mentionFlag = 0
mentionFollowers = 0
userCount = 0.0
for user in usernames[index]:
if user in mentionMapper:
userCount += 1
if mentionMapper[user][0] == 1:
mentionFlag = 1
mentionFollowers += mentionMapper[user][1]
temp.append(mentionFlag)
if userCount == 0:
temp.append(0.0)
else:
temp.append(mentionFollowers / userCount)
if index % 5 == 0:
semanticFeatures_test.append(numpy.array(temp))
classes_test.append(labels[index])
index_test.append(index)
else:
semanticFeatures_train.append(numpy.array(temp))
classes_train.append(labels[index])
feature_train = csr_matrix(numpy.array(semanticFeatures_train))
feature_test = csr_matrix(numpy.array(semanticFeatures_test))
resultFile.flush()
model = svm.SVC()
model.fit(feature_train, classes_train)
# joblib.dump(model, 'results/full.pkl')
# model = joblib.load('results/full.pkl')
predictions = model.predict(feature_test)
if len(predictions) != len(classes_test):
print 'inference error!'
for index, label in enumerate(predictions):
# tempListFile.write(ids[index_test[index]]+'\n')
if label == 1 and classes_test[index] == 1:
print ids[index_test[index]]
print contents[index_test[index]]
resultFile.flush()
resultFile.write('\n')
resultFile.flush()
tempListFile.close()
resultFile.close()
def run3():
mentionMapper = mapMention('adData/analysis/ranked/mention.json')
tempListFile = open('results/temp.list', 'r')
excludeList = []
for line in tempListFile:
excludeList.append(line.strip())
groupTitle = 'totalGroup'
group = 0
afinn = Afinn()
posFile = open('adData/analysis/groups/' + groupTitle + '/group' + str(group) + '.pos', 'r')
negFile = open('adData/analysis/groups/' + groupTitle + '/group' + str(group) + '.neg', 'r')
posParseLengthFile = open('adData/analysis/groups/' + groupTitle + '/parserLength' + str(group) + '.pos', 'r')
negParseLengthFile = open('adData/analysis/groups/' + groupTitle + '/parserLength' + str(group) + '.neg', 'r')
posHeadCountFile = open('adData/analysis/groups/' + groupTitle + '/parserHeadCount' + str(group) + '.pos', 'r')
negHeadCountFile = open('adData/analysis/groups/' + groupTitle + '/parserHeadCount' + str(group) + '.neg', 'r')
posPOSCountFile = open('adData/analysis/groups/' + groupTitle + '/parserPOSCount' + str(group) + '.pos', 'r')
negPOSCountFile = open('adData/analysis/groups/' + groupTitle + '/parserPOSCount' + str(group) + '.neg', 'r')
ids = []
contents = []
scores = []
labels = []
parseLength = []
headCount = []
usernames = []
POScounts = []
print 'loading...'
for line in posFile:
seg = line.strip().split(' :: ')
text = seg[3]
username = seg[7].split(';')
score = float(seg[0])
ids.append(seg[5])
usernames.append(username)
contents.append(text)
scores.append(score)
labels.append(1)
for line in negFile:
seg = line.strip().split(' :: ')
text = seg[3]
username = seg[7].split(';')
score = float(seg[0])
ids.append(seg[5])
usernames.append(username)
contents.append(text)
scores.append(score)
labels.append(0)
for line in posParseLengthFile:
parseLength.append(int(line.strip().split(' :: ')[0]))
for line in negParseLengthFile:
parseLength.append(int(line.strip().split(' :: ')[0]))
for line in posHeadCountFile:
headCount.append(int(line.strip().split(' :: ')[0]))
for line in negHeadCountFile:
headCount.append(int(line.strip().split(' :: ')[0]))
for line in posPOSCountFile:
POScounts.append(POSRatio(line.strip().split(' :: ')[0].split(' ')))
for line in negPOSCountFile:
POScounts.append(POSRatio(line.strip().split(' :: ')[0].split(' ')))
posHeadCountFile.close()
negHeadCountFile.close()
posParseLengthFile.close()
negParseLengthFile.close()
posPOSCountFile.close()
negPOSCountFile.close()
posFile.close()
negFile.close()
semanticFeatures_train = []
semanticFeatures_test = []
classes_train = []
classes_test = []
index_test = []
for index, content in enumerate(contents):
temp = []
words = simpleTokenize(content)
twLen = float(len(words))
sentiScore = afinn.score(stemContent(content))
readScore = textstat.coleman_liau_index(content)
temp.append(twLen)
if content.count('URRL') > 0:
temp.append(1)
else:
temp.append(0)
if content.count('HHTTG') > 0:
temp.append(1)
else:
temp.append(0)
if content.count('USSERNM') > 0:
temp.append(1)
else:
temp.append(0)
temp.append(sentiScore / twLen)
temp.append(readScore)
temp.append(parseLength[index] / twLen)
temp.append(headCount[index] / twLen)
temp += POScounts[index]
if content.count('!') > 0:
temp.append(1)
else:
temp.append(0)
if content.count('?') > 0:
temp.append(1)
else:
temp.append(0)
mentionFlag = 0
mentionFollowers = 0
userCount = 0.0
for user in usernames[index]:
if user in mentionMapper:
userCount += 1
if mentionMapper[user][0] == 1:
mentionFlag = 1
mentionFollowers += mentionMapper[user][1]
temp.append(mentionFlag)
if userCount == 0:
temp.append(0.0)
else:
temp.append(mentionFollowers / userCount)
if ids[index] not in excludeList:
semanticFeatures_train.append(numpy.array(temp))
classes_train.append(labels[index])
else:
semanticFeatures_test.append(numpy.array(temp))
classes_test.append(labels[index])
print ids[index] + '\t' + contents[index] + '\t' + str(usernames[index])
feature_train = csr_matrix(numpy.array(semanticFeatures_train))
feature_test = csr_matrix(numpy.array(semanticFeatures_test))
model = svm.SVC()
model.fit(feature_train, classes_train)
# joblib.dump(model, 'results/full.pkl')
# model = joblib.load('results/full.pkl')
predictions = model.predict(feature_test)
score = model.decision_function(feature_test)
print classes_test
print predictions
print score
tempListFile.close()
def trainModel(groupTitle):
print 'loading...'
mentionMapper = mapMention('adData/analysis/ranked/mention.json')
group = 0
afinn = Afinn()
posFile = open('adData/analysis/groups/' + groupTitle + '/group' + str(group) + '.pos', 'r')
negFile = open('adData/analysis/groups/' + groupTitle + '/group' + str(group) + '.neg', 'r')
posParseLengthFile = open('adData/analysis/groups/' + groupTitle + '/parserLength' + str(group) + '.pos', 'r')
negParseLengthFile = open('adData/analysis/groups/' + groupTitle + '/parserLength' + str(group) + '.neg', 'r')
posHeadCountFile = open('adData/analysis/groups/' + groupTitle + '/parserHeadCount' + str(group) + '.pos', 'r')
negHeadCountFile = open('adData/analysis/groups/' + groupTitle + '/parserHeadCount' + str(group) + '.neg', 'r')
posPOSCountFile = open('adData/analysis/groups/' + groupTitle + '/parserPOSCount' + str(group) + '.pos', 'r')
negPOSCountFile = open('adData/analysis/groups/' + groupTitle + '/parserPOSCount' + str(group) + '.neg', 'r')
ids = []
contents = []
scores = []
labels = []
parseLength = []
headCount = []
usernames = []
POScounts = []
for line in posFile:
seg = line.strip().split(' :: ')
text = seg[3]
username = seg[7].split(';')
score = float(seg[0])
ids.append(seg[5])
usernames.append(username)
contents.append(text)
scores.append(score)
labels.append(1)
for line in negFile:
seg = line.strip().split(' :: ')
text = seg[3]
username = seg[7].split(';')
score = float(seg[0])
ids.append(seg[5])
usernames.append(username)
contents.append(text)
scores.append(score)
labels.append(0)
for line in posParseLengthFile:
parseLength.append(int(line.strip().split(' :: ')[0]))
for line in negParseLengthFile:
parseLength.append(int(line.strip().split(' :: ')[0]))
for line in posHeadCountFile:
headCount.append(int(line.strip().split(' :: ')[0]))
for line in negHeadCountFile:
headCount.append(int(line.strip().split(' :: ')[0]))
for line in posPOSCountFile:
POScounts.append(POSRatio(line.strip().split(' :: ')[0].split(' ')))
for line in negPOSCountFile:
POScounts.append(POSRatio(line.strip().split(' :: ')[0].split(' ')))
posHeadCountFile.close()
negHeadCountFile.close()
posParseLengthFile.close()
negParseLengthFile.close()
posPOSCountFile.close()
negPOSCountFile.close()
posFile.close()
negFile.close()
semanticFeatures_train = []
classes_train = []
for index, content in enumerate(contents):
temp = []
words = simpleTokenize(content)
twLen = float(len(words))
sentiScore = afinn.score(stemContent(content))
readScore = textstat.coleman_liau_index(content)
temp.append(twLen)
if content.count('URRL') > 0:
temp.append(1)
else:
temp.append(0)
if content.count('HHTTG') > 0:
temp.append(1)
else:
temp.append(0)
if content.count('USSERNM') > 0:
temp.append(1)
else:
temp.append(0)
temp.append(sentiScore / twLen)
temp.append(readScore)
temp.append(parseLength[index] / twLen)
temp.append(headCount[index] / twLen)
temp += POScounts[index]
if content.count('!') > 0:
temp.append(1)
else:
temp.append(0)
if content.count('?') > 0:
temp.append(1)
else:
temp.append(0)
mentionFlag = 0
mentionFollowers = 0
userCount = 0.0
for user in usernames[index]:
if user in mentionMapper:
userCount += 1
if mentionMapper[user][0] == 1:
mentionFlag = 1
mentionFollowers += mentionMapper[user][1]
temp.append(mentionFlag)
if userCount == 0:
temp.append(0.0)
else:
temp.append(mentionFollowers / userCount)
semanticFeatures_train.append(numpy.array(temp))
classes_train.append(labels[index])
feature_train = csr_matrix(numpy.array(semanticFeatures_train))
model = svm.SVC()
model.fit(feature_train, classes_train)
joblib.dump(model, 'models/full.pkl')
def extractor():
inputFile = open('infer/test.predict', 'r')
tempData = {}
tempOutput = {}
posCount = {'N': 0, 'V': 0, 'A': 0}
lengthOutput = []
headOutput = []
posOutput = []
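# Assumed input format (inferred from the column indices used below): each non-empty
# line is a whitespace-separated token row with the token index in column 0, the word
# form in column 1, the POS tag in column 4 and the head index in column 6; blank
# lines separate tweets.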
for line in inputFile:
if line.strip() != '':
words = line.strip().split()
tempData[words[0]] = words[6]
tempOutput[int(words[0])] = (words[1], int(words[6]), words[4])
if words[4] in ['N', '^']:
posCount['N'] += 1
elif words[4] == 'V':
posCount['V'] += 1
elif words[4] in ['A', 'R']:
posCount['A'] += 1
else:
longLen = longestLength(tempData)
lengthOutput.append(longLen)
headOutput.append(len(outputHeads(tempOutput).split()))
posOutput.append((posCount['N'], posCount['V'], posCount['A']))
tempData = {}
tempOutput = {}
posCount = {'N': 0, 'V': 0, 'A': 0}
inputFile.close()
return lengthOutput, headOutput, posOutput
def infer():
print 'loading...'
mentionMapper = mapMention('adData/analysis/ranked/mention.json')
afinn = Afinn()
mentionFile = open('infer/mention.input', 'r')
inputFile = open('infer/test', 'r')
contents = []
usernames = []
for line in inputFile:
text = line.strip()
contents.append(text)
for i in range(len(contents)):
usernames.append([])
for line in mentionFile:
items = line.strip().split(';')
if len(items) == 0:
usernames.append([])
else:
usernames.append(items)
parseLength, headCount, POSoutput = extractor()
inputFile.close()
mentionFile.close()
semanticFeatures_test = []
for index, content in enumerate(contents):
temp = []
words = simpleTokenize(content)
twLen = float(len(words))
sentiScore = afinn.score(stemContent(content))
readScore = textstat.coleman_liau_index(content)
temp.append(twLen)
if content.count('URRL') > 0:
temp.append(1)
else:
temp.append(0)
if content.count('HHTTG') > 0:
temp.append(1)
else:
temp.append(0)
if content.count('USSERNM') > 0:
temp.append(1)
else:
temp.append(0)
temp.append(sentiScore / twLen)
temp.append(readScore)
temp.append(parseLength[index] / twLen)
temp.append(headCount[index] / twLen)
temp += POSRatio(POSoutput[index])
if content.count('!') > 0:
temp.append(1)
else:
temp.append(0)
if content.count('?') > 0:
temp.append(1)
else:
temp.append(0)
mentionFlag = 0
mentionFollowers = 0
userCount = 0.0
for user in usernames[index]:
if user in mentionMapper:
userCount += 1
if mentionMapper[user][0] == 1:
mentionFlag = 1
mentionFollowers += mentionMapper[user][1]
temp.append(mentionFlag)
if userCount == 0:
temp.append(0.0)
else:
temp.append(mentionFollowers / userCount)
semanticFeatures_test.append(numpy.array(temp))
feature_test = csr_matrix(numpy.array(semanticFeatures_test))
model = joblib.load('models/full.pkl')
predictions = model.predict(feature_test)
score = model.decision_function(feature_test)
#print classes_test
#print predictions
#print numpy.count_nonzero(predictions)
for index, pred in enumerate(predictions):
#print pred
print score[index]
#print score
infer()
# trainModel('totalGroup')
# run3()
# outputFilename = 'results/test.result'
# run2(3, 'simGroup', outputFile=outputFilename)
|
mit
|
shyamalschandra/scikit-learn
|
sklearn/utils/random.py
|
234
|
10510
|
# Author: Hamzeh Alsalhi <ha258@cornell.edu>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
if pop_size == 0:
raise ValueError("a must be non-empty")
if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
# Normalize probabilites for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
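# Illustrative usage sketch (not part of the original module): two output columns,
# each with its own integer classes and class probabilities; the result is a
# (5, 2) CSC matrix.
# >>> random_choice_csc(5, [np.array([0, 1]), np.array([0, 2])],
# ...                   [np.array([0.5, 0.5]), np.array([0.9, 0.1])],
# ...                   random_state=0)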
|
bsd-3-clause
|
erelsgl/economics
|
double-auction-simulations/main.AAAI18-submission.py
|
1
|
15070
|
#!python3
"""
Simulation of single-type multi-unit double-auction mechanisms.
Author: Erel Segal-Halevi
Since : 2017-07
"""
import numpy as np
import pandas as pd
from pandas import DataFrame
from pandas.tools import plotting
import matplotlib.pyplot as plt
import math
import os
import random
from doubleauction import MUDA,WALRAS,walrasianEquilibrium,randomTradeWithExogeneousPrice
import torq_datasets_read as torq
from random_datasets import randomAuctions
COLUMNS=(
'Total buyers', 'Total sellers', 'Total traders', 'Min total traders', 'Total units',
'Max units per trader', 'Min units per trader', 'Normalized max units per trader', 'stddev',
'Optimal buyers', 'Optimal sellers', 'Optimal units',
'Optimal gain', 'MUDA-lottery gain', 'MUDA-Vickrey traders gain', 'MUDA-Vickrey total gain')
def replicaAuctions(replicaNums:list, auctions:list):
"""
INPUT: auctions - list of m auctions;
replicaNums - list of n integers.
OUTPUT: generator of m*n auctions, where in each auction, each agent is replicated i times.
"""
for auctionID,auctionTraders in auctions:
for replicas in replicaNums:
traders = replicas * auctionTraders
yield auctionID,traders
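# Illustrative example (hypothetical data): each input auction is emitted once per
# replica count, with its trader list repeated accordingly.
# >>> list(replicaAuctions([1, 2], [('A', ['t1', 't2'])]))
# [('A', ['t1', 't2']), ('A', ['t1', 't2', 't1', 't2'])]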
def sampleAuctions(agentNums:list, auctions:list):
"""
INPUT: auctions - list of m auctions;
agentNums - list of n integers.
OUTPUT: generator of m*n auctions, where in each auction, i agents are sampled from the empirical distribution
"""
for auctionID,auctionTraders in auctions:
for agentNum in agentNums:
traders = [random.choice(auctionTraders) for i in range(agentNum)]  # sampling with replacement
yield auctionID,traders
def simulateAuctions(auctions:list, resultsFilename:str, keyColumns:list):
"""
Simulate the auctions in the given generator.
"""
columns = keyColumns+COLUMNS
results = DataFrame(columns=columns)
print("\t{}".format(columns))
resultsFilenameTemp = resultsFilename+".temp"
for auctionID,traders in auctions:
if not traders:
raise ValueError("traders for auction {} is empty", auctionID)
print("Simulating auction {} with {} traders".format(auctionID,len(traders)))
totalBuyers = sum([t.isBuyer for t in traders])
totalSellers = len(traders)-totalBuyers
unitsPerTrader = [t.totalUnits() for t in traders]
maxUnitsPerTrader = max(unitsPerTrader)
minUnitsPerTrader = min(unitsPerTrader)
stddev = np.sqrt(sum([t.totalUnits()**2 for t in traders]))
(buyersWALRAS, sellersWALRAS, sizeWALRAS, gainWALRAS) = WALRAS(traders)
(sizeMUDALottery, gainMUDALottery, gainMUDALottery, sizeMUDAVickrey, tradersGainMUDAVickrey, totalGainMUDAVickrey) = MUDA(traders, Lottery=True, Vickrey=True)
resultsRow = [
*auctionID,
totalBuyers, totalSellers, totalBuyers+totalSellers, min(totalBuyers,totalSellers), sum(unitsPerTrader),
maxUnitsPerTrader, minUnitsPerTrader, maxUnitsPerTrader/max(1,minUnitsPerTrader), stddev,
buyersWALRAS, sellersWALRAS, sizeWALRAS,
gainWALRAS, gainMUDALottery, tradersGainMUDAVickrey, totalGainMUDAVickrey]
print("\t{}".format(resultsRow))
results.loc[len(results)] = resultsRow
results.to_csv(resultsFilenameTemp)
results.to_csv(resultsFilename)
os.remove(resultsFilenameTemp)
return results
def torqSimulationBySymbolDate(filename, combineByOrderDate=False, replicaNums=[1]):
"""
Treat each (symbol,date) combination as a separate auction.
"""
datasetFilename = "datasets/"+filename+".CSV"
resultsFilename = "results/"+filename+("-combined" if combineByOrderDate else "")+"-x"+str(max(replicaNums))+".csv"
return simulateAuctions(replicaAuctions(replicaNums,
torq.auctionsBySymbolDate(datasetFilename, combineByOrderDate)),
resultsFilename, keyColumns=("symbol","date"))
def torqSimulateBySymbol(filename, combineByOrderDate=False, agentNums=[100]):
"""
Treat all bidders for the same symbol, in ALL dates, as a distribution of values for that symbol.
"""
datasetFilename = "datasets/"+filename+".CSV"
resultsFilename = "results/"+filename+("-combined" if combineByOrderDate else "")+"-s"+str(max(agentNums))+".csv"
return simulateAuctions(sampleAuctions(agentNums,
torq.auctionsBySymbol(datasetFilename, combineByOrderDate)),
resultsFilename, keyColumns=("symbol",))
### PLOTS ###
YLABEL = 'MUDA GFT divided by maximum GFT'
YLIM = [0,1.05]
titleFontSize = 20
legendFontSize = 20
axesFontSize = 20
markerSize = 14
def plotTorq(filename, resultsFilename=None, combineByOrderDate=False, replicaNums=None, agentNums=None, numOfBins=10, ax=None, title=None, xColumn = 'Optimal units'):
if resultsFilename:
pass
elif replicaNums:
resultsFilename = "results/"+\
filename+\
("-combined" if combineByOrderDate else "")+\
"-x"+str(max(replicaNums))+\
".csv"
elif agentNums:
resultsFilename = "results/"+\
filename+\
("-combined" if combineByOrderDate else "")+\
"-s"+str(max(agentNums))+\
".csv"
else:
raise(Error("cannot calculate resultsFilename"))
plotResults(resultsFilename, xColumn=xColumn, numOfBins=numOfBins, ax=ax, title=title)
def plotResults(resultsFilename=None, xColumn='Min total traders', numOfBins=10, ax=None, title=None):
if not ax:
ax = plt.subplot(1, 1, 1)
if not title:
title = resultsFilename
print("plotting",resultsFilename)
results = pd.read_csv(resultsFilename)
results['Optimal market size'] = (results['Optimal buyers']+results['Optimal sellers']) / 2
results['Normalized market size'] = results['Optimal units'] / (results['Max units per trader'])
results['log10(M)'] = np.log(results['Max units per trader'])/np.log(10)
print(len(results), " auctions")
results = results[results['Optimal gain']>0]
print(len(results), " auctions with positive optimal gain")
for field in ['MUDA-lottery', 'MUDA-Vickrey traders', 'MUDA-Vickrey total']:
results[field+' ratio'] = results[field+' gain'] / results['Optimal gain']
if numOfBins:
results_bins = results.groupby(pd.cut(results[xColumn],numOfBins)).mean()
else:
results_bins = results.groupby(results[xColumn]).mean()
results_bins.plot(x=xColumn, y='MUDA-Vickrey total ratio', style=['b^-'], ax=ax, markersize=markerSize)
results_bins.plot(x=xColumn, y='MUDA-Vickrey traders ratio', style=['gv-'], ax=ax, markersize=markerSize)
results_bins.plot(x=xColumn, y='MUDA-lottery ratio', style=['ro-'], ax=ax, markersize=markerSize)
#plt.legend(loc=0,prop={'size':legendFontSize})
ax.legend_.remove()
ax.set_title(title, fontsize= titleFontSize, weight='bold')
ax.set_ylabel(YLABEL, fontsize= axesFontSize)
ax.tick_params(axis='both', which='major', labelsize=axesFontSize)
ax.tick_params(axis='both', which='minor', labelsize=axesFontSize)
ax.set_ylim(YLIM)
### MAIN PROGRAM ###
MUDA.LOG = randomTradeWithExogeneousPrice.LOG = False
def torqSimulation():
numOfBins = 100
numOfTraderss=list(range(10,1000,10))*1
filename = "901101-910131-SOD" #"910121-910121-IBM-SOD" # "901101-910131-SOD" # "901101-910131- SOD-NORM" #
if createResults:
torqSimulateBySymbol(filename, combineByOrderDate=True, agentNums=numOfTraderss)
torqSimulateBySymbol(filename, combineByOrderDate=False, agentNums=numOfTraderss)
#torqSimulateBySymbol(filename+"-NORM", combineByOrderDate=False, agentNums=numOfTraderss)
#torqSimulateBySymbol(filename+"-NORM", combineByOrderDate=True, agentNums=numOfTraderss)
#plotTorq(filename=filename, combineByOrderDate=False, agentNums=numOfTraderss, numOfBins=numOfBins)
# plotTorq(filename=filename, combineByOrderDate=True, agentNums=numOfTraderss, numOfBins=numOfBins,
# ax = plt.subplot(1,1,1), title="Auctions based on TORQ database", xColumn="Optimal units")
# plt.xlabel('Optimal #units (k)')
ax = plt.subplot(1,2,2)
plotTorq(filename=filename, combineByOrderDate=True, agentNums=numOfTraderss, numOfBins=numOfBins,
ax=ax, title="TORQ; combined", xColumn="Total traders")
ax.set_xlabel('Total #traders', fontsize=axesFontSize)
ax.set_xlim([0,1000])
ax.set_ylabel("")
ax = plt.subplot(1,2,1, sharey=None)
plotTorq(filename=filename, combineByOrderDate=False, agentNums=numOfTraderss, numOfBins=numOfBins, ax=ax, title="TORQ; additive", xColumn="Total traders")
ax.set_xlabel('Total #traders', fontsize=axesFontSize)
ax.set_xlim([0,1000])
plt.show()
# ax = plt.subplot(1,2,2)
# plotTorq(filename=filename+"-NORM", combineByOrderDate=True, agentNums=numOfTraderss, numOfBins=numOfBins,
# ax=ax, title="TORQ; normalized, combined", xColumn="Total traders")
# ax.set_xlabel('Total #traders', fontsize=axesFontSize)
# ax.set_xlim([0,1000])
# ax = plt.subplot(1,2,1, sharey=ax)
# plotTorq(filename=filename+"-NORM", combineByOrderDate=False, agentNums=numOfTraderss, numOfBins=numOfBins, ax=ax, title="TORQ; normalized, additive", xColumn="Total traders")
# ax.set_xlabel('Total #traders', fontsize=axesFontSize)
# ax.set_xlim([0,1000])
# plt.show()
# plotTorq(filename=filename+"-NORM", combineByOrderDate=True, agentNums=numOfTraderss, numOfBins=numOfBins,
# ax = plt.subplot(2,1,2))
# plt.show()
def randomSimulation(numOfAuctions = 100):
numOfTraderss = range(2000000, 42000000, 2000000)
minNumOfUnitsPerTrader = 10
maxNumOfUnitsPerTraders = [100,1000,10000,1000000,10000000,100000000,100000]
meanValue = 500
maxNoiseSizes = [50,100,150,200,300,350,400,450,500,250]
numOfBins = 20
# general
filenameTraders = "results/random-traders-{}units-{}noise.csv".format(maxNumOfUnitsPerTraders[-1],maxNoiseSizes[-1])
filenameUnitsFixedTraders = "results/random-units-{}traders-{}noise.csv".format(numOfTraderss[-1],maxNoiseSizes[-1])
filenameUnitsFixedVirtual = "results/random-units-{}virtual-{}noise.csv".format(numOfTraderss[-1],maxNoiseSizes[-1])
filenameNoise = "results/random-noise-{}traders-{}units.csv".format(numOfTraderss[-1],maxNumOfUnitsPerTraders[3])
# additive
filenameTradersAdd = "results/random-traders-{}units-{}noise-additive.csv".format(maxNumOfUnitsPerTraders[3],maxNoiseSizes[-1])
filenameUnitsAdd = "results/random-units-{}traders-{}noise-additive.csv".format(numOfTraderss[-1],maxNoiseSizes[-1])
filenameNoiseAdd = "results/random-noise-{}traders-{}units-additive.csv".format(numOfTraderss[-1],maxNumOfUnitsPerTraders[3])
if createResults:
keyColumns=("numOfTraders","minNumOfUnitsPerTrader","maxNumOfUnitsPerTrader","maxNoiseSize")
### non-additive
simulateAuctions(randomAuctions( ### as function of #traders
numOfAuctions, numOfTraderss, minNumOfUnitsPerTrader, maxNumOfUnitsPerTraders[-1:], meanValue, maxNoiseSizes[-1:], fixedNumOfVirtualTraders=True),
filenameTraders, keyColumns=keyColumns)
simulateAuctions(randomAuctions( ### as function of m - fixed total units
numOfAuctions, numOfTraderss[-1:], minNumOfUnitsPerTrader, maxNumOfUnitsPerTraders, meanValue, maxNoiseSizes[-1:], fixedNumOfVirtualTraders=True),
filenameUnitsFixedVirtual, keyColumns=keyColumns)
# simulateAuctions(randomAuctions( ### as function of m - fixed total traders - TOO LONG
# numOfAuctions, [100], minNumOfUnitsPerTrader, maxNumOfUnitsPerTraders, meanValue, maxNoiseSizes[-1:], fixedNumOfVirtualTraders=False),
# filenameUnitsFixedTraders, keyColumns=keyColumns)
simulateAuctions(randomAuctions( ### as function of noise
numOfAuctions, numOfTraderss[-1:], minNumOfUnitsPerTrader, maxNumOfUnitsPerTraders[-1:], meanValue, maxNoiseSizes, fixedNumOfVirtualTraders=True),
filenameNoise, keyColumns=keyColumns)
### additive
# simulateAuctions(randomAuctions( ### as function of #traders
# numOfAuctions, numOfTraderss, maxNumOfUnitsPerTraders[3], maxNumOfUnitsPerTraders[-1:], meanValue, maxNoiseSizes[-1:], fixedNumOfVirtualTraders=True),
# filenameTradersAdd, keyColumns=keyColumns)
# # simulateAuctions(randomAuctions( ### as function of m - fixed total units
# # numOfAuctions, numOfTraderss[-1:], maxNumOfUnitsPerTraders, meanValue, maxNoiseSizes[-1:],isAdditive=True, fixedNumOfVirtualTraders=True),
# # filenameUnitsAdd, keyColumns=keyColumns)
# simulateAuctions(randomAuctions( ### as function of noise
# numOfAuctions, numOfTraderss[-1:], maxNumOfUnitsPerTraders[3], maxNumOfUnitsPerTraders[-1:], meanValue, maxNoiseSizes, fixedNumOfVirtualTraders=True),
# filenameNoiseAdd, keyColumns=keyColumns)
# # simulateAuctions(randomAuctions( ### as function of m - fixed total traders
# # numOfAuctions, [100], maxNumOfUnitsPerTraders, meanValue, maxNoiseSizes[-1:], isAdditive=True, fixedNumOfVirtualTraders=False),
# # filenameUnitsFixedTraders, keyColumns=keyColumns)
TITLESTART = ""# "Uniform; "
### non-additive
ax=plt.subplot(1,2,1)
plotResults(filenameTraders,"Total traders",numOfBins, ax, title=
TITLESTART+"m={},M={},noise={}".format(minNumOfUnitsPerTrader,maxNumOfUnitsPerTraders[-1],maxNoiseSizes[-1]))
ax.set_xlabel('Total #traders', fontsize=axesFontSize)
ax.set_xlim([0,1000])
# ax=plt.subplot(1,1,1)
# plotResults(filenameTraders,"Optimal units",numOfBins, ax, title=
# TITLESTART+"m={},M={},noise={}".format(minNumOfUnitsPerTrader,maxNumOfUnitsPerTraders[3],maxNoiseSizes[-1]))
# plt.xlabel('Optimal #units (k)')
# plt.show()
ax=plt.subplot(1,2,2, sharey=None)
plotResults(filenameUnitsFixedVirtual,"log10(M)",numOfBins=None, ax=ax, title=TITLESTART+"m={},units={},noise={}".format(minNumOfUnitsPerTrader,numOfTraderss[-1],maxNoiseSizes[-1]))
#labels = [""]+["{:.0e}".format(t) for t in sorted(maxNumOfUnitsPerTraders)]
ax.set_xlim([1,8])
ax.set_xticklabels(["","100","1e3","1e4","1e5","1e6","1e7","1e8"])
ax.set_xlabel('Max #units per trader (M)', fontsize=axesFontSize)
ax.set_ylabel("")
plt.show()
# plotResults(filenameUnitsFixedTraders,"maxNumOfUnitsPerTrader",numOfBins, plt.subplot(1,1,1),
# title="traders={}, noise={}".format(numOfTraderss[-1],maxNoiseSizes[-1]))
# plt.xlabel('#units per trader (M)')
# plt.show()
# plotResults(filenameNoise,"maxNoiseSize",numOfBins, plt.subplot(1,1,1),
# title=TITLESTART+"units={},m={},M={}".format(numOfTraderss[-1],minNumOfUnitsPerTrader,maxNumOfUnitsPerTraders[3]))
# plt.xlabel('Max noise size (A)', fontsize=axesFontSize)
# plt.show()
### additive
# # plotResults(filenameTradersAdd,"numOfTraders",numOfBins, plt.subplot(1,1,1), title=TITLESTART+"m={},M={},n oise={}, additive".format(minNumOfUnitsPerTrader,maxNumOfUnitsPerTraders[3],maxNoiseSizes[-1]))
# # plt.xlabel('total #units')
# plotResults(filenameTradersAdd,"Optimal units",numOfBins, plt.subplot(1,1,1),
# title=TITLESTART+"m={},M={},noise={},additive".format(minNumOfUnitsPerTrader, maxNumOfUnitsPerTraders[3],maxNoiseSizes[-1]))
# plt.xlabel('optimal #units (k)')
# plt.show()
#
# # plotResults(filenameUnitsAdd,"maxNumOfUnitsPerTrader",numOfBins, plt.subplot(1,1,1),
# # title=TITLESTART+"traders={},noise={},additive".format(numOfTraderss[-1],maxNoiseSizes[-1]))
# # plt.ylabel('')
# plotResults(filenameNoiseAdd,"maxNoiseSize",numOfBins, plt.subplot(1,1,1),
# title=TITLESTART+"traders={},m={},M={},additive".format(numOfTraderss[-1],minNumOfUnitsPerTrader, maxNumOfUnitsPerTraders[3]))
# plt.xlabel('Max noise size (A)')
# plt.show()
createResults = False # True #
torqSimulation()
randomSimulation(numOfAuctions = 10)
|
lgpl-2.1
|
jhamrick/cogsci-proceedings-analysis
|
scraper.py
|
1
|
2997
|
import urllib2
import pandas as pd
from bs4 import BeautifulSoup, element
def load_html(url):
response = urllib2.urlopen(url)
html = response.read().replace("&nbsp;", "")
return html
def get_papers_table(year):
url = "https://mindmodeling.org/cogsci{}/".format(year)
soup = BeautifulSoup(load_html(url))
tables = soup.find_all("table")
tds = tables[5].find_all("td")
tds = [td for td in tds if len(td.contents) > 0]
paper_type = None
papers = []
paper = {}
for td in tds:
elem = td.contents[0]
if isinstance(elem, element.NavigableString):
paper['authors'] = unicode(elem)
paper['year'] = year
paper['section'] = paper_type
papers.append(paper)
paper = {}
elif elem.name == 'a':
href = url + elem.attrs['href']
title = "".join(elem.contents)
paper['url'] = href
paper['title'] = title
elif elem.name == 'h2':
section_name, = elem.contents
paper_type = section_name.strip()
return pd.DataFrame(papers)
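# Note: the returned DataFrame has one row per paper, with the columns
# 'authors', 'section', 'title', 'url' and 'year'.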
def get_papers_list(year):
url = "https://mindmodeling.org/cogsci{}/".format(year)
html = load_html(url)
html = html.replace("<li>", "").replace("<li id=session>", "")
soup = BeautifulSoup(html)
papers = []
paper = {}
paper_type = None
for elem in soup.findAll("a"):
if not isinstance(elem.contents[0], element.NavigableString):
continue
sibling = elem.findNextSibling()
if not hasattr(sibling, "name"):
continue
if sibling.name != "ul":
continue
toplevel = elem.findParent().findParent()
break
for section in toplevel.contents:
if isinstance(section, element.NavigableString):
paper_type = section.strip()
continue
for elem in section.find_all("a"):
href = url + elem.attrs['href']
try:
title = "".join(elem.contents)
except TypeError:
continue
paper = {}
paper['year'] = year
paper['url'] = href
paper['title'] = title
paper['section'] = paper_type
sibling = elem.findNextSibling()
authors, = sibling.contents
paper['authors'] = unicode(authors)
papers.append(paper)
return pd.DataFrame(papers)
def get_papers():
papers = pd.concat([
get_papers_table(2014),
get_papers_list(2013),
get_papers_list(2012),
get_papers_list(2011),
get_papers_list(2010)
])
papers = papers\
.set_index('url')\
.sort()
if papers.isnull().any().any():
raise RuntimeError("some entries are null")
return papers
if __name__ == "__main__":
pathname = "cogsci_proceedings_raw.csv"
papers = get_papers()
papers.to_csv(pathname, encoding='utf-8')
|
mit
|
Azeret/galIMF
|
plot_stellar_yield_table.py
|
1
|
38495
|
import time
import math
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
import element_abundances_solar
reference_name = 'Anders1989'
H_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'H')
# He_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'He')
C_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'C')
# N_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'N')
O_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'O')
Mg_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'Mg')
Fe_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'Fe')
Si_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'Si')
Ca_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'Ca')
def plot_lifetime_and_finalmass():
Z2_list = [0.0004, 0.004, 0.008, 0.012]
file = open('yield_tables/rearranged/setllar_final_mass_from_portinari98/portinari98_Z=0.004.txt', 'r')
data = file.readlines()
file.close()
list2 = str.split(data[3])
list_ini_mass = []
for j in list2:
list_ini_mass.append(math.log(float(j), 10))
list_fin_mass = []
i = len(Z2_list) - 1
while i > -1:
file = open('yield_tables/rearranged/setllar_final_mass_from_portinari98/portinari98_Z={}.txt'.format(Z2_list[i]), 'r')
data = file.readlines()
file.close()
list2 = str.split(data[5])
list3 = []
for j in list2:
list3.append(math.log(float(j), 10))
list = [float(data[1]), list3]
list_fin_mass.append(list)
(i) = (i - 1)
color_list_ = []
for i in range(len(list_fin_mass)):
ZZZ = list_fin_mass[i][0]
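# [Z] = log10(Z / 0.01886), i.e. the metallicity relative to 0.01886, which is
# presumably the solar metallicity adopted throughout this script.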
Z_box = math.log(ZZZ, 10) - math.log(0.01886, 10)
color_list_.append(round(((Z_box+7)**4.001 - (-6.001 + 7) ** 4.001) / ((1 + 7) ** 4.001 - (-6.001 + 7) ** 4.001) * 1000))
colors = plt.cm.hsv_r(np.linspace(0, 1, 1000))
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(21, figsize=(4, 3.5))
# plt.xlim(-1.5, 2.5)
# plt.ylim(-1.5, 1.5)
# i = len(Z2_list) - 1
# while i > -1:
# plt.plot(list_ini_mass, list_fin_mass[i][1], label='Z={}'.format(list_fin_mass[i][0]))
# (i) = (i - 1)
# plt.plot([-2, 3], [-2, 3], ls='dashed', c='k', lw=0.7)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$ [$M_\odot$])')
# plt.ylabel(r'log$_{10}$($M_{\rm *, final}$ [$M_\odot$])')
# plt.tight_layout()
# plt.savefig('Interpolated_stellar_final_mass.pdf', dpi=250)
list_lifetime = []
i = len(Z2_list) - 1
while i > -1:
file = open(
'yield_tables/rearranged/setllar_lifetime_from_portinari98/portinari98_Z={}.txt'.format(Z2_list[i]), 'r')
data = file.readlines()
file.close()
list2 = str.split(data[5])
list3 = []
for j in list2:
list3.append(math.log(float(j), 10))
list = [float(data[1]), list3]
list_lifetime.append(list)
(i) = (i - 1)
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(22, figsize=(4, 3.5))
# plt.xlim(-1.5, 2.5)
# plt.ylim(6, 15)
# i = len(Z2_list) - 1
# while i > -1:
# plt.plot(list_ini_mass, list_lifetime[i][1], label='Z={}'.format(list_fin_mass[i][0]))
# (i) = (i - 1)
# # plt.plot([-2, 3], [-2, 3], ls='dashed', c='k', lw=0.7)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$ [$M_\odot$])')
# plt.ylabel(r'log$_{10}$(life time [yr])')
# plt.tight_layout()
# plt.savefig('Interpolated_stellar_lifetime.pdf', dpi=250)
##########
Metallicity_origen = [0.008, 0.02]
Age_origen = [
[6.47E+10, 3.54E+10, 2.09E+10, 1.30E+10, 8.46E+09, 5.72E+09, 4.12E+09, 2.92E+09, 2.36E+09, 2.18E+09, 1.82E+09,
1.58E+09, 1.41E+09, 1.25E+09, 1.23E+09, 6.86E+08, 4.12E+08, 1.93E+08, 1.15E+08, 7.71E+07, 5.59E+07, 3.44E+07,
2.10E+07, 1.49E+07, 1.01E+07, 6.65E+06, 5.30E+06, 4.15E+06, 3.44E+06, 3.32E+06],
[7.92E+10, 4.45E+10, 2.61E+10, 1.59E+10, 1.03E+10, 6.89E+09, 4.73E+09, 3.59E+09, 2.87E+09, 2.64E+09, 2.18E+09,
1.84E+09, 1.59E+09, 1.38E+09, 1.21E+09, 7.64E+08, 4.56E+08, 2.03E+08, 1.15E+08, 7.45E+07, 5.31E+07, 3.17E+07,
1.89E+07, 1.33E+07, 9.15E+06, 6.13E+06, 5.12E+06, 4.12E+06, 3.39E+06, 3.23E+06]]
Age_012 = []
for i in range(len(Age_origen[0])):
Age_012.append((Age_origen[0][i]*2+Age_origen[1][i])/3)
Remnant_mass_origen = [
[1.35, 1.48, 1.84, 2.04, 6.9, 12.5, 5.69, 9.89],
[1.31, 1.44, 1.87, 2.11, 7.18, 2.06, 2.09, 2.11]
]
Remnant_mass_012 = []
for i in range(len(Remnant_mass_origen[0])):
Remnant_mass_012.append((Remnant_mass_origen[0][i]*2+Remnant_mass_origen[1][i])/3)
Mass = [0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5,
1.6, 1.7, 1.8, 1.9, 2.0, 2.5, 3.0, 4.0, 5.0, 6.0,
7.0, 9.0, 12., 15., 20., 30., 40., 60., 100, 120]
Metallicity = [0.0004, 0.004, 0.008, 0.12]
Age = [
[4.28E+10, 2.37E+10, 1.41E+10, 8.97E+09, 6.03E+09, 4.23E+09, 3.08E+09, 2.34E+09, 1.92E+09, 1.66E+09, 1.39E+09,
1.18E+09, 1.11E+09, 9.66E+08, 8.33E+08, 4.64E+08, 3.03E+08, 1.61E+08, 1.01E+08, 7.15E+07, 5.33E+07, 3.42E+07,
2.13E+07, 1.54E+07, 1.06E+07, 6.90E+06, 5.45E+06, 4.20E+06, 3.32E+06, 3.11E+06],
[5.35E+10, 2.95E+10, 1.73E+10, 1.09E+10, 7.13E+09, 4.93E+09, 3.52E+09, 2.64E+09, 2.39E+09, 1.95E+09, 1.63E+09,
1.28E+09, 1.25E+09, 1.23E+09, 1.08E+09, 5.98E+08, 3.67E+08, 1.82E+08, 1.11E+08, 7.62E+07, 5.61E+07, 3.51E+07,
2.14E+07, 1.52E+07, 1.05E+07, 6.85E+06, 5.44E+06, 4.19E+06, 3.38E+06, 3.23E+06],
[6.47E+10, 3.54E+10, 2.09E+10, 1.30E+10, 8.46E+09, 5.72E+09, 4.12E+09, 2.92E+09, 2.36E+09, 2.18E+09, 1.82E+09,
1.58E+09, 1.41E+09, 1.25E+09, 1.23E+09, 6.86E+08, 4.12E+08, 1.93E+08, 1.15E+08, 7.71E+07, 5.59E+07, 3.44E+07,
2.10E+07, 1.49E+07, 1.01E+07, 6.65E+06, 5.30E+06, 4.15E+06, 3.44E+06, 3.32E+06],
Age_012]
len_mass = len(Mass)
log_Mass = []
for i in range(len_mass):
log_Mass.append(math.log(Mass[i], 10))
len_metal = len(Metallicity)
log_Metallicity = []
for i in range(len_metal):
log_Metallicity.append(math.log(Metallicity[i], 10))
log_Age = []
for i in range(len_metal):
log_Age.append([])
for j in range(len_mass):
log_Age[i].append(math.log(Age[i][j], 10))
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(4, 4))
i = 0
while i < len(Z2_list):
ZZZ = list_fin_mass[i][0]
Z_box = round(math.log(ZZZ, 10)-math.log(0.01886, 10), 2)
axs[0].plot(list_ini_mass, list_lifetime[i][1], lw=(6-i)/2, label='Z={}, [Z]={}'.format(ZZZ, Z_box), color=colors[color_list_[i]])
(i) = (i + 1)
i = len_metal-1
# while i > -1:
# axs[0].scatter(log_Mass, log_Age[i], s=3, marker='*', edgecolors='w', linewidth='0.1', zorder=10)
# (i) = (i - 1)
axs[0].plot([-1, 2], [7, 7])
axs[0].plot([math.log(17, 10), math.log(17, 10)], [6, 15])
# axs[0].set_yticks(np.arange(6, 16, 2))
axs[0].set_ylim(6, 15)
axs[0].set_ylabel(r'log$_{10}$(life time [yr])')
axs[0].legend(prop={'size': 6}, loc='best')
Mass = [
[9, 12, 15, 20, 30, 40, 60, 100, 120],
[9, 12, 15, 20, 30, 40, 100, 120],
[9, 12, 15, 20, 30, 40, 60, 120],
[9, 12, 15, 20, 30, 40, 60, 120]
]
Metallicity = [0.0004, 0.004, 0.008, 0.12]
Remnant_mass = [
[1.35, 1.5, 1.8, 2.07, 6.98, 14.91, 24.58, 32.06, 30.6],
[1.35, 1.5, 1.82, 2.04, 6.98, 12.6, 36.7, 35.2],
[1.35, 1.48, 1.84, 2.04, 6.9, 12.5, 5.69, 9.89],
Remnant_mass_012
]
#################################################################
# WW95_solar = 0.01886
# Metallicity_WW95 = [0, WW95_solar*10**-4, WW95_solar*0.01, WW95_solar*0.1, WW95_solar]
# Mass_WW95 = [12, 13, 15, 18, 20, 22, 25, 30, 35, 40]
# Remnant_mass_WW95_B = [
# [1.32, 1.46, 1.43, 1.76, 2.06, 2.02, 2.07, 1.94, 3.86, 5.45],
# [1.38, 1.31, 1.49, 1.69, 1.97, 2.12, 1.99, 2.01, 3.39, 4.45],
# [1.40, 1.44, 1.56, 1.58, 1.98, 2.04, 1.87, 2.21, 2.42, 4.42],
# [1.28, 1.44, 1.63, 1.61, 1.97, 2.01, 1.87, 2.08, 3.03, 4.09],
# [1.35, 1.28, 1.53, 3.40, 4.12, 1.49, 1.90, 1.54, 7.62, 12.2]
# ]
# Interpolation_remnant_mass_WW95_B = interpolate.interp2d(Mass_WW95, Metallicity_WW95, Remnant_mass_WW95_B)
# Remnant_mass_WW95_B_new = []
# for i in range(len(Metallicity)):
# Remnant_mass_WW95_B_new.append([])
# for j in range(len(Mass_WW95)):
# Remnant_mass_WW95_B_new[i].append(Interpolation_remnant_mass_WW95_B(Mass_WW95[j], Metallicity[i]))
#
# log_Remnant_mass_WW95_B = []
# for i in range(len_metal):
# log_Remnant_mass_WW95_B.append([])
# for j in range(len(Remnant_mass_WW95_B[i])):
# log_Remnant_mass_WW95_B[i].append(math.log(Remnant_mass_WW95_B[i][j], 10))
#
# log_mass_WW95 = []
# for i in range(len(Mass_WW95)):
# log_mass_WW95.append(math.log(Mass_WW95[i], 10))
#################################################################
len_metal = len(Metallicity)
log_Metallicity = []
for i in range(len_metal):
log_Metallicity.append(math.log(Metallicity[i], 10))
log_Remnant_mass = []
for i in range(len_metal):
log_Remnant_mass.append([])
for j in range(len(Remnant_mass[i])):
log_Remnant_mass[i].append(math.log(Remnant_mass[i][j], 10))
log_mass = []
for i in range(len_metal):
log_mass.append([])
for j in range(len(Mass[i])):
log_mass[i].append(math.log(Mass[i][j], 10))
# print(log_mass)
# print(len(log_mass[0]))
# print(len(log_mass))
# print(len(log_Remnant_mass[0]))
i = 0
while i < len(Z2_list):
axs[1].plot(list_ini_mass, list_fin_mass[i][1], lw=(6-i)/2, label='Z={}'.format(list_fin_mass[i][0]), color=colors[color_list_[i]])
(i) = (i + 1)
i = len_metal-1
# while i > -1:
# axs[1].scatter(log_mass[i], log_Remnant_mass[i], s=10, marker='*', edgecolors='w', linewidth='0.1', zorder=10)
# (i) = (i - 1)
# i = len_metal-1
# # while i > -1:
# # axs[1].scatter(log_mass_WW95, log_Remnant_mass_WW95_B[i], s=10, marker='^', edgecolors='w', linewidth='0.1', zorder=10)
# # (i) = (i - 1)
axs[1].set_yticks(np.arange(-2, 2, 1))
axs[1].set_ylim(-1.5, 1.5)
axs[1].set_ylabel(r'log$_{10}(M_{\rm *, final}$ [$M_\odot$])')
axs[1].set_xlabel(r'log$_{10}(M_{\rm *, initial}$ [$M_\odot$])')
plt.tight_layout()
# Remove horizontal space between axes
fig.subplots_adjust(hspace=0)
plt.savefig('Interpolated_stellar_lifetime_final_mass.pdf', dpi=250)
plt.show()
return
def function_read_file(yield_table_name):
####################
### read in file ###
####################
if yield_table_name == "portinari98":
file_yield = open(
'yield_tables/agb_and_massive_stars_portinari98_marigo01_gce_totalyields.txt', 'r')
# 'yield_tables/agb_and_massive_stars_portinari98_marigo01.txt', 'r')
# Use net yields of Portinari and Marigo
        # Net yields with masses up to 7Msun are from Marigo; above that, those of Portinari are taken.
# Only isotopes are selected which are available in both yield sets and go up to Fe.
# Initial masses go from the lowest mass available up to 100Msun.
# Yield set ID M01P98 in Ritter et al. 2017.
# References: Marigo et al. 2001, http://ukads.nottingham.ac.uk/abs/2001A%26A...370..194M
# Portinari et al. 1998, http://ukads.nottingham.ac.uk/abs/1998A%26A...334..505P
data = file_yield.readlines()
file_yield.close()
elif yield_table_name == "Kobayashi06":
file_yield = open(
'yield_tables/agb_and_massive_stars_Kobayashi06_marigo01_gce_totalyields.txt', 'r')
        # Use yields of Kobayashi C. et al., 2006, ApJ, 653, 1145 (massive stars),
        # combined with the AGB yields of Marigo 2001.
data = file_yield.readlines()
file_yield.close()
elif yield_table_name == "WW95":
file_yield = open(
'yield_tables/massive_stars_WW95_totalyields.txt', 'r')
# Use net yields of Woosley S. E., Weaver T. A., 1995, ApJS, 101, 181 (WW95)
# Use WW95 model B which has the highest [Mg/Fe].
data = file_yield.readlines()
file_yield.close()
elif yield_table_name == "marigo01":
file_yield = open(
'yield_tables/agb_marigo01_totalyields.txt', 'r')
data = file_yield.readlines()
file_yield.close()
###########################
### extract information ###
###########################
#
H_relative_line_number = function_get_element_line_number(data, 'H-1')
He_relative_line_number = function_get_element_line_number(data, 'He-4')
C_relative_line_number = function_get_element_line_number(data, 'C-12')
N_relative_line_number = function_get_element_line_number(data, 'N-14')
O_relative_line_number = function_get_element_line_number(data, 'O-16')
Ne_relative_line_number = function_get_element_line_number(data, 'Ne-20')
Mg_relative_line_number = function_get_element_line_number(data, 'Mg-24')
Si_relative_line_number = function_get_element_line_number(data, 'Si-28')
S_relative_line_number = function_get_element_line_number(data, 'S-32')
Ca_relative_line_number = function_get_element_line_number(data, 'Ca-40')
Fe_relative_line_number = function_get_element_line_number(data, 'Fe-56')
#
global M_list, Z_list, eject_mass_list, H_eject_mass_list, He_eject_mass_list, C_eject_mass_list, \
N_eject_mass_list, O_eject_mass_list, Ne_eject_mass_list, Mg_eject_mass_list, Si_eject_mass_list, \
S_eject_mass_list, Ca_eject_mass_list, Fe_eject_mass_list, Metal_eject_mass_list
global O_over_Mg_list, Mg_over_Fe_list, Ca_over_Fe_list, Si_over_Fe_list, C_over_H_list, Mg_over_H_list, \
Si_over_H_list, Fe_over_H_list, O_over_H_list, Z_over_H_list, \
Z_over_X_list, Z_over_Z0_list, XXX_list, YYY_list, ZZZ_list, O_over_Fe_list
#
i = len(data)-1
while i > -1:
line_i = str.split(data[i])
if line_i[1] == 'Table:':
line_H = str.split(data[i + H_relative_line_number])
line_He = str.split(data[i + He_relative_line_number])
line_C = str.split(data[i + C_relative_line_number])
line_N = str.split(data[i + N_relative_line_number])
line_O = str.split(data[i + O_relative_line_number])
line_Ne = str.split(data[i + Ne_relative_line_number])
line_Mg = str.split(data[i + Mg_relative_line_number])
line_Si = str.split(data[i + Si_relative_line_number])
line_S = str.split(data[i + S_relative_line_number])
line_Ca = str.split(data[i + Ca_relative_line_number])
line_Fe = str.split(data[i + Fe_relative_line_number])
line_Mfinal = str.split(data[i + 2])
(Z, M) = function_get_Z_M(line_i[2]) # metallicity and mass of the star
ejecta_mass = round((M - function_get_Mfinal(line_Mfinal[2])), 5) ####################
H_mass = function_get_element_mass(line_H[1])
He_mass = function_get_element_mass(line_He[1])
C_mass = function_get_element_mass(line_C[1])
N_mass = function_get_element_mass(line_N[1])
O_mass = function_get_element_mass(line_O[1])
Ne_mass = function_get_element_mass(line_Ne[1])
Mg_mass = function_get_element_mass(line_Mg[1])
Si_mass = function_get_element_mass(line_Si[1])
S_mass = function_get_element_mass(line_S[1])
Ca_mass = function_get_element_mass(line_Ca[1])
Fe_mass = function_get_element_mass(line_Fe[1])
H_num = H_mass/1.0079
C_num = C_mass/12.011
N_num = N_mass/14.007
O_num = O_mass/15.9994
Ne_num = Ne_mass/20.18
Mg_num = Mg_mass/24.305
Si_num = Si_mass/28.085
S_num = S_mass/32.06
Ca_num = Ca_mass/40.078
Fe_num = Fe_mass/55.845
Metal_num = C_num+N_num+O_num+Ne_num+Mg_num+Si_num+S_num+Ca_num+Fe_num
O_over_Mg = math.log(O_num/Mg_num, 10) - O_abundances_solar + Mg_abundances_solar
Mg_over_H = math.log(Mg_num/H_num, 10) - Mg_abundances_solar + H_abundances_solar
Si_over_H = math.log(Si_num/H_num, 10) - Si_abundances_solar + H_abundances_solar
C_over_H = math.log(C_num/H_num, 10) - C_abundances_solar + H_abundances_solar
Fe_over_H = math.log(Fe_num/H_num, 10) - Fe_abundances_solar + H_abundances_solar
O_over_H = math.log(O_num/H_num, 10) - O_abundances_solar + H_abundances_solar
Mg_over_Fe = math.log(Mg_num/Fe_num, 10) - Mg_abundances_solar + Fe_abundances_solar
Ca_over_Fe = math.log(Ca_num/Fe_num, 10) - Ca_abundances_solar + Fe_abundances_solar
Si_over_Fe = math.log(Si_num/Fe_num, 10) - Si_abundances_solar + Fe_abundances_solar
O_over_Fe = math.log(O_num/Fe_num, 10) - O_abundances_solar + Fe_abundances_solar
Metal_mass = round((ejecta_mass - H_mass - He_mass), 5) ####################
# Metal_mass = round((C_mass+N_mass+O_mass+Ne_mass+Mg_mass+Si_mass+S_mass+Ca_mass+Fe_mass), 5) ###### the same ######
if Metal_mass<0:
print("Warning: Metal_mass=", Metal_mass, "<0")
print("check stellar yield table with metallicity and mass being:", Z, "&", M)
Metal_mass = 0
Z_over_X = math.log(Metal_mass / H_mass, 10) - math.log(0.01886 / 0.7381, 10)
Z_over_Z0 = math.log(Metal_mass / ejecta_mass, 10) - math.log(0.01886, 10)
Z_over_H = math.log(Metal_num / H_num, 10) - math.log(0.01886 / 18 / 0.7381, 10) # where 18 is the estimated average atomic weight over the weight of hydrogen.
XXX = H_mass / ejecta_mass
YYY = He_mass / ejecta_mass
ZZZ = Metal_mass / ejecta_mass
if len(Z_list) == 0:
Z_list.append(Z)
Z_n = 0
M_list.append([])
eject_mass_list.append([])
H_eject_mass_list.append([])
He_eject_mass_list.append([])
C_eject_mass_list.append([])
N_eject_mass_list.append([])
O_eject_mass_list.append([])
Ne_eject_mass_list.append([])
Mg_eject_mass_list.append([])
Si_eject_mass_list.append([])
S_eject_mass_list.append([])
Ca_eject_mass_list.append([])
Fe_eject_mass_list.append([])
Metal_eject_mass_list.append([])
Z_over_H_list.append([])
Z_over_X_list.append([])
Z_over_Z0_list.append([])
XXX_list.append([])
YYY_list.append([])
ZZZ_list.append([])
O_over_Mg_list.append([])
Mg_over_Fe_list.append([])
Si_over_Fe_list.append([])
Ca_over_Fe_list.append([])
Mg_over_H_list.append([])
Si_over_H_list.append([])
C_over_H_list.append([])
Fe_over_H_list.append([])
O_over_H_list.append([])
O_over_Fe_list.append([])
if Z != Z_list[-1]:
Z_list.append(Z)
Z_n += 1
M_list.append([])
eject_mass_list.append([])
H_eject_mass_list.append([])
He_eject_mass_list.append([])
C_eject_mass_list.append([])
N_eject_mass_list.append([])
O_eject_mass_list.append([])
Ne_eject_mass_list.append([])
Mg_eject_mass_list.append([])
Si_eject_mass_list.append([])
S_eject_mass_list.append([])
Ca_eject_mass_list.append([])
Fe_eject_mass_list.append([])
Metal_eject_mass_list.append([])
O_over_Mg_list.append([])
Mg_over_Fe_list.append([])
Ca_over_Fe_list.append([])
Si_over_Fe_list.append([])
Mg_over_H_list.append([])
Si_over_H_list.append([])
C_over_H_list.append([])
Fe_over_H_list.append([])
O_over_H_list.append([])
Z_over_H_list.append([])
Z_over_X_list.append([])
Z_over_Z0_list.append([])
XXX_list.append([])
YYY_list.append([])
ZZZ_list.append([])
O_over_Fe_list.append([])
M_list[Z_n].append(M)
eject_mass_list[Z_n].append(ejecta_mass)
H_eject_mass_list[Z_n].append(H_mass)
He_eject_mass_list[Z_n].append(He_mass)
C_eject_mass_list[Z_n].append(C_mass)
N_eject_mass_list[Z_n].append(N_mass)
O_eject_mass_list[Z_n].append(O_mass)
Ne_eject_mass_list[Z_n].append(Ne_mass)
Mg_eject_mass_list[Z_n].append(Mg_mass)
Si_eject_mass_list[Z_n].append(Si_mass)
S_eject_mass_list[Z_n].append(S_mass)
Ca_eject_mass_list[Z_n].append(Ca_mass)
Fe_eject_mass_list[Z_n].append(Fe_mass)
Metal_eject_mass_list[Z_n].append(Metal_mass)
O_over_Mg_list[Z_n].append(O_over_Mg)
Mg_over_Fe_list[Z_n].append(Mg_over_Fe)
Ca_over_Fe_list[Z_n].append(Ca_over_Fe)
Si_over_Fe_list[Z_n].append(Si_over_Fe)
Mg_over_H_list[Z_n].append(Mg_over_H)
Si_over_H_list[Z_n].append(Si_over_H)
C_over_H_list[Z_n].append(C_over_H)
O_over_H_list[Z_n].append(O_over_H)
Z_over_H_list[Z_n].append(Z_over_H)
Z_over_X_list[Z_n].append(Z_over_X)
Z_over_Z0_list[Z_n].append(Z_over_Z0)
XXX_list[Z_n].append(XXX)
YYY_list[Z_n].append(YYY)
ZZZ_list[Z_n].append(ZZZ)
Fe_over_H_list[Z_n].append(Fe_over_H)
O_over_Fe_list[Z_n].append(O_over_Fe)
(i) = (i - 1)
return
def function_get_Mfinal(Mfinal_string):
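    # The loop below copies every character of the input string, so the result is simply
    # float(Mfinal_string); it is written in the same style as the other string parsers in this file.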
i_end = len(Mfinal_string)
i = 0
mass_str = ''
while i < i_end:
mass_str += Mfinal_string[i]
(i) = (i + 1)
mass = float(mass_str)
return mass
def function_get_element_mass(element_mass_string):
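    # The loop starts at index 1, so the leading character of the field (presumably the '&' prefix
    # used in the yield tables) is dropped before the value is converted to float.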
i_end = len(element_mass_string)
i = 1
mass_str = ''
while i < i_end:
mass_str += element_mass_string[i]
(i) = (i + 1)
mass = float(mass_str)
return mass
def function_get_element_line_number(data, element):
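    # Returns the line offset of the '&<element>' row below the first 'Table:' header (assumed to lie
    # within the first 100 lines of the file); the same offset is reused for every table block.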
i = 0
while i < len(data):
line_i = str.split(data[i])
if line_i[1] == 'Table:':
start = i
j = 0
while j < 100:
line_j = str.split(data[j])
if line_j[0] == '&'+element:
end = j
element_relative_line_number = j - i
break
(j) = (j+1)
break
(i) = (i + 1)
return element_relative_line_number
def function_get_Z_M(M_Z_string):
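    # Parse the initial mass M and the metallicity Z from a header field of the form
    # '(M=<mass>,Z=<metallicity>)' (format inferred from the character positions used below).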
i = 0
i_M_start = 0
i_M_end = 0
i_Z_start = 0
i_Z_end = 0
while i < len(M_Z_string):
if M_Z_string[i] == 'M':
i_M_start = i+2
if M_Z_string[i] == ',':
i_M_end = i
i_Z_start = i+3
if M_Z_string[i] == ')':
i_Z_end = i
(i) = (i+1)
i = i_Z_start
Z_str = ''
while i < i_Z_end:
Z_str += M_Z_string[i]
(i) = (i + 1)
Z = float(Z_str)
i = i_M_start
M_str = ''
while i < i_M_end:
M_str += M_Z_string[i]
(i) = (i + 1)
M = float(M_str)
return (Z, M)
def function_plot_yields():
global O_over_Mg_list, Mg_over_Fe_list, C_over_H_list, Mg_over_H_list, Si_over_H_list, Fe_over_H_list, O_over_H_list, Z_over_X_list, Z_over_Z0_list, \
Z_over_H_list, O_over_Fe_list, M_list, Z_list, XXX_list, YYY_list, ZZZ_list
color_list_ = []
for i in range(len(Z_list)):
ZZZ = Z_list[i]
if ZZZ > 0:
Z_box = math.log(ZZZ, 10) - math.log(0.01886, 10)
else:
Z_box = -6
color_list_.append(round(((Z_box+7)**4.001 - (-6.001 + 7) ** 4.001) / ((1 + 7) ** 4.001 - (-6.001 + 7) ** 4.001) * 1000))
colors = plt.cm.hsv_r(np.linspace(0, 1, 1000))
j = 0
while j < len(M_list):
i = 0
while i < len(M_list[j]):
M_list[j][i] = math.log(M_list[j][i], 10)
(i) = (i+1)
(j) = (j+1)
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(1, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# # plt.ylim(0, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], O_over_Mg_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# O_mass_eject_SNIa = 0.148 # TNH93 0.148 i99CDD1 0.09, i99CDD2 0.06, i99W7 0.14, ivo12/13 0.09-0.1, t03 0.14, t86 0.13
# Mg_mass_eject_SNIa = 0.009 # TNH93 0.009 i99CDD1 0.0077, i99CDD2 0.0042, i99W7 0.0085, ivo12/13 0.015-0.029, t03 0.013, t86 0.016
# O_num = O_mass_eject_SNIa / 15.9994
# Mg_num = Mg_mass_eject_SNIa / 24.305
# O_over_Mg_SNIa = math.log(O_num / Mg_num, 10) - O_abundances_solar + Mg_abundances_solar
# plt.plot([-0.3, 0.9], [O_over_Mg_SNIa, O_over_Mg_SNIa], ls="--", lw=2, label="SNIa")
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[O/Mg]')
# plt.tight_layout()
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(2, figsize=(4, 3.5))
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Mg_over_Fe_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
    Mg_mass_eject_SNIa = 0.0158 # SNIa Mg yield adopted here; see the comments on the O and Fe yields above for the table abbreviations
Fe_mass_eject_SNIa = 0.68 #0.63 # Recchi2009 halfed to 0.372 # TNH93 0.744 i99CDD1 0.56, i99CDD2 0.76, i99W7 0.63, ivo12/13 0.62-0.67, t03 0.74, t86 0.63
Ca_mass_eject_SNIa = 0.0181
Si_mass_eject_SNIa = 0.142
Ca_num = Ca_mass_eject_SNIa / 40.078
Si_num = Si_mass_eject_SNIa / 28.085
Mg_num = Mg_mass_eject_SNIa / 24.305
Fe_num = Fe_mass_eject_SNIa / 55.845
Mg_over_Fe_SNIa = math.log(Mg_num / Fe_num, 10) - Mg_abundances_solar + Fe_abundances_solar
Si_over_Fe_SNIa = math.log(Si_num / Fe_num, 10) - Si_abundances_solar + Fe_abundances_solar
Ca_over_Fe_SNIa = math.log(Ca_num / Fe_num, 10) - Ca_abundances_solar + Fe_abundances_solar
# plt.plot([-0.3, 0.9], [Mg_over_Fe_SNIa, Mg_over_Fe_SNIa], ls="--", lw=2, label="SNIa")
# plt.plot([-2, 3], [0, 0], lw=0.5, ls='dotted')
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 3.5)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$ [$M_\odot$])')
# plt.ylabel(r'[Mg/Fe]')
# plt.tight_layout()
# plt.savefig('steller_yield_Mg_over_Fe.pdf', dpi=250)
#
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(3, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 7)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], O_over_Fe_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# O_over_Fe_SNIa = math.log(O_num / Fe_num, 10) - O_abundances_solar + Fe_abundances_solar
# plt.plot([-0.3, 0.9], [O_over_Fe_SNIa, O_over_Fe_SNIa], ls="--", lw=2, label="SNIa")
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[O/Fe]')
# plt.tight_layout()
# #
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(4, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Mg_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Mg/H]')
# plt.tight_layout()
# #
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(42, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Si_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Si/H]')
# plt.tight_layout()
# #
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(41, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# # plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], C_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[C/H]')
# plt.tight_layout()
# plt.savefig('steller_yield_Mg.pdf', dpi=250)
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(5, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], O_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[O/H]')
# plt.tight_layout()
# # plt.savefig('steller_yield_O.pdf', dpi=250)
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(6, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list)-1
# while i > -1:
# plt.plot(M_list[i], Fe_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Fe/H]')
# plt.tight_layout()
# # plt.savefig('steller_yield_Fe.pdf', dpi=250)
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(7, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Z_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Z/H]')
# plt.title("Number ratio")
# plt.tight_layout()
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(8, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Z_over_X_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Z/X]')
# plt.title("Mass ratio")
# plt.tight_layout()
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(11, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(0.23, 0.6)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], YYY_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# # plt.plot([-2, 3], [0.25, 0.25], lw=0.5)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel('Y')
# plt.tight_layout()
# # plt.savefig('steller_yield_Y.pdf', dpi=250)
##########
fig, axs = plt.subplots(3, 1, sharex=True, figsize=(3, 4))
# i = len(M_list) - 1
# while i > -1:
# axs[0].plot(M_list[i], Z_over_Z0_list[i], lw=(i+2)/2, color=colors[color_list_[i]])
# (i) = (i - 1)
# axs[0].plot([-2, 3], [0, 0], lw=0.7, ls='dotted')
# # axs[0].set_yticks(np.arange(-1, 2.1, 1))
# axs[0].set_ylim(-2, 1.6)
# axs[0].set_ylabel(r'[Z]')
#
# i = len(M_list) - 1
# while i > -1:
# # axs[1].plot(M_list[i], XXX_list[i], lw=(i+2)/2, color=colors[color_list_[i]])
# axs[1].plot(M_list[i], YYY_list[i], lw=(i+2)/2, color=colors[color_list_[i]])
# # axs[1].plot(M_list[i], ZZZ_list[i], lw=(i+2)/2, color=colors[color_list_[i]])
# (i) = (i - 1)
# axs[1].plot([-2, 3], [0.273, 0.273], lw=0.7, ls='dotted')
# # axs[1].set_yticks(np.arange(0.2, 0.61, 0.1))
# axs[1].set_ylim(0.24, 0.605)
# axs[1].set_xlim(-0.5, 2.2)
# axs[1].set_ylabel('Y')
# axs[0].plot([1.3073, 1.3073], [-0.1, 1.7], lw=0.2)
axs[0].axvspan(1.3073, 3, alpha=0.2, color='red')
i = len(M_list) - 1
while i > -1:
ZZZ = Z_list[i]
if ZZZ > 0:
Z_box = round(math.log(ZZZ, 10) - math.log(0.01886, 10), 2)
else:
Z_box = -6
M_list[i].insert(0, math.log(150, 10))
Mg_over_Fe_list[i].insert(0, Mg_over_Fe_list[i][0])
axs[0].plot(M_list[i], Mg_over_Fe_list[i], lw=2**i*0.7, label=r'$Z={}$'.format(ZZZ), color='k', ls=['-', 'dashed', 'dotted'][i])
(i) = (i - 1)
# axs[0].plot([-0.3, 0.9], [Mg_over_Fe_SNIa, Mg_over_Fe_SNIa], ls="--", lw=1, label="SNIa", c='k')
# axs[0].plot([-2, 3], [0, 0], lw=0.7, ls='dotted')
# axs[0].set_yticks(np.arange(-2, 2.1, 2))
axs[0].set_xlim(0.7, 1.7)
axs[0].set_ylim(-0.1, 1.7)
axs[0].set_ylabel(r'[Mg/Fe]')
axs[0].set_xlabel(r'log$_{10}(M_{\rm *, initial}$ [$M_\odot$])')
axs[0].legend(prop={'size': 6}, loc='best')
axs[1].axvspan(1.3073, 3, alpha=0.2, color='red')
i = len(M_list) - 1
while i > -1:
Si_over_Fe_list[i].insert(0, Si_over_Fe_list[i][0])
axs[1].plot(M_list[i], Si_over_Fe_list[i], lw=2**i*0.7, label=r'$Z={}$'.format(ZZZ),
color='k', ls=['-', 'dashed', 'dotted'][i])
(i) = (i - 1)
# axs[1].plot([-0.3, 0.9], [Si_over_Fe_SNIa, Si_over_Fe_SNIa], ls="--", lw=1, label="SNIa", c='k')
# axs[1].plot([-2, 3], [0, 0], lw=0.7, ls='dotted')
# axs[1].set_yticks(np.arange(-2, 2.1, 2))
axs[1].set_ylim(-0.1, 1.7)
axs[1].set_ylabel(r'[Si/Fe]')
axs[1].set_xlabel(r'log$_{10}(M_{\rm *, initial}$ [$M_\odot$])')
# axs[1].legend(prop={'size': 6}, loc='best')
axs[2].axvspan(1.3073, 3, alpha=0.2, color='red')
i = len(M_list) - 1
while i > -1:
Ca_over_Fe_list[i].insert(0, Ca_over_Fe_list[i][0])
axs[2].plot(M_list[i], Ca_over_Fe_list[i], lw=2**i*0.7, label=r'$Z={}$'.format(ZZZ),
color='k', ls=['-', 'dashed', 'dotted'][i])
(i) = (i - 1)
# axs[2].plot([-0.3, 0.9], [Ca_over_Fe_SNIa, Ca_over_Fe_SNIa], ls="--", lw=1, label="SNIa", c='k')
# axs[2].plot([-2, 3], [0, 0], lw=0.7, ls='dotted')
# axs[2].set_yticks(np.arange(-2, 2.1, 2))
axs[2].set_ylim(-0.1, 1.7)
axs[2].set_ylabel(r'[Ca/Fe]')
axs[2].set_xlabel(r'log$_{10}(M_{\rm *, initial}$ [$M_\odot$])')
# axs[2].legend(prop={'size': 6}, loc='best')
plt.tight_layout()
# Remove horizontal space between axes
fig.subplots_adjust(hspace=0)
plt.savefig('stellar_yields.pdf', dpi=250)
plt.show()
return
if __name__ == '__main__':
start_time = time.time()
Z_list = []
M_list = []
eject_mass_list = []
H_eject_mass_list = []
He_eject_mass_list = []
C_eject_mass_list = []
N_eject_mass_list = []
O_eject_mass_list = []
Ne_eject_mass_list = []
Mg_eject_mass_list = []
Si_eject_mass_list = []
S_eject_mass_list = []
Ca_eject_mass_list = []
Fe_eject_mass_list = []
Metal_eject_mass_list = []
O_over_Mg_list = []
Mg_over_H_list = []
Si_over_H_list = []
C_over_H_list = []
Fe_over_H_list = []
O_over_H_list = []
Z_over_H_list = []
Z_over_X_list = []
Z_over_Z0_list = []
XXX_list = []
YYY_list = []
ZZZ_list = []
Mg_over_Fe_list = []
Si_over_Fe_list = []
Ca_over_Fe_list = []
O_over_Fe_list = []
yield_table_name = "Kobayashi06" # being "WW95" or "portinari98" or "marigo01"
function_read_file(yield_table_name)
    function_plot_yields()
plot_lifetime_and_finalmass()
print(" - Run time: %s -" % round((time.time() - start_time), 2))
|
gpl-3.0
|
sangwook236/SWDT
|
sw_dev/python/ext/test/high_performance_computing/spark/pyspark_database.py
|
2
|
5564
|
#!/usr/bin/env python
from pyspark.sql import SparkSession
import pyspark.sql.types as types
import pyspark.sql.functions as func
import traceback, sys
# REF [site] >> https://spark.apache.org/docs/latest/sql-programming-guide.html#jdbc-to-other-databases
def sqlite_jdbc():
spark = SparkSession.builder.appName('sqlite-jdbc') \
.config('spark.jars.packages', 'org.xerial:sqlite-jdbc:3.23.1') \
.getOrCreate()
#spark = SparkSession.builder.appName('sqlite-jdbc') \
# .config('spark.jars', 'sqlite-jdbc-3.23.1.jar') \
# .getOrCreate()
spark.sparkContext.setLogLevel('WARN')
# REF [site] >> https://spark.apache.org/docs/latest/sql-programming-guide.html#pyspark-usage-guide-for-pandas-with-apache-arrow
# Enable Arrow-based columnar data transfers.
spark.conf.set('spark.sql.execution.arrow.enabled', 'true')
if False:
#db_url = 'jdbc:sqlite:/path/to/dbfile' # File DB.
df = spark.read \
.format('jdbc') \
.option('url', 'jdbc:sqlite:iris.db') \
.option('driver', 'org.sqlite.JDBC') \
.option('dbtable', 'iris') \
.load()
elif False:
# REF [site] >> https://www.sqlite.org/inmemorydb.html
#db_url = 'jdbc:sqlite::memory:' # In-memory DB.
db_url = 'jdbc:sqlite::memory:?cache=shared' # Shared in-memory DB.
#db_url = 'jdbc:sqlite:dbname?mode=memory&cache=shared' # Named, shared in-memory DB.
# NOTE [error] >> Requirement failed: Option 'dbtable' is required.
# NOTE [error] >> SQL error or missing database (no such table: test123).
df = spark.read \
.format('jdbc') \
.option('url', db_url) \
.option('driver', 'org.sqlite.JDBC') \
.option('dbtable', 'test123') \
.load()
else:
rdd = spark.sparkContext.parallelize([
(123, 'Katie', 19, 'brown'),
(234, 'Michael', 22, 'green'),
(345, 'Simone', 23, 'blue')
])
# Specify schema.
schema = types.StructType([
types.StructField('id', types.LongType(), True),
types.StructField('name', types.StringType(), True),
types.StructField('age', types.LongType(), True),
types.StructField('eyeColor', types.StringType(), True)
])
df = spark.createDataFrame(rdd, schema)
df.show()
# NOTE [info] >> It seems that only file DB of SQLite can be used in Spark.
db_url = 'jdbc:sqlite:test.sqlite' # File DB.
# Isolation level: NONE, READ_COMMITTED, READ_UNCOMMITTED, REPEATABLE_READ, SERIALIZABLE.
# REF [site] >> https://stackoverflow.com/questions/16162357/transaction-isolation-levels-relation-with-locks-on-table
df.write \
.format('jdbc') \
.mode('overwrite') \
.option('url', db_url) \
.option('driver', 'org.sqlite.JDBC') \
.option('dbtable', 'swimmers') \
.option('isolationLevel', 'NONE') \
.save()
#df.write.jdbc(url=db_url, table='test', mode='overwrite', properties={'driver': 'org.sqlite.JDBC'})
df1 = df.withColumn('gender', func.lit(0))
df2 = spark.createDataFrame(
[(13, 'Lucy', 12, 'brown'), (37, 'Brian', 47, 'black')],
('id', 'name', 'age', 'eyeColor')
)
df2.write \
.format('jdbc') \
.mode('append') \
.option('url', db_url) \
.option('driver', 'org.sqlite.JDBC') \
.option('dbtable', 'swimmers') \
.option('isolationLevel', 'NONE') \
.save()
def mysql_jdbc():
spark = SparkSession.builder.appName('mysql-jdbc') \
.config('spark.jars.packages', 'mysql:mysql-connector-java:8.0.12') \
.getOrCreate()
#spark = SparkSession.builder.appName('mysql-jdbc') \
# .config('spark.jars', 'mysql-connector-java-8.0.12-bin.jar') \
# .getOrCreate()
spark.sparkContext.setLogLevel('WARN')
# REF [site] >> https://spark.apache.org/docs/latest/sql-programming-guide.html#pyspark-usage-guide-for-pandas-with-apache-arrow
# Enable Arrow-based columnar data transfers.
spark.conf.set('spark.sql.execution.arrow.enabled', 'true')
df = spark.read \
.format('jdbc') \
.option('url', 'jdbc:mysql://host:3306/dbname?characterEncoding=UTF-8&serverTimezone=UTC') \
.option('driver', 'com.mysql.cj.jdbc.Driver') \
.option('dbtable', 'tablename') \
.option('user', 'username') \
.option('password', 'password') \
.load()
df.show()
def sql_basic():
spark = SparkSession.builder.appName('dataframe-operation').getOrCreate()
spark.sparkContext.setLogLevel('WARN')
df = spark.createDataFrame(
[(123, 'Katie', 19, 'brown'), (234, 'Michael', 22, 'green'), (345, 'Simone', 23, 'blue')],
('id', 'name', 'age', 'eyeColor')
)
#df.printSchema()
#df.cache()
df.createOrReplaceTempView('swimmers') # DataFrame -> SQL.
#df1 = spark.sql('select * from swimmers') # SQL -> DataFrame.
spark.sql('select * from swimmers where age >= 20').show()
#spark.catalog.dropTempView('swimmers')
def main():
#sqlite_jdbc()
#mysql_jdbc()
sql_basic()
#%%------------------------------------------------------------------
# Usage:
# python pyspark_database.py
# spark-submit --packages mysql:mysql-connector-java:8.0.12,org.xerial:sqlite-jdbc:3.23.1 pyspark_database.py
# spark-submit --master local[4] --packages mysql:mysql-connector-java:8.0.12,org.xerial:sqlite-jdbc:3.23.1 pyspark_database.py
# spark-submit --master spark://host:7077 --packages mysql:mysql-connector-java:8.0.12,org.xerial:sqlite-jdbc:3.23.1 --executor-memory 10g pyspark_database.py
if '__main__' == __name__:
try:
main()
except:
#ex = sys.exc_info() # (type, exception object, traceback).
##print('{} raised: {}.'.format(ex[0], ex[1]))
#print('{} raised: {}.'.format(ex[0].__name__, ex[1]))
#traceback.print_tb(ex[2], limit=None, file=sys.stdout)
#traceback.print_exception(*sys.exc_info(), limit=None, file=sys.stdout)
traceback.print_exc(limit=None, file=sys.stdout)
|
gpl-3.0
|
Encesat/LumexData
|
LumexData.py
|
1
|
17585
|
#!/usr/bin/python2
#LumexData.py
import datetime as dt
import numpy as np
import scipy.optimize as opt
import matplotlib.pyplot as plt
omitFlagged = True
def storeAsDatetime(date, time):
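    # Expects date as "DD.MM.YYYY" and time as "HH:MM:SS" (format inferred from the field order below).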
day = int(date.split(".")[0])
month = int(date.split(".")[1])
year = int(date.split(".")[2])
hour = int(time.split(":")[0])
minute = int(time.split(":")[1])
second = int(time.split(":")[2])
return dt.datetime(year, month, day, hour, minute, second)
#Calculate the number of days of the year that have passed before the given month (including the leap day)
def month(mon, yea):
if(mon == 1):
days = 0
elif(mon == 2):#January 31 days
days = 31
elif(mon == 3):#February 28 days
days = 59
elif(mon == 4):#March 31 days
days = 90
elif(mon == 5):#April 30 days
days = 120
elif(mon == 6):#May 31 days
days = 151
elif(mon == 7):#June 30 days
days = 181
elif(mon == 8):#July 31 days
days = 212
elif(mon == 9):#August 31 days
days = 243
elif(mon == 10):#September 30 days
days = 273
elif(mon == 11):#October 31 days
days = 304
elif(mon == 12):#November 30 days
days = 334
    if(mon > 2 and (yea % 4 == 0 and yea % 100 != 0 or yea % 400 == 0)): #add the leap day only once February has passed
        days = days + 1
return days
def calcDays(date):
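    # Rough day counter used only to detect day changes between consecutive records: 365 days per year
    # plus the days elapsed in the current year. Leap days are not accumulated across years, so this is
    # not an exact Julian day number.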
years = 365*(date.year)
months = month(date.month, date.year)
days = date.day
return years+months+days
def calcDatetime(days):
pass
##
#
# This program contains the class LumexData. This class
#
# - stores the content of the calibration file (__init__)
# - flags the data (flagging) (Not implemented, because no flagging criteria are available)
# - arranges the accepted and flagged data in columns of time and concentration (storeTimeConc)
# - counts the accepted measurements per day and calculates the daily means of the data set (length & averaging)
# - creates a column with the date and the daily means, and flags the daily means for which the number of accepted measurements is smaller than 216 (averaging)
#
# A usage sketch is appended at the end of this file.
class LumexData:
## Constructor of LumexData
#
    # Reads the content of the calibration file and stores it as a list of dicts
def __init__(self, calibrationfile="none", filedescriptor="none"):
self.__lumexdata = []
self.averaged = False
#Open the calibrationfile and read the content.
#The file should be stored as a plain text format
if(calibrationfile != "none"):
f = open(calibrationfile, "r")
calibration = f.readlines()
elif(filedescriptor != "none"):
calibration = filedescriptor.readlines()
for line in calibration:
if(len(line.split(" ")) != 7):
continue
#Store date and time as datetime
x = storeAsDatetime(line.split(" ")[0], line.split(" ")[1])
self.__lumexdata.append({"date": x, "time_dec": float(line.split(" ")[2]), "zero_span": float(line.split(" ")[3]), \
"calib_factor": float(line.split(" ")[4]), "temperature": float(line.split(" ")[5]), \
"concentration": float(line.split(" ")[6]), "flag": -1, "standarddeviation": 0, "counter": 0})
return
#END OF __init__()
    ## Help function of LumexData
#
# Explains the class LumexData
def help(self):
f = open("README", "r")
cont = f.readlines()
for element in cont:
print(element)
return
#END OF help()
## Getter of LumexData
#
# Return the data of __lumexdata
def get(self, elementnumber, key):
if(type(elementnumber) is str):
output = []
try:
start = int(elementnumber.split(":")[0])
end = int(elementnumber.split(":")[1])
except AttributeError:
pass
raw_keys = key.split(",")
keys = []
for element in raw_keys:
keys.append(element)
for i in range(start, end):
for element in keys:
output.append(self.__lumexdata[i][element])
return output
elif(elementnumber != -1 and key != "all"):
return self.__lumexdata[elementnumber][key]
elif(elementnumber != -1 and key == "all"):
return self.__lumexdata[elementnumber]
elif(elementnumber == -1 and key != "all"):
output = []
for i in range(len(self.__lumexdata)):
output.append(self.__lumexdata[i][key])
return output
elif(elementnumber == -1 and key == "all"):
output = []
for i in range(len(self.__lumexdata)):
output.append(self.__lumexdata[i])
return output
return
#END OF get()
    ## Length of LumexData.__lumexdata
#
# Return the number of values in the object
def length(self):
return len(self.__lumexdata)
#END OF length()
## Save the data to a txt-file
#
# Stores the time and the concentration
def storeTimeConc(self, filename, ran="all"):
f = open(filename, "w")
g = open("flagged_"+filename,"w")
if(ran != "all"):
start = int(ran.split(":")[0])
end = int(ran.split(":")[1])
else:
start = 0
end = len(self.__lumexdata)
f.write("1. Date\n2. Time in decimal\n3. Concentration\n")
for i in range(len(self.__lumexdata)):
if(i >= start and i < end):
if(self.__lumexdata[i]["flag"] == -1):
f.write("{} {}\t{}\n".format(self.__lumexdata[i]["date"], self.__lumexdata[i]["time_dec"], self.__lumexdata[i]["concentration"]))
else:
g.write("{} {}\t{}\t{}\n".format(self.__lumexdata[i]["date"], self.__lumexdata[i]["time_dec"], self.__lumexdata[i]["concentration"], self.__lumexdata[i]["flag"]))
f.close()
return
## Flag the data
#
# Flags the data by the given criteria. criteria has to be a textfile
def flagging(self, filename="Flagged.dat", criteria=None):
f = open(filename, "w")
        flag = [-1 for x in range(len(self.__lumexdata))] #no flagging criteria available yet, so keep every value accepted (-1)
#Here flag the data by the given criteria
for line in self.__lumexdata:
f.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(line["date"], line["time_dec"], line["zero_span"], line["calib_factor"], line["temperature"], line["concentration"], line["flag"]))
f.close()
for i in range(len(self.__lumexdata)):
self.__lumexdata[i]["flag"] = flag[i]
return
## Averaging the data
#
# Group the data for each day and calculate the mean, print them to a file (and return the output to the calling function)
def averaging(self, ran="all", overwrite=False):
f = open("averagedOutput.txt", "w")
givendate = calcDays(self.__lumexdata[0]["date"])
print(givendate)
#givendate = 365*(self.__lumexdata[0]["date"].year - 1900) + month(self.__lumexdata[0]["date"].month, self.__lumexdata[0]["date"].year) + self.__lumexdata[0]["date"].day
dummylist = []
averaged = []
errors = []
dates = []
flag = []
counter = 0
if(ran != "all"):
start = int(ran.split(":")[0])
end = int(ran.split(":")[1])
else:
start = 0
end = len(self.__lumexdata)
i = start
#for i in range(start, end): #Iterate over the whole data
while(i < end):
#mydate = 365*(self.__lumexdata[i]["date"].year - 1900) + month(self.__lumexdata[i]["date"].month, self.__lumexdata[i]["date"].year) + self.__lumexdata[i]["date"].day
mydate = calcDays(self.__lumexdata[i]["date"])
#print(mydate)
if(mydate == givendate):
#Omit the flagged data (if any)
if(omitFlagged and self.__lumexdata[i]["flag"] == -1):
dummylist.append(self.__lumexdata[i]["concentration"])
counter = counter + 1
else:
date = "{}.{}.{}".format(self.__lumexdata[i-1]["date"].day, self.__lumexdata[i-1]["date"].month, self.__lumexdata[i-1]["date"].year)
if(counter >= 216):
f.write("{}\t{}\t{}\t{}\n".format(date, np.mean(dummylist), np.std(dummylist), counter))
else:
f.write("{}\t{}\t{}\t{}\t###\n".format(date, np.mean(dummylist), np.std(dummylist), counter))
givendate = mydate
averaged.append(np.mean(dummylist))
errors.append(np.std(dummylist))
dates.append(dt.datetime.strptime(date, "%d.%m.%Y"))
                flag.append(0 if counter < 216 else -1)
if(omitFlagged and self.__lumexdata[i]["flag"] == -1):
dummylist = [self.__lumexdata[i]["concentration"]]
counter = 1
i = i + 1
if(counter != 0):
date = "{}.{}.{}".format(self.__lumexdata[end-1]["date"].day, self.__lumexdata[end-1]["date"].month, self.__lumexdata[end-1]["date"].year)
if(counter >= 216):
f.write("{}\t{}\t{}\t{}\n".format(date, np.mean(dummylist), np.std(dummylist), counter))
else:
f.write("{}\t{}\t{}\t{}\t###\n".format(date, np.mean(dummylist), np.std(dummylist), counter))
averaged.append(np.mean(dummylist))
errors.append(np.std(dummylist))
dates.append(dt.datetime.strptime(date, "%d.%m.%Y"))
            flag.append(0 if counter < 216 else -1)
dummylist = []
counter = 0
f.close()
#Overwrite the content of lumexdata
if(overwrite):
f = open("averagedOutput.txt", "r")
content = f.readlines()
f.close()
self.__lumexdata = [dict([("date", 0), ("concentration", 0), ("standarddeviation", 0), ("counter", 0), ("flag", 0)]) for x in range(len(content))]
for i in range(len(content)):
self.__lumexdata[i]["date"] = dt.datetime(int(content[i].split("\t")[0].split(".")[2]), int(content[i].split("\t")[0].split(".")[1]), int(content[i].split("\t")[0].split(".")[0]))
self.__lumexdata[i]["concentration"] = float(content[i].split("\t")[1])
self.__lumexdata[i]["standarddeviation"] = float(content[i].split("\t")[2])
self.__lumexdata[i]["counter"] = int(content[i].split("\t")[3])
self.__lumexdata[i]["flag"] = 99 if int(content[i].split("\t")[3]) < 216 else -1
self.averaged = True
return [averaged, errors, dates, flag]
#END OF average()
#Calculate fit using scipy.optimize.leastsq. The x-axis is the number of measurements. Fit as
#sinusoidal function
#linear
#polynomial (2 - 6)
#exponential
#logarithm
#gauss
#Parameter has to be a list with initial parameters
def __fitting(self, parameter, daily=False, ran="all", typ="trig", averaged=0, errors=0, av_date=0, flag=0):
dates = []
conc = []
standarddeviation = []
if(ran != "all"):
begin = ran.split(":")[0]
end = ran.split(":")[1]
begin = dt.datetime(int(begin.split(".")[2]), int(begin.split(".")[1]), int(begin.split(".")[0]))
end = dt.datetime(int(end.split(".")[2]), int(end.split(".")[1]), int(end.split(".")[0]))
else:
begin = self.__lumexdata[0]["date"]
end = self.__lumexdata[-1]["date"]
if(averaged == 0):
for i in range(len(self.__lumexdata)):
if(self.__lumexdata[i]["date"] >= begin and self.__lumexdata[i]["date"] < end or ran == "all"):
if(self.__lumexdata[i]["flag"] == -1):
dates.append(self.__lumexdata[i]["date"])
conc.append(self.__lumexdata[i]["concentration"])
else:
#[averaged, errors, av_date, flag] = self.averaging()
standarddeviation = []
for i in range(len(averaged)):
if(av_date[i] >= begin and av_date[i] < end or ran == "all"):
if(flag[i] == -1):
dates.append(av_date[i])
conc.append(averaged[i])
standarddeviation.append(errors[i])
array = np.linspace(0,len(dates)-1,len(dates))
#FITTING
if(typ == "trig"):
fitfunc = lambda parameter, x: parameter[0] * np.cos(2*np.pi / parameter[1]*x + parameter[2]) + parameter[3]*x
elif(typ == "lin"):
fitfunc = lambda parameter, x: parameter[0] * x + parameter[1]
elif(typ == "poly2"):
fitfunc = lambda parameter, x: parameter[0] * x**2 + parameter[1] * x + parameter[2]
elif(typ == "poly3"):
fitfunc = lambda parameter, x: parameter[0] * x**3 + parameter[1] * x**2 + parameter[2] * x + parameter[3]
elif(typ == "poly4"):
fitfunc = lambda parameter, x: parameter[0] * x**4 + parameter[1] * x**3 + parameter[2] * x**2 + parameter[3] * x + parameter[4]
elif(typ == "poly5"):
fitfunc = lambda parameter, x: parameter[0] * x**5 + parameter[1] * x**4 + parameter[2] * x**3 + parameter[3] * x**2 + parameter[4] * x + parameter[5]
elif(typ == "poly6"):
fitfunc = lambda parameter, x: parameter[0] * x**6 + parameter[1] * x**5 + parameter[2] * x**4 + parameter[3] * x**3 + parameter[4] * x**2 + parameter[5] * x + parameter[6]
elif(typ == "exp"):
fitfunc = lambda parameter, x: parameter[0] * np.exp(parameter[1] * x + parameter[2]) + parameter[3] * x + parameter[4]
elif(typ == "log"):
fitfunc = lambda parameter, x: parameter[0] * np.log(x) / np.log(parameter[1]) + parameter[2] * x + parameter[3]
elif(typ == "gauss"):
fitfunc = lambda parameter, x: 1 / (parameter[0] * np.sqrt(2 * np.pi)) * np.exp(-0.5 * ((x - parameter[1])/(parameter[0]))**2)
errfunc = lambda parameter, x, y: fitfunc(parameter,x) - y
p1, success = opt.leastsq(errfunc, parameter[:], args=(array, conc))
return [fitfunc, p1, array]
#Plotting without fit. If daily=True, use the daily mean, otherwise use all unflagged data
def plotting(self, title="Default", xlabel="x-Axis", ylabel="y-Axis", daily=False, ran="all", axessize=10, fsize=10, msize=10, colour="#000000", markerstyle="h", leastsq=False, typ="lin", parameter=[1,1], averaged=0, errors=0, av_date=0, flag=[]):
dates = []
conc = []
if(ran != "all"):
begin = ran.split(":")[0]
end = ran.split(":")[1]
begin = dt.datetime(int(begin.split(".")[2]), int(begin.split(".")[1]), int(begin.split(".")[0]))
end = dt.datetime(int(end.split(".")[2]), int(end.split(".")[1]), int(end.split(".")[0]))
else:
begin = self.__lumexdata[0]["date"]
end = self.__lumexdata[-1]["date"]
if(averaged == 0):
for i in range(len(self.__lumexdata)):
if(self.__lumexdata[i]["date"] >= begin and self.__lumexdata[i]["date"] < end or ran == "all"):
if(self.__lumexdata[i]["flag"] == -1):
dates.append(self.__lumexdata[i]["date"])
conc.append(self.__lumexdata[i]["concentration"])
else:
standarddeviation = []
for i in range(len(averaged)):
if(av_date[i] >= begin and av_date[i] < end or ran == "all"):
if(flag[i] == -1):
dates.append(av_date[i])
conc.append(averaged[i])
standarddeviation.append(errors[i])
fig = plt.figure()
#NotForUsing, (sp1) = plt.subplots(1, 1, sharey=False)
#sp1.set_title(title, fontsize=fsize)
plt.title(title, fontsize=fsize)
if(averaged == 0):
            plt.plot(dates, conc, ls="None", marker=markerstyle, markersize=msize, color=colour)
# sp1.plot(dates, conc, ls=".", marker=markerstyle, markersize=msize, color=colour)
else:
plt.errorbar(dates, conc, yerr=standarddeviation, fmt=markerstyle, markersize=msize, color=colour)
# sp1.errorbar(dates, conc, yerr=standarddeviation, fmt=markerstyle, markersize=msize, color=colour)
if(leastsq):
[fitfunc, p1, array] = self.__fitting(parameter, daily, ran, typ=typ, averaged=averaged, errors=errors, av_date=av_date, flag=flag)
#Write fitparameters to a file
f = open("fitparams.txt", "w")
for i in range(len(p1)):
f.write("p{} = {}\n".format(i, p1[i]))
f.close()
plt.plot(dates, fitfunc(p1, array))
# sp1.plot(dates, fitfunc(p1, array))
#sp1.tick_params(labelsize=axessize)
plt.tick_params(labelsize=axessize)
plt.xlabel(xlabel, fontsize=fsize)
plt.ylabel(ylabel, fontsize=fsize)
#sp1.set_xlabel(xlabel, fontsize=fsize)
#sp1.set_ylabel(ylabel, fontsize=fsize)
#sp1.grid(True)
plt.grid(True)
plt.show()
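# Minimal usage sketch (illustrative only): the calibration file name is a placeholder and the plotting
# arguments are merely examples; adapt them to the actual data set.
if __name__ == "__main__":
    data = LumexData(calibrationfile="calibration.txt")  # hypothetical file in the format described above
    print("Number of records: {}".format(data.length()))
    data.storeTimeConc("time_concentration.txt")
    averaged, errors, dates, flag = data.averaging()
    data.plotting(title="Daily means", xlabel="Date", ylabel="Concentration",
                  averaged=averaged, errors=errors, av_date=dates, flag=flag)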
|
mit
|
chrsrds/scikit-learn
|
examples/applications/wikipedia_principal_eigenvector.py
|
10
|
7724
|
"""
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
https://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
https://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in scikit-learn.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from joblib import Memory
from sklearn.decomposition import randomized_svd
from urllib.request import urlopen
print(__doc__)
# #############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
# #############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = {source}
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = {i: name for name, i in index_map.items()}
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the Wikipedia pages with the strongest components in the
# principal singular vectors, which should be similar to the principal eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <hagberg@lanl.gov>
Dan Schult <dschult@colgate.edu>
Pieter Swart <swart@lanl.gov>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
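    # Rows that sum to zero correspond to dangling pages (no outgoing links); give them a uniform
    # 1/n transition probability so that the random-surfer chain stays stochastic.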
dangle = np.asarray(np.where(np.isclose(X.sum(axis=1), 0),
1.0 / n, 0)).ravel()
scores = np.full(n, 1. / n, dtype=np.float32) # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
|
bsd-3-clause
|
emon10005/scikit-image
|
setup.py
|
11
|
4995
|
#! /usr/bin/env python
descr = """Image Processing SciKit
Image processing algorithms for SciPy, including IO, morphology, filtering,
warping, color manipulation, object detection, etc.
Please refer to the online documentation at
http://scikit-image.org/
"""
DISTNAME = 'scikit-image'
DESCRIPTION = 'Image processing routines for SciPy'
LONG_DESCRIPTION = descr
MAINTAINER = 'Stefan van der Walt'
MAINTAINER_EMAIL = 'stefan@sun.ac.za'
URL = 'http://scikit-image.org'
LICENSE = 'Modified BSD'
DOWNLOAD_URL = 'http://github.com/scikit-image/scikit-image'
import os
import sys
import setuptools
from distutils.command.build_py import build_py
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# skimage __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-image to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKIMAGE_SETUP__ = True
with open('skimage/__init__.py') as fid:
for line in fid:
if line.startswith('__version__'):
VERSION = line.strip().split()[-1][1:-1]
break
with open('requirements.txt') as fid:
INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]
# requirements for those browsing PyPI
REQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]
REQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]
REQUIRES = [r.replace('[array]', '') for r in REQUIRES]
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(
ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('skimage')
config.add_data_dir('skimage/data')
return config
if __name__ == "__main__":
try:
from numpy.distutils.core import setup
extra = {'configuration': configuration}
# Do not try and upgrade larger dependencies
for lib in ['scipy', 'numpy', 'matplotlib', 'pillow']:
try:
__import__(lib)
INSTALL_REQUIRES = [i for i in INSTALL_REQUIRES
if lib not in i]
except ImportError:
pass
except ImportError:
if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info', '--version',
'clean')):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install scikit-image when Numpy is not yet
# present in the system.
pass
else:
print('To install scikit-image from source, you will need numpy.\n' +
'Install numpy with pip:\n' +
'pip install numpy\n'
'Or use your operating system package manager. For more\n' +
'details, see http://scikit-image.org/docs/stable/install.html')
sys.exit(1)
setup(
name=DISTNAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=URL,
license=LICENSE,
download_url=DOWNLOAD_URL,
version=VERSION,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: C',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
],
install_requires=INSTALL_REQUIRES,
# install cython when running setup.py (source install)
setup_requires=['cython>=0.21'],
requires=REQUIRES,
packages=setuptools.find_packages(exclude=['doc']),
include_package_data=True,
        zip_safe=False,  # the package cannot be safely run from a zipped .egg file
entry_points={
'console_scripts': ['skivi = skimage.scripts.skivi:main'],
},
cmdclass={'build_py': build_py},
**extra
)
|
bsd-3-clause
|
francis-liberty/kaggle
|
Titanic/Benchmarks/myfirstforest.py
|
3
|
3926
|
""" Writing my first randomforest code.
Author : AstroDave
Date : 23rd September, 2012
please see packages.python.org/milk/randomforests.html for more
"""
import numpy as np
import csv as csv
from sklearn.ensemble import RandomForestClassifier
csv_file_object = csv.reader(open('train.csv', 'rb')) #Load in the training csv file
header = csv_file_object.next()  #Skip the first line as it is a header
train_data=[]  #Create a variable called 'train_data'
for row in csv_file_object:  #Step through each row in the csv file
train_data.append(row[1:]) #adding each row to the data variable
train_data = np.array(train_data) #Then convert from a list to an array
#I need to convert all strings to integer classifiers:
#Male = 1, female = 0:
train_data[train_data[0::,3]=='male',3] = 1
train_data[train_data[0::,3]=='female',3] = 0
#embark c=0, s=1, q=2
train_data[train_data[0::,10] =='C',10] = 0
train_data[train_data[0::,10] =='S',10] = 1
train_data[train_data[0::,10] =='Q',10] = 2
#I need to fill in the gaps of the data and make it complete.
#So where there is no price, I will assume the price is the median of that class
#Where there is no age I will give median of all ages
#All the ages with no data: set them to the median of the ages
train_data[train_data[0::,4] == '',4] = np.median(train_data[train_data[0::,4]\
!= '',4].astype(np.float))
#All missing embarkation values: make them embark from the most common place
train_data[train_data[0::,10] == '',10] = np.round(np.mean(train_data[train_data[0::,10]\
!= '',10].astype(np.float)))
train_data = np.delete(train_data,[2,7,9],1) #remove the name data, cabin and ticket
#I need to do the same with the test data now so that the columns are in the same
#order as the training data
test_file_object = csv.reader(open('test.csv', 'rb')) #Load in the test csv file
header = test_file_object.next()  #Skip the first line as it is a header
test_data=[]  #Create a variable called 'test_data'
ids = []
for row in test_file_object:  #Step through each row in the csv file
ids.append(row[0])
test_data.append(row[1:]) #adding each row to the data variable
test_data = np.array(test_data) #Then convert from a list to an array
#I need to convert all strings to integer classifiers:
#Male = 1, female = 0:
test_data[test_data[0::,2]=='male',2] = 1
test_data[test_data[0::,2]=='female',2] = 0
#embark c=0, s=1, q=2
test_data[test_data[0::,9] =='C',9] = 0 #Note this encoding is not ideal: it treats the categories as ordinal, so 2 is not twice as good as 1, nor 3 three times as good
test_data[test_data[0::,9] =='S',9] = 1
test_data[test_data[0::,9] =='Q',9] = 2
#All the ages with no data: set them to the median of the ages
test_data[test_data[0::,3] == '',3] = np.median(test_data[test_data[0::,3]\
!= '',3].astype(np.float))
#All missing embarkation values: make them embark from the most common place
test_data[test_data[0::,9] == '',9] = np.round(np.mean(test_data[test_data[0::,9]\
!= '',9].astype(np.float)))
#All the missing prices: assume the median of their respective class
for i in xrange(np.size(test_data[0::,0])):
if test_data[i,7] == '':
test_data[i,7] = np.median(test_data[(test_data[0::,7] != '') &\
(test_data[0::,0] == test_data[i,0])\
,7].astype(np.float))
test_data = np.delete(test_data,[1,6,8],1) #remove the name data, cabin and ticket
#The data is now ready to go. So lets train then test!
print 'Training '
forest = RandomForestClassifier(n_estimators=100)
forest = forest.fit(train_data[0::,1::],\
train_data[0::,0])
print 'Predicting'
output = forest.predict(test_data)
open_file_object = csv.writer(open("myfirstforest.csv", "wb"))
open_file_object.writerow(["PassengerId","Survived"])
open_file_object.writerows(zip(ids, output))
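#Illustrative sketch (not part of the original script): a rough way to
#sanity-check the forest before submitting. It assumes a scikit-learn of the
#same era that still provides sklearn.cross_validation.cross_val_score; the
#5-fold split below is this sketch's own choice, not the author's.
from sklearn.cross_validation import cross_val_score
cv_scores = cross_val_score(RandomForestClassifier(n_estimators=100),
                            train_data[0::,1::].astype(np.float),
                            train_data[0::,0].astype(np.float), cv=5)
print 'Cross-validation accuracy: %f (+/- %f)' % (cv_scores.mean(), cv_scores.std())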
|
gpl-2.0
|
pypot/scikit-learn
|
sklearn/decomposition/nmf.py
|
16
|
19101
|
""" Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
def check_non_negative(X, whom):
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
    n_components : int
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
        Truncate all values less than this in the output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
U, S, V = randomized_svd(X, n_components)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
random_state = check_random_state(random_state)
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
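# --- Illustrative sketch (not part of the original scikit-learn module) ---
# Minimal demonstration of the NNDSVD initializer above on a small random
# non-negative matrix; the helper name and toy data are made up for
# illustration and the function is never called by the library code.
def _nndsvd_init_example():
    """Return the reconstruction error of the NNDSVD initial guess W, H."""
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(6, 5))              # small non-negative data matrix
    W, H = _initialize_nmf(X, n_components=2, variant='a')
    # W H is only a rough approximation at this point; the NMF solvers
    # below refine it iteratively.
    return norm(X - np.dot(W, H))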
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
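# --- Illustrative sketch (not part of the original scikit-learn module) ---
# Tiny usage example of the projected-gradient NLS solver above; the toy
# matrices and the helper name are this sketch's own and are never used by
# the library code.
def _nls_subproblem_example():
    """Solve min_H ||W H - V||_2 subject to H >= 0 for a small exact problem."""
    rng = np.random.RandomState(0)
    W = np.abs(rng.randn(8, 3))
    H_true = np.abs(rng.randn(3, 4))
    V = np.dot(W, H_true)                    # noise-free target, so H_true is optimal
    H0 = 0.5 * np.ones((3, 4))               # feasible non-negative starting point
    H, grad, n_iter = _nls_subproblem(V, W, H0, tol=1e-4, max_iter=200)
    # The residual should shrink toward zero as the solver converges.
    return norm(H - H_true), n_iter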
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
        Degree of correctness to maintain, if sparseness is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
random_state = self.random_state
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a')
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar')
elif init == "random":
rng = check_random_state(random_state)
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
|
bsd-3-clause
|
rohit21122012/DCASE2013
|
runs/2016/baseline2016_mfcc_21/src/evaluation.py
|
56
|
43426
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import numpy
import sys
from sklearn import metrics
class DCASE2016_SceneClassification_Metrics():
"""DCASE 2016 scene classification metrics
Examples
--------
>>> dcase2016_scene_metric = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> y_true = []
>>> y_pred = []
>>> for result in results:
>>> y_true.append(dataset.file_meta(result[0])[0]['scene_label'])
>>> y_pred.append(result[1])
>>>
>>> dcase2016_scene_metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
>>>
>>> results = dcase2016_scene_metric.results()
"""
def __init__(self, class_list):
"""__init__ method.
Parameters
----------
class_list : list
Evaluated scene labels in the list
"""
self.accuracies_per_class = None
self.Nsys = None
self.Nref = None
self.class_list = class_list
self.eps = numpy.spacing(1)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
return self.results()
def accuracies(self, y_true, y_pred, labels):
"""Calculate accuracy
Parameters
----------
y_true : numpy.array
Ground truth array, list of scene labels
y_pred : numpy.array
System output array, list of scene labels
labels : list
list of scene labels
Returns
-------
array : numpy.array [shape=(number of scene labels,)]
Accuracy per scene label class
"""
confusion_matrix = metrics.confusion_matrix(y_true=y_true, y_pred=y_pred, labels=labels).astype(float)
return numpy.divide(numpy.diag(confusion_matrix), numpy.sum(confusion_matrix, 1) + self.eps)
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
accuracies_per_class = self.accuracies(y_pred=system_output, y_true=annotated_ground_truth,
labels=self.class_list)
if self.accuracies_per_class is None:
self.accuracies_per_class = accuracies_per_class
else:
self.accuracies_per_class = numpy.vstack((self.accuracies_per_class, accuracies_per_class))
Nref = numpy.zeros(len(self.class_list))
Nsys = numpy.zeros(len(self.class_list))
for class_id, class_label in enumerate(self.class_list):
for item in system_output:
if item == class_label:
Nsys[class_id] += 1
for item in annotated_ground_truth:
if item == class_label:
Nref[class_id] += 1
if self.Nref is None:
self.Nref = Nref
else:
self.Nref = numpy.vstack((self.Nref, Nref))
if self.Nsys is None:
self.Nsys = Nsys
else:
self.Nsys = numpy.vstack((self.Nsys, Nsys))
def results(self):
"""Get results
Outputs results in dict, format:
{
'class_wise_data':
{
'office': {
'Nsys': 10,
'Nref': 7,
},
}
'class_wise_accuracy':
{
'office': 0.6,
'home': 0.4,
}
'overall_accuracy': numpy.mean(self.accuracies_per_class)
'Nsys': 100,
'Nref': 100,
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {
'class_wise_data': {},
'class_wise_accuracy': {},
'overall_accuracy': numpy.mean(self.accuracies_per_class)
}
if len(self.Nsys.shape) == 2:
results['Nsys'] = int(sum(sum(self.Nsys)))
results['Nref'] = int(sum(sum(self.Nref)))
else:
results['Nsys'] = int(sum(self.Nsys))
results['Nref'] = int(sum(self.Nref))
for class_id, class_label in enumerate(self.class_list):
if len(self.accuracies_per_class.shape) == 2:
results['class_wise_accuracy'][class_label] = numpy.mean(self.accuracies_per_class[:, class_id])
results['class_wise_data'][class_label] = {
'Nsys': int(sum(self.Nsys[:, class_id])),
'Nref': int(sum(self.Nref[:, class_id])),
}
else:
results['class_wise_accuracy'][class_label] = numpy.mean(self.accuracies_per_class[class_id])
results['class_wise_data'][class_label] = {
'Nsys': int(self.Nsys[class_id]),
'Nref': int(self.Nref[class_id]),
}
return results
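# --- Illustrative sketch (not part of the original module) ----------------
# Minimal usage of DCASE2016_SceneClassification_Metrics with hand-made
# labels; the label lists below are toy data for illustration only and the
# helper is never called by the module itself.
def _scene_metrics_example():
    metric = DCASE2016_SceneClassification_Metrics(class_list=['home', 'office'])
    y_true = ['home', 'home', 'office', 'office']
    y_pred = ['home', 'office', 'office', 'office']
    metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
    # 'home' is recognized 1/2 times and 'office' 2/2 times, so the
    # class-averaged overall_accuracy should be roughly 0.75.
    return metric.results()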
class EventDetectionMetrics(object):
"""Baseclass for sound event metric classes.
"""
def __init__(self, class_list):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
"""
self.class_list = class_list
self.eps = numpy.spacing(1)
def max_event_offset(self, data):
"""Get maximum event offset from event list
Parameters
----------
data : list
Event list, list of event dicts
Returns
-------
max : float > 0
Maximum event offset
"""
max = 0
for event in data:
if event['event_offset'] > max:
max = event['event_offset']
return max
def list_to_roll(self, data, time_resolution=0.01):
"""Convert event list into event roll.
        Event roll is a binary matrix indicating event activity within time segments defined by time_resolution.
Parameters
----------
data : list
Event list, list of event dicts
time_resolution : float > 0
Time resolution used when converting event into event roll.
Returns
-------
        event_roll : numpy.ndarray [shape=(math.ceil(data_length * 1 / time_resolution) + 1, number of classes)]
Event roll
"""
# Initialize
data_length = self.max_event_offset(data)
event_roll = numpy.zeros((math.ceil(data_length * 1 / time_resolution) + 1, len(self.class_list)))
# Fill-in event_roll
for event in data:
pos = self.class_list.index(event['event_label'].rstrip())
onset = math.floor(event['event_onset'] * 1 / time_resolution)
offset = math.ceil(event['event_offset'] * 1 / time_resolution) + 1
event_roll[onset:offset, pos] = 1
return event_roll
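# --- Illustrative sketch (not part of the original module) ----------------
# Toy demonstration of list_to_roll, assuming the Python 2 / older NumPy
# environment this module targets; the event list below is made up for
# illustration and the helper is never called by the module itself.
def _event_roll_example():
    base = EventDetectionMetrics(class_list=['speech', 'door'])
    events = [
        {'event_label': 'speech', 'event_onset': 0.0, 'event_offset': 0.4},
        {'event_label': 'door', 'event_onset': 0.2, 'event_offset': 0.3},
    ]
    # With time_resolution=0.1 the roll has one row per 100 ms segment and
    # one column per class; a 1 marks the class as active in that segment.
    return base.list_to_roll(events, time_resolution=0.1)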
class DCASE2016_EventDetection_SegmentBasedMetrics(EventDetectionMetrics):
"""DCASE2016 Segment based metrics for sound event detection
Supported metrics:
- Overall
- Error rate (ER), Substitutions (S), Insertions (I), Deletions (D)
- F-score (F1)
- Class-wise
- Error rate (ER), Insertions (I), Deletions (D)
- F-score (F1)
Examples
--------
>>> overall_metrics_per_scene = {}
>>> for scene_id, scene_label in enumerate(dataset.scene_labels):
>>> dcase2016_segment_based_metric = DCASE2016_EventDetection_SegmentBasedMetrics(class_list=dataset.event_labels(scene_label=scene_label))
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, scene_label=scene_label, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> for file_id, item in enumerate(dataset.test(fold,scene_label=scene_label)):
>>> current_file_results = []
>>> for result_line in results:
>>> if result_line[0] == dataset.absolute_to_relative(item['file']):
>>> current_file_results.append(
>>> {'file': result_line[0],
>>> 'event_onset': float(result_line[1]),
>>> 'event_offset': float(result_line[2]),
>>> 'event_label': result_line[3]
>>> }
>>> )
>>> meta = dataset.file_meta(dataset.absolute_to_relative(item['file']))
>>> dcase2016_segment_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)
>>> overall_metrics_per_scene[scene_label]['segment_based_metrics'] = dcase2016_segment_based_metric.results()
"""
def __init__(self, class_list, time_resolution=1.0):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
time_resolution : float > 0
Time resolution used when converting event into event roll.
(Default value = 1.0)
"""
self.time_resolution = time_resolution
self.overall = {
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
'Nref': 0.0,
'Nsys': 0.0,
'ER': 0.0,
'S': 0.0,
'D': 0.0,
'I': 0.0,
}
self.class_wise = {}
for class_label in class_list:
self.class_wise[class_label] = {
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
'Nref': 0.0,
'Nsys': 0.0,
}
EventDetectionMetrics.__init__(self, class_list=class_list)
def __enter__(self):
# Initialize class and return it
return self
def __exit__(self, type, value, traceback):
# Finalize evaluation and return results
return self.results()
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
# Convert event list into frame-based representation
system_event_roll = self.list_to_roll(data=system_output, time_resolution=self.time_resolution)
annotated_event_roll = self.list_to_roll(data=annotated_ground_truth, time_resolution=self.time_resolution)
# Fix durations of both event_rolls to be equal
if annotated_event_roll.shape[0] > system_event_roll.shape[0]:
padding = numpy.zeros((annotated_event_roll.shape[0] - system_event_roll.shape[0], len(self.class_list)))
system_event_roll = numpy.vstack((system_event_roll, padding))
if system_event_roll.shape[0] > annotated_event_roll.shape[0]:
padding = numpy.zeros((system_event_roll.shape[0] - annotated_event_roll.shape[0], len(self.class_list)))
annotated_event_roll = numpy.vstack((annotated_event_roll, padding))
# Compute segment-based overall metrics
for segment_id in range(0, annotated_event_roll.shape[0]):
annotated_segment = annotated_event_roll[segment_id, :]
system_segment = system_event_roll[segment_id, :]
Ntp = sum(system_segment + annotated_segment > 1)
Ntn = sum(system_segment + annotated_segment == 0)
Nfp = sum(system_segment - annotated_segment > 0)
Nfn = sum(annotated_segment - system_segment > 0)
Nref = sum(annotated_segment)
Nsys = sum(system_segment)
S = min(Nref, Nsys) - Ntp
D = max(0, Nref - Nsys)
I = max(0, Nsys - Nref)
ER = max(Nref, Nsys) - Ntp
self.overall['Ntp'] += Ntp
self.overall['Ntn'] += Ntn
self.overall['Nfp'] += Nfp
self.overall['Nfn'] += Nfn
self.overall['Nref'] += Nref
self.overall['Nsys'] += Nsys
self.overall['S'] += S
self.overall['D'] += D
self.overall['I'] += I
self.overall['ER'] += ER
for class_id, class_label in enumerate(self.class_list):
annotated_segment = annotated_event_roll[:, class_id]
system_segment = system_event_roll[:, class_id]
Ntp = sum(system_segment + annotated_segment > 1)
Ntn = sum(system_segment + annotated_segment == 0)
Nfp = sum(system_segment - annotated_segment > 0)
Nfn = sum(annotated_segment - system_segment > 0)
Nref = sum(annotated_segment)
Nsys = sum(system_segment)
self.class_wise[class_label]['Ntp'] += Ntp
self.class_wise[class_label]['Ntn'] += Ntn
self.class_wise[class_label]['Nfp'] += Nfp
self.class_wise[class_label]['Nfn'] += Nfn
self.class_wise[class_label]['Nref'] += Nref
self.class_wise[class_label]['Nsys'] += Nsys
return self
def results(self):
"""Get results
Outputs results in dict, format:
{
'overall':
{
'Pre':
'Rec':
'F':
'ER':
'S':
'D':
'I':
}
'class_wise':
{
'office': {
'Pre':
'Rec':
'F':
'ER':
'D':
'I':
'Nref':
'Nsys':
'Ntp':
'Nfn':
'Nfp':
},
}
'class_wise_average':
{
'F':
'ER':
}
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {'overall': {},
'class_wise': {},
'class_wise_average': {},
}
# Overall metrics
results['overall']['Pre'] = self.overall['Ntp'] / (self.overall['Nsys'] + self.eps)
results['overall']['Rec'] = self.overall['Ntp'] / self.overall['Nref']
results['overall']['F'] = 2 * ((results['overall']['Pre'] * results['overall']['Rec']) / (
results['overall']['Pre'] + results['overall']['Rec'] + self.eps))
results['overall']['ER'] = self.overall['ER'] / self.overall['Nref']
results['overall']['S'] = self.overall['S'] / self.overall['Nref']
results['overall']['D'] = self.overall['D'] / self.overall['Nref']
results['overall']['I'] = self.overall['I'] / self.overall['Nref']
# Class-wise metrics
class_wise_F = []
class_wise_ER = []
for class_id, class_label in enumerate(self.class_list):
if class_label not in results['class_wise']:
results['class_wise'][class_label] = {}
results['class_wise'][class_label]['Pre'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nsys'] + self.eps)
results['class_wise'][class_label]['Rec'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['F'] = 2 * (
(results['class_wise'][class_label]['Pre'] * results['class_wise'][class_label]['Rec']) / (
results['class_wise'][class_label]['Pre'] + results['class_wise'][class_label]['Rec'] + self.eps))
results['class_wise'][class_label]['ER'] = (self.class_wise[class_label]['Nfn'] +
self.class_wise[class_label]['Nfp']) / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['D'] = self.class_wise[class_label]['Nfn'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['I'] = self.class_wise[class_label]['Nfp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['Nref'] = self.class_wise[class_label]['Nref']
results['class_wise'][class_label]['Nsys'] = self.class_wise[class_label]['Nsys']
results['class_wise'][class_label]['Ntp'] = self.class_wise[class_label]['Ntp']
results['class_wise'][class_label]['Nfn'] = self.class_wise[class_label]['Nfn']
results['class_wise'][class_label]['Nfp'] = self.class_wise[class_label]['Nfp']
class_wise_F.append(results['class_wise'][class_label]['F'])
class_wise_ER.append(results['class_wise'][class_label]['ER'])
results['class_wise_average']['F'] = numpy.mean(class_wise_F)
results['class_wise_average']['ER'] = numpy.mean(class_wise_ER)
return results
class DCASE2016_EventDetection_EventBasedMetrics(EventDetectionMetrics):
"""DCASE2016 Event based metrics for sound event detection
Supported metrics:
- Overall
- Error rate (ER), Substitutions (S), Insertions (I), Deletions (D)
- F-score (F1)
- Class-wise
- Error rate (ER), Insertions (I), Deletions (D)
- F-score (F1)
Examples
--------
>>> overall_metrics_per_scene = {}
>>> for scene_id, scene_label in enumerate(dataset.scene_labels):
>>> dcase2016_event_based_metric = DCASE2016_EventDetection_EventBasedMetrics(class_list=dataset.event_labels(scene_label=scene_label))
>>> for fold in dataset.folds(mode=dataset_evaluation_mode):
>>> results = []
>>> result_filename = get_result_filename(fold=fold, scene_label=scene_label, path=result_path)
>>>
>>> if os.path.isfile(result_filename):
>>> with open(result_filename, 'rt') as f:
>>> for row in csv.reader(f, delimiter='\t'):
>>> results.append(row)
>>>
>>> for file_id, item in enumerate(dataset.test(fold,scene_label=scene_label)):
>>> current_file_results = []
>>> for result_line in results:
>>> if result_line[0] == dataset.absolute_to_relative(item['file']):
>>> current_file_results.append(
>>> {'file': result_line[0],
>>> 'event_onset': float(result_line[1]),
>>> 'event_offset': float(result_line[2]),
>>> 'event_label': result_line[3]
>>> }
>>> )
>>> meta = dataset.file_meta(dataset.absolute_to_relative(item['file']))
>>> dcase2016_event_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)
>>> overall_metrics_per_scene[scene_label]['event_based_metrics'] = dcase2016_event_based_metric.results()
"""
def __init__(self, class_list, time_resolution=1.0, t_collar=0.2):
"""__init__ method.
Parameters
----------
class_list : list
List of class labels to be evaluated.
time_resolution : float > 0
Time resolution used when converting event into event roll.
(Default value = 1.0)
t_collar : float > 0
Time collar for event onset and offset condition
(Default value = 0.2)
"""
self.time_resolution = time_resolution
self.t_collar = t_collar
self.overall = {
'Nref': 0.0,
'Nsys': 0.0,
'Nsubs': 0.0,
'Ntp': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
}
self.class_wise = {}
for class_label in class_list:
self.class_wise[class_label] = {
'Nref': 0.0,
'Nsys': 0.0,
'Ntp': 0.0,
'Ntn': 0.0,
'Nfp': 0.0,
'Nfn': 0.0,
}
EventDetectionMetrics.__init__(self, class_list=class_list)
def __enter__(self):
# Initialize class and return it
return self
def __exit__(self, type, value, traceback):
# Finalize evaluation and return results
return self.results()
def evaluate(self, annotated_ground_truth, system_output):
"""Evaluate system output and annotated ground truth pair.
Use results method to get results.
Parameters
----------
annotated_ground_truth : numpy.array
Ground truth array, list of scene labels
system_output : numpy.array
System output array, list of scene labels
Returns
-------
nothing
"""
# Overall metrics
# Total number of detected and reference events
Nsys = len(system_output)
Nref = len(annotated_ground_truth)
sys_correct = numpy.zeros(Nsys, dtype=bool)
ref_correct = numpy.zeros(Nref, dtype=bool)
# Number of correctly transcribed events, onset/offset within a t_collar range
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
label_condition = annotated_ground_truth[j]['event_label'] == system_output[i]['event_label']
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if label_condition and onset_condition and offset_condition:
ref_correct[j] = True
sys_correct[i] = True
break
Ntp = numpy.sum(sys_correct)
sys_leftover = numpy.nonzero(numpy.negative(sys_correct))[0]
ref_leftover = numpy.nonzero(numpy.negative(ref_correct))[0]
# Substitutions
Nsubs = 0
for j in ref_leftover:
for i in sys_leftover:
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if onset_condition and offset_condition:
Nsubs += 1
break
Nfp = Nsys - Ntp - Nsubs
Nfn = Nref - Ntp - Nsubs
self.overall['Nref'] += Nref
self.overall['Nsys'] += Nsys
self.overall['Ntp'] += Ntp
self.overall['Nsubs'] += Nsubs
self.overall['Nfp'] += Nfp
self.overall['Nfn'] += Nfn
# Class-wise metrics
for class_id, class_label in enumerate(self.class_list):
Nref = 0.0
Nsys = 0.0
Ntp = 0.0
# Count event frequencies in the ground truth
for i in range(0, len(annotated_ground_truth)):
if annotated_ground_truth[i]['event_label'] == class_label:
Nref += 1
# Count event frequencies in the system output
for i in range(0, len(system_output)):
if system_output[i]['event_label'] == class_label:
Nsys += 1
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == class_label and system_output[i][
'event_label'] == class_label:
onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
system_event=system_output[i],
t_collar=self.t_collar)
if onset_condition and offset_condition:
Ntp += 1
break
Nfp = Nsys - Ntp
Nfn = Nref - Ntp
self.class_wise[class_label]['Nref'] += Nref
self.class_wise[class_label]['Nsys'] += Nsys
self.class_wise[class_label]['Ntp'] += Ntp
self.class_wise[class_label]['Nfp'] += Nfp
self.class_wise[class_label]['Nfn'] += Nfn
def onset_condition(self, annotated_event, system_event, t_collar=0.200):
"""Onset condition, checked does the event pair fulfill condition
Condition:
- event onsets are within t_collar each other
Parameters
----------
annotated_event : dict
Event dict
system_event : dict
Event dict
t_collar : float > 0
            Defines how close event onsets have to be in order to be considered a match. In seconds.
(Default value = 0.2)
Returns
-------
result : bool
Condition result
"""
return math.fabs(annotated_event['event_onset'] - system_event['event_onset']) <= t_collar
def offset_condition(self, annotated_event, system_event, t_collar=0.200, percentage_of_length=0.5):
"""Offset condition, checking does the event pair fulfill condition
Condition:
- event offsets are within t_collar each other
or
- system event offset is within the percentage_of_length*annotated event_length
Parameters
----------
annotated_event : dict
Event dict
system_event : dict
Event dict
t_collar : float > 0
            Defines how close event offsets have to be in order to be considered a match. In seconds.
(Default value = 0.2)
        percentage_of_length : float in [0, 1]
            Second condition: the system event offset must fall within percentage_of_length * annotated event length of the annotated offset.
            (Default value = 0.5)
Returns
-------
result : bool
Condition result
"""
annotated_length = annotated_event['event_offset'] - annotated_event['event_onset']
return math.fabs(annotated_event['event_offset'] - system_event['event_offset']) <= max(t_collar,
percentage_of_length * annotated_length)
def results(self):
"""Get results
Outputs results in dict, format:
{
'overall':
{
'Pre':
'Rec':
'F':
'ER':
'S':
'D':
'I':
}
'class_wise':
{
'office': {
'Pre':
'Rec':
'F':
'ER':
'D':
'I':
'Nref':
'Nsys':
'Ntp':
'Nfn':
'Nfp':
},
}
'class_wise_average':
{
'F':
'ER':
}
}
Parameters
----------
nothing
Returns
-------
results : dict
Results dict
"""
results = {
'overall': {},
'class_wise': {},
'class_wise_average': {},
}
# Overall metrics
results['overall']['Pre'] = self.overall['Ntp'] / (self.overall['Nsys'] + self.eps)
results['overall']['Rec'] = self.overall['Ntp'] / self.overall['Nref']
results['overall']['F'] = 2 * ((results['overall']['Pre'] * results['overall']['Rec']) / (
results['overall']['Pre'] + results['overall']['Rec'] + self.eps))
results['overall']['ER'] = (self.overall['Nfn'] + self.overall['Nfp'] + self.overall['Nsubs']) / self.overall[
'Nref']
results['overall']['S'] = self.overall['Nsubs'] / self.overall['Nref']
results['overall']['D'] = self.overall['Nfn'] / self.overall['Nref']
results['overall']['I'] = self.overall['Nfp'] / self.overall['Nref']
# Class-wise metrics
class_wise_F = []
class_wise_ER = []
for class_label in self.class_list:
if class_label not in results['class_wise']:
results['class_wise'][class_label] = {}
results['class_wise'][class_label]['Pre'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nsys'] + self.eps)
results['class_wise'][class_label]['Rec'] = self.class_wise[class_label]['Ntp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['F'] = 2 * (
(results['class_wise'][class_label]['Pre'] * results['class_wise'][class_label]['Rec']) / (
results['class_wise'][class_label]['Pre'] + results['class_wise'][class_label]['Rec'] + self.eps))
results['class_wise'][class_label]['ER'] = (self.class_wise[class_label]['Nfn'] +
self.class_wise[class_label]['Nfp']) / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['D'] = self.class_wise[class_label]['Nfn'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['I'] = self.class_wise[class_label]['Nfp'] / (
self.class_wise[class_label]['Nref'] + self.eps)
results['class_wise'][class_label]['Nref'] = self.class_wise[class_label]['Nref']
results['class_wise'][class_label]['Nsys'] = self.class_wise[class_label]['Nsys']
results['class_wise'][class_label]['Ntp'] = self.class_wise[class_label]['Ntp']
results['class_wise'][class_label]['Nfn'] = self.class_wise[class_label]['Nfn']
results['class_wise'][class_label]['Nfp'] = self.class_wise[class_label]['Nfp']
class_wise_F.append(results['class_wise'][class_label]['F'])
class_wise_ER.append(results['class_wise'][class_label]['ER'])
# Class-wise average
results['class_wise_average']['F'] = numpy.mean(class_wise_F)
results['class_wise_average']['ER'] = numpy.mean(class_wise_ER)
return results
class DCASE2013_EventDetection_Metrics(EventDetectionMetrics):
"""Lecagy DCASE2013 metrics, converted from the provided Matlab implementation
Supported metrics:
- Frame based
- F-score (F)
- AEER
- Event based
- Onset
- F-Score (F)
- AEER
- Onset-offset
- F-Score (F)
- AEER
- Class based
- Onset
- F-Score (F)
- AEER
- Onset-offset
- F-Score (F)
- AEER
"""
#
def frame_based(self, annotated_ground_truth, system_output, resolution=0.01):
# Convert event list into frame-based representation
system_event_roll = self.list_to_roll(data=system_output, time_resolution=resolution)
annotated_event_roll = self.list_to_roll(data=annotated_ground_truth, time_resolution=resolution)
# Fix durations of both event_rolls to be equal
if annotated_event_roll.shape[0] > system_event_roll.shape[0]:
padding = numpy.zeros((annotated_event_roll.shape[0] - system_event_roll.shape[0], len(self.class_list)))
system_event_roll = numpy.vstack((system_event_roll, padding))
if system_event_roll.shape[0] > annotated_event_roll.shape[0]:
padding = numpy.zeros((system_event_roll.shape[0] - annotated_event_roll.shape[0], len(self.class_list)))
annotated_event_roll = numpy.vstack((annotated_event_roll, padding))
# Compute frame-based metrics
Nref = sum(sum(annotated_event_roll))
Ntot = sum(sum(system_event_roll))
Ntp = sum(sum(system_event_roll + annotated_event_roll > 1))
Nfp = sum(sum(system_event_roll - annotated_event_roll > 0))
Nfn = sum(sum(annotated_event_roll - system_event_roll > 0))
Nsubs = min(Nfp, Nfn)
eps = numpy.spacing(1)
results = dict()
results['Rec'] = Ntp / (Nref + eps)
results['Pre'] = Ntp / (Ntot + eps)
results['F'] = 2 * ((results['Pre'] * results['Rec']) / (results['Pre'] + results['Rec'] + eps))
results['AEER'] = (Nfn + Nfp + Nsubs) / (Nref + eps)
return results
def event_based(self, annotated_ground_truth, system_output):
# Event-based evaluation for event detection task
# outputFile: the output of the event detection system
# GTFile: the ground truth list of events
# Total number of detected and reference events
Ntot = len(system_output)
Nref = len(annotated_ground_truth)
# Number of correctly transcribed events, onset within a +/-100 ms range
Ncorr = 0
NcorrOff = 0
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == system_output[i]['event_label'] and (
math.fabs(annotated_ground_truth[j]['event_onset'] - system_output[i]['event_onset']) <= 0.1):
Ncorr += 1
# If offset within a +/-100 ms range or within 50% of ground-truth event's duration
if math.fabs(annotated_ground_truth[j]['event_offset'] - system_output[i]['event_offset']) <= max(
0.1, 0.5 * (
annotated_ground_truth[j]['event_offset'] - annotated_ground_truth[j]['event_onset'])):
NcorrOff += 1
break # In order to not evaluate duplicates
# Compute onset-only event-based metrics
eps = numpy.spacing(1)
results = {
'onset': {},
'onset-offset': {},
}
Nfp = Ntot - Ncorr
Nfn = Nref - Ncorr
Nsubs = min(Nfp, Nfn)
results['onset']['Rec'] = Ncorr / (Nref + eps)
results['onset']['Pre'] = Ncorr / (Ntot + eps)
results['onset']['F'] = 2 * (
(results['onset']['Pre'] * results['onset']['Rec']) / (
results['onset']['Pre'] + results['onset']['Rec'] + eps))
results['onset']['AEER'] = (Nfn + Nfp + Nsubs) / (Nref + eps)
# Compute onset-offset event-based metrics
NfpOff = Ntot - NcorrOff
NfnOff = Nref - NcorrOff
NsubsOff = min(NfpOff, NfnOff)
results['onset-offset']['Rec'] = NcorrOff / (Nref + eps)
results['onset-offset']['Pre'] = NcorrOff / (Ntot + eps)
results['onset-offset']['F'] = 2 * ((results['onset-offset']['Pre'] * results['onset-offset']['Rec']) / (
results['onset-offset']['Pre'] + results['onset-offset']['Rec'] + eps))
results['onset-offset']['AEER'] = (NfnOff + NfpOff + NsubsOff) / (Nref + eps)
return results
def class_based(self, annotated_ground_truth, system_output):
# Class-wise event-based evaluation for event detection task
# outputFile: the output of the event detection system
# GTFile: the ground truth list of events
# Total number of detected and reference events per class
Ntot = numpy.zeros((len(self.class_list), 1))
for event in system_output:
pos = self.class_list.index(event['event_label'])
Ntot[pos] += 1
Nref = numpy.zeros((len(self.class_list), 1))
for event in annotated_ground_truth:
pos = self.class_list.index(event['event_label'])
Nref[pos] += 1
I = (Nref > 0).nonzero()[0] # index for classes present in ground-truth
# Number of correctly transcribed events per class, onset within a +/-100 ms range
Ncorr = numpy.zeros((len(self.class_list), 1))
NcorrOff = numpy.zeros((len(self.class_list), 1))
for j in range(0, len(annotated_ground_truth)):
for i in range(0, len(system_output)):
if annotated_ground_truth[j]['event_label'] == system_output[i]['event_label'] and (
math.fabs(
annotated_ground_truth[j]['event_onset'] - system_output[i]['event_onset']) <= 0.1):
pos = self.class_list.index(system_output[i]['event_label'])
Ncorr[pos] += 1
# If offset within a +/-100 ms range or within 50% of ground-truth event's duration
if math.fabs(annotated_ground_truth[j]['event_offset'] - system_output[i]['event_offset']) <= max(
0.1, 0.5 * (
annotated_ground_truth[j]['event_offset'] - annotated_ground_truth[j][
'event_onset'])):
pos = self.class_list.index(system_output[i]['event_label'])
NcorrOff[pos] += 1
break # In order to not evaluate duplicates
# Compute onset-only class-wise event-based metrics
eps = numpy.spacing(1)
results = {
'onset': {},
'onset-offset': {},
}
Nfp = Ntot - Ncorr
Nfn = Nref - Ncorr
Nsubs = numpy.minimum(Nfp, Nfn)
tempRec = Ncorr[I] / (Nref[I] + eps)
tempPre = Ncorr[I] / (Ntot[I] + eps)
results['onset']['Rec'] = numpy.mean(tempRec)
results['onset']['Pre'] = numpy.mean(tempPre)
tempF = 2 * ((tempPre * tempRec) / (tempPre + tempRec + eps))
results['onset']['F'] = numpy.mean(tempF)
tempAEER = (Nfn[I] + Nfp[I] + Nsubs[I]) / (Nref[I] + eps)
results['onset']['AEER'] = numpy.mean(tempAEER)
# Compute onset-offset class-wise event-based metrics
NfpOff = Ntot - NcorrOff
NfnOff = Nref - NcorrOff
NsubsOff = numpy.minimum(NfpOff, NfnOff)
tempRecOff = NcorrOff[I] / (Nref[I] + eps)
tempPreOff = NcorrOff[I] / (Ntot[I] + eps)
results['onset-offset']['Rec'] = numpy.mean(tempRecOff)
results['onset-offset']['Pre'] = numpy.mean(tempPreOff)
tempFOff = 2 * ((tempPreOff * tempRecOff) / (tempPreOff + tempRecOff + eps))
results['onset-offset']['F'] = numpy.mean(tempFOff)
tempAEEROff = (NfnOff[I] + NfpOff[I] + NsubsOff[I]) / (Nref[I] + eps)
results['onset-offset']['AEER'] = numpy.mean(tempAEEROff)
return results
def main(argv):
# Examples to show usage and required data structures
class_list = ['class1', 'class2', 'class3']
system_output = [
{
'event_label': 'class1',
'event_onset': 0.1,
'event_offset': 1.0
},
{
'event_label': 'class2',
'event_onset': 4.1,
'event_offset': 4.7
},
{
'event_label': 'class3',
'event_onset': 5.5,
'event_offset': 6.7
}
]
annotated_groundtruth = [
{
'event_label': 'class1',
'event_onset': 0.1,
'event_offset': 1.0
},
{
'event_label': 'class2',
'event_onset': 4.2,
'event_offset': 5.4
},
{
'event_label': 'class3',
'event_onset': 5.5,
'event_offset': 6.7
}
]
dcase2013metric = DCASE2013_EventDetection_Metrics(class_list=class_list)
print 'DCASE2013'
print 'Frame-based:', dcase2013metric.frame_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
print 'Event-based:', dcase2013metric.event_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
print 'Class-based:', dcase2013metric.class_based(system_output=system_output,
annotated_ground_truth=annotated_groundtruth)
dcase2016_metric = DCASE2016_EventDetection_SegmentBasedMetrics(class_list=class_list)
print 'DCASE2016'
print dcase2016_metric.evaluate(system_output=system_output, annotated_ground_truth=annotated_groundtruth).results()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
mit
|
mbayon/TFG-MachineLearning
|
vbig/lib/python2.7/site-packages/sklearn/datasets/tests/test_rcv1.py
|
322
|
2414
|
"""Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
|
mit
|
bhargav/scikit-learn
|
sklearn/ensemble/tests/test_forest.py
|
26
|
41675
|
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, criterion, X, y):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, est.transform, X, threshold="mean")
    assert_greater(X_new.shape[1], 0)
    assert_less(X_new.shape[1], X.shape[1])
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, name, criterion, X, y
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse"]):
yield check_importances, name, criterion, X, y
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
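    # Hedged paraphrase of the theorem being checked (it mirrors the
    # mdi_importance() implementation below): for totally randomized trees on
    # p categorical inputs,
    #   Imp(X_m) = sum_{k=0}^{p-1} 1 / (C(p, k) * (p - k))
    #                * sum_{B subset of V \ {m}, |B| = k} I(X_m ; Y | B)
    # and the importances of all inputs sum to the information the inputs
    # jointly carry about Y, which is compared against entropy(y) further down.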
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
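# A minimal illustrative sketch (helper only, not collected as a test) of how
# the out-of-bag estimate checked above is typically consumed; the estimator
# settings here are arbitrary example values.
def _oob_usage_sketch():
    est = RandomForestClassifier(n_estimators=50, bootstrap=True,
                                 oob_score=True, random_state=0)
    est.fit(iris.data, iris.target)
    # oob_score_ approximates held-out accuracy without a separate test split
    return est.oob_score_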
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
    # test random forest hashing on the circles dataset:
    # make sure that the transformed data is linearly separable,
    # even after being projected down to two SVD dimensions.
    # Note: not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
    # On a single-variable problem where X_0 has 4 equiprobable values, there
    # are 5 ways to build a random tree. The most compact of them,
    # (0,1/0,0/--0,2/--), has probability 1/3 while the 4 others have
    # probability 1/6 each.
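    # Hedged sketch of where these probabilities come from (assuming
    # extra-trees draws each split threshold uniformly between the node's
    # min and max feature values): the root cut falls in (0,1), (1,2) or
    # (2,3) with probability 1/3 each. A cut in (1,2) yields the balanced
    # tree directly (probability 1/3); a cut in (0,1) or (2,3) leaves a
    # 3-valued child whose next cut picks one of two shapes with probability
    # 1/2, giving each of the 4 remaining trees probability 1/3 * 1/2 = 1/6.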
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
X, y = hastie_X, hastie_y
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1, random_state=0).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1,
random_state=0).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name
def check_min_samples_split(name):
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_split=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=0).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=1.1).fit, X, y)
est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
def test_min_samples_split():
for name in FOREST_ESTIMATORS:
yield check_min_samples_split, name
def check_min_samples_leaf(name):
X, y = hastie_X, hastie_y
    # Test that leaves contain at least min_samples_leaf training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=0).fit, X, y)
est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
"Failed with {0}".format(name))
def test_min_samples_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name
def check_min_weight_fraction_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
random_state=0)
if "RandomForest" in name:
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(n_estimators=1,
random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
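    # (illustrative note, not asserted) for single-output y the pairs of fits
    # above exercise the equivalence
    #   class_weight={c: w_c}  <=>  sample_weight[i] = w_{y[i]}
    # and the final pair checks that the two weightings multiply.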
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test that class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
    # Test that a warm-start fit with an unchanged n_estimators trains no new
    # trees, returns the same forest, and raises a warning.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
def check_decision_path(name):
X, y = hastie_X, hastie_y
n_samples = X.shape[0]
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
est.fit(X, y)
indicator, n_nodes_ptr = est.decision_path(X)
assert_equal(indicator.shape[1], n_nodes_ptr[-1])
assert_equal(indicator.shape[0], n_samples)
assert_array_equal(np.diff(n_nodes_ptr),
[e.tree_.node_count for e in est.estimators_])
# Assert that leaves index are correct
leaves = est.apply(X)
for est_id in range(leaves.shape[1]):
leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
for i, j in enumerate(leaves[:, est_id])]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
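    # (illustrative, not asserted) n_nodes_ptr is a CSR-style offset array:
    # the node-indicator columns belonging to tree t are
    #   indicator[:, n_nodes_ptr[t]:n_nodes_ptr[t + 1]]
    # which is the per-tree slice the loop above walks one leaf at a time.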
def test_decision_path():
for name in FOREST_CLASSIFIERS:
yield check_decision_path, name
for name in FOREST_REGRESSORS:
yield check_decision_path, name
|
bsd-3-clause
|
MadsJensen/agency_connectivity
|
phase_analysis_wide.py
|
1
|
5708
|
# -*- coding: utf-8 -*-
"""
@author: mje
@email: mads@cnru.dk
"""
import numpy as np
# import mne
import matplotlib.pyplot as plt
import pandas as pd
from my_settings import *
plt.style.use("ggplot")
b_df = pd.read_csv(
"/Users/au194693/projects/agency_connectivity/data/behavioural_results.csv")
def calc_ISPC_time_between(data, chan_1=52, chan_2=1):
    """Inter-site phase clustering (ISPC) between two channels.

    For each trial, ISPC = |mean(exp(1j * (phase_1 - phase_2)))| over the
    samples in [window_start, window_end); 1 means perfect phase locking,
    values near 0 mean unrelated phases.
    """
    result = np.empty([data.shape[0]])
    for i in range(data.shape[0]):
        result[i] = np.abs(
            np.mean(
                np.exp(1j * (
                    np.angle(data[i, chan_1, window_start:window_end]) -
                    np.angle(data[i, chan_2, window_start:window_end])))))
    return result
label_dict = {"ba_1_4_r": [1, 52],
"ba_1_4_l": [0, 51],
"ba_4_4": [51, 52],
"ba_1_1": [0, 1]}
# "ba_4_39_l": [49, 51],
# "ba_4_39_r": [50, 52],
# "ba_39_39": [49, 50]}
# bands = ["delta", "theta", "alpha", "beta", "gamma1", "gamma2"]
bands = ["beta"]
# subjects = ["p9"]
labels = list(np.load(data_path + "label_names.npy"))
times = np.arange(-2000, 2001, 1.95325)
times = times / 1000.
window_length = 153
step_length = 15
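# With the ~1.95 ms sample spacing defined by `times` above, a 153-sample
# window stepped by 15 samples corresponds to roughly a 300 ms analysis
# window advanced in ~30 ms steps (153 * 1.95325 ms ~ 299 ms,
# 15 * 1.95325 ms ~ 29 ms).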
results_all = pd.DataFrame()
for subject in subjects:
print("Working on: " + subject)
# ht_vol = np.load(tf_folder + "/%s_vol_HT-comp.npy" %
# subject)
ht_testing = np.load(tf_folder + "%s_inv_HT-comp.npy" % subject)
b_tmp = b_df[(b_df.subject == subject) & (b_df.condition == "invol"
)].reset_index()
b_tmp = b_tmp[-89:]
for k, band in enumerate(bands):
        k = 3  # hard-coded index of the beta band (see full band list above)
# results_testing = {}
ht_testing_band = ht_testing[-89:, :, :, k]
step = 1
j = 768 # times index to start
while times[window_length + j] < times[1040]:
window_start = j
window_end = j + window_length
res = pd.DataFrame(
calc_ISPC_time_between(
ht_testing_band,
chan_1=label_dict["ba_1_4_r"][0],
chan_2=label_dict["ba_1_4_r"][1]),
columns=["ba_1_4_r"])
res["step"] = step
res["subject"] = subject
res["binding"] = b_tmp.binding.get_values()
res["trial_status"] = b_tmp.trial_status.get_values()
res["condition"] = "testing"
res["band"] = band
# res["trial_nr"] = np.arange(1, 90, 1)
res["ba_1_4_l"] = calc_ISPC_time_between(
ht_testing_band,
chan_1=label_dict["ba_1_4_l"][0],
chan_2=label_dict["ba_1_4_l"][1])
res["ba_1_1"] = calc_ISPC_time_between(
ht_testing_band,
chan_1=label_dict["ba_1_1"][0], chan_2=label_dict["ba_1_1"][1])
res["ba_4_4"] = calc_ISPC_time_between(
ht_testing_band,
chan_1=label_dict["ba_4_4"][0], chan_2=label_dict["ba_4_4"][1])
results_all = results_all.append(res)
j += step_length
step += 1
print("Working on: " + subject)
# ht_vol = np.load(tf_folder + "/%s_vol_HT-comp.npy" %
# subject)
ht_learning = np.load(tf_folder + "%s_vol_HT-comp.npy" % subject)
b_tmp = b_df[(b_df.subject == subject) & (b_df.condition == "vol"
)].reset_index()
b_tmp = b_tmp[-89:]
for k, band in enumerate(bands):
        k = 3  # hard-coded index of the beta band (see full band list above)
# Results_learning = {}
ht_learning_band = ht_learning[-89:, :, :, k]
j = 768 # times index to start
step = 1
while times[window_length + j] < times[1040]:
window_start = j
window_end = j + window_length
res = pd.DataFrame(
calc_ISPC_time_between(
ht_learning_band,
chan_1=label_dict["ba_1_4_r"][0],
chan_2=label_dict["ba_1_4_r"][1]),
columns=["ba_1_4_r"])
res["step"] = step
res["subject"] = subject
res["binding"] = b_tmp.binding.get_values()
res["trial_status"] = b_tmp.trial_status.get_values()
res["condition"] = "learning"
res["band"] = band
# res["trial_nr"] = np.arange(1, 90, 1)
res["ba_1_4_l"] = calc_ISPC_time_between(
ht_learning_band,
chan_1=label_dict["ba_1_4_l"][0],
chan_2=label_dict["ba_1_4_l"][1])
res["ba_1_1"] = calc_ISPC_time_between(
ht_learning_band,
chan_1=label_dict["ba_1_1"][0], chan_2=label_dict["ba_1_1"][1])
res["ba_4_4"] = calc_ISPC_time_between(
ht_learning_band,
chan_1=label_dict["ba_4_4"][0], chan_2=label_dict["ba_4_4"][1])
results_all = results_all.append(res)
j += step_length
step += 1
# combine condition to predict testing from learning
res_testing = results_all[results_all.condition == "testing"]
res_learning = results_all[results_all.condition == "learning"]
res_combined = res_testing.copy()
res_combined["ba_1_1_learning"] = res_learning["ba_1_1"]
res_combined["ba_4_4_learning"] = res_learning["ba_4_4"]
res_combined["ba_1_4_l_learning"] = res_learning["ba_1_4_l"]
res_combined["ba_1_4_r_learning"] = res_learning["ba_1_4_r"]
tmp = res_combined[res_combined.step == 8]
X = tmp[["ba_1_1", "ba_4_4", "ba_1_4_l", "ba_1_4_r",
"ba_1_1_learning", "ba_4_4_learning",
"ba_1_4_l_learning", "ba_1_4_r_learning"]].get_values()
y = tmp[["binding"]]
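# A minimal sketch (not part of the original analysis) of how the feature
# matrix X and target y built above could be fed to a regression; scikit-learn
# is an assumption here (it is not imported by this script) and the function
# name is hypothetical.
def predict_binding_sketch(X, y):
    from sklearn.linear_model import LinearRegression
    from sklearn.model_selection import cross_val_score
    # cross-validated R^2 of predicting binding from the ISPC features
    return cross_val_score(LinearRegression(), X, np.ravel(y), cv=5,
                           scoring="r2")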
|
bsd-3-clause
|
LFPy/LFPy
|
LFPy/cell.py
|
1
|
106760
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2012 Computational Neuroscience Group, NMBU.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
import os
import neuron
from neuron import units
import numpy as np
import scipy.stats
import sys
import posixpath
from warnings import warn
import pickle
from .run_simulation import _run_simulation_with_probes
from .run_simulation import _collect_geometry_neuron
from .alias_method import alias_method
# check neuron version:
try:
try:
assert neuron.version >= '7.7.2'
except AttributeError:
warn('Could not read NEURON version info. v7.7.2 or newer required')
except TypeError:
# workaround for doc build neuron Mock module
pass
except AssertionError:
warn('LFPy requires NEURON v7.7.2 or newer. Found v{}'.format(
neuron.version))
class Cell(object):
"""
The main cell class used in LFPy.
Parameters
----------
morphology: str or neuron.h.SectionList
File path of morphology on format that NEURON can understand (w. file
ending .hoc, .asc, .swc or .xml), or neuron.h.SectionList instance
filled with references to neuron.h.Section instances.
v_init: float
Initial membrane potential. Defaults to -70 mV.
Ra: float or None
Axial resistance. Defaults to None (unit Ohm*cm)
cm: float
Membrane capacitance. Defaults to None (unit uF/cm2)
passive: bool
Passive mechanisms are initialized if True. Defaults to False
passive_parameters: dict
parameter dictionary with values for the passive membrane mechanism in
NEURON ('pas'). The dictionary must contain keys 'g_pas' [S/cm^2] and
'e_pas' [mV], like the default:
passive_parameters=dict(g_pas=0.001, e_pas=-70)
extracellular: bool
Switch for NEURON's extracellular mechanism. Defaults to False
dt: float
simulation timestep. Defaults to 2^-4 ms
tstart: float
Initialization time for simulation <= 0 ms. Defaults to 0.
tstop: float
Stop time for simulation > 0 ms. Defaults to 100 ms.
nsegs_method: 'lambda100' or 'lambda_f' or 'fixed_length' or None
nseg rule, used by NEURON to determine number of compartments.
Defaults to 'lambda100'
max_nsegs_length: float or None
Maximum segment length for method 'fixed_length'. Defaults to None
lambda_f: int
AC frequency for method 'lambda_f'. Defaults to 100
d_lambda: float
Parameter for d_lambda rule. Defaults to 0.1
delete_sections: bool
Delete pre-existing section-references. Defaults to True
custom_code: list or None
List of model-specific code files ([.py/.hoc]). Defaults to None
custom_fun: list or None
List of model-specific functions with args. Defaults to None
custom_fun_args: list or None
List of args passed to custom_fun functions. Defaults to None
pt3d: bool
Use pt3d-info of the cell geometries switch. Defaults to False
celsius: float or None
Temperature in celsius. If nothing is specified here
        or in custom code, it defaults to 6.3 celsius
verbose: bool
Verbose output switch. Defaults to False
Examples
--------
Simple example of how to use the Cell class with a passive-circuit
morphology (modify morphology path accordingly):
>>> import os
>>> import LFPy
>>> cellParameters = {
>>> 'morphology': os.path.join('examples', 'morphologies',
>>> 'L5_Mainen96_LFPy.hoc'),
>>> 'v_init': -65.,
>>> 'cm': 1.0,
>>> 'Ra': 150,
>>> 'passive': True,
>>> 'passive_parameters': {'g_pas': 1./30000, 'e_pas': -65},
>>> 'dt': 2**-3,
>>> 'tstart': 0,
>>> 'tstop': 50,
>>> }
>>> cell = LFPy.Cell(**cellParameters)
>>> cell.simulate()
>>> print(cell.somav)
See also
--------
TemplateCell
NetworkCell
"""
def __init__(self, morphology,
v_init=-70.,
Ra=None,
cm=None,
passive=False,
passive_parameters=None,
extracellular=False,
tstart=0.,
tstop=100.,
dt=2**-4,
nsegs_method='lambda100',
lambda_f=100,
d_lambda=0.1,
max_nsegs_length=None,
delete_sections=True,
custom_code=None,
custom_fun=None,
custom_fun_args=None,
pt3d=False,
celsius=None,
verbose=False,
**kwargs):
self.verbose = verbose
self.pt3d = pt3d
if passive_parameters is None:
passive_parameters = dict(g_pas=0.001, e_pas=-70.)
# check if there are un-used keyword arguments present in kwargs
for key, value in kwargs.items():
            raise ValueError('keyword/argument {}={} is invalid input '
                             'to class LFPy.Cell'.format(key, value))
if passive:
assert isinstance(passive_parameters, dict), \
'passive_parameters must be a dictionary'
for key in ['g_pas', 'e_pas']:
assert key in passive_parameters.keys(), \
'key {} not found in passive_parameters'.format(key)
if not hasattr(neuron.h, 'd_lambda'):
neuron.h.load_file('stdlib.hoc') # NEURON std. library
neuron.h.load_file('import3d.hoc') # import 3D morphology lib
if not hasattr(neuron.h, 'continuerun'):
neuron.h.load_file('stdrun.hoc') # NEURON stdrun library
if delete_sections:
if not isinstance(morphology, type(neuron.h.SectionList)):
if self.verbose:
print('%s existing sections deleted from memory' %
sum(1 for sec in neuron.h.allsec()))
neuron.h('forall delete_section()')
else:
if not isinstance(morphology, type(neuron.h.SectionList)):
mssg = "%s sections detected! " % sum(
1 for sec in neuron.h.allsec()) \
+ "Consider setting 'delete_sections=True'"
warn(mssg)
# load morphology
assert morphology is not None, \
('deprecated keyword argument morphology==None, value must be ' +
'a file path or neuron.h.SectionList instance with ' +
'neuron.h.Section instances')
if "win32" in sys.platform and isinstance(morphology, str):
# fix Path on windows
morphology = morphology.replace(os.sep, posixpath.sep)
self.morphology = morphology
if isinstance(self.morphology, str):
if os.path.isfile(self.morphology):
self._load_geometry()
else:
raise Exception('non-existent file %s' % self.morphology)
else:
assert isinstance(self.morphology, type(neuron.h.SectionList)), \
("Could not recognize Cell keyword argument morphology as " +
"neuron.h.SectionList instance")
# instantiate 3D geometry of all sections
neuron.h.define_shape()
# set some additional attributes
self._create_sectionlists()
# Some parameters and lists initialised
assert tstart <= 0, 'tstart must be <= 0.'
try:
assert dt in 2.**np.arange(-16, -1)
except AssertionError:
if tstart == 0.:
if self.verbose:
print('int(1./dt) not factorizable in base 2. cell.tvec '
'errors may occur, continuing initialization.')
elif tstart < 0:
raise AssertionError(
'int(1./dt) must be factorizable in base 2 if tstart < 0.')
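        # e.g. the default dt = 2**-4 = 0.0625 ms passes the check above,
        # while dt = 0.1 ms is only accepted when tstart == 0.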
self.dt = dt
self.tstart = tstart
self.tstop = tstop
self.synapses = []
self.synidx = []
self.pointprocesses = []
self.pointprocess_idx = []
self.v_init = v_init
self.default_rotation = self.__get_rotation()
# Set axial resistance and membrane capacitance
self.Ra = Ra
self.cm = cm
self.__set_ra_and_cm()
# Set passive properties, insert passive on all segments
self.passive_parameters = passive_parameters
if passive:
self.__set_passive()
else:
if self.verbose:
print('No passive properties added')
# run user specified code and functions if argument given
if custom_code is not None or custom_fun is not None:
self.__run_custom_codes(custom_code, custom_fun, custom_fun_args)
# Insert extracellular mech on all segments
self.extracellular = extracellular
if self.extracellular:
self.__set_extracellular()
else:
if self.verbose:
print("no extracellular mechanism inserted")
        # set number of segments according to rule, and count the total
self.__set_negs(nsegs_method, lambda_f, d_lambda, max_nsegs_length)
self.totnsegs = self.__calc_totnsegs()
if self.verbose:
print("Total number of segments: %i" % self.totnsegs)
# extract pt3d info from NEURON, and set these with the same rotation
# and position in space as in our simulations, assuming RH rule, which
# NEURON do NOT use in shape plot
if self.pt3d:
self.x3d, self.y3d, self.z3d, self.diam3d = self._collect_pt3d()
# Gather geometry, set position and rotation of morphology
if self.pt3d:
self._update_pt3d()
else: # self._update_pt3d makes a call to self._collect_geometry()
self._collect_geometry()
if hasattr(self, 'somapos'):
self.set_pos()
else:
if self.verbose:
                print('no soma, using the midpoint of the initial segment.')
self.set_rotation(**self.default_rotation)
if celsius is not None:
if neuron.h.celsius != 6.3:
print("Changing temperature %1.2f to %1.2f"
% (neuron.h.celsius, celsius))
neuron.h.celsius = celsius
# initialize membrane voltage in all segments.
neuron.h.finitialize(self.v_init * units.mV)
self._neuron_tvec = None
def __del__(self):
"""Cell finalizer"""
self.strip_hoc_objects()
def strip_hoc_objects(self):
"""Destroy any NEURON hoc objects in the cell object"""
if not (isinstance(neuron, type(None)) or
isinstance(neuron.nrn, type(None))):
nrntypes = (neuron.nrn.Segment, neuron.nrn.Section,
neuron.nrn.Mechanism, type(neuron.h.List()))
for key in self.__dict__.keys():
if isinstance(getattr(self, key), nrntypes):
setattr(self, key, None)
if self.verbose:
print('{}.{} = None'.format(self.__name__, key))
def _load_geometry(self):
"""Load the morphology-file in NEURON"""
# import the morphology, try and determine format
fileEnding = self.morphology.split('.')[-1]
if fileEnding == 'hoc' or fileEnding == 'HOC':
neuron.h.load_file(1, self.morphology)
else:
neuron.h('objref this')
if fileEnding == 'asc' or fileEnding == 'ASC':
Import = neuron.h.Import3d_Neurolucida3()
if not self.verbose:
Import.quiet = 1
elif fileEnding == 'swc' or fileEnding == 'SWC':
Import = neuron.h.Import3d_SWC_read()
elif fileEnding == 'xml' or fileEnding == 'XML':
Import = neuron.h.Import3d_MorphML()
else:
                raise ValueError(
                    '%s is not a recognised morphology file format; '
                    'should be either .hoc, .asc, .swc or .xml'
                    % self.morphology)
# assuming now that morphologies file is the correct format
try:
Import.input(self.morphology)
except BaseException:
if not hasattr(neuron, 'neuroml'):
raise Exception('Can not import, try and copy the '
'nrn/share/lib/python/neuron/neuroml '
'folder into %s' % neuron.__path__[0])
else:
raise Exception('something wrong with file, see output')
try:
imprt = neuron.h.Import3d_GUI(Import, 0)
except BaseException:
raise Exception('See output, try to correct the file')
imprt.instantiate(neuron.h.this)
neuron.h.define_shape()
self._create_sectionlists()
def __run_custom_codes(self, custom_code, custom_fun, custom_fun_args):
"""Execute custom model code and functions with arguments"""
# load custom codes
if custom_code is not None:
for code in custom_code:
if "win32" in sys.platform:
code = code.replace(os.sep, posixpath.sep)
if code.split('.')[-1] == 'hoc':
try:
neuron.h.xopen(code)
except RuntimeError:
ERRMSG = '\n'.join(
[
'',
'Could not load custom model code (%s)' % code,
'while creating a Cell object.',
'One possible cause is NEURON mechanisms have',
'not been compiled, ',
'try running nrnivmodl or mknrndll (Windows) ',
'in the .mod-file containing folder. ',
])
raise Exception(ERRMSG)
elif code.split('.')[-1] == 'py':
exec(code)
else:
raise Exception('%s not a .hoc- nor .py-file' % code)
# run custom functions with arguments
i = 0
if custom_fun is not None:
for fun in custom_fun:
fun(self, **custom_fun_args[i])
i += 1
# recreate sectionlists in case something changed
neuron.h.define_shape()
self._create_sectionlists()
def __set_negs(self, nsegs_method, lambda_f, d_lambda, max_nsegs_length):
"""Set number of segments per section according to the lambda-rule,
or according to maximum length of segments"""
if nsegs_method == 'lambda100':
self.__set_nsegs_lambda100(d_lambda)
elif nsegs_method == 'lambda_f':
self.__set_nsegs_lambda_f(lambda_f, d_lambda)
elif nsegs_method == 'fixed_length':
self.__set_nsegs_fixed_length(max_nsegs_length)
else:
if self.verbose:
print('No nsegs_method applied (%s)' % nsegs_method)
def __get_rotation(self):
"""Check if there exists a corresponding file
with rotation angles"""
if isinstance(self.morphology, str):
base = os.path.splitext(self.morphology)[0]
if os.path.isfile(base + '.rot'):
rotation_file = base + '.rot'
rotation_data = open(rotation_file)
rotation = {}
                for line in rotation_data:
                    var, val = line.split('=')
                    rotation[var] = float(val.strip())
else:
rotation = {}
else:
rotation = {}
return rotation
def _create_sectionlists(self):
"""Create section lists for different kinds of sections"""
# list with all sections
self.allsecnames = []
if not isinstance(self.morphology, type(neuron.h.SectionList)):
self.allseclist = neuron.h.SectionList()
for sec in neuron.h.allsec():
self.allsecnames.append(sec.name())
self.allseclist.append(sec=sec)
else:
self.allseclist = self.morphology
for sec in neuron.h.allsec():
self.allsecnames.append(sec.name())
# list of soma sections, assuming it is named on the format "soma*"
self.nsomasec = 0
self.somalist = neuron.h.SectionList()
for sec in neuron.h.allsec():
if sec.name().find('soma') >= 0:
self.somalist.append(sec=sec)
self.nsomasec += 1
def __get_idx(self, seclist):
"""Return boolean vector which indexes where segments in seclist
matches segments in neuron.h.allsec(), rewritten from
LFPy.hoc function get_idx()"""
if neuron.h.allsec() == seclist:
return np.ones(self.totnsegs, dtype=bool)
else:
idxvec = np.zeros(self.totnsegs, dtype=bool)
# get sectionnames from seclist
seclistnames = []
for sec in seclist:
seclistnames.append(sec.name())
seclistnames = np.array(seclistnames, dtype='|S128')
segnames = np.empty(self.totnsegs, dtype='|S128')
i = 0
for sec in self.allseclist:
secname = sec.name()
for seg in sec:
segnames[i] = secname
i += 1
for name in seclistnames:
idxvec[segnames == name] = True
return idxvec
def __set_nsegs_lambda_f(self, frequency=100, d_lambda=0.1):
"""Set the number of segments for section according to the
d_lambda-rule for a given input frequency
Parameters
----------
frequency: float
frequency at which AC length constant is computed
        d_lambda: float
            fraction of the AC length constant at `frequency` used as the
            upper bound on segment length (NEURON's d_lambda rule)
        """
neuron.h.pop_section() # dirty fix, see NEURON doc
for sec in self.allseclist:
sec.nseg = int(
(sec.L / (d_lambda * neuron.h.lambda_f(frequency, sec=sec))
+ .9) / 2) * 2 + 1
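            # e.g. (illustrative numbers) a 200 um section with
            # lambda_f(100 Hz) = 500 um and d_lambda = 0.1 gives
            # 200 / 50 = 4, which the expression above rounds up to the odd
            # value nseg = 5.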
if self.verbose:
print("set nsegs using lambda-rule with frequency %i." % frequency)
def __set_nsegs_lambda100(self, d_lambda=0.1):
"""Set the numbers of segments using d_lambda(100)"""
self.__set_nsegs_lambda_f(frequency=100, d_lambda=d_lambda)
def __set_nsegs_fixed_length(self, maxlength):
"""Set nseg for sections so that every segment L < maxlength"""
for sec in self.allseclist:
sec.nseg = int(sec.L / maxlength) + 1
def __calc_totnsegs(self):
"""Calculate the number of segments in the allseclist"""
i = 0
for sec in self.allseclist:
i += sec.nseg
return i
def __check_currents(self):
"""Check that the sum of all membrane and electrode currents over all
segments is sufficiently close to zero"""
        raise NotImplementedError('this function needs to be written')
def __set_ra_and_cm(self):
"""Insert ra and cm on all segments"""
for sec in self.allseclist:
if self.Ra is not None:
sec.Ra = self.Ra
if self.cm is not None:
sec.cm = self.cm
def __set_passive(self):
"""Insert passive mechanism on all segments"""
for sec in self.allseclist:
sec.insert('pas')
sec.g_pas = self.passive_parameters['g_pas']
sec.e_pas = self.passive_parameters['e_pas']
def __set_extracellular(self):
"""Insert extracellular mechanism on all sections
to set an external potential V_ext as boundary condition.
"""
for sec in self.allseclist:
sec.insert('extracellular')
self.extracellular = True
def set_synapse(self, idx, syntype,
record_current=False,
record_potential=False,
weight=None, **kwargs):
"""Insert synapse on cell segment
Parameters
----------
idx: int
Index of compartment where synapse is inserted
syntype: str
Type of synapse. Built-in types in NEURON: ExpSyn, Exp2Syn
record_current: bool
If True, record synapse current
record_potential: bool
If True, record postsynaptic potential seen by the synapse
weight: float
Strength of synapse
kwargs
arguments passed on from class Synapse
Returns
-------
int
index of synapse object on cell
"""
if not hasattr(self, '_hoc_synlist'):
self._hoc_synlist = neuron.h.List()
if not hasattr(self, '_synitorecord'):
self._synitorecord = []
if not hasattr(self, '_synvtorecord'):
self._synvtorecord = []
if not hasattr(self, '_hoc_netstimlist'):
self._hoc_netstimlist = neuron.h.List()
if not hasattr(self, '_hoc_netconlist'):
self._hoc_netconlist = neuron.h.List()
if not hasattr(self, '_sptimeslist'):
self._sptimeslist = []
# need to append w. one empty array per synapse
self._sptimeslist.append(np.array([]))
i = 0
cmd = 'neuron.h.{}(seg.x, sec=sec)'
for sec in self.allseclist:
for seg in sec:
if i == idx:
command = cmd.format(syntype)
syn = eval(command, locals(), globals())
for param in list(kwargs.keys()):
try:
setattr(syn, param, kwargs[param])
except BaseException:
pass
self._hoc_synlist.append(syn)
# create NetStim (generator) and NetCon (connection)
# objects
self._hoc_netstimlist.append(neuron.h.NetStim(0.5))
self._hoc_netstimlist[-1].number = 0
nc = neuron.h.NetCon(self._hoc_netstimlist[-1], syn)
nc.weight[0] = weight
self._hoc_netconlist.append(nc)
# record current
if record_current:
self._synitorecord.append(
self._hoc_synlist.count() - 1)
# record potential
if record_potential:
self._synvtorecord.append(
self._hoc_synlist.count() - 1)
i += 1
return self._hoc_synlist.count() - 1
def set_point_process(self, idx, pptype, record_current=False,
record_potential=False, **kwargs):
"""Insert pptype-electrode type pointprocess on segment numbered
idx on cell object
Parameters
----------
idx: int
Index of compartment where point process is inserted
pptype: str
Type of pointprocess. Examples: SEClamp, VClamp,
IClamp, SinIClamp, ChirpIClamp
record_current: bool
Decides if current is stored
kwargs
Parameters passed on from class StimIntElectrode
Returns
-------
int
index of point process on cell
"""
if not hasattr(self, '_hoc_stimlist'):
self._hoc_stimlist = neuron.h.List()
if not hasattr(self, '_stimitorecord'):
self._stimitorecord = []
if not hasattr(self, '_stimvtorecord'):
self._stimvtorecord = []
i = 0
cmd1 = 'neuron.h.'
cmd2 = '(seg.x, sec=sec)'
ppset = False
for sec in self.allseclist:
for seg in sec:
if i == idx:
command = cmd1 + pptype + cmd2
stim = eval(command, locals(), globals())
for key, value in kwargs.items():
try:
itr = enumerate(iter(value))
except TypeError:
setattr(stim, key, value)
else:
for i, v in itr:
getattr(stim, key)[i] = v
self._hoc_stimlist.append(stim)
# record current
if record_current:
self._stimitorecord.append(
self._hoc_stimlist.count() - 1)
# record potential
if record_potential:
self._stimvtorecord.append(
self._hoc_stimlist.count() - 1)
ppset = True
break
i += 1
if ppset:
break
return self._hoc_stimlist.count() - 1
def _collect_geometry(self):
"""Collects x, y, z-coordinates from NEURON"""
# None-type some attributes if they do not exist:
if not hasattr(self, 'x'):
self.x = None
self.y = None
self.z = None
self.area = None
self.d = None
self.length = None
_collect_geometry_neuron(self)
self.somaidx = self.get_idx(section='soma')
if self.somaidx.size > 1:
xmids = self.x[self.somaidx].mean(axis=-1)
ymids = self.y[self.somaidx].mean(axis=-1)
zmids = self.z[self.somaidx].mean(axis=-1)
self.somapos = np.zeros(3)
self.somapos[0] = xmids.mean()
self.somapos[1] = ymids.mean()
self.somapos[2] = zmids.mean()
elif self.somaidx.size == 1:
self.somapos = np.zeros(3)
self.somapos[0] = self.x[self.somaidx].mean()
self.somapos[1] = self.y[self.somaidx].mean()
self.somapos[2] = self.z[self.somaidx].mean()
elif self.somaidx.size == 0:
if self.verbose:
warn("There is no soma!" +
"Using first segment as root point")
self.somaidx = np.array([0])
self.somapos = np.zeros(3)
self.somapos[0] = self.x[self.somaidx].mean()
self.somapos[1] = self.y[self.somaidx].mean()
self.somapos[2] = self.z[self.somaidx].mean()
else:
raise Exception('Huh?!')
def get_idx(self, section='allsec', z_min=-np.inf, z_max=np.inf):
"""
Returns compartment idx of segments from sections with names that match
the pattern defined in input section on interval [z_min, z_max].
Parameters
----------
section: str
Any entry in cell.allsecnames or just 'allsec'.
z_min: float
Depth filter. Specify minimum z-position
z_max: float
Depth filter. Specify maximum z-position
Returns
-------
ndarray, dtype=int
segment indices
Examples
--------
>>> idx = cell.get_idx(section='allsec')
>>> print(idx)
>>> idx = cell.get_idx(section=['soma', 'dend', 'apic'])
>>> print(idx)
"""
if section == 'allsec':
seclist = neuron.h.allsec()
else:
seclist = neuron.h.SectionList()
if isinstance(section, str):
for sec in self.allseclist:
if sec.name().find(section) >= 0:
seclist.append(sec=sec)
elif isinstance(section, list):
for secname in section:
for sec in self.allseclist:
if sec.name().find(secname) >= 0:
seclist.append(sec=sec)
else:
if self.verbose:
print('%s did not match any section name' % str(section))
idx = self.__get_idx(seclist)
sel_z_idx = ((self.z[idx].mean(axis=-1) > z_min) &
(self.z[idx].mean(axis=-1) < z_max))
return np.arange(self.totnsegs)[idx][sel_z_idx]
def get_closest_idx(self, x=0., y=0., z=0., section='allsec'):
"""Get the index number of a segment in specified section which
midpoint is closest to the coordinates defined by the user
Parameters
----------
x: float
x-coordinate
y: float
y-coordinate
z: float
z-coordinate
section: str
String matching a section-name. Defaults to 'allsec'.
Returns
-------
int
segment index
"""
idx = self.get_idx(section)
dist = ((self.x[idx].mean(axis=-1) - x)**2 +
(self.y[idx].mean(axis=-1) - y)**2 +
(self.z[idx].mean(axis=-1) - z)**2)
return idx[np.argmin(dist)]
def get_rand_idx_area_norm(self, section='allsec', nidx=1,
z_min=-1E6, z_max=1E6):
"""Return nidx segment indices in section with random probability
normalized to the membrane area of segment on
interval [z_min, z_max]
Parameters
----------
section: str
String matching a section-name
nidx: int
Number of random indices
z_min: float
Depth filter
z_max: float
Depth filter
Returns
-------
ndarray, dtype=int
segment indices
"""
poss_idx = self.get_idx(section=section, z_min=z_min, z_max=z_max)
if nidx < 1:
print('nidx < 1, returning empty array')
return np.array([])
elif poss_idx.size == 0:
print('No possible segment idx match query - returning '
'empty array')
return np.array([])
else:
area = self.area[poss_idx]
area /= area.sum()
return alias_method(poss_idx, area, nidx)
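# Example (editorial sketch, not part of the original source): draw 100
# area-weighted segment indices on apical sections between z=200 and
# z=600 um, e.g. for distributing synapses; 'cell' is an assumed LFPy.Cell
# instance whose morphology contains sections named 'apic'.
# >>> idx = cell.get_rand_idx_area_norm(section='apic', nidx=100,
# >>>                                   z_min=200., z_max=600.)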
def get_rand_idx_area_and_distribution_norm(self, section='allsec', nidx=1,
z_min=-1E6, z_max=1E6,
fun=scipy.stats.norm,
funargs=dict(loc=0, scale=100),
funweights=None):
"""
Return nidx segment indices in section with random probability
normalized to the membrane area of each segment multiplied by
the value of the probability density function of "fun", a function
in the scipy.stats module with corresponding function arguments
in "funargs" on the interval [z_min, z_max]
Parameters
----------
section: str
string matching a section name
nidx: int
number of random indices
z_min: float
lower depth interval
z_max: float
upper depth interval
fun: function or str, or iterable of function or str
if function a scipy.stats method, if str, must be method in
scipy.stats module with the same name (like 'norm'),
if iterable (list, tuple, numpy.array) of function or str some
probability distribution in scipy.stats module
funargs: dict or iterable
iterable (list, tuple, numpy.array) of dict, arguments to fun.pdf
method (e.g., w. keys 'loc' and 'scale')
funweights: None or iterable
iterable (list, tuple, numpy.array) of floats, scaling of each
individual fun (i.e., introduces layer specificity)
Examples
--------
>>> import LFPy
>>> import numpy as np
>>> import scipy.stats as ss
>>> import matplotlib.pyplot as plt
>>> from os.path import join
>>> cell = LFPy.Cell(morphology=join('cells', 'cells', 'j4a.hoc'))
>>> cell.set_rotation(x=4.99, y=-4.33, z=3.14)
>>> idx = cell.get_rand_idx_area_and_distribution_norm(
nidx=10000, fun=ss.norm, funargs=dict(loc=0, scale=200))
>>> bins = np.arange(-30, 120)*10
>>> plt.hist(cell.zmid[idx], bins=bins, alpha=0.5)
>>> plt.show()
"""
poss_idx = self.get_idx(section=section, z_min=z_min, z_max=z_max)
if nidx < 1:
print('nidx < 1, returning empty array')
return np.array([])
elif poss_idx.size == 0:
print('No possible segment idx match query - returning '
'empty array')
return np.array([])
else:
p = self.area[poss_idx]
# scale with density function
if type(fun) in [list, tuple, np.ndarray]:
assert type(funargs) in [list, tuple, np.ndarray]
assert type(funweights) in [list, tuple, np.ndarray]
assert len(fun) == len(funargs) and len(fun) == len(funweights)
mod = np.zeros(poss_idx.shape)
for f, args, scl in zip(fun, funargs, funweights):
if isinstance(f, str) and f in dir(scipy.stats):
f = getattr(scipy.stats, f)
df = f(**args)
mod += df.pdf(x=self.z[poss_idx].mean(axis=-1)) * scl
p *= mod
else:
if isinstance(fun, str) and fun in dir(scipy.stats):
fun = getattr(scipy.stats, fun)
df = fun(**funargs)
p *= df.pdf(x=self.z[poss_idx].mean(axis=-1))
# normalize
p /= p.sum()
return alias_method(poss_idx, p, nidx)
def enable_extracellular_stimulation(
self, electrode, t_ext=None, n=1, model='inf'):
r"""
Enable extracellular stimulation with 'extracellular' mechanism.
Extracellular potentials are computed from the electrode currents
using the pointsource approximation.
If 'model' is 'inf' (default), potentials are computed as
(:math:`r_i` is the position of a compartment i,
:math:`r_n` is the position of an electrode n, :math:`\sigma` is the
conductivity of the medium):
.. math::
V_e(r_i) = \sum_n \frac{I_n}{4 \pi \sigma |r_i - r_n|}
If model is 'semi', the method of images is used:
.. math::
V_e(r_i) = \sum_n \frac{I_n}{2 \pi \sigma |r_i - r_n|}
Parameters
----------
electrode: RecExtElectrode
Electrode object with stimulating currents
t_ext: np.ndarray or list
Time in ms corresponding to step changes in the provided currents.
If None, currents are assumed to have
the same time steps as NEURON simulation.
n: int
Points per electrode to compute spatial averaging
model: str
'inf' or 'semi'. If 'inf' the medium is assumed to be infinite and
homogeneous. If 'semi', the method of
images is used.
Returns
-------
v_ext: np.ndarray
Computed extracellular potentials at cell mid points
"""
# access electrode object and append mapping
if electrode is not None:
# put electrode argument in list if needed
if isinstance(electrode, list):
electrodes = electrode
else:
electrodes = [electrode]
else:
print("'electrode' is None")
return
assert model in ['inf', 'semi'], "'model' can be 'inf' or 'semi'"
# extracellular stimulation
if np.any([np.any(el.probe.currents != 0) for el in electrodes]):
cell_mid_points = np.array([self.x.mean(axis=-1),
self.y.mean(axis=-1),
self.z.mean(axis=-1)]).T
n_tsteps = int(self.tstop / self.dt + 1)
t_cell = np.arange(n_tsteps) * self.dt
if t_ext is None:
print("Assuming t_ext is the same as simulation time")
t_ext = t_cell
for electrode in electrodes:
assert electrode.probe.currents.shape[1] == len(t_cell), \
("Discrepancy between t_ext and cell simulation time" +
"steps. Provide the 't_ext' argument")
else:
assert len(t_ext) < len(t_cell), \
"Stimulation time steps greater than cell simulation steps"
v_ext = np.zeros((self.totnsegs, len(t_ext)))
for electrode in electrodes:
if np.any(np.any(electrode.probe.currents != 0)):
electrode.probe.points_per_electrode = int(n)
electrode.probe.model = model
ve = electrode.probe.compute_field(cell_mid_points)
if len(electrode.probe.currents.shape) == 1:
ve = ve[:, np.newaxis]
v_ext += ve
self.__set_extracellular()
self.insert_v_ext(v_ext, np.array(t_ext))
else:
v_ext = None
return v_ext
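# Example (editorial sketch, not part of the original source): the 'inf'
# point-source mapping documented above can be written out with plain numpy;
# the actual computation is delegated to electrode.probe.compute_field, so
# this only illustrates V_e(r_i) = sum_n I_n / (4 pi sigma |r_i - r_n|).
# >>> import numpy as np
# >>> def v_ext_point_source(seg_pos, el_pos, el_currents, sigma=0.3):
# >>>     # seg_pos: (n_seg, 3), el_pos: (n_el, 3), el_currents: (n_el, n_t)
# >>>     r = np.linalg.norm(seg_pos[:, None, :] - el_pos[None, :, :], axis=-1)
# >>>     return (el_currents[None, :, :] /
# >>>             (4 * np.pi * sigma * r[:, :, None])).sum(axis=1)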
def simulate(self, probes=None,
rec_imem=False, rec_vmem=False,
rec_ipas=False, rec_icap=False,
rec_variables=[],
variable_dt=False, atol=0.001, rtol=0.,
to_memory=True,
to_file=False, file_name=None,
**kwargs):
"""
This is the main function running the simulation of the NEURON model.
Start NEURON simulation and record variables specified by arguments.
Parameters
----------
probes: list of :obj:, optional
None or list of LFPykit.RecExtElectrode like object instances that
each have a public method `get_transformation_matrix` returning
a matrix that linearly maps each compartments' transmembrane
current to corresponding measurement as
.. math:: \\mathbf{P} = \\mathbf{M} \\mathbf{I}
rec_imem: bool
If true, segment membrane currents will be recorded
If no electrode argument is given, it is necessary to
set rec_imem=True in order to make predictions later on.
Units of (nA).
rec_vmem: bool
Record segment membrane voltages (mV)
rec_ipas: bool
Record passive segment membrane currents (nA)
rec_icap: bool
Record capacitive segment membrane currents (nA)
rec_variables: list
List of segment state variables to record, e.g. arg=['cai', ]
variable_dt: bool
Use NEURON's variable timestep method
atol: float
Absolute local error tolerance for NEURON variable timestep method
rtol: float
Relative local error tolerance for NEURON variable timestep method
to_memory: bool
Only valid with probes=[:obj:], store measurements as `:obj:.data`
to_file: bool
Only valid with probes, save simulated data in hdf5 file format
file_name: str
Name of hdf5 file, '.h5' is appended if it doesn't exist
"""
for key in kwargs.keys():
if key in ['electrode', 'rec_current_dipole_moment',
'dotprodcoeffs', 'rec_isyn', 'rec_vmemsyn',
'rec_istim', 'rec_vmemstim']:
warn('Cell.simulate parameter {} is deprecated.'.format(key))
# set up integrator, use the CVode().fast_imem method by default
# as it doesn't hurt sim speeds much if at all.
cvode = neuron.h.CVode()
try:
cvode.use_fast_imem(1)
except AttributeError:
raise Exception('neuron.h.CVode().use_fast_imem() method not '
'found. Update NEURON to v.7.4 or newer')
if not variable_dt:
dt = self.dt
else:
dt = None
self._set_soma_volt_recorder(dt)
if rec_imem:
self._set_imem_recorders(dt)
if rec_vmem:
self._set_voltage_recorders(dt)
if rec_ipas:
self._set_ipas_recorders(dt)
if rec_icap:
self._set_icap_recorders(dt)
if len(rec_variables) > 0:
self._set_variable_recorders(rec_variables, dt)
if hasattr(self, '_stimitorecord'):
if len(self._stimitorecord) > 0:
self.__set_ipointprocess_recorders(dt)
if hasattr(self, '_stimvtorecord'):
if len(self._stimvtorecord) > 0:
self.__set_vpointprocess_recorders(dt)
if hasattr(self, '_synitorecord'):
if len(self._synitorecord) > 0:
self.__set_isyn_recorders(dt)
if hasattr(self, '_synvtorecord'):
if len(self._synvtorecord) > 0:
self.__set_vsyn_recorders(dt)
# set time recorder from NEURON
self.__set_time_recorders(dt)
# run fadvance until t >= tstop, and calculate LFP if asked for
if probes is None or len(probes) == 0:
if not rec_imem and self.verbose:
print("rec_imem = %s, membrane currents will not be recorded!"
% str(rec_imem))
self.__run_simulation(cvode, variable_dt, atol, rtol)
else:
# simulate with probes saving to memory and/or file:
_run_simulation_with_probes(self, cvode, probes,
variable_dt, atol, rtol,
to_memory, to_file, file_name)
# somatic trace
if self.nsomasec >= 1:
self.somav = np.array(self.somav)
self.__collect_tvec()
if rec_imem:
self._calc_imem()
if rec_ipas:
self._calc_ipas()
if rec_icap:
self._calc_icap()
if rec_vmem:
self._collect_vmem()
if hasattr(self, '_hoc_stimireclist'):
self._collect_istim()
if hasattr(self, '_hoc_stimvreclist'):
self.__collect_vstim()
if hasattr(self, '_hoc_synireclist'):
self._collect_isyn()
if hasattr(self, '_hoc_synvreclist'):
self._collect_vsyn()
if len(rec_variables) > 0:
self._collect_rec_variables(rec_variables)
if hasattr(self, '_hoc_netstimlist'):
self._hoc_netstimlist = None
del self._hoc_netstimlist
self.__purge_hoc_pointprocesses()
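# Example (editorial sketch, not part of the original source): running a
# simulation with an extracellular probe; class and keyword names follow the
# public LFPy API, but the electrode geometry and conductivity are
# placeholder values.
# >>> import numpy as np
# >>> electrode = LFPy.RecExtElectrode(cell, sigma=0.3,
# >>>                                  x=np.zeros(5), y=np.zeros(5),
# >>>                                  z=np.linspace(0., 400., 5))
# >>> cell.simulate(probes=[electrode], rec_vmem=True)
# >>> lfp = electrode.data   # shape (n_contacts, n_timesteps)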
def __purge_hoc_pointprocesses(self):
"""
Empty lists which may store point process objects in `hoc` name space.
This is needed to avoid "Segmentation Fault 11"
"""
if hasattr(self, '_hoc_synlist'):
self._hoc_synlist.remove_all()
if hasattr(self, '_hoc_stimlist'):
self._hoc_stimlist.remove_all()
def __run_simulation(self, cvode, variable_dt=False, atol=0.001, rtol=0.):
"""
Running the actual simulation in NEURON, simulations in NEURON
is now interruptable.
"""
neuron.h.dt = self.dt
# variable dt method
if variable_dt:
cvode.active(1)
cvode.atol(atol)
cvode.rtol(rtol)
else:
cvode.active(0)
# re-initialize state
neuron.h.finitialize(self.v_init * units.mV)
# initialize current- and record
if cvode.active():
cvode.re_init()
else:
neuron.h.fcurrent()
neuron.h.frecord_init()
# Starting simulation at t != 0
neuron.h.t = self.tstart
self._load_spikes()
# advance simulation until tstop
neuron.h.continuerun(self.tstop * units.ms)
# for consistency with 'old' behaviour where tstop is included in tvec:
if neuron.h.t < self.tstop:
neuron.h.fadvance()
def __collect_tvec(self):
"""
Set the tvec to be a monotonically increasing numpy array after sim.
"""
self.tvec = np.array(self._neuron_tvec.to_python())
self._neuron_tvec = None
del self._neuron_tvec
def _calc_imem(self):
"""
Fetch the vectors from the memireclist and calculate self.imem
containing all the membrane currents.
"""
self.imem = np.array(self._hoc_memireclist)
self._hoc_memireclist = None
del self._hoc_memireclist
def _calc_ipas(self):
"""
Get the passive currents
"""
self.ipas = np.array(self._hoc_memipasreclist)
for i in range(self.ipas.shape[0]):
self.ipas[i, ] *= self.area[i] * 1E-2
self._hoc_memipasreclist = None
del self._hoc_memipasreclist
def _calc_icap(self):
"""
Get the capacitive currents
"""
self.icap = np.array(self._hoc_memicapreclist)
for i in range(self.icap.shape[0]):
self.icap[i, ] *= self.area[i] * 1E-2
self._hoc_memicapreclist = None
del self._hoc_memicapreclist
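# Note (editorial, hedged): the 1E-2 factor in _calc_ipas/_calc_icap converts
# NEURON's distributed current densities (mA/cm2) times segment area (um2)
# into nA: 1 um2 = 1e-8 cm2, and 1e-8 mA = 1e-2 nA. Quick check: a density of
# 1 mA/cm2 over 100 um2 (= 1e-6 cm2) is 1e-6 mA = 1 nA, matching
# 1 * 100 * 1E-2 = 1.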
def _collect_vmem(self):
"""
Get the membrane currents
"""
self.vmem = np.array(self._hoc_memvreclist)
self._hoc_memvreclist = None
del self._hoc_memvreclist
def _collect_isyn(self):
"""
Get the synaptic currents
"""
for syn in self.synapses:
if syn.record_current:
syn.collect_current(self)
self._hoc_synireclist = None
del self._hoc_synireclist
def _collect_vsyn(self):
"""
Collect the membrane voltage of segments with synapses
"""
for syn in self.synapses:
if syn.record_potential:
syn.collect_potential(self)
self._hoc_synvreclist = None
del self._hoc_synvreclist
def _collect_istim(self):
"""
Get the pointprocess currents
"""
for pp in self.pointprocesses:
if pp.record_current:
pp.collect_current(self)
self._hoc_stimireclist = None
del self._hoc_stimireclist
def __collect_vstim(self):
"""
Collect the membrane voltage of segments with stimulus
"""
for pp in self.pointprocesses:
if pp.record_potential:
pp.collect_potential(self)
self._hoc_stimvreclist = None
del self._hoc_stimvreclist
def _collect_rec_variables(self, rec_variables):
"""
Create dict of np.arrays from recorded variables, each dictionary
element named as the corresponding recorded variable name, i.e 'cai'
"""
self.rec_variables = {}
i = 0
for values in self._hoc_recvariablesreclist:
self.rec_variables.update({rec_variables[i]: np.array(values)})
if self.verbose:
print('collected recorded variable %s' % rec_variables[i])
i += 1
del self._hoc_recvariablesreclist
def _load_spikes(self):
"""
Initialize spiketimes from netcon if they exist
"""
if hasattr(self, '_hoc_synlist'):
if len(self._hoc_synlist) == len(self._sptimeslist):
for i in range(int(self._hoc_synlist.count())):
for spt in self._sptimeslist[i]:
self._hoc_netconlist.o(i).event(spt)
def _set_soma_volt_recorder(self, dt):
"""Record somatic membrane potential"""
if self.nsomasec == 0:
if self.verbose:
warn('Cell instance appears to have no somatic section. '
'No somav attribute will be set.')
elif self.nsomasec == 1:
if dt is not None:
self.somav = neuron.h.Vector(int(self.tstop / self.dt + 1))
for sec in self.somalist:
self.somav.record(sec(0.5)._ref_v, self.dt)
else:
self.somav = neuron.h.Vector()
for sec in self.somalist:
self.somav.record(sec(0.5)._ref_v)
elif self.nsomasec > 1:
if dt is not None:
self.somav = neuron.h.Vector(int(self.tstop / self.dt + 1))
nseg = self.get_idx('soma').size
i, j = divmod(nseg, 2)
k = 1
for sec in self.somalist:
for seg in sec:
if nseg == 2 and k == 1:
# if 2 segments, record from the first one:
self.somav.record(seg._ref_v, self.dt)
else:
if k == i * 2:
# record from one of the middle segments:
self.somav.record(seg._ref_v, self.dt)
k += 1
else:
self.somav = neuron.h.Vector()
nseg = self.get_idx('soma').size
i, j = divmod(nseg, 2)
k = 1
for sec in self.somalist:
for seg in sec:
if nseg == 2 and k == 1:
# if 2 segments, record from the first one:
self.somav.record(seg._ref_v)
else:
if k == i * 2:
# record from one of the middle segments:
self.somav.record(seg._ref_v)
k += 1
def _set_imem_recorders(self, dt):
"""
Record membrane currents for all segments
"""
self._hoc_memireclist = neuron.h.List()
for sec in self.allseclist:
for seg in sec:
if dt is not None:
memirec = neuron.h.Vector(int(self.tstop / self.dt + 1))
memirec.record(seg._ref_i_membrane_, self.dt)
else:
memirec = neuron.h.Vector()
memirec.record(seg._ref_i_membrane_)
self._hoc_memireclist.append(memirec)
def __set_time_recorders(self, dt):
"""
Record time of simulation
"""
if dt is not None:
self._neuron_tvec = neuron.h.Vector(int(self.tstop / self.dt + 1))
self._neuron_tvec.record(neuron.h._ref_t, self.dt)
else:
self._neuron_tvec = neuron.h.Vector()
self._neuron_tvec.record(neuron.h._ref_t)
def _set_ipas_recorders(self, dt):
"""
Record passive membrane currents for all segments
"""
self._hoc_memipasreclist = neuron.h.List()
for sec in self.allseclist:
for seg in sec:
if dt is not None:
memipasrec = neuron.h.Vector(int(self.tstop / self.dt + 1))
memipasrec.record(seg._ref_i_pas, self.dt)
else:
memipasrec = neuron.h.Vector()
memipasrec.record(seg._ref_i_pas)
self._hoc_memipasreclist.append(memipasrec)
def _set_icap_recorders(self, dt):
"""
Record capacitive membrane currents for all segments
"""
self._hoc_memicapreclist = neuron.h.List()
for sec in self.allseclist:
for seg in sec:
if dt is not None:
memicaprec = neuron.h.Vector(int(self.tstop / self.dt + 1))
memicaprec.record(seg._ref_i_cap, self.dt)
else:
memicaprec = neuron.h.Vector()
memicaprec.record(seg._ref_i_cap)
self._hoc_memicapreclist.append(memicaprec)
def __set_ipointprocess_recorders(self, dt):
"""
Record point process current
"""
self._hoc_stimireclist = neuron.h.List()
for idx, pp in enumerate(self.pointprocesses):
if idx in self._stimitorecord:
stim = self._hoc_stimlist[idx]
if dt is not None:
stimirec = neuron.h.Vector(int(self.tstop / self.dt + 1))
stimirec.record(stim._ref_i, self.dt)
else:
stimirec = neuron.h.Vector()
stimirec.record(stim._ref_i)
else:
stimirec = neuron.h.Vector(0)
self._hoc_stimireclist.append(stimirec)
def __set_vpointprocess_recorders(self, dt):
"""
Record point process membrane
"""
self._hoc_stimvreclist = neuron.h.List()
for idx, pp in enumerate(self.pointprocesses):
if idx in self._stimvtorecord:
stim = self._hoc_stimlist[idx]
seg = stim.get_segment()
if dt is not None:
stimvrec = neuron.h.Vector(int(self.tstop / self.dt + 1))
stimvrec.record(seg._ref_v, self.dt)
else:
stimvrec = neuron.h.Vector()
stimvrec.record(seg._ref_v)
else:
stimvrec = neuron.h.Vector(0)
self._hoc_stimvreclist.append(stimvrec)
def __set_isyn_recorders(self, dt):
"""
Record point process current
"""
self._hoc_synireclist = neuron.h.List()
for idx, pp in enumerate(self.synapses):
if idx in self._synitorecord:
syn = self._hoc_synlist[idx]
if dt is not None:
synirec = neuron.h.Vector(int(self.tstop / self.dt + 1))
synirec.record(syn._ref_i, self.dt)
else:
synirec = neuron.h.Vector()
synirec.record(syn._ref_i)
else:
synirec = neuron.h.Vector(0)
self._hoc_synireclist.append(synirec)
def __set_vsyn_recorders(self, dt):
"""
Record point process membrane
"""
self._hoc_synvreclist = neuron.h.List()
for idx, pp in enumerate(self.synapses):
if idx in self._synvtorecord:
syn = self._hoc_synlist[idx]
seg = syn.get_segment()
if dt is not None:
synvrec = neuron.h.Vector(int(self.tstop / self.dt + 1))
synvrec.record(seg._ref_v, self.dt)
else:
synvrec = neuron.h.Vector()
synvrec.record(seg._ref_v)
else:
synvrec = neuron.h.Vector(0)
self._hoc_synvreclist.append(synvrec)
def _set_voltage_recorders(self, dt):
"""
Record membrane potentials for all segments
"""
self._hoc_memvreclist = neuron.h.List()
for sec in self.allseclist:
for seg in sec:
if dt is not None:
memvrec = neuron.h.Vector(int(self.tstop / self.dt + 1))
memvrec.record(seg._ref_v, self.dt)
else:
memvrec = neuron.h.Vector()
memvrec.record(seg._ref_v)
self._hoc_memvreclist.append(memvrec)
def __set_current_dipole_moment_array(self, dt):
"""
Creates container for current dipole moment, an empty
n_timesteps x 3 `numpy.ndarray` that will be filled with values during
the course of each simulation
"""
if dt is not None:
self.current_dipole_moment = np.zeros(
(int(self.tstop / self.dt + 1), 3))
else:
self.current_dipole_moment = []
def _set_variable_recorders(self, rec_variables, dt):
"""
Create a recorder for each variable name in list
rec_variables
Variables is stored in nested list self._hoc_recvariablesreclist
"""
self._hoc_recvariablesreclist = neuron.h.List()
for variable in rec_variables:
variablereclist = neuron.h.List()
self._hoc_recvariablesreclist.append(variablereclist)
for sec in self.allseclist:
for seg in sec:
if dt is not None:
recvector = neuron.h.Vector(
int(self.tstop / self.dt + 1))
else:
recvector = neuron.h.Vector()
try:
if dt is not None:
recvector.record(
getattr(
seg, '_ref_%s' %
variable), self.dt)
else:
recvector.record(
getattr(
seg, '_ref_%s' %
variable))
except(NameError, AttributeError):
print('non-existing variable %s, section %s.%f' %
(variable, sec.name(), seg.x))
variablereclist.append(recvector)
def set_pos(self, x=0., y=0., z=0.):
"""Set the cell position.
Move the cell geometry so that midpoint of soma section is
in (x, y, z). If no soma pos, use the first segment
Parameters
----------
x: float
x position defaults to 0.0
y: float
y position defaults to 0.0
z: float
z position defaults to 0.0
"""
diffx = x - self.somapos[0]
diffy = y - self.somapos[1]
diffz = z - self.somapos[2]
# also update the pt3d_pos:
if self.pt3d and hasattr(self, 'x3d'):
self._set_pt3d_pos(diffx, diffy, diffz)
else:
self.somapos[0] = x
self.somapos[1] = y
self.somapos[2] = z
self.x[:, 0] += diffx
self.y[:, 0] += diffy
self.z[:, 0] += diffz
self.x[:, -1] += diffx
self.y[:, -1] += diffy
self.z[:, -1] += diffz
self.__update_synapse_positions()
def cellpickler(self, filename, pickler=pickle.dump):
"""Save data in cell to filename, using cPickle. It will however
destroy any ``neuron.h`` objects upon saving, as c-objects cannot be
pickled
Parameters
----------
filename: str
Where to save cell
Examples
--------
>>> # To save a cell, issue:
>>> cell.cellpickler('cell.cpickle')
>>> # To load this cell again in another session:
>>> import pickle
>>> with open('cell.cpickle', 'rb') as f:
>>> cell = pickle.load(f)
Returns
-------
None or pickle
"""
self.strip_hoc_objects()
if pickler == pickle.dump:
filen = open(filename, 'wb')
pickler(self, filen, protocol=2)
filen.close()
return None
elif pickler == pickle.dumps:
return pickle.dumps(self)
def __update_synapse_positions(self):
"""
Update synapse positions after rotation of morphology
"""
for i in range(len(self.synapses)):
self.synapses[i].update_pos(self)
def set_rotation(self, x=None, y=None, z=None, rotation_order='xyz'):
"""
Rotate geometry of cell object around the x-, y-, z-axis in the order
described by rotation_order parameter.
Parameters
----------
x: float or None
rotation angle in radians. Default: None
y: float or None
rotation angle in radians. Default: None
z: float or None
rotation angle in radians. Default: None
rotation_order: str
string with 3 elements containing x, y and z
e.g. 'xyz', 'zyx'. Default: 'xyz'
Examples
--------
>>> cell = LFPy.Cell(**kwargs)
>>> rotation = {'x': 1.233, 'y': 0.236, 'z': np.pi}
>>> cell.set_rotation(**rotation)
"""
if not isinstance(rotation_order, str):
raise AttributeError('rotation_order must be a string')
elif not np.all([u in rotation_order for u in 'xyz']):
raise AttributeError("'x', 'y', and 'z' must be in rotation_order")
elif len(rotation_order) != 3:
raise AttributeError(
"rotation_order should have 3 elements (e.g. 'zyx')")
for ax in rotation_order:
if ax == 'x' and x is not None:
theta = -x
rotation_x = np.array([[1, 0, 0],
[0, np.cos(theta), -np.sin(theta)],
[0, np.sin(theta), np.cos(theta)]])
rel_start, rel_end = self._rel_positions()
rel_start = np.dot(rel_start, rotation_x)
rel_end = np.dot(rel_end, rotation_x)
self._real_positions(rel_start, rel_end)
if self.verbose:
print(
'Rotated geometry %g radians around x-axis' %
(-theta))
else:
if self.verbose:
print('Geometry not rotated around x-axis')
if ax == 'y' and y is not None:
phi = -y
rotation_y = np.array([[np.cos(phi), 0, np.sin(phi)],
[0, 1, 0],
[-np.sin(phi), 0, np.cos(phi)]])
rel_start, rel_end = self._rel_positions()
rel_start = np.dot(rel_start, rotation_y)
rel_end = np.dot(rel_end, rotation_y)
self._real_positions(rel_start, rel_end)
if self.verbose:
print('Rotated geometry %g radians around y-axis' % (-phi))
else:
if self.verbose:
print('Geometry not rotated around y-axis')
if ax == 'z' and z is not None:
gamma = -z
rotation_z = np.array([[np.cos(gamma), -np.sin(gamma), 0],
[np.sin(gamma), np.cos(gamma), 0],
[0, 0, 1]])
rel_start, rel_end = self._rel_positions()
rel_start = np.dot(rel_start, rotation_z)
rel_end = np.dot(rel_end, rotation_z)
self._real_positions(rel_start, rel_end)
if self.verbose:
print(
'Rotated geometry %g radians around z-axis' %
(-gamma))
else:
if self.verbose:
print('Geometry not rotated around z-axis')
# rotate the pt3d geometry accordingly
if self.pt3d and hasattr(self, 'x3d'):
self._set_pt3d_rotation(x, y, z, rotation_order)
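# Example (editorial sketch, not part of the original source): the per-axis
# rotations above are right-multiplications rel_pos @ R(-angle); composing
# them is order dependent, which is why rotation_order matters:
# >>> import numpy as np
# >>> def R_x(a):
# >>>     return np.array([[1, 0, 0],
# >>>                      [0, np.cos(a), -np.sin(a)],
# >>>                      [0, np.sin(a), np.cos(a)]])
# >>> def R_z(g):
# >>>     return np.array([[np.cos(g), -np.sin(g), 0],
# >>>                      [np.sin(g), np.cos(g), 0],
# >>>                      [0, 0, 1]])
# >>> p = np.array([[1., 0., 0.]])
# >>> np.allclose(p @ R_x(0.5) @ R_z(0.3), p @ R_z(0.3) @ R_x(0.5))  # False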
def chiral_morphology(self, axis='x'):
"""
Mirror the morphology around the given axis (default x-axis),
useful to introduce more heterogeneity in morphology shapes
Parameters
----------
axis: str
'x' or 'y' or 'z'
"""
# morphology relative to soma-position
rel_start, rel_end = self._rel_positions()
if axis == 'x':
rel_start[:, 0] = -rel_start[:, 0]
rel_end[:, 0] = -rel_end[:, 0]
elif axis == 'y':
rel_start[:, 1] = -rel_start[:, 1]
rel_end[:, 1] = -rel_end[:, 1]
elif axis == 'z':
rel_start[:, 2] = -rel_start[:, 2]
rel_end[:, 2] = -rel_end[:, 2]
else:
raise Exception("axis must be either 'x', 'y' or 'z'")
if self.verbose:
print('morphology mirrored across %s-axis' % axis)
# set the proper 3D positions
self._real_positions(rel_start, rel_end)
def _rel_positions(self):
"""
Morphology relative to soma position
"""
rel_start = np.array([self.x[:, 0] - self.somapos[0],
self.y[:, 0] - self.somapos[1],
self.z[:, 0] - self.somapos[2]]).T
rel_end = np.array([self.x[:, -1] - self.somapos[0],
self.y[:, -1] - self.somapos[1],
self.z[:, -1] - self.somapos[2]]).T
return rel_start, rel_end
def _real_positions(self, rel_start, rel_end):
"""
Morphology coordinates relative to the origin
"""
self.x[:, 0] = rel_start[:, 0] + self.somapos[0]
self.y[:, 0] = rel_start[:, 1] + self.somapos[1]
self.z[:, 0] = rel_start[:, 2] + self.somapos[2]
self.x[:, -1] = rel_end[:, 0] + self.somapos[0]
self.y[:, -1] = rel_end[:, 1] + self.somapos[1]
self.z[:, -1] = rel_end[:, 2] + self.somapos[2]
self.__update_synapse_positions()
def get_rand_prob_area_norm(self, section='allsec',
z_min=-10000, z_max=10000):
"""
Return the probability (0-1) for synaptic coupling on segments
in section sum(prob)=1 over all segments in section.
Probability normalized by area.
Parameters
----------
section: str
string matching a section-name. Defaults to 'allsec'
z_min: float
depth filter
z_max: float
depth filter
Returns
-------
ndarray, dtype=float
"""
idx = self.get_idx(section=section, z_min=z_min, z_max=z_max)
prob = self.area[idx] / sum(self.area[idx])
return prob
def get_rand_prob_area_norm_from_idx(self, idx=np.array([0])):
"""
Return the normalized probability (0-1) for synaptic coupling on
segments in idx-array.
Normalised probability determined by area of segments.
Parameters
----------
idx: ndarray, dtype=int.
array of segment indices
Returns
-------
ndarray, dtype=float
"""
prob = self.area[idx] / sum(self.area[idx])
return prob
def get_intersegment_vector(self, idx0=0, idx1=0):
"""Return the distance between midpoints of two segments with index
idx0 and idx1. The argument returned is a list [x, y, z], where
x = self.x[idx1].mean(axis=-1) - self.x[idx0].mean(axis=-1) etc.
Parameters
----------
idx0: int
idx1: int
Returns
-------
list of floats
distance between midpoints along x,y,z axis in µm
"""
vector = []
try:
if idx1 < 0 or idx0 < 0:
raise Exception('idx0 < 0 or idx1 < 0')
vector.append(
self.x[idx1].mean(axis=-1) -
self.x[idx0].mean(axis=-1))
vector.append(
self.y[idx1].mean(axis=-1) -
self.y[idx0].mean(axis=-1))
vector.append(
self.z[idx1].mean(axis=-1) -
self.z[idx0].mean(axis=-1))
return vector
except BaseException:
ERRMSG = 'idx0 and idx1 must be ints on [0, %i]' % self.totnsegs
raise ValueError(ERRMSG)
def get_intersegment_distance(self, idx0=0, idx1=0):
"""
Return the Euclidean distance between midpoints of two segments.
Parameters
----------
idx0: int
idx1: int
Returns
-------
float
distance (µm).
"""
try:
vector = np.array(self.get_intersegment_vector(idx0, idx1))
return np.sqrt((vector**2).sum())
except BaseException:
ERRMSG = 'idx0 and idx1 must be ints on [0, %i]' % self.totnsegs
raise ValueError(ERRMSG)
def get_idx_children(self, parent="soma[0]"):
"""Get the idx of parent's children sections, i.e. compartments ids
of sections connected to parent-argument
Parameters
----------
parent: str
name-pattern matching a sectionname. Defaults to "soma[0]"
Returns
-------
ndarray, dtype=int
"""
idxvec = np.zeros(self.totnsegs)
secnamelist = []
childseclist = []
# filling list of sectionnames for all sections, one entry per segment
for sec in self.allseclist:
for seg in sec:
secnamelist.append(sec.name())
if parent in secnamelist:
# filling list of children section-names
for sec in self.allseclist:
if sec.name() == parent:
sref = neuron.h.SectionRef(sec=sec)
break
assert sec.name() == parent == sref.sec.name()
for sec in sref.child:
childseclist.append(sec.name())
# idxvec=1 where both coincide
i = 0
for sec in secnamelist:
for childsec in childseclist:
if sec == childsec:
idxvec[i] += 1
i += 1
[idx] = np.where(idxvec)
return idx
else:
return np.array([])
def get_idx_parent_children(self, parent="soma[0]"):
"""
Get all idx of segments of parent and children sections, i.e. segment
idx of sections connected to parent-argument, and also of the parent
segments
Parameters
----------
parent: str
name-pattern matching a sectionname. Defaults to "soma[0]"
Returns
-------
ndarray, dtype=int
"""
seclist = [parent]
for sec in self.allseclist:
if sec.name() == parent:
sref = neuron.h.SectionRef(sec=sec)
break
assert sref.sec.name() == parent
for sec in sref.child:
seclist.append(sec.name())
return self.get_idx(section=seclist)
def get_idx_name(self, idx=np.array([0], dtype=int)):
'''
Return NEURON convention name of segments with index idx.
The returned argument is an array of tuples with corresponding
segment idx, section name, and position along the section, like;
[(0, 'neuron.h.soma[0]', 0.5),]
Parameters
----------
idx: ndarray, dtype int
segment indices, must be between 0 and cell.totnsegs
Returns
-------
ndarray, dtype=object
tuples with section names of segments
'''
# ensure idx is array-like, or convert
if isinstance(idx, (int, np.integer)):
idx = np.array([idx])
elif len(idx) == 0:
return
else:
idx = np.array(idx).astype(int)
# ensure all idx are valid
if np.any(idx >= self.totnsegs):
wrongidx = idx[np.where(idx >= self.totnsegs)]
raise Exception('idx %s >= number of compartments' % str(wrongidx))
# create list of seg names:
allsegnames = []
segidx = 0
for sec in self.allseclist:
for seg in sec:
allsegnames.append((segidx, '%s' % sec.name(), seg.x))
segidx += 1
return np.array(allsegnames, dtype=object)[idx][0]
def _collect_pt3d(self):
"""collect the pt3d info, for each section"""
x = []
y = []
z = []
d = []
for sec in self.allseclist:
n3d = int(neuron.h.n3d(sec=sec))
x_i, y_i, z_i = np.zeros(n3d), np.zeros(n3d), np.zeros(n3d),
d_i = np.zeros(n3d)
for i in range(n3d):
x_i[i] = neuron.h.x3d(i, sec=sec)
y_i[i] = neuron.h.y3d(i, sec=sec)
z_i[i] = neuron.h.z3d(i, sec=sec)
d_i[i] = neuron.h.diam3d(i, sec=sec)
x.append(x_i)
y.append(y_i)
z.append(z_i)
d.append(d_i)
# remove offsets which may be present if soma is centred at the origin
if len(x) > 1:
xoff = x[0].mean()
yoff = y[0].mean()
zoff = z[0].mean()
for i in range(len(x)):
x[i] -= xoff
y[i] -= yoff
z[i] -= zoff
return x, y, z, d
def _update_pt3d(self):
"""
update the locations in neuron.hoc.space using neuron.h.pt3dchange()
"""
for i, sec in enumerate(self.allseclist):
n3d = int(neuron.h.n3d(sec=sec))
for n in range(n3d):
neuron.h.pt3dchange(n,
self.x3d[i][n],
self.y3d[i][n],
self.z3d[i][n],
self.diam3d[i][n], sec=sec)
# let NEURON know about the changes we just did:
neuron.h.define_shape()
# must recollect the geometry, otherwise we get roundoff errors!
self._collect_geometry()
def _set_pt3d_pos(self, diffx=0, diffy=0, diffz=0):
"""
Offset pt3d geometry with differential displacement
indicated in Cell.set_pos()
"""
for i in range(len(self.x3d)):
self.x3d[i] += diffx
self.y3d[i] += diffy
self.z3d[i] += diffz
self._update_pt3d()
def _set_pt3d_rotation(self, x=None, y=None, z=None, rotation_order='xyz'):
"""
Rotate pt3d geometry of cell object around the x-, y-, z-axis
in the order described by rotation_order parameter.
rotation_order should be a string with 3 elements containing x, y and z
e.g. 'xyz', 'zyx'
Input should be angles in radians.
using rotation matrices, takes dict with rot. angles,
where x, y, z are the rotation angles around respective axes.
All rotation angles are optional.
Parameters
----------
x: float
rotation angle in radians
y: float
rotation angle in radians
z: float
rotation angle in radians
rotation_order: str
rotation order, default: 'xyz'
Examples
--------
>>> cell = LFPy.Cell(**kwargs)
>>> rotation = {'x': 1.233, 'y': 0.236, 'z': np.pi}
>>> cell.set_pt3d_rotation(**rotation)
"""
for ax in rotation_order:
if ax == 'x' and x is not None:
theta = -x
rotation_x = np.array([[1, 0, 0],
[0, np.cos(theta), -np.sin(theta)],
[0, np.sin(theta), np.cos(theta)]])
for i in range(len(self.x3d)):
rel_pos = self._rel_pt3d_positions(
self.x3d[i], self.y3d[i], self.z3d[i])
rel_pos = np.dot(rel_pos, rotation_x)
self.x3d[i], self.y3d[i], self.z3d[i] = \
self._real_pt3d_positions(rel_pos)
if self.verbose:
print(('Rotated geometry %g radians around x-axis' %
(-theta)))
else:
if self.verbose:
print('Geometry not rotated around x-axis')
if ax == 'y' and y is not None:
phi = -y
rotation_y = np.array([[np.cos(phi), 0, np.sin(phi)],
[0, 1, 0],
[-np.sin(phi), 0, np.cos(phi)]])
for i in range(len(self.x3d)):
rel_pos = self._rel_pt3d_positions(
self.x3d[i], self.y3d[i], self.z3d[i])
rel_pos = np.dot(rel_pos, rotation_y)
self.x3d[i], self.y3d[i], self.z3d[i] = \
self._real_pt3d_positions(rel_pos)
if self.verbose:
print('Rotated geometry %g radians around y-axis' % (-phi))
else:
if self.verbose:
print('Geometry not rotated around y-axis')
if ax == 'z' and z is not None:
gamma = -z
rotation_z = np.array([[np.cos(gamma), -np.sin(gamma), 0],
[np.sin(gamma), np.cos(gamma), 0],
[0, 0, 1]])
for i in range(len(self.x3d)):
rel_pos = self._rel_pt3d_positions(
self.x3d[i], self.y3d[i], self.z3d[i])
rel_pos = np.dot(rel_pos, rotation_z)
self.x3d[i], self.y3d[i], self.z3d[i] = \
self._real_pt3d_positions(rel_pos)
if self.verbose:
print(
'Rotated geometry %g radians around z-axis' %
(-gamma))
else:
if self.verbose:
print('Geometry not rotated around z-axis')
self._update_pt3d()
def _rel_pt3d_positions(self, x, y, z):
"""Morphology relative to soma position """
rel_pos = np.transpose(np.array([x - self.somapos[0],
y - self.somapos[1],
z - self.somapos[2]]))
return rel_pos
def _real_pt3d_positions(self, rel_pos):
"""Morphology coordinates relative to Origo """
x = rel_pos[:, 0] + self.somapos[0]
y = rel_pos[:, 1] + self.somapos[1]
z = rel_pos[:, 2] + self.somapos[2]
x = np.array(x).flatten()
y = np.array(y).flatten()
z = np.array(z).flatten()
return x, y, z
def _create_polygon(self, i, projection=('x', 'z')):
"""create a polygon to fill for each section"""
x = getattr(self, projection[0] + '3d')[i]
y = getattr(self, projection[1] + '3d')[i]
# x = self.x3d[i]
# z = self.z3d[i]
d = self.diam3d[i]
# calculate angles
dx = np.diff(x)
dy = np.diff(y)
theta = np.arctan2(dy, dx)
x = np.r_[x, x[::-1]]
y = np.r_[y, y[::-1]]
theta = np.r_[theta, theta[::-1]]
d = np.r_[d, d[::-1]]
# 1st corner:
x[0] -= 0.5 * d[0] * np.sin(theta[0])
y[0] += 0.5 * d[0] * np.cos(theta[0])
# pt3d points between start and end of section, first side
x[1:dx.size] -= 0.25 * d[1:dx.size] * (
np.sin(theta[:dx.size - 1]) + np.sin(theta[1:dx.size]))
y[1:dy.size] += 0.25 * d[1:dy.size] * (
np.cos(theta[:dy.size - 1]) + np.cos(theta[1:dx.size]))
# end of section, first side
x[dx.size] -= 0.5 * d[dx.size] * np.sin(theta[dx.size])
y[dy.size] += 0.5 * d[dy.size] * np.cos(theta[dy.size])
# other side
# end of section, second side
x[dx.size + 1] += 0.5 * d[dx.size + 1] * np.sin(theta[dx.size])
y[dy.size + 1] -= 0.5 * d[dy.size + 1] * np.cos(theta[dy.size])
# pt3d points between start and end of section, second side
x[::-1][1:dx.size] += 0.25 * d[::-1][1:dx.size] * (
np.sin(theta[::-1][:dx.size - 1]) + np.sin(theta[::-1][1:dx.size]))
y[::-1][1:dy.size] -= 0.25 * d[::-1][1:dy.size] * (
np.cos(theta[::-1][:dy.size - 1]) + np.cos(theta[::-1][1:dx.size]))
# last corner:
x[-1] += 0.5 * d[-1] * np.sin(theta[-1])
y[-1] -= 0.5 * d[-1] * np.cos(theta[-1])
return x, y
def get_pt3d_polygons(self, projection=('x', 'z')):
"""For each section create a polygon in the plane determined by keyword
argument projection=('x', 'z'), that can be
visualized using e.g., plt.fill()
Parameters
----------
projection: tuple of strings
Determining projection. Defaults to ('x', 'z')
Returns
-------
list
list of (x, z) tuples giving the trajectory
of each section that can be plotted using PolyCollection
Examples
--------
>>> from matplotlib.collections import PolyCollection
>>> import matplotlib.pyplot as plt
>>> cell = LFPy.Cell(morphology='PATH/TO/MORPHOLOGY')
>>> zips = []
>>> for x, z in cell.get_pt3d_polygons(projection=('x', 'z')):
>>> zips.append(list(zip(x, z)))
>>> polycol = PolyCollection(zips,
>>> edgecolors='none',
>>> facecolors='gray')
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.add_collection(polycol)
>>> ax.axis(ax.axis('equal'))
>>> plt.show()
"""
if len(projection) != 2:
raise ValueError("projection arg be a tuple like ('x', 'y')")
if 'x' in projection and 'y' in projection:
pass
elif 'x' in projection and 'z' in projection:
pass
elif 'y' in projection and 'z' in projection:
pass
else:
mssg = "projection must be a length 2 tuple of 'x', 'y' or 'z'!"
raise ValueError(mssg)
assert self.pt3d is True, 'Cell keyword argument pt3d != True'
polygons = []
for i in range(len(self.x3d)):
polygons.append(self._create_polygon(i, projection))
return polygons
def _create_segment_polygon(self, i, projection=('x', 'z')):
"""Create a polygon to fill for segment i, in the plane
determined by kwarg projection"""
x = getattr(self, projection[0])[i]
z = getattr(self, projection[1])[i]
d = self.d[i]
# calculate angles
dx = np.diff(x)
dz = np.diff(z)
theta = np.arctan2(dz, dx)
x = np.r_[x, x[::-1]]
z = np.r_[z, z[::-1]]
# 1st corner:
x[0] -= 0.5 * d * np.sin(theta)
z[0] += 0.5 * d * np.cos(theta)
# end of section, first side
x[1] -= 0.5 * d * np.sin(theta)
z[1] += 0.5 * d * np.cos(theta)
# other side
# end of section, second side
x[2] += 0.5 * d * np.sin(theta)
z[2] -= 0.5 * d * np.cos(theta)
# last corner:
x[3] += 0.5 * d * np.sin(theta)
z[3] -= 0.5 * d * np.cos(theta)
return x, z
def get_idx_polygons(self, projection=('x', 'z')):
"""For each segment idx in cell create a polygon in the plane
determined by the projection kwarg (default ('x', 'z')),
that can be visualized using plt.fill() or
mpl.collections.PolyCollection
Parameters
----------
projection: tuple of strings
Determining projection. Defaults to ('x', 'z')
Returns
-------
polygons: list
list of (ndarray, ndarray) tuples
giving the trajectory of each section
Examples
--------
>>> from matplotlib.collections import PolyCollection
>>> import matplotlib.pyplot as plt
>>> cell = LFPy.Cell(morphology='PATH/TO/MORPHOLOGY')
>>> zips = []
>>> for x, z in cell.get_idx_polygons(projection=('x', 'z')):
>>> zips.append(list(zip(x, z)))
>>> polycol = PolyCollection(zips,
>>> edgecolors='none',
>>> facecolors='gray')
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.add_collection(polycol)
>>> ax.axis(ax.axis('equal'))
>>> plt.show()
"""
if len(projection) != 2:
raise ValueError("projection arg be a tuple like ('x', 'y')")
if 'x' in projection and 'y' in projection:
pass
elif 'x' in projection and 'z' in projection:
pass
elif 'y' in projection and 'z' in projection:
pass
else:
mssg = "projection must be a length 2 tuple of 'x', 'y' or 'z'!"
raise ValueError(mssg)
polygons = []
for i in np.arange(self.totnsegs):
polygons.append(self._create_segment_polygon(i, projection))
return polygons
def insert_v_ext(self, v_ext, t_ext):
"""Set external extracellular potential around cell.
Playback of some extracellular potential v_ext on each of the
cell.totnsegs compartments. Assumes that the "extracellular" mechanism is inserted
on each compartment.
Can be used to study ephaptic effects and similar
The inputs will be copied and attached to the cell object as
cell.v_ext, cell.t_ext, and converted
to (list of) neuron.h.Vector types, to allow playback into each
compartment e_extracellular reference.
Can not be deleted prior to running cell.simulate()
Parameters
----------
v_ext: ndarray
Numpy array of size cell.totnsegs x t_ext.size, unit mV
t_ext: ndarray
Time vector of v_ext in ms
Examples
--------
>>> import LFPy
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> #create cell
>>> cell = LFPy.Cell(morphology='morphologies/example_morphology.hoc',
>>> passive=True)
>>> #time vector and extracellular field for every segment:
>>> t_ext = np.arange(cell.tstop / cell.dt+ 1) * cell.dt
>>> v_ext = np.random.rand(cell.totnsegs, t_ext.size)-0.5
>>> #insert potentials and record response:
>>> cell.insert_v_ext(v_ext, t_ext)
>>> cell.simulate(rec_imem=True, rec_vmem=True)
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(311)
>>> ax2 = fig.add_subplot(312)
>>> ax3 = fig.add_subplot(313)
>>> eim = ax1.matshow(np.array(cell.v_ext), cmap='spectral')
>>> cb1 = fig.colorbar(eim, ax=ax1)
>>> cb1.set_label('v_ext')
>>> ax1.axis(ax1.axis('tight'))
>>> iim = ax2.matshow(cell.imem, cmap='spectral')
>>> cb2 = fig.colorbar(iim, ax=ax2)
>>> cb2.set_label('imem')
>>> ax2.axis(ax2.axis('tight'))
>>> vim = ax3.matshow(cell.vmem, cmap='spectral')
>>> ax3.axis(ax3.axis('tight'))
>>> cb3 = fig.colorbar(vim, ax=ax3)
>>> cb3.set_label('vmem')
>>> ax3.set_xlabel('tstep')
>>> plt.show()
"""
# test dimensions of input
try:
if v_ext.shape[0] != self.totnsegs:
raise ValueError("v_ext.shape[0] != cell.totnsegs")
if v_ext.shape[1] != t_ext.size:
raise ValueError('v_ext.shape[1] != t_ext.size')
except BaseException:
raise ValueError('v_ext, t_ext must both be np.array types')
if not self.extracellular:
raise Exception('LFPy.Cell arg extracellular != True')
# create list of extracellular potentials on each segment, time vector
self.t_ext = neuron.h.Vector(t_ext)
self.v_ext = []
for v in v_ext:
self.v_ext.append(neuron.h.Vector(v))
# play v_ext into e_extracellular reference
i = 0
for sec in self.allseclist:
for seg in sec:
self.v_ext[i].play(seg._ref_e_extracellular, self.t_ext)
i += 1
return
def get_axial_currents_from_vmem(self, timepoints=None):
"""Compute axial currents from cell sim: get current magnitude,
distance vectors and position vectors.
Parameters
----------
timepoints: ndarray, dtype=int
array of timepoints in simulation at which you want to compute
the axial currents. Defaults to None. If not given,
all simulation timesteps will be included.
Returns
-------
i_axial: ndarray, dtype=float
Shape ((cell.totnsegs-1)*2, len(timepoints)) array of axial current
magnitudes I in units of (nA) in cell at all timesteps in
timepoints, or at all timesteps of the simulation if
timepoints=None.
Contains two current magnitudes per segment,
(except for the root segment): 1) the current from the mid point of
the segment to the segment start point, and 2) the current from
the segment start point to the mid point of the parent segment.
d_vectors: ndarray, dtype=float
Shape (3, (cell.totnsegs-1)*2) array of distance vectors traveled
by each axial current in i_axial in units of (µm). The indices of
the first axis, correspond to the first axis of i_axial and
pos_vectors.
pos_vectors: ndarray, dtype=float
Shape ((cell.totnsegs-1)*2, 3) array of position vectors pointing
to the mid point of each axial current in i_axial in units of (µm).
The indices of the first axis, correspond to the first axis
of i_axial and d_vectors.
Raises
------
AttributeError
Raises an exception if the cell.vmem attribute cannot be found
"""
if not hasattr(self, 'vmem'):
raise AttributeError('no vmem, run cell.simulate(rec_vmem=True)')
self._ri_list = self.get_axial_resistance()
i_axial = []
d_vectors = []
pos_vectors = []
dseg = np.c_[self.x.mean(axis=-1) - self.x[:, 0],
self.y.mean(axis=-1) - self.y[:, 0],
self.z.mean(axis=-1) - self.z[:, 0]]
dpar = np.c_[self.x[:, -1] - self.x.mean(axis=-1),
self.y[:, -1] - self.y.mean(axis=-1),
self.z[:, -1] - self.z.mean(axis=-1)]
# children_dict = self.get_dict_of_children_idx()
for sec in self.allseclist:
if not neuron.h.SectionRef(sec=sec).has_parent():
if sec.nseg == 1:
# skip soma, since soma is an orphan
continue
else:
# the first segment has more than one segment,
# need to compute axial currents within this section.
seg_idx = 1
parent_idx = 0
bottom_seg = False
first_sec = True
branch = False
parentsec = None
children_dict = None
connection_dict = None
conn_point = 1
else:
# section has parent section
first_sec = False
bottom_seg = True
secref = neuron.h.SectionRef(sec=sec)
parentseg = secref.parent()
parentsec = parentseg.sec
children_dict = self.get_dict_of_children_idx()
branch = len(children_dict[parentsec.name()]) > 1
connection_dict = self.get_dict_parent_connections()
conn_point = connection_dict[sec.name()]
# find parent index
if conn_point == 1 or parentsec.nseg == 1:
internal_parent_idx = -1 # last seg in sec
elif conn_point == 0:
internal_parent_idx = 0 # first seg in sec
else:
# if parentseg is not first or last seg in parentsec
segment_xlist = np.array(
[segment.x for segment in parentsec])
internal_parent_idx = np.abs(
segment_xlist - conn_point).argmin()
parent_idx = self.get_idx(section=parentsec.name())[
internal_parent_idx]
# find segment index
seg_idx = self.get_idx(section=sec.name())[0]
for _ in sec:
if first_sec:
first_sec = False
continue
iseg, ipar = self._parent_and_segment_current(seg_idx,
parent_idx,
bottom_seg,
branch,
parentsec,
children_dict,
connection_dict,
conn_point,
timepoints,
sec
)
if bottom_seg:
# if a seg is connected to soma, it is
# connected to the middle of soma,
# and dpar needs to be altered.
par_dist = np.array([(self.x[seg_idx, 0] -
self.x[parent_idx].mean(axis=-1)),
(self.y[seg_idx, 0] -
self.y[parent_idx].mean(axis=-1)),
(self.z[seg_idx, 0] -
self.z[parent_idx].mean(axis=-1))])
else:
par_dist = dpar[parent_idx]
d_vectors.append(par_dist)
d_vectors.append(dseg[seg_idx])
i_axial.append(ipar)
i_axial.append(iseg)
pos_par = np.array([self.x[seg_idx, 0],
self.y[seg_idx, 0],
self.z[seg_idx, 0]]) - 0.5 * par_dist
pos_seg = np.array([self.x[seg_idx].mean(axis=-1),
self.y[seg_idx].mean(axis=-1),
self.z[seg_idx].mean(axis=-1)])
pos_seg -= 0.5 * dseg[seg_idx]
pos_vectors.append(pos_par)
pos_vectors.append(pos_seg)
parent_idx = seg_idx
seg_idx += 1
branch = False
bottom_seg = False
return np.array(i_axial), np.array(d_vectors).T, np.array(pos_vectors)
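# Example (editorial sketch, not part of the original source): after a
# simulation with rec_vmem=True, the total current dipole moment can be
# obtained by summing the axial-current contributions; this is equivalent to
# summing the output of get_multi_current_dipole_moments defined below.
# >>> i_axial, d_vectors, pos_vectors = cell.get_axial_currents_from_vmem()
# >>> p = d_vectors @ i_axial   # shape (3, n_timesteps), units (nA um)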
def get_axial_resistance(self):
"""
Return NEURON axial resistance for all cell compartments.
Returns
-------
ri_list: ndarray, dtype=float
Shape (cell.totnsegs, ) array containing neuron.h.ri(seg.x) in
units of (MOhm) for all segments in cell calculated using the
neuron.h.ri(seg.x) method. neuron.h.ri(seg.x) returns the
axial resistance from the middle of the segment to the middle of
the parent segment. Note: If seg is the first segment in a section,
i.e. the parent segment belongs to a different section or there is
no parent section, then neuron.h.ri(seg.x) returns the axial
resistance from the middle of the segment to the node connecting
the segment to the parent section (or a ghost node if there is no
parent)
"""
ri_list = np.zeros(self.totnsegs)
comp = 0
for sec in self.allseclist:
for seg in sec:
ri_list[comp] = neuron.h.ri(seg.x, sec=sec)
comp += 1
return ri_list
def get_dict_of_children_idx(self):
"""
Return dictionary with children segment indices for all sections.
Returns
-------
children_dict: dictionary
Dictionary containing a list for each section,
with the segment index of all the section's children.
The dictionary is needed to find the
sibling of a segment.
"""
children_dict = {}
for sec in self.allseclist:
children_dict[sec.name()] = []
for child in neuron.h.SectionRef(sec=sec).child:
# add index of first segment of each child
children_dict[sec.name()].append(int(self.get_idx(
section=child.name())[0]))
return children_dict
def get_dict_parent_connections(self):
"""
Return dictionary with parent connection point for all sections.
Returns
-------
connection_dict: dictionary
Dictionary containing a float in range [0, 1] for each section
in cell. The float gives the location on the parent segment
to which the section is connected.
The dictionary is needed for computing axial currents.
"""
connection_dict = {}
for i, sec in enumerate(self.allseclist):
connection_dict[sec.name()] = neuron.h.parent_connection(sec=sec)
return connection_dict
def _parent_and_segment_current(self, seg_idx, parent_idx, bottom_seg,
branch=False, parentsec=None,
children_dict=None, connection_dict=None,
conn_point=1, timepoints=None, sec=None):
"""
Return axial current from segment (seg_idx) mid to segment start,
and current from parent segment (parent_idx) end to parent segment mid.
Parameters
----------
seg_idx: int
Segment index
parent_idx: int
Parent index
bottom_seg: boolean
branch: boolean
parentsec: neuron.Section object
parent section
children_dict: dict or None
Default None
connection_dict: dict or None
Default None
conn_point: float
relative connection point on section in the interval [0, 1].
Defaults to 1
timepoints: ndarray, dtype=int
array of timepoints in simulation at which you want to compute
the axial currents. Defaults to None. If not given,
all simulation timesteps will be included.
sec: neuron.Section object
current section needed in new NEURON version
Returns
-------
iseg: dtype=float
Axial current in units of (nA)
from segment mid point to segment start point.
ipar: dtype=float
Axial current in units of (nA)
from parent segment end point to parent segment mid point.
"""
# axial resistance between segment mid and parent node
seg_ri = self._ri_list[seg_idx]
vmem = self.vmem
if timepoints is not None:
vmem = self.vmem[:, timepoints]
vpar = vmem[parent_idx]
vseg = vmem[seg_idx]
# if segment is the first in its section and it is connected to
# top or bottom of parent section, we need to find parent_ri explicitly
if bottom_seg and (conn_point == 0 or conn_point == 1):
if conn_point == 0:
parent_ri = self._ri_list[parent_idx]
else:
parent_ri = neuron.h.ri(0, sec=sec)
if not branch:
ri = parent_ri + seg_ri
iseg = (vpar - vseg) / ri
ipar = iseg
else:
# if branch, need to compute iseg and ipar separately
[sib_idcs] = np.take(children_dict[parentsec.name()],
np.where(children_dict[parentsec.name()]
!= seg_idx))
sibs = [self.get_idx_name(sib_idcs)[i][1]
for i in range(len(sib_idcs))]
# compute potential in branch point between parent and siblings
v_branch_num = vpar / parent_ri + vseg / seg_ri
v_branch_denom = 1. / parent_ri + 1. / seg_ri
for sib_idx, sib in zip(sib_idcs, sibs):
sib_conn_point = connection_dict[sib]
if sib_conn_point == conn_point:
v_branch_num += vmem[sib_idx] / self._ri_list[sib_idx]
v_branch_denom += 1. / self._ri_list[sib_idx]
v_branch = v_branch_num / v_branch_denom
iseg = (v_branch - vseg) / seg_ri
# set ipar=iseg
# only fraction of total current into parent is added per
# sibling
ipar = iseg
else:
iseg = (vpar - vseg) / seg_ri
ipar = iseg
return iseg, ipar
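# Note (editorial, hedged): the branch-point potential above follows from
# current conservation at the node, i.e. the conductance-weighted average
#   v_branch = (v_par/R_par + v_seg/R_seg + sum_k v_sib_k/R_sib_k)
#              / (1/R_par + 1/R_seg + sum_k 1/R_sib_k),
# so each child segment only contributes its own fraction of the current
# flowing into the parent.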
def distort_geometry(self, factor=0., axis='z', nu=0.0):
"""
Distorts cellular morphology with a relative factor along a chosen axis
preserving Poisson's ratio. A ratio nu=0.5 assumes incompressible and
isotropic media that embeds the cell. A ratio nu=0 will only affect
geometry along the chosen axis. A ratio nu=-1 will isometrically scale
the neuron geometry along each axis.
This method does not affect the underlying cable properties of the
cell, only predictions of extracellular measurements (by affecting the
relative locations of sources representing the compartments).
Parameters
----------
factor: float
relative compression/stretching factor of morphology. Default is 0
(no compression/stretching). Positive values implies a compression
along the chosen axis.
axis: str
which axis to apply compression/stretching. Default is "z".
nu: float
Poisson's ratio. Ratio between axial and transversal
compression/stretching. Default is 0.
"""
assert abs(factor) < 1., 'abs(factor) >= 1, factor must be in (-1, 1)'
assert axis in ['x', 'y', 'z'], \
'axis={} not "x", "y" or "z"'.format(axis)
for pos, dir_ in zip(self.somapos, 'xyz'):
geometry = np.c_[getattr(self, dir_)[:, 0],
getattr(self, dir_)[:, -1]]
if dir_ == axis:
geometry -= pos
geometry *= (1. - factor)
geometry += pos
else:
geometry -= pos
geometry *= (1. + factor * nu)
geometry += pos
setattr(self, dir_, geometry)
# recompute length of each segment
self._set_length()
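# Example (editorial sketch, not part of the original source): compress the
# morphology by 10 % along z while treating the embedding medium as
# incompressible (nu=0.5). With (x0, y0, z0) = cell.somapos, coordinates
# scale as z' = z0 + (z - z0)*(1 - factor) along the chosen axis and
# x' = x0 + (x - x0)*(1 + factor*nu) transversally.
# >>> cell.distort_geometry(factor=0.1, axis='z', nu=0.5)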
def _set_length(self):
'''callable method to (re)set length attribute'''
self.length = np.sqrt(np.diff(self.x, axis=-1)**2 +
np.diff(self.y, axis=-1)**2 +
np.diff(self.z, axis=-1)**2).flatten()
def _set_area(self):
'''callable method to (re)set area attribute'''
if self.d.ndim == 1:
self.area = self.length * np.pi * self.d
else:
# Surface area of conical frusta
# A = pi*(r1+r2)*sqrt((r1-r2)^2 + h^2)
self.area = np.pi * self.d.sum(axis=-1) * \
np.sqrt(np.diff(self.d, axis=-1)**2 + self.length**2)
def get_multi_current_dipole_moments(self, timepoints=None):
'''
Return 3D current dipole moment vector and middle position vector
from each axial current in space.
Parameters
----------
timepoints: ndarray, dtype=int or None
array of timepoints at which you want to compute
the current dipole moments. Defaults to None. If not given,
all simulation timesteps will be included.
Returns
-------
multi_dipoles: ndarray, dtype = float
Shape (n_axial_currents, 3, n_timepoints) array
containing the x-,y-,z-components of the current dipole moment
from each axial current in cell, at all timepoints.
The number of axial currents,
n_axial_currents = (cell.totnsegs-1) * 2
and the number of timepoints, n_timepoints = cell.tvec.size.
The current dipole moments are given in units of (nA µm).
pos_axial: ndarray, dtype = float
Shape (n_axial_currents, 3) array containing the x-, y-, and
z-components giving the mid position in space of each multi_dipole
in units of (µm).
Examples
--------
Get all current dipole moments and positions from all axial currents in
a single neuron simulation:
>>> import LFPy
>>> import numpy as np
>>> cell = LFPy.Cell('PATH/TO/MORPHOLOGY', extracellular=False)
>>> syn = LFPy.Synapse(cell, idx=cell.get_closest_idx(0,0,1000),
>>> syntype='ExpSyn', e=0., tau=1., weight=0.001)
>>> syn.set_spike_times(np.mgrid[20:100:20])
>>> cell.simulate(rec_vmem=True, rec_imem=False)
>>> timepoints = np.array([1,2,3,4])
>>> multi_dipoles, dipole_locs = cell.get_multi_current_dipole_moments(
>>> timepoints=timepoints)
'''
i_axial, d_axial, pos_axial = self.get_axial_currents_from_vmem(
timepoints=timepoints)
Ni, Nt = i_axial.shape
multi_dipoles = np.zeros((Ni, 3, Nt))
for i in range(Ni):
multi_dipoles[i, ] = (i_axial[i][:, np.newaxis] * d_axial[:, i]).T
return multi_dipoles, pos_axial
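# A minimal usage sketch (not part of the original source) for distort_geometry;
# the morphology path and parameter values below are placeholders only.
# >>> import LFPy
# >>> cell = LFPy.Cell('PATH/TO/MORPHOLOGY')
# >>> # compress the geometry 10% along z, approximately volume-preserving (nu=0.5)
# >>> cell.distort_geometry(factor=0.1, axis='z', nu=0.5)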
|
gpl-3.0
|
mblondel/scikit-learn
|
sklearn/setup.py
|
24
|
2991
|
import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for hmm
config.add_extension(
'_hmmc',
sources=['_hmmc.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs need cblas; a Fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
NunoEdgarGub1/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/projections/__init__.py
|
69
|
2179
|
from geo import AitoffAxes, HammerAxes, LambertAxes
from polar import PolarAxes
from matplotlib import axes
class ProjectionRegistry(object):
"""
Manages the set of projections available to the system.
"""
def __init__(self):
self._all_projection_types = {}
def register(self, *projections):
"""
Register a new set of projection(s).
"""
for projection in projections:
name = projection.name
self._all_projection_types[name] = projection
def get_projection_class(self, name):
"""
Get a projection class from its *name*.
"""
return self._all_projection_types[name]
def get_projection_names(self):
"""
Get a list of the names of all projections currently
registered.
"""
names = self._all_projection_types.keys()
names.sort()
return names
projection_registry = ProjectionRegistry()
projection_registry.register(
axes.Axes,
PolarAxes,
AitoffAxes,
HammerAxes,
LambertAxes)
def register_projection(cls):
projection_registry.register(cls)
def get_projection_class(projection=None):
"""
Get a projection class from its name.
If *projection* is None, a standard rectilinear projection is
returned.
"""
if projection is None:
projection = 'rectilinear'
try:
return projection_registry.get_projection_class(projection)
except KeyError:
raise ValueError("Unknown projection '%s'" % projection)
def projection_factory(projection, figure, rect, **kwargs):
"""
Get a new projection instance.
*projection* is a projection name.
*figure* is a figure to add the axes to.
*rect* is a :class:`~matplotlib.transforms.Bbox` object specifying
the location of the axes within the figure.
Any other kwargs are passed along to the specific projection
constructor being used.
"""
return get_projection_class(projection)(figure, rect, **kwargs)
def get_projection_names():
"""
Get a list of acceptable projection names.
"""
return projection_registry.get_projection_names()
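# A minimal sketch of how a custom projection would be registered with this
# registry; SkewedAxes is a hypothetical class, not part of the module. Any
# Axes subclass exposing a ``name`` attribute can be registered and later
# retrieved by that name.
# >>> from matplotlib.axes import Axes
# >>> class SkewedAxes(Axes):
# ...     name = 'skewed'
# >>> register_projection(SkewedAxes)
# >>> get_projection_class('skewed')    # -> SkewedAxes
# >>> get_projection_names()            # now includes 'skewed'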
|
gpl-3.0
|
simonsfoundation/inferelator_ng
|
inferelator_ng/tests/test_mi_R.py
|
3
|
4576
|
import unittest, os
import pandas as pd
import numpy as np
from .. import mi_R
my_dir = os.path.dirname(__file__)
class TestMI(unittest.TestCase):
"""
Superclass for common methods
"""
cores = bins = 10
def calculate_mi(self):
driver = mi_R.MIDriver()
target = driver.target_directory = os.path.join(my_dir, "artifacts")
if not os.path.exists(target):
os.makedirs(target)
driver.cores = self.cores
driver.bins = self.bins
(self.clr_matrix, self.mi_matrix) = driver.run(self.x_dataframe, self.y_dataframe)
def print_results(self):
print("\nx")
print(self.x_dataframe)
print("y")
print(self.y_dataframe)
print("mi")
print(self.mi_matrix)
print("clr")
print(self.clr_matrix)
class Test2By2(TestMI):
def test_12_34_identical(self):
"Compute mi for identical arrays [[1, 2], [2, 4]]."
L = [[1, 2], [3, 4]]
self.x_dataframe = pd.DataFrame(np.array(L))
self.y_dataframe = pd.DataFrame(np.array(L))
self.calculate_mi()
#self.print_results()
expected = np.array([[0, 1], [1, 0]])
np.testing.assert_almost_equal(self.clr_matrix.as_matrix(), expected)
def test_12_34_minus(self):
"Compute mi for identical arrays [[1, 2], [2, 4]]."
L = [[1, 2], [3, 4]]
self.x_dataframe = pd.DataFrame(np.array(L))
self.y_dataframe = pd.DataFrame(-np.array(L))
self.calculate_mi()
#self.print_results()
expected = np.array([[0, 1], [1, 0]])
np.testing.assert_almost_equal(self.clr_matrix.as_matrix(), expected)
def test_12_34_times_pi(self):
"Compute mi for identical arrays [[1, 2], [2, 4]]."
L = [[1, 2], [3, 4]]
self.x_dataframe = pd.DataFrame(np.array(L))
self.y_dataframe = pd.DataFrame(np.pi * np.array(L))
self.calculate_mi()
#self.print_results()
expected = np.array([[0, 1], [1, 0]])
np.testing.assert_almost_equal(self.clr_matrix.as_matrix(), expected)
def test_12_34_swapped(self):
"Compute mi for identical arrays [[1, 2], [2, 4]]."
L = [[1, 2], [3, 4]]
L2 = [[3, 4], [2, 1]]
self.x_dataframe = pd.DataFrame(np.array(L))
self.y_dataframe = pd.DataFrame(np.array(L2))
self.calculate_mi()
expected = np.array([[0, 1], [1, 0]])
np.testing.assert_almost_equal(self.clr_matrix.as_matrix(), expected)
def test_12_34_transposed(self):
"Compute mi for identical arrays [[1, 2], [2, 4]]."
L = [[1, 2], [3, 4]]
self.x_dataframe = pd.DataFrame(np.array(L))
self.y_dataframe = pd.DataFrame(np.array(L).transpose())
self.calculate_mi()
#self.print_results()
expected = np.array([[0, 1], [1, 0]])
np.testing.assert_almost_equal(self.clr_matrix.as_matrix(), expected)
def test_12_34_and_zeros(self):
"Compute mi for identical arrays [[1, 2], [2, 4]]."
L = [[1, 2], [3, 4]]
self.x_dataframe = pd.DataFrame(np.array(L))
self.y_dataframe = pd.DataFrame(np.zeros((2,2)))
self.calculate_mi()
#self.print_results()
# the entire clr matrix is NAN
self.assertTrue(np.isnan(self.clr_matrix.as_matrix()).all())
def test_12_34_and_ones(self):
"Compute mi for identical arrays [[1, 2], [2, 4]]."
L = [[1, 2], [3, 4]]
self.x_dataframe = pd.DataFrame(np.array(L))
self.y_dataframe = pd.DataFrame(np.ones((2,2)))
self.calculate_mi()
#self.print_results()
self.assertTrue(np.isnan(self.clr_matrix.as_matrix()).all())
class Test2By3(TestMI):
def test_12_34_identical(self):
"Compute mi for identical arrays [[1, 2, 1], [2, 4, 6]]."
L = [[1, 2, 1], [3, 4, 6]]
self.x_dataframe = pd.DataFrame(np.array(L))
self.y_dataframe = pd.DataFrame(np.array(L))
self.calculate_mi()
#self.print_results()
expected = np.array([[0, 1], [1, 0]])
np.testing.assert_almost_equal(self.clr_matrix.as_matrix(), expected)
def test_mixed(self):
"Compute mi for mixed arrays."
L = [[1, 2, 1], [3, 4, 6]]
L2 = [[3, 7, 1], [9, 0, 2]]
self.x_dataframe = pd.DataFrame(np.array(L))
self.y_dataframe = pd.DataFrame(np.array(L2))
self.calculate_mi()
self.print_results()
expected = np.array([[0, 1], [1, 0]])
#np.testing.assert_almost_equal(self.clr_matrix.as_matrix(), expected)
|
bsd-2-clause
|
yyjiang/scikit-learn
|
sklearn/preprocessing/data.py
|
113
|
56747
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
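# A brief usage sketch for scale() with made-up data (not taken from the docs above):
# >>> import numpy as np
# >>> X = np.array([[1., 2.], [3., 2.], [5., 2.]])
# >>> Xs = scale(X)
# >>> Xs.mean(axis=0)   # ~[0., 0.]
# >>> Xs.std(axis=0)    # [1., 0.]; the constant column is centered, and its zero
# ...                   # standard deviation is replaced by 1 before dividing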
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
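# A short illustrative sketch (values made up) of MinMaxScaler round-tripping
# data through transform and inverse_transform:
# >>> import numpy as np
# >>> X = np.array([[1., 10.], [2., 20.], [3., 30.]])
# >>> mms = MinMaxScaler(feature_range=(0, 1)).fit(X)
# >>> mms.transform(X)                          # each column mapped onto [0, 1]
# >>> mms.inverse_transform(mms.transform(X))   # recovers X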
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
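# A hedged usage sketch for StandardScaler; X_train and X_test are placeholder
# arrays with the same number of columns:
# >>> ss = StandardScaler().fit(X_train)
# >>> ss.mean_, ss.std_                   # per-feature statistics from X_train
# >>> X_test_std = ss.transform(X_test)   # apply the training-set statistics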
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
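# A small made-up example for MaxAbsScaler / maxabs_scale: each column is
# divided by its maximum absolute value, so sign and sparsity are preserved.
# >>> import numpy as np
# >>> X = np.array([[-2., 1.], [1., 0.5], [0.5, -1.]])
# >>> MaxAbsScaler().fit_transform(X)
# array([[-1.  ,  1.  ],
#        [ 0.5 ,  0.5 ],
#        [ 0.25, -1.  ]])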
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
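# A sketch (with made-up data) of why RobustScaler is less sensitive to
# outliers than StandardScaler: the statistics used are the median and the IQR.
# >>> import numpy as np
# >>> X = np.array([[1.], [2.], [3.], [4.], [100.]])   # 100 is an outlier
# >>> RobustScaler().fit_transform(X).ravel()
# array([ -1. ,  -0.5,   0. ,   0.5,  48.5])   # centered on median 3, IQR = 2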
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
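# A quick made-up example for normalize(): each row is rescaled to unit l2 norm.
# >>> import numpy as np
# >>> normalize(np.array([[3., 4.], [1., 0.]]), norm='l2')
# array([[ 0.6,  0.8],
#        [ 1. ,  0. ]])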
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
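# A small made-up example for binarize(): values strictly above the threshold
# become 1, everything else becomes 0.
# >>> import numpy as np
# >>> binarize(np.array([[0.2, 1.5], [-3., 0.6]]), threshold=0.5)
# array([[ 0.,  1.],
#        [ 0.,  1.]])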
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
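# A hedged sketch of KernelCenterer on a linear kernel (random data used only
# for illustration): centering K = X X^T this way is equivalent to centering
# the features of X before forming the kernel.
# >>> import numpy as np
# >>> from sklearn.metrics.pairwise import linear_kernel
# >>> X = np.random.RandomState(0).randn(5, 3)
# >>> K = linear_kernel(X)
# >>> Kc = KernelCenterer().fit_transform(K)
# >>> np.allclose(Kc, linear_kernel(X - X.mean(axis=0)))   # True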
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if a unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known using fit,
# i.e. less than n_values_, using mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
|
bsd-3-clause
|
vrtsystems/pyhaystack
|
pyhaystack/client/ops/his.py
|
1
|
31373
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
High-level history functions. These wrap the basic his_read function to allow
some alternate representations of the historical data.
"""
import hszinc
import fysom
import pytz
from copy import deepcopy
from datetime import tzinfo
from six import string_types
from ...util import state
from ...util.asyncexc import AsynchronousException
try:
from pandas import Series, DataFrame
HAVE_PANDAS = True
except ImportError: # pragma: no cover
# Not covered, since we'll always have 'pandas' available during tests.
HAVE_PANDAS = False
def _resolve_tz(tz):
"""
Resolve a given timezone specification into a tzinfo object (or None).
"""
if (tz is None) or isinstance(tz, tzinfo):
return tz
if isinstance(tz, string_types):
if '/' in tz:
# Olson database name
return pytz.timezone(tz)
else:
return hszinc.zoneinfo.timezone(tz)
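# A short sketch of how _resolve_tz behaves for the accepted inputs
# (None, a tzinfo instance, or a string; the names below are examples only):
# >>> _resolve_tz(None)               # -> None, timestamps left untouched
# >>> _resolve_tz('Europe/Berlin')    # Olson name -> pytz.timezone('Europe/Berlin')
# >>> _resolve_tz('Berlin')           # short name -> hszinc.zoneinfo.timezone('Berlin')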
class HisReadSeriesOperation(state.HaystackOperation):
"""
Read the series data from a 'point' entity and present it in a concise
format.
"""
FORMAT_LIST = 'list' # [(ts1, value1), (ts2, value2), ...]
FORMAT_DICT = 'dict' # {ts1: value1, ts2: value2, ...}
FORMAT_SERIES = 'series' # pandas.Series
def __init__(self, session, point, rng, tz, series_format):
"""
Read the series data and return it.
:param session: Haystack HTTP session object.
:param point: ID of historical 'point' object to read.
:param rng: Range to read from 'point'
:param tz: Timezone to translate timezones to. May be None.
:param series_format: What format to present the series in.
"""
super(HisReadSeriesOperation, self).__init__()
if series_format not in (self.FORMAT_LIST, self.FORMAT_DICT,
self.FORMAT_SERIES):
raise ValueError('Unrecognised series_format %s' % series_format)
if (series_format == self.FORMAT_SERIES) and (not HAVE_PANDAS):
raise NotImplementedError('pandas not available.')
if isinstance(rng, slice):
rng = ','.join([
hszinc.dump_scalar(p, mode=hszinc.MODE_ZINC)
for p in (rng.start, rng.stop)
])
self._session = session
self._point = point
self._range = hszinc.dump_scalar(rng, mode=hszinc.MODE_ZINC)
self._tz = _resolve_tz(tz)
self._series_format = series_format
self._state_machine = fysom.Fysom(
initial='init', final='done',
events=[
# Event Current State New State
('go', 'init', 'read'),
('read_done', 'read', 'done'),
('exception', '*', 'done'),
], callbacks={
'onenterread': self._do_read,
'onenterdone': self._do_done,
})
def go(self):
self._state_machine.go()
def _do_read(self, event):
"""
Request the data from the server.
"""
self._session.his_read(point=self._point, rng=self._range,
callback=self._on_read)
def _on_read(self, operation, **kwargs):
"""
Process the grid, format it into the requested format.
"""
try:
# See if the read succeeded.
grid = operation.result
if self._tz is None:
conv_ts = lambda ts : ts
else:
conv_ts = lambda ts : ts.astimezone(self._tz)
# Convert grid to list of tuples
data = [(conv_ts(row['ts']), row['val']) for row in grid]
if self._series_format == self.FORMAT_DICT:
data = dict(data)
elif self._series_format == self.FORMAT_SERIES:
# Split into index and data.
try:
(index, data) = zip(*data)
if isinstance(data[0], hszinc.Quantity):
values = [each.value for each in data]
units = data[0].unit
else:
values = data
units = ''
except ValueError:
values = []
index = []
units = ''
#ser = Series(data=data[0].value, index=index)
meta_serie = MetaSeries(data=values, index=index)
meta_serie.add_meta('units', units)
meta_serie.add_meta('point', self._point)
self._state_machine.read_done(result=meta_serie)
except: # Catch all exceptions to pass to caller.
self._state_machine.exception(result=AsynchronousException())
def _do_done(self, event):
"""
Return the result from the state machine.
"""
self._done(event.result)
class HisReadFrameOperation(state.HaystackOperation):
"""
Read the series data from several 'point' entities and present them in a
concise format.
"""
FORMAT_LIST = 'list' # [{'ts': ts1, 'col1': val1, ...}, {...}, ...]
FORMAT_DICT = 'dict' # {ts1: {'col1': val1, ...}, ts2: ...}
FORMAT_FRAME = 'frame' # pandas.DataFrame
def __init__(self, session, columns, rng, tz, frame_format):
"""
Read the series data and return it.
:param session: Haystack HTTP session object.
:param columns: IDs of historical point objects to read.
:param rng: Range to read from 'point'
:param tz: Timezone to translate timezones to. May be None.
:param frame_format: What format to present the frame in.
"""
super(HisReadFrameOperation, self).__init__()
self._log = session._log.getChild('his_read_frame')
if frame_format not in (self.FORMAT_LIST, self.FORMAT_DICT,
self.FORMAT_FRAME):
raise ValueError('Unrecognised frame_format %s' % frame_format)
if (frame_format == self.FORMAT_FRAME) and (not HAVE_PANDAS):
raise NotImplementedError('pandas not available.')
if isinstance(rng, slice):
rng = ','.join([
hszinc.dump_scalar(p, mode=hszinc.MODE_ZINC)
for p in (rng.start, rng.stop)
])
# Convert the columns to a list of tuples.
strip_ref = lambda r : r.name if isinstance(r, hszinc.Ref) else r
if isinstance(columns, dict):
            # Ensure keys are name strings and values are reference names.
columns = [(str(c),strip_ref(r)) for c, r in columns.items()]
else:
            # Translate the list into (column name, reference) tuples:
columns = [(strip_ref(c), c) for c in columns]
self._session = session
self._columns = columns
self._range = hszinc.dump_scalar(rng, mode=hszinc.MODE_ZINC)
self._tz = _resolve_tz(tz)
self._frame_format = frame_format
self._data_by_ts = {}
self._todo = set([c[0] for c in columns])
self._state_machine = fysom.Fysom(
initial='init', final='done',
events=[
# Event Current State New State
('probe_multi', 'init', 'probing'),
('do_multi_read', 'probing', 'multi_read'),
('all_read_done', 'multi_read', 'postprocess'),
('do_single_read', 'probing', 'single_read'),
('all_read_done', 'single_read', 'postprocess'),
('process_done', 'postprocess', 'done'),
('exception', '*', 'done'),
], callbacks={
'onenterprobing': self._do_probe_multi,
'onentermulti_read': self._do_multi_read,
'onentersingle_read': self._do_single_read,
'onenterpostprocess': self._do_postprocess,
'onenterdone': self._do_done,
})
def go(self):
self._state_machine.probe_multi()
def _do_probe_multi(self, event):
self._log.debug('Probing for multi-his-read support')
self._session.has_features([self._session.FEATURE_HISREAD_MULTI],
callback=self._on_probe_multi)
def _on_probe_multi(self, operation, **kwargs):
try:
result = operation.result
except: # Catch all exceptions to pass to caller.
self._state_machine.exception(result=AsynchronousException())
return
if result.get(self._session.FEATURE_HISREAD_MULTI):
# Session object supports multi-his-read
self._log.debug('Using multi-his-read support')
self._state_machine.do_multi_read()
else:
            # Emulate multi-his-read with separate single-point reads
self._log.debug('No multi-his-read support, emulating')
self._state_machine.do_single_read()
def _get_ts_rec(self, ts):
try:
return self._data_by_ts[ts]
except KeyError:
rec = {}
self._data_by_ts[ts] = rec
return rec
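    # Note on the helper above: repeated calls with the same timestamp return
    # the same record dict, so the accumulated structure looks like
    #     {ts1: {'col1': val1, 'col2': val2}, ts2: {...}, ...}
    # i.e. exactly the FORMAT_DICT shape documented at the top of the class
    # (the column names here are purely illustrative).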
def _do_multi_read(self, event):
"""
Request the data from the server as a single multi-read request.
"""
self._session.multi_his_read(points=[c[1] for c in self._columns],
rng=self._range, callback=self._on_multi_read)
def _on_multi_read(self, operation, **kwargs):
"""
Handle the multi-valued grid.
"""
try:
grid = operation.result
if self._tz is None:
conv_ts = lambda ts : ts
else:
conv_ts = lambda ts : ts.astimezone(self._tz)
for row in grid:
ts = conv_ts(row['ts'])
rec = self._get_ts_rec(ts)
for (col_idx, (col, _)) in enumerate(self._columns):
val = row.get('v%d' % col_idx)
if (val is not None) or \
(self._frame_format != self.FORMAT_FRAME):
rec[col] = val
self._state_machine.all_read_done()
except: # Catch all exceptions to pass to caller.
self._log.debug('Hit exception', exc_info=1)
self._state_machine.exception(result=AsynchronousException())
def _do_single_read(self, event):
"""
Request the data from the server as multiple single-read requests.
"""
for col, point in self._columns:
self._log.debug('Column %s point %s', col, point)
self._session.his_read(point, self._range,
lambda operation, **kw : self._on_single_read(operation,
col=col))
def _on_single_read(self, operation, col, **kwargs):
"""
        Handle the grid returned for a single column.
"""
self._log.debug('Response back for column %s', col)
try:
grid = operation.result
#print(grid)
#print('===========')
if self._tz is None:
conv_ts = lambda ts : ts
else:
conv_ts = lambda ts : ts.astimezone(self._tz)
self._log.debug('%d records for %s: %s', len(grid), col, grid)
for row in grid:
ts = conv_ts(row['ts'])
if self._tz is None:
self._tz = ts.tzinfo
rec = self._get_ts_rec(ts)
val = row.get('val')
if (val is not None) or \
(self._frame_format != self.FORMAT_FRAME):
rec[col] = val
self._todo.discard(col)
self._log.debug('Still waiting for: %s', self._todo)
if not self._todo:
# No more to read
self._state_machine.all_read_done()
except: # Catch all exceptions to pass to caller.
self._log.debug('Hit exception', exc_info=1)
self._state_machine.exception(result=AsynchronousException())
def _do_postprocess(self, event):
"""
Convert the dict-of-dicts to the desired frame format.
"""
self._log.debug('Post-processing')
try:
if self._frame_format == self.FORMAT_LIST:
def _merge_ts(item):
rec = item[1].copy()
rec['ts'] = item[0]
return rec
data = list(map(_merge_ts, list(self._data_by_ts.items())))
#print(data)
elif self._frame_format == self.FORMAT_FRAME:
# Build from dict
data = MetaDataFrame.from_dict(self._data_by_ts, orient='index')
def convert_quantity(val):
"""
If value is Quantity, convert to value
"""
if isinstance(val,hszinc.Quantity):
return val.value
else:
return val
def get_units(serie):
try:
first_element = serie.dropna()[0]
except IndexError: # needed for empty results
return ''
if isinstance(first_element, hszinc.Quantity):
return first_element.unit
else:
return ''
for name, serie in data.iteritems():
"""
Convert Quantity and put unit in metadata
"""
data.add_meta(name,get_units(serie))
data[name] = data[name].apply(convert_quantity)
else:
data = self._data_by_ts
self._state_machine.process_done(result=data)
except: # Catch all exceptions to pass to caller.
self._log.debug('Hit exception', exc_info=1)
self._state_machine.exception(result=AsynchronousException())
def _do_done(self, event):
"""
Return the result from the state machine.
"""
self._done(event.result)
class HisWriteSeriesOperation(state.HaystackOperation):
"""
Write the series data to a 'point' entity.
"""
def __init__(self, session, point, series, tz):
"""
Write the series data to the point.
:param session: Haystack HTTP session object.
:param point: ID of historical 'point' object to write.
:param series: Series data to be written to the point.
:param tz: If not None, a datetime.tzinfo instance for this write.
"""
super(HisWriteSeriesOperation, self).__init__()
# We've either been given an Entity instance or a string/reference
# giving the name of an entity.
if isinstance(point, string_types) or isinstance(point, hszinc.Ref):
# We have the name of an entity, we'll need to fetch it.
self._entity_id = point
self._point = None
else:
# We have an entity.
self._point = point
self._entity_id = point.id
self._session = session
self._series = series
self._tz = _resolve_tz(tz)
self._state_machine = fysom.Fysom(
initial='init', final='done',
events=[
# Event Current State New State
('have_tz', 'init', 'write'),
('have_point', 'init', 'get_point_tz'),
('need_point', 'init', 'get_point'),
('have_point', 'get_point', 'get_point_tz'),
('have_tz', 'get_point_tz', 'write'),
('need_equip', 'get_point_tz', 'get_equip'),
('have_equip', 'get_equip', 'get_equip_tz'),
('have_tz', 'get_equip_tz', 'write'),
('need_site', 'get_equip_tz', 'get_site'),
('have_site', 'get_site', 'get_site_tz'),
('have_tz', 'get_site_tz', 'write'),
('write_done', 'write', 'done'),
('exception', '*', 'done'),
], callbacks={
'onenterget_point': self._do_get_point,
'onenterget_point_tz': self._do_get_point_tz,
'onenterget_equip': self._do_get_equip,
'onenterget_equip_tz': self._do_get_equip_tz,
'onenterget_site': self._do_get_site,
'onenterget_site_tz': self._do_get_site_tz,
'onenterwrite': self._do_write,
'onenterdone': self._do_done,
})
def go(self):
if self._tz is not None: # Do we have a timezone?
# We do!
self._state_machine.have_tz()
elif self._point is not None: # Nope, do we have the point?
# We do!
self._state_machine.have_point()
else:
# We need to fetch the point to get its timezone.
self._state_machine.need_point()
def _do_get_point(self, event):
"""
Retrieve the point entity.
"""
self._session.get_entity(self._entity_id, single=True,
callback=self._got_point)
def _got_point(self, operation, **kwargs):
"""
Process the return value from get_entity
"""
try:
self._point = operation.result
self._state_machine.have_point()
except:
self._state_machine.exception(result=AsynchronousException())
def _do_get_point_tz(self, event):
"""
        See if the point has a timezone.
"""
if hasattr(self._point, 'tz') and isinstance(self._point.tz, tzinfo):
# We have our timezone.
self._tz = self._point.tz
self._state_machine.have_tz()
else:
# Nope, look at the equip then.
self._state_machine.need_equip()
def _do_get_equip(self, event):
"""
Retrieve the equip entity.
"""
self._point.get_equip(callback=self._got_equip)
def _got_equip(self, operation, **kwargs):
"""
Process the return value from get_entity
"""
try:
equip = operation.result
self._state_machine.have_equip(equip=equip)
except:
self._state_machine.exception(result=AsynchronousException())
def _do_get_equip_tz(self, event):
"""
        See if the equip has a timezone.
"""
equip = event.equip
if hasattr(equip, 'tz') and isinstance(equip.tz, tzinfo):
# We have our timezone.
self._tz = equip.tz
self._state_machine.have_tz()
else:
# Nope, look at the site then.
self._state_machine.need_site()
def _do_get_site(self, event):
"""
Retrieve the site entity.
"""
self._point.get_site(callback=self._got_site)
def _got_site(self, operation, **kwargs):
"""
Process the return value from get_entity
"""
try:
site = operation.result
self._state_machine.have_site(site=site)
except:
self._state_machine.exception(result=AsynchronousException())
def _do_get_site_tz(self, event):
"""
        See if the site has a timezone.
"""
site = event.site
if hasattr(site, 'tz') and isinstance(site.tz, tzinfo):
# We have our timezone.
self._tz = site.tz
self._state_machine.have_tz()
else:
try:
# Nope, no idea then.
raise ValueError('No timezone specified for operation, '\
'point, equip or site.')
except:
self._state_machine.exception(result=AsynchronousException())
def _do_write(self, event):
"""
Push the data to the server.
"""
try:
# Process the timestamp records into an appropriate format.
if hasattr(self._series, 'to_dict'):
records = self._series.to_dict()
elif not isinstance(self._series, dict):
records = dict(self._series)
else:
records = self._series
if not bool(records):
# No data, skip writing this series.
self._state_machine.write_done(result=None)
return
# Time-shift the records.
if hasattr(self._tz, 'localize'):
localise = lambda ts : self._tz.localize(ts) \
if ts.tzinfo is None else ts.astimezone(self._tz)
else:
localise = lambda ts : ts.replace(tzinfo=self._tz) \
if ts.tzinfo is None else ts.astimezone(self._tz)
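            # Illustration of the localisation above (hypothetical values): a
            # naive datetime(2016, 1, 1, 12, 0) is attached to self._tz (via
            # localize() for pytz zones, replace() otherwise), while an
            # already-aware timestamp is simply converted with astimezone().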
records = dict([(localise(ts), val) \
for ts, val in records.items()])
# Write the data
self._session.his_write(point=self._entity_id,
timestamp_records=records, callback=self._on_write)
except:
self._state_machine.exception(result=AsynchronousException())
def _on_write(self, operation, **kwargs):
"""
Handle the write error, if any.
"""
try:
# See if the write succeeded.
grid = operation.result
if not isinstance(grid, hszinc.Grid):
raise TypeError('Unexpected result: %r' % grid)
# Move to the done state.
self._state_machine.write_done(result=None)
except: # Catch all exceptions to pass to caller.
self._state_machine.exception(result=AsynchronousException())
def _do_done(self, event):
"""
Return the result from the state machine.
"""
self._done(event.result)
class HisWriteFrameOperation(state.HaystackOperation):
"""
Write the series data to several 'point' entities.
"""
def __init__(self, session, columns, frame, tz):
"""
Write the series data.
:param session: Haystack HTTP session object.
        :param columns: IDs of historical 'point' objects to write to.
        :param frame: Data to write: a list of records, a dict keyed by
                      timestamp, or a pandas DataFrame.
:param tz: Timezone to translate timezones to.
"""
super(HisWriteFrameOperation, self).__init__()
self._log = session._log.getChild('his_write_frame')
tz = _resolve_tz(tz)
if tz is None:
tz = pytz.utc
if hasattr(tz, 'localize'):
localise = lambda ts : tz.localize(ts) \
if ts.tzinfo is None else ts.astimezone(tz)
else:
localise = lambda ts : ts.replace(tzinfo=tz) \
if ts.tzinfo is None else ts.astimezone(tz)
# Convert frame to list of records.
if HAVE_PANDAS:
# Convert Pandas frame to dict of dicts form.
if isinstance(frame, DataFrame):
self._log.debug('Convert from Pandas DataFrame')
raw_frame = frame.to_dict(orient='dict')
frame = {}
for col, col_data in raw_frame.items():
for ts, val in col_data.items():
try:
frame_rec = frame[ts]
except KeyError:
frame_rec = {}
frame[ts] = frame_rec
                        frame_rec[col] = val
# Convert dict of dicts to records, de-referencing column names.
if isinstance(frame, dict):
if columns is None:
def _to_rec(item):
(ts, raw_record) = item
record = raw_record.copy()
record['ts'] = ts
return record
else:
def _to_rec(item):
(ts, raw_record) = item
record = {}
for col, val in raw_record.items():
entity = columns[col]
if hasattr(entity, 'id'):
entity = entity.id
if isinstance(entity, hszinc.Ref):
entity = entity.name
record[entity] = val
record['ts'] = ts
return record
frame = list(map(_to_rec, list(frame.items())))
elif columns is not None:
# Columns are aliased. De-alias the column names.
frame = deepcopy(frame)
for row in frame:
ts = row.pop('ts')
raw = row.copy()
row.clear()
row['ts'] = ts
for column, point in columns.items():
try:
value = raw.pop(column)
except KeyError:
self._log.debug('At %s missing column %s (for %s): %s',
ts, column, point, raw)
continue
row[session._obj_to_ref(point).name] = value
# Localise all timestamps, extract columns:
columns = set()
def _localise_rec(r):
r['ts'] = localise(r['ts'])
columns.update(set(r.keys()) - set(['ts']))
return r
frame = list(map(_localise_rec, frame))
self._session = session
self._frame = frame
self._columns = columns
self._todo = columns.copy()
self._tz = _resolve_tz(tz)
self._state_machine = fysom.Fysom(
initial='init', final='done',
events=[
# Event Current State New State
('probe_multi', 'init', 'probing'),
('no_data', 'init', 'done'),
('do_multi_write', 'probing', 'multi_write'),
('all_write_done', 'multi_write', 'done'),
('do_single_write', 'probing', 'single_write'),
('all_write_done', 'single_write', 'done'),
('exception', '*', 'done'),
], callbacks={
'onenterprobing': self._do_probe_multi,
'onentermulti_write': self._do_multi_write,
'onentersingle_write': self._do_single_write,
'onenterdone': self._do_done,
})
def go(self):
if not bool(self._columns):
self._log.debug('No data to write')
self._state_machine.no_data(result=None)
else:
self._state_machine.probe_multi()
def _do_probe_multi(self, event):
self._log.debug('Probing for multi-his-write support')
self._session.has_features([self._session.FEATURE_HISWRITE_MULTI],
callback=self._on_probe_multi)
def _on_probe_multi(self, operation, **kwargs):
try:
result = operation.result
except: # Catch all exceptions to pass to caller.
self._log.warning('Unable to probe multi-his-write support',
exc_info=1)
self._state_machine.exception(result=AsynchronousException())
result = {}
return
self._log.debug('Got result: %s', result)
if result.get(self._session.FEATURE_HISWRITE_MULTI):
# Session object supports multi-his-write
self._log.debug('Using multi-his-write support')
self._state_machine.do_multi_write()
else:
            # Emulate multi-his-write with separate single-point writes
self._log.debug('No multi-his-write support, emulating')
self._state_machine.do_single_write()
def _do_multi_write(self, event):
"""
Request the data from the server as a single multi-read request.
"""
self._session.multi_his_write(self._frame,
callback=self._on_multi_write)
def _on_multi_write(self, operation, **kwargs):
"""
Handle the multi-valued grid.
"""
try:
grid = operation.result
if not isinstance(grid, hszinc.Grid):
raise ValueError('Unexpected result %r' % grid)
self._state_machine.all_write_done(result=None)
except: # Catch all exceptions to pass to caller.
self._log.debug('Hit exception', exc_info=1)
self._state_machine.exception(result=AsynchronousException())
def _do_single_write(self, event):
"""
Submit the data in single write requests.
"""
for point in self._columns:
self._log.debug('Point %s', point)
# Extract a series for this column
series = dict([(r['ts'], r[point]) for r in \
filter(lambda r : r.get(point) is not None, self._frame)])
self._session.his_write_series(point, series,
callback=lambda operation, **kw : \
self._on_single_write(operation, point=point))
def _on_single_write(self, operation, point, **kwargs):
"""
Handle the single write.
"""
self._log.debug('Response back for point %s', point)
try:
res = operation.result
if res is not None:
raise ValueError('Unexpected result %r' % res)
self._todo.discard(point)
self._log.debug('Still waiting for: %s', self._todo)
if not self._todo:
# No more to read
self._state_machine.all_write_done(result=None)
except: # Catch all exceptions to pass to caller.
self._log.debug('Hit exception', exc_info=1)
self._state_machine.exception(result=AsynchronousException())
def _do_done(self, event):
"""
Return the result from the state machine.
"""
self._done(event.result)
if HAVE_PANDAS:
class MetaSeries(Series):
"""
        Custom Pandas Series carrying extra metadata.
"""
meta = {}
@property
def _constructor(self):
return MetaSeries
def add_meta(self, key, value):
self.meta[key] = value
class MetaDataFrame(DataFrame):
"""
        Custom Pandas DataFrame carrying extra metadata,
        built from MetaSeries columns.
"""
meta = {}
def __init__(self, *args, **kw):
super(MetaDataFrame, self).__init__(*args, **kw)
@property
def _constructor(self):
return MetaDataFrame
_constructor_sliced = MetaSeries
def add_meta(self, key, value):
self.meta[key] = value
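    # Illustrative sketch of the metadata helpers (hypothetical values):
    #
    #     s = MetaSeries([21.5, 22.0])
    #     s.add_meta('units', 'celsius')
    #     s.meta['units']   ->  'celsius'
    #
    # Note that `meta` is defined at class level, so the dict is shared
    # between instances of the same class.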
|
apache-2.0
|
ngoix/OCRF
|
benchmarks/bench_isolation_forest.py
|
1
|
4472
|
"""
==========================================
IsolationForest benchmark
==========================================
A test of IsolationForest on classical anomaly detection datasets.
"""
print(__doc__)
from time import time
import numpy as np
# import matplotlib.pyplot as plt
# for the cluster to save the fig:
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from sklearn.ensemble import IsolationForest
from sklearn.metrics import roc_curve, precision_recall_curve, auc
from sklearn.datasets import one_class_data
from sklearn.utils import shuffle as sh
from scipy.interpolate import interp1d
from sklearn.utils import timeout, max_time, TimeoutError
np.random.seed(1)
nb_exp = 10
# XXXXXXX Launch without pythonpath (with python) on MASTER (after built)
# # datasets available:
# datasets = ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover',
# 'ionosphere', 'spambase', 'annthyroid', 'arrhythmia',
# 'pendigits', 'pima', 'wilt','internet_ads', 'adult']
# continuous datasets:
datasets = ['http', 'smtp', 'shuttle', 'forestcover',
'ionosphere', 'spambase', 'annthyroid', 'arrhythmia',
'pendigits', 'pima', 'wilt', 'adult']
# # new datasets:
# datasets = ['ionosphere', 'spambase', 'annthyroid', 'arrhythmia',
# 'pendigits', 'pima', 'wilt', 'adult']
# datasets = ['ionosphere']
plt.figure(figsize=(25, 17))
for dat in datasets:
# loading and vectorization
X, y = one_class_data(dat)
n_samples, n_features = np.shape(X)
n_samples_train = n_samples // 2
n_samples_test = n_samples - n_samples_train
n_axis = 1000
x_axis = np.linspace(0, 1, n_axis)
tpr = np.zeros(n_axis)
precision = np.zeros(n_axis)
fit_time = 0
predict_time = 0
try:
for ne in range(nb_exp):
            print('exp num: %d' % ne)
X, y = sh(X, y)
# indices = np.arange(X.shape[0])
# np.random.shuffle(indices) # shuffle the dataset
# X = X[indices]
# y = y[indices]
X_train = X[:n_samples_train, :]
X_test = X[n_samples_train:, :]
y_train = y[:n_samples_train]
y_test = y[n_samples_train:]
# # training only on normal data:
# X_train = X_train[y_train == 0]
# y_train = y_train[y_train == 0]
print('IsolationForest processing...')
model = IsolationForest()
tstart = time()
model.fit(X_train)
fit_time += time() - tstart
tstart = time()
            scoring = -model.decision_function(X_test)  # the lower, the more normal
predict_time += time() - tstart
fpr_, tpr_, thresholds_ = roc_curve(y_test, scoring)
if predict_time + fit_time > max_time:
raise TimeoutError
f = interp1d(fpr_, tpr_)
tpr += f(x_axis)
tpr[0] = 0.
precision_, recall_ = precision_recall_curve(y_test, scoring)[:2]
# cluster: old version of scipy -> interpol1d needs sorted x_input
arg_sorted = recall_.argsort()
recall_ = recall_[arg_sorted]
precision_ = precision_[arg_sorted]
f = interp1d(recall_, precision_)
precision += f(x_axis)
except TimeoutError:
continue
tpr /= float(nb_exp)
fit_time /= float(nb_exp)
predict_time /= float(nb_exp)
AUC = auc(x_axis, tpr)
precision /= float(nb_exp)
precision[0] = 1.
AUPR = auc(x_axis, precision)
plt.subplot(121)
plt.plot(x_axis, tpr, lw=1, label='%s (area = %0.3f, train-time: %0.2fs, test-time: %0.2fs)' % (dat, AUC, fit_time, predict_time))
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate', fontsize=25)
plt.ylabel('True Positive Rate', fontsize=25)
plt.title('Receiver operating characteristic for IsolationForest',
fontsize=25)
plt.legend(loc="lower right", prop={'size': 15})
plt.subplot(122)
plt.plot(x_axis, precision, lw=1, label='%s (area = %0.3f)'
% (dat, AUPR))
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('Recall', fontsize=25)
plt.ylabel('Precision', fontsize=25)
plt.title('Precision-Recall curve', fontsize=25)
plt.legend(loc="lower right", prop={'size': 15})
plt.savefig('results_ocrf/bench_iforest_roc_pr_unsupervised_factorized')
|
bsd-3-clause
|
altairpearl/scikit-learn
|
examples/bicluster/bicluster_newsgroups.py
|
142
|
7183
|
"""
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words are printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
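# Illustrative example of the tokenizer (sentence chosen for illustration):
#
#     number_aware_tokenizer("Call 911 for help")
#         ->  ['Call', '#NUMBER', 'for', 'help']
#
# The regexp keeps only tokens of two or more word characters, and any token
# starting with a digit or underscore collapses to the '#NUMBER' placeholder.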
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
# Note: the following is identical to X[rows[:, np.newaxis], cols].sum() but
# much faster in scipy <= 0.16
weight = X[rows][:, cols].sum()
cut = (X[row_complement][:, cols].sum() +
X[rows][:, col_complement].sum())
return cut / weight
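# In terms of the word-frequency matrix X, the quantity returned above is
#
#     ncut(i) = (sum(X[rows_out, cols]) + sum(X[rows, cols_out])) / sum(X[rows, cols])
#
# where rows_out / cols_out are the rows and columns outside bicluster i;
# lower values indicate a denser, better separated bicluster.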
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
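# Illustrative example (hypothetical counts):
#
#     most_common(defaultdict(int, {'sci.med': 3, 'rec.autos': 7}))
#         ->  [('rec.autos', 7), ('sci.med', 3)]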
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
|
bsd-3-clause
|
MadsJensen/CAA
|
hilbert_preprocessing.py
|
1
|
3552
|
import mne
from my_settings import *
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
n_jobs = 3
for subject in [subjects_select[-1]]:
raw = mne.io.Raw(save_folder + "%s_filtered_ica_mc_raw_tsss.fif" % subject,
preload=True)
raw.resample(250, n_jobs=n_jobs, verbose=True)
raw.filter(8, 12, n_jobs=n_jobs, verbose=True)
include = []
picks = mne.pick_types(raw.info, meg=True, eeg=True, stim=False, eog=False,
include=include, exclude='bads')
raw.apply_hilbert(picks, n_jobs=n_jobs, verbose=True)
raw.save(save_folder + "%s_hilbert_ica_mc_raw_tsss.fif" % subject,
overwrite=True)
tmin, tmax = -0.5, 1.5 # Epoch time
# All the behavioral results
results = pd.read_csv(log_folder + "results_all.csv")
# select only the relevant subject
log_tmp = results[results.subject == int(subject)].reset_index()
raw.del_proj(0)
# Select events to extract epochs from.
event_id = {"all_trials": 99}
# Setup for reading the raw data
events = mne.find_events(raw, min_duration=0.015)
events = mne.event.merge_events(events, [1, 2, 4, 8], 99,
replace_events=True)
event_id = {}
epoch_ids = []
for i, row in log_tmp.iterrows():
if row.condition_type == "ctl":
epoch_name = "ctl"
epoch_id = "1"
elif row.condition_type == "ent":
epoch_name = "ent"
epoch_id = "2"
if row.condition_side == "left":
epoch_name = epoch_name + "/" + "left"
epoch_id = epoch_id + "1"
elif row.condition_side == "right":
epoch_name = epoch_name + "/" + "right"
epoch_id = epoch_id + "0"
if row.congruent is True:
epoch_name = epoch_name + "/" + "cong"
epoch_id = epoch_id + "1"
elif row.congruent is False:
epoch_name = epoch_name + "/" + "incong"
epoch_id = epoch_id + "0"
if row.correct is True:
epoch_name = epoch_name + "/" + "correct"
epoch_id = epoch_id + "1"
elif row.correct is False:
epoch_name = epoch_name + "/" + "incorrect"
epoch_id = epoch_id + "0"
if row.in_phase is True:
epoch_name = epoch_name + "/" + "in_phase"
epoch_id = epoch_id + "1"
elif row.in_phase is False:
epoch_name = epoch_name + "/" + "out_phase"
epoch_id = epoch_id + "0"
epoch_name = epoch_name + "/" + str(row.PAS)
epoch_id = epoch_id + str(row.PAS)
epoch_ids.append(int(epoch_id))
        if epoch_name not in event_id:
event_id[str(epoch_name)] = int(epoch_id)
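    # Worked example of the encoding above (hypothetical trial): an "ent"
    # trial shown on the left, congruent, answered correctly, in phase and
    # rated PAS == 3 gets the name "ent/left/cong/correct/in_phase/3" and
    # the numeric id int("2" + "1" + "1" + "1" + "1" + "3") == 211113.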
idx = np.arange(0, len(events), 4)
for i in range(len(events[idx])):
events[idx[i]][2] = epoch_ids[i]
# picks = mne.pick_types(raw.info, meg=True, eeg=True, stim=False,
# eog=False,
# include=include, exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, -0.2), reject=reject_params,
add_eeg_ref=True,
preload=False)
epochs.drop_bad_epochs(reject_params)
fig = epochs.plot_drop_log(subject=subject, show=False)
fig.savefig(epochs_folder + "pics/hilbert_%s_drop_log.png" % subject)
epochs.save(epochs_folder + "%s_hilbert_trial_start-epo.fif" % subject)
|
bsd-3-clause
|
mweisman/QGIS
|
python/plugins/processing/algs/RasterLayerHistogram.py
|
6
|
3219
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
RasterLayerHistogram.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
from PyQt4.QtCore import *
from qgis.core import *
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.parameters.ParameterNumber import ParameterNumber
from processing.parameters.ParameterRaster import ParameterRaster
from processing.outputs.OutputTable import OutputTable
from processing.outputs.OutputHTML import OutputHTML
from processing.tools import dataobjects
from processing.tools import raster
class RasterLayerHistogram(GeoAlgorithm):
INPUT = 'INPUT'
PLOT = 'PLOT'
TABLE = 'TABLE'
BINS = 'BINS'
def processAlgorithm(self, progress):
uri = self.getParameterValue(self.INPUT)
layer = dataobjects.getObjectFromUri(uri)
outputplot = self.getOutputValue(self.PLOT)
outputtable = self.getOutputFromName(self.TABLE)
values = raster.scanraster(layer, progress)
nbins = self.getParameterValue(self.BINS)
# ALERT: this is potentially blocking if the layer is too big
plt.close()
valueslist = []
for v in values:
if v is not None:
valueslist.append(v)
(n, bins, values) = plt.hist(valueslist, nbins)
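        # plt.hist returns (counts, bin_edges, patches): `n` holds the per-bin
        # counts, `bins` the nbins + 1 edges, and the drawn patches rebind the
        # name `values` from this point onwards.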
fields = [QgsField('CENTER_VALUE', QVariant.Double),
QgsField('NUM_ELEM', QVariant.Double)]
writer = outputtable.getTableWriter(fields)
for i in xrange(len(values)):
writer.addRecord([str(bins[i]) + '-' + str(bins[i + 1]), n[i]])
plotFilename = outputplot + '.png'
lab.savefig(plotFilename)
f = open(outputplot, 'w')
f.write('<img src="' + plotFilename + '"/>')
f.close()
def defineCharacteristics(self):
self.name = 'Raster layer histogram'
self.group = 'Graphics'
self.addParameter(ParameterRaster(self.INPUT, 'Input layer'))
self.addParameter(ParameterNumber(self.BINS, 'Number of bins', 2,
None, 10))
self.addOutput(OutputHTML(self.PLOT, 'Output plot'))
self.addOutput(OutputTable(self.TABLE, 'Output table'))
|
gpl-2.0
|
jseabold/statsmodels
|
statsmodels/examples/ex_generic_mle.py
|
5
|
14932
|
from functools import partial
import numpy as np
from scipy import stats
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
from statsmodels.tools.numdiff import approx_fprime, approx_hess
data = sm.datasets.spector.load(as_pandas=False)
data.exog = sm.add_constant(data.exog, prepend=False)
# in this dir
probit_mod = sm.Probit(data.endog, data.exog)
probit_res = probit_mod.fit()
loglike = probit_mod.loglike
score = probit_mod.score
mod = GenericLikelihoodModel(data.endog, data.exog*2, loglike, score)
res = mod.fit(method="nm", maxiter = 500)
def probitloglike(params, endog, exog):
"""
Log likelihood for the probit
"""
q = 2*endog - 1
X = exog
return np.add.reduce(stats.norm.logcdf(q*np.dot(X,params)))
model_loglike = partial(probitloglike, endog=data.endog, exog=data.exog)
mod = GenericLikelihoodModel(data.endog, data.exog, loglike=model_loglike)
res = mod.fit(method="nm", maxiter=500)
print(res)
np.allclose(res.params, probit_res.params, rtol=1e-4)
print(res.params, probit_res.params)
#datal = sm.datasets.longley.load(as_pandas=False)
datal = sm.datasets.ccard.load(as_pandas=False)
datal.exog = sm.add_constant(datal.exog, prepend=False)
# Instance of GenericLikelihood model does not work directly, because loglike
# cannot get access to data in self.endog, self.exog
nobs = 5000
rvs = np.random.randn(nobs,6)
datal.exog = rvs[:,:-1]
datal.exog = sm.add_constant(datal.exog, prepend=False)
datal.endog = 1 + rvs.sum(1)
show_error = False
show_error2 = 1#False
if show_error:
def loglike_norm_xb(self, params):
beta = params[:-1]
sigma = params[-1]
xb = np.dot(self.exog, beta)
return stats.norm.logpdf(self.endog, loc=xb, scale=sigma)
mod_norm = GenericLikelihoodModel(datal.endog, datal.exog, loglike_norm_xb)
res_norm = mod_norm.fit(method="nm", maxiter = 500)
print(res_norm.params)
if show_error2:
def loglike_norm_xb(params, endog, exog):
beta = params[:-1]
sigma = params[-1]
#print exog.shape, beta.shape
xb = np.dot(exog, beta)
#print xb.shape, stats.norm.logpdf(endog, loc=xb, scale=sigma).shape
return stats.norm.logpdf(endog, loc=xb, scale=sigma).sum()
model_loglike3 = partial(loglike_norm_xb,
endog=datal.endog, exog=datal.exog)
mod_norm = GenericLikelihoodModel(datal.endog, datal.exog, model_loglike3)
res_norm = mod_norm.fit(start_params=np.ones(datal.exog.shape[1]+1),
method="nm", maxiter = 5000)
print(res_norm.params)
class MygMLE(GenericLikelihoodModel):
# just for testing
def loglike(self, params):
beta = params[:-1]
sigma = params[-1]
xb = np.dot(self.exog, beta)
return stats.norm.logpdf(self.endog, loc=xb, scale=sigma).sum()
def loglikeobs(self, params):
beta = params[:-1]
sigma = params[-1]
xb = np.dot(self.exog, beta)
return stats.norm.logpdf(self.endog, loc=xb, scale=sigma)
mod_norm2 = MygMLE(datal.endog, datal.exog)
#res_norm = mod_norm.fit(start_params=np.ones(datal.exog.shape[1]+1), method="nm", maxiter = 500)
res_norm2 = mod_norm2.fit(start_params=[1.]*datal.exog.shape[1]+[1], method="nm", maxiter = 500)
np.allclose(res_norm.params, res_norm2.params)
print(res_norm2.params)
res2 = sm.OLS(datal.endog, datal.exog).fit()
start_params = np.hstack((res2.params, np.sqrt(res2.mse_resid)))
res_norm3 = mod_norm2.fit(start_params=start_params, method="nm", maxiter = 500,
retall=0)
print(start_params)
print(res_norm3.params)
print(res2.bse)
print(res_norm3.bse)
print('llf', res2.llf, res_norm3.llf)
bse = np.sqrt(np.diag(np.linalg.inv(res_norm3.model.hessian(res_norm3.params))))
res_norm3.model.score(res_norm3.params)
#fprime in fit option cannot be overwritten, set to None, when score is defined
# exception is fixed, but I do not think score was supposed to be called
res_bfgs = mod_norm2.fit(start_params=start_params, method="bfgs", fprime=None,
maxiter=500, retall=0)
hb=-approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
hf=-approx_hess(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)
hh = (hf+hb)/2.
print(np.linalg.eigh(hh))
grad = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
print(grad)
gradb = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=-1e-4)
gradf = -approx_fprime(res_norm3.params, mod_norm2.loglike, epsilon=1e-4)
print((gradb+gradf)/2.)
print(res_norm3.model.score(res_norm3.params))
print(res_norm3.model.score(start_params))
mod_norm2.loglike(start_params/2.)
print(np.linalg.inv(-1*mod_norm2.hessian(res_norm3.params)))
print(np.sqrt(np.diag(res_bfgs.cov_params())))
print(res_norm3.bse)
print("MLE - OLS parameter estimates")
print(res_norm3.params[:-1] - res2.params)
print("bse diff in percent")
print((res_norm3.bse[:-1] / res2.bse)*100. - 100)
'''
Optimization terminated successfully.
Current function value: 12.818804
Iterations 6
Optimization terminated successfully.
Current function value: 12.818804
Iterations: 439
Function evaluations: 735
Optimization terminated successfully.
Current function value: 12.818804
Iterations: 439
Function evaluations: 735
<statsmodels.model.LikelihoodModelResults object at 0x02131290>
[ 1.6258006 0.05172931 1.42632252 -7.45229732] [ 1.62581004 0.05172895 1.42633234 -7.45231965]
Warning: Maximum number of function evaluations has been exceeded.
[ -1.18109149 246.94438535 -16.21235536 24.05282629 -324.80867176
274.07378453]
Warning: Maximum number of iterations has been exceeded
[ 17.57107 -149.87528787 19.89079376 -72.49810777 -50.06067953
306.14170418]
Optimization terminated successfully.
Current function value: 506.488765
Iterations: 339
Function evaluations: 550
[ -3.08181404 234.34702702 -14.99684418 27.94090839 -237.1465136
284.75079529]
[ -3.08181304 234.34701361 -14.99684381 27.94088692 -237.14649571
274.6857294 ]
[ 5.51471653 80.36595035 7.46933695 82.92232357 199.35166485]
llf -506.488764864 -506.488764864
Optimization terminated successfully.
Current function value: 506.488765
Iterations: 9
Function evaluations: 13
Gradient evaluations: 13
(array([ 2.41772580e-05, 1.62492628e-04, 2.79438138e-04,
1.90996240e-03, 2.07117946e-01, 1.28747174e+00]), array([[ 1.52225754e-02, 2.01838216e-02, 6.90127235e-02,
-2.57002471e-04, -5.25941060e-01, -8.47339404e-01],
[ 2.39797491e-01, -2.32325602e-01, -9.36235262e-01,
3.02434938e-03, 3.95614029e-02, -1.02035585e-01],
[ -2.11381471e-02, 3.01074776e-02, 7.97208277e-02,
-2.94955832e-04, 8.49402362e-01, -5.20391053e-01],
[ -1.55821981e-01, -9.66926643e-01, 2.01517298e-01,
1.52397702e-03, 4.13805882e-03, -1.19878714e-02],
[ -9.57881586e-01, 9.87911166e-02, -2.67819451e-01,
1.55192932e-03, -1.78717579e-02, -2.55757014e-02],
[ -9.96486655e-04, -2.03697290e-03, -2.98130314e-03,
-9.99992985e-01, -1.71500426e-05, 4.70854949e-06]]))
[[ -4.91007768e-05 -7.28732630e-07 -2.51941401e-05 -2.50111043e-08
-4.77484718e-08 -9.72022463e-08]]
[[ -1.64845915e-08 -2.87059265e-08 -2.88764568e-07 -6.82121026e-09
2.84217094e-10 -1.70530257e-09]]
[ -4.90678076e-05 -6.71320777e-07 -2.46166110e-05 -1.13686838e-08
-4.83169060e-08 -9.37916411e-08]
[ -4.56753924e-05 -6.50857146e-07 -2.31756303e-05 -1.70530257e-08
-4.43378667e-08 -1.75592936e-02]
[[ 2.99386348e+01 -1.24442928e+02 9.67254672e+00 -1.58968536e+02
-5.91960010e+02 -2.48738183e+00]
[ -1.24442928e+02 5.62972166e+03 -5.00079203e+02 -7.13057475e+02
-7.82440674e+03 -1.05126925e+01]
[ 9.67254672e+00 -5.00079203e+02 4.87472259e+01 3.37373299e+00
6.96960872e+02 7.69866589e-01]
[ -1.58968536e+02 -7.13057475e+02 3.37373299e+00 6.82417837e+03
4.84485862e+03 3.21440021e+01]
[ -5.91960010e+02 -7.82440674e+03 6.96960872e+02 4.84485862e+03
3.43753691e+04 9.37524459e+01]
[ -2.48738183e+00 -1.05126925e+01 7.69866589e-01 3.21440021e+01
9.37524459e+01 5.23915258e+02]]
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> print res_norm3.model.score(res_norm3.params)
[ -4.90678076e-05 -6.71320777e-07 -2.46166110e-05 -1.13686838e-08
-4.83169060e-08 -9.37916411e-08]
>>> print res_norm3.model.score(start_params)
[ -4.56753924e-05 -6.50857146e-07 -2.31756303e-05 -1.70530257e-08
-4.43378667e-08 -1.75592936e-02]
>>> mod_norm2.loglike(start_params/2.)
-598.56178102781314
>>> print np.linalg.inv(-1*mod_norm2.hessian(res_norm3.params))
[[ 2.99386348e+01 -1.24442928e+02 9.67254672e+00 -1.58968536e+02
-5.91960010e+02 -2.48738183e+00]
[ -1.24442928e+02 5.62972166e+03 -5.00079203e+02 -7.13057475e+02
-7.82440674e+03 -1.05126925e+01]
[ 9.67254672e+00 -5.00079203e+02 4.87472259e+01 3.37373299e+00
6.96960872e+02 7.69866589e-01]
[ -1.58968536e+02 -7.13057475e+02 3.37373299e+00 6.82417837e+03
4.84485862e+03 3.21440021e+01]
[ -5.91960010e+02 -7.82440674e+03 6.96960872e+02 4.84485862e+03
3.43753691e+04 9.37524459e+01]
[ -2.48738183e+00 -1.05126925e+01 7.69866589e-01 3.21440021e+01
9.37524459e+01 5.23915258e+02]]
>>> print np.sqrt(np.diag(res_bfgs.cov_params()))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
>>> print res_norm3.bse
[ 5.47162086 75.03147114 6.98192136 82.60858536 185.40595756
22.88919522]
>>> res_norm3.conf_int
<bound method LikelihoodModelResults.conf_int of <statsmodels.model.LikelihoodModelResults object at 0x021317F0>>
>>> res_norm3.conf_int()
array([[0.96421437, 1.01999835],
[0.99251725, 1.04863332],
[0.95721328, 1.01246222],
[0.97134549, 1.02695393],
[0.97050081, 1.02660988],
[0.97773434, 1.03290028],
[0.97529207, 1.01428874]])
>>> res_norm3.params
array([ -3.08181304, 234.34701361, -14.99684381, 27.94088692,
-237.14649571, 274.6857294 ])
>>> res2.params
array([ -3.08181404, 234.34702702, -14.99684418, 27.94090839,
-237.1465136 ])
>>>
>>> res_norm3.params - res2.params
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: shape mismatch: objects cannot be broadcast to a single shape
>>> res_norm3.params[:-1] - res2.params
array([ 9.96859735e-07, -1.34122981e-05, 3.72278400e-07,
-2.14645839e-05, 1.78919019e-05])
>>>
>>> res_norm3.bse[:-1] - res2.bse
array([ -0.04309567, -5.33447922, -0.48741559, -0.31373822, -13.94570729])
>>> (res_norm3.bse[:-1] / res2.bse) - 1
array([-0.00781467, -0.06637735, -0.06525554, -0.00378352, -0.06995531])
>>> (res_norm3.bse[:-1] / res2.bse)*100. - 100
array([-0.7814667 , -6.6377355 , -6.52555369, -0.37835193, -6.99553089])
>>> np.sqrt(np.diag(np.linalg.inv(res_norm3.model.hessian(res_bfgs.params))))
array([ NaN, NaN, NaN, NaN, NaN, NaN])
>>> np.sqrt(np.diag(np.linalg.inv(-res_norm3.model.hessian(res_bfgs.params))))
array([ 5.10032831, 74.34988912, 6.96522122, 76.7091604 ,
169.8117832 , 22.91695494])
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> res2.bse
array([ 5.51471653, 80.36595035, 7.46933695, 82.92232357,
199.35166485])
>>>
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(-res_norm3.model.hessian(res_bfgs.params))))
>>> (bse_bfgs[:-1] / res2.bse)*100. - 100
array([ -7.51422527, -7.4858335 , -6.74913633, -7.49275094, -14.8179759 ])
>>> hb=-approx_hess(res_bfgs.params, mod_norm2.loglike, epsilon=-1e-4)
>>> hf=-approx_hess(res_bfgs.params, mod_norm2.loglike, epsilon=1e-4)
>>> hh = (hf+hb)/2.
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(-hh)))
>>> bse_bfgs
array([ NaN, NaN, NaN, NaN, NaN, NaN])
>>> bse_bfgs = np.sqrt(np.diag(np.linalg.inv(hh)))
>>> np.diag(hh)
array([ 9.81680159e-01, 1.39920076e-02, 4.98101826e-01,
3.60955710e-04, 9.57811608e-04, 1.90709670e-03])
>>> np.diag(np.inv(hh))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'module' object has no attribute 'inv'
>>> np.diag(np.linalg.inv(hh))
array([ 2.64875153e+01, 5.91578496e+03, 5.13279911e+01,
6.11533345e+03, 3.33775960e+04, 5.24357391e+02])
>>> res2.bse**2
array([ 3.04120984e+01, 6.45868598e+03, 5.57909945e+01,
6.87611175e+03, 3.97410863e+04])
>>> bse_bfgs
array([ 5.14660231, 76.91414015, 7.1643556 , 78.20059751,
182.69536402, 22.89885131])
>>> bse_bfgs - res_norm3.bse
array([-0.32501855, 1.88266901, 0.18243424, -4.40798785, -2.71059354,
0.00965609])
>>> (bse_bfgs[:-1] / res2.bse)*100. - 100
array([-6.67512508, -4.29511526, -4.0831115 , -5.69415552, -8.35523538])
>>> (res_norm3.bse[:-1] / res2.bse)*100. - 100
array([-0.7814667 , -6.6377355 , -6.52555369, -0.37835193, -6.99553089])
>>> (bse_bfgs / res_norm3.bse)*100. - 100
array([-5.94007812, 2.50917247, 2.61295176, -5.33599242, -1.46197759,
0.04218624])
>>> bse_bfgs
array([ 5.14660231, 76.91414015, 7.1643556 , 78.20059751,
182.69536402, 22.89885131])
>>> res_norm3.bse
array([ 5.47162086, 75.03147114, 6.98192136, 82.60858536,
185.40595756, 22.88919522])
>>> res2.bse
array([ 5.51471653, 80.36595035, 7.46933695, 82.92232357,
199.35166485])
>>> dir(res_bfgs)
['__class__', '__delattr__', '__dict__', '__doc__', '__getattribute__', '__hash__', '__init__', '__module__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__str__', '__weakref__', 'bse', 'conf_int', 'cov_params', 'f_test', 'initialize', 'llf', 'mle_retvals', 'mle_settings', 'model', 'normalized_cov_params', 'params', 'scale', 't', 't_test']
>>> res_bfgs.scale
1.0
>>> res2.scale
81083.015420213851
>>> res2.mse_resid
81083.015420213851
>>> print np.sqrt(np.diag(np.linalg.inv(-1*mod_norm2.hessian(res_bfgs.params))))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
>>> print np.sqrt(np.diag(np.linalg.inv(-1*res_bfgs.model.hessian(res_bfgs.params))))
[ 5.10032831 74.34988912 6.96522122 76.7091604 169.8117832
22.91695494]
Is scale a misnomer, actually scale squared, i.e. variance of error term ?
'''
print(res_norm3.model.score_obs(res_norm3.params).shape)
jac = res_norm3.model.score_obs(res_norm3.params)
print(np.sqrt(np.diag(np.dot(jac.T, jac)))/start_params)
jac2 = res_norm3.model.score_obs(res_norm3.params, centered=True)
print(np.sqrt(np.diag(np.linalg.inv(np.dot(jac.T, jac)))))
print(res_norm3.bse)
print(res2.bse)
|
bsd-3-clause
|
M4573R/BuildingMachineLearningSystemsWithPython
|
ch10/chapter.py
|
20
|
4997
|
import numpy as np
import mahotas as mh
image = mh.imread('scene00.jpg')
from matplotlib import pyplot as plt
plt.imshow(image)
plt.show()
image = mh.colors.rgb2grey(image, dtype=np.uint8)
plt.imshow(image) # Display the image
plt.gray()
thresh = mh.thresholding.otsu(image)
print('Otsu threshold is {}.'.format(thresh))
# Otsu threshold is 138.
plt.imshow(image > thresh)
im16 = mh.gaussian_filter(image,16)
im = mh.demos.load('lenna')
r,g,b = im.transpose(2,0,1)
r12 = mh.gaussian_filter(r, 12.)
g12 = mh.gaussian_filter(g, 12.)
b12 = mh.gaussian_filter(b, 12.)
im12 = mh.as_rgb(r12,g12,b12)
h, w = r.shape # height and width
Y, X = np.mgrid[:h,:w]
Y = Y-h/2. # center at h/2
Y = Y / Y.max() # normalize to -1 .. +1
X = X-w/2.
X = X / X.max()
C = np.exp(-2.*(X**2+ Y**2))
# Normalize again to 0..1
C = C - C.min()
C = C / C.ptp()
C = C[:,:,None] # This adds a dummy third dimension to C
ringed = mh.stretch(im*C + (1-C)*im12)
haralick_features = mh.features.haralick(image)
haralick_features_mean = np.mean(haralick_features, axis=0)
haralick_features_all = np.ravel(haralick_features)
from glob import glob
images = glob('../SimpleImageDataset/*.jpg')
features = []
labels = []
for im in images:
labels.append(im[:-len('00.jpg')])
im = mh.imread(im)
im = mh.colors.rgb2gray(im, dtype=np.uint8)
features.append(mh.features.haralick(im).ravel())
features = np.array(features)
labels = np.array(labels)
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
clf = Pipeline([('preproc', StandardScaler()),
('classifier', LogisticRegression())])
from sklearn import cross_validation
cv = cross_validation.LeaveOneOut(len(images))
scores = cross_validation.cross_val_score(
clf, features, labels, cv=cv)
print('Accuracy: {:.1%}'.format(scores.mean()))
# Accuracy: 81.1%
def chist(im):
im = im // 64
r,g,b = im.transpose((2,0,1))
pixels = 1 * r + 4 * b + 16 * g
hist = np.bincount(pixels.ravel(), minlength=64)
hist = hist.astype(float)
hist = np.log1p(hist)
return hist
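# Worked example of the binning above: a pixel with (r, g, b) = (255, 0, 128)
# quantises to (3, 0, 2) after the integer division by 64 and therefore lands
# in bin 1*3 + 4*2 + 16*0 = 11 of the 64-bin joint colour histogram.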
features = []
for im in images:
im = mh.imread(im)
features.append(chist(im))
features = []
for im in images:
imcolor = mh.imread(im)
im = mh.colors.rgb2gray(imcolor, dtype=np.uint8)
features.append(np.concatenate([
mh.features.haralick(im).ravel(),
chist(imcolor),
]))
scores = cross_validation.cross_val_score(
clf, features, labels, cv=cv)
print('Accuracy: {:.1%}'.format(scores.mean()))
# Accuracy: 95.6%
features = []
for im in images:
imcolor = mh.imread(im)
# Ignore everything in the 200 pixels close to the borders
imcolor = imcolor[200:-200, 200:-200]
im = mh.colors.rgb2gray(imcolor, dtype=np.uint8)
features.append(np.concatenate([
mh.features.haralick(im).ravel(),
chist(imcolor),
]))
sc = StandardScaler()
features = sc.fit_transform(features)
from scipy.spatial import distance
dists = distance.squareform(distance.pdist(features))
fig, axes = plt.subplots(2, 9)
for ci,i in enumerate(range(0,90,10)):
left = images[i]
dists_left = dists[i]
right = dists_left.argsort()
# right[0] is the same as left[i], so pick the next closest element
right = right[1]
right = images[right]
left = mh.imread(left)
right = mh.imread(right)
axes[0, ci].imshow(left)
axes[1, ci].imshow(right)
from sklearn.grid_search import GridSearchCV
C_range = 10.0 ** np.arange(-4, 3)
grid = GridSearchCV(LogisticRegression(), param_grid={'C' : C_range})
clf = Pipeline([('preproc', StandardScaler()),
('classifier', grid)])
cv = cross_validation.KFold(len(features), 5,
shuffle=True, random_state=123)
scores = cross_validation.cross_val_score(
clf, features, labels, cv=cv)
print('Accuracy: {:.1%}'.format(scores.mean()))
from mahotas.features import surf
image = mh.demos.load('lena')
image = mh.colors.rgb2gray(image, dtype=np.uint8)
descriptors = surf.surf(image, descriptor_only=True)
from mahotas.features import surf
descriptors = surf.dense(image, spacing=16)
alldescriptors = []
for im in images:
im = mh.imread(im, as_grey=True)
im = im.astype(np.uint8)
    alldescriptors.append(surf.dense(im, spacing=16))  # use the per-image array
# get all descriptors into a single array
concatenated = np.concatenate(alldescriptors)
print('Number of descriptors: {}'.format(
len(concatenated)))
# use only every 64th vector
concatenated = concatenated[::64]
from sklearn.cluster import KMeans # FIXME CAPITALIZATION
k = 256
km = KMeans(k)
km.fit(concatenated)
features = []
for d in alldescriptors:
c = km.predict(d)
features.append(
np.array([np.sum(c == ci) for ci in range(k)])
)
# build single array and convert to float
features = np.array(features, dtype=float)
scores = cross_validation.cross_val_score(
clf, features, labels, cv=cv)
print('Accuracy: {:.1%}'.format(scores.mean()))
# Accuracy: 62.6%
|
mit
|
roryyorke/python-control
|
examples/pvtol-lqr.py
|
3
|
6908
|
# pvtol_lqr.m - LQR design for vectored thrust aircraft
# RMM, 14 Jan 03
#
# This file works through an LQR based design problem, using the
# planar vertical takeoff and landing (PVTOL) aircraft example from
# Astrom and Murray, Chapter 5. It is intended to demonstrate the
# basic functionality of the python-control package.
#
import os
import numpy as np
import matplotlib.pyplot as plt # MATLAB plotting functions
from control.matlab import * # MATLAB-like functions
#
# System dynamics
#
# These are the dynamics for the PVTOL system, written in state space
# form.
#
# System parameters
m = 4 # mass of aircraft
J = 0.0475 # inertia around pitch axis
r = 0.25 # distance to center of force
g = 9.8 # gravitational constant
c = 0.05 # damping factor (estimated)
# State space dynamics
xe = [0, 0, 0, 0, 0, 0] # equilibrium point of interest
ue = [0, m*g] # (note these are lists, not matrices)
# TODO: The following objects need converting from np.matrix to np.array
# This will involve re-working the subsequent equations as the shapes
# See below.
# Dynamics matrix (use matrix type so that * works for multiplication)
A = np.matrix(
[[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, (-ue[0]*np.sin(xe[2]) - ue[1]*np.cos(xe[2]))/m, -c/m, 0, 0],
[0, 0, (ue[0]*np.cos(xe[2]) - ue[1]*np.sin(xe[2]))/m, 0, -c/m, 0],
[0, 0, 0, 0, 0, 0]]
)
# Input matrix
B = np.matrix(
[[0, 0], [0, 0], [0, 0],
[np.cos(xe[2])/m, -np.sin(xe[2])/m],
[np.sin(xe[2])/m, np.cos(xe[2])/m],
[r/J, 0]]
)
# Output matrix
C = np.matrix([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0]])
D = np.matrix([[0, 0], [0, 0]])
#
# Construct inputs and outputs corresponding to steps in xy position
#
# The vectors xd and yd correspond to the states that are the desired
# equilibrium states for the system. The matrices Cx and Cy are the
# corresponding outputs.
#
# The way these vectors are used is to compute the closed loop system
# dynamics as
#
# xdot = A x + B u => xdot = (A - B K) x + B K xd
# u = -K(x - xd) y = Cx
#
# The closed loop dynamics can be simulated using the "step" command,
# with K*xd as the input vector (assumes that the "input" is unit size,
# so that xd corresponds to the desired steady state).
#
xd = np.matrix([[1], [0], [0], [0], [0], [0]])
yd = np.matrix([[0], [1], [0], [0], [0], [0]])
#
# Extract the relevant dynamics for use with SISO library
#
# The current python-control library only supports SISO transfer
# functions, so we have to modify some parts of the original MATLAB
# code to extract out SISO systems. To do this, we define the 'lat' and
# 'alt' index vectors to consist of the states that are relevant
# to the lateral (x) and vertical (y) dynamics.
#
# Indices for the parts of the state that we want
lat = (0, 2, 3, 5)
alt = (1, 4)
# Decoupled dynamics
Ax = (A[lat, :])[:, lat] # ! not sure why I have to do it this way
Bx = B[lat, 0]
Cx = C[0, lat]
Dx = D[0, 0]
Ay = (A[alt, :])[:, alt] # ! not sure why I have to do it this way
By = B[alt, 1]
Cy = C[1, alt]
Dy = D[1, 1]
# Label the plot
plt.clf()
plt.suptitle("LQR controllers for vectored thrust aircraft (pvtol-lqr)")
#
# LQR design
#
# Start with a diagonal weighting
Qx1 = np.diag([1, 1, 1, 1, 1, 1])
Qu1a = np.diag([1, 1])
K, X, E = lqr(A, B, Qx1, Qu1a)
K1a = np.matrix(K)
# Close the loop: xdot = Ax - B K (x-xd)
# Note: python-control requires we do this 1 input at a time
# H1a = ss(A-B*K1a, B*K1a*concatenate((xd, yd), axis=1), C, D);
# (T, Y) = step(H1a, T=np.linspace(0,10,100));
# TODO: The following equations will need modifying when converting from np.matrix to np.array
# because the results and even intermediate calculations will be different with numpy arrays
# For example:
# Bx = B[lat, 0]
# Will need to be changed to:
# Bx = B[lat, 0].reshape(-1, 1)
# (if we want it to have the same shape as before)
# For reference, here is a list of the correct shapes of these objects:
# A: (6, 6)
# B: (6, 2)
# C: (2, 6)
# D: (2, 2)
# xd: (6, 1)
# yd: (6, 1)
# Ax: (4, 4)
# Bx: (4, 1)
# Cx: (1, 4)
# Dx: ()
# Ay: (2, 2)
# By: (2, 1)
# Cy: (1, 2)
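# Illustrative sketch only (the `_arr`-suffixed names below are assumptions
# and are not used by the rest of this script): one possible np.array
# construction of the decoupled matrices, matching the shapes listed above.
A_arr = np.asarray(A)                           # (6, 6) ndarray
B_arr = np.asarray(B)                           # (6, 2) ndarray
Ax_arr = A_arr[np.ix_(lat, lat)]                # (4, 4)
Bx_arr = B_arr[lat, 0].reshape(-1, 1)           # (4, 1), matching the shape listed above
Cx_arr = np.asarray(C)[0, lat].reshape(1, -1)   # (1, 4)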
# Step response for the first input
H1ax = ss(Ax - Bx*K1a[0, lat], Bx*K1a[0, lat]*xd[lat, :], Cx, Dx)
Yx, Tx = step(H1ax, T=np.linspace(0, 10, 100))
# Step response for the second input
H1ay = ss(Ay - By*K1a[1, alt], By*K1a[1, alt]*yd[alt, :], Cy, Dy)
Yy, Ty = step(H1ay, T=np.linspace(0, 10, 100))
plt.subplot(221)
plt.title("Identity weights")
# plt.plot(T, Y[:,1, 1], '-', T, Y[:,2, 2], '--')
plt.plot(Tx.T, Yx.T, '-', Ty.T, Yy.T, '--')
plt.plot([0, 10], [1, 1], 'k-')
plt.axis([0, 10, -0.1, 1.4])
plt.ylabel('position')
plt.legend(('x', 'y'), loc='lower right')
# Look at different input weightings
Qu1a = np.diag([1, 1])
K1a, X, E = lqr(A, B, Qx1, Qu1a)
H1ax = ss(Ax - Bx*K1a[0, lat], Bx*K1a[0, lat]*xd[lat, :], Cx, Dx)
Qu1b = (40 ** 2)*np.diag([1, 1])
K1b, X, E = lqr(A, B, Qx1, Qu1b)
H1bx = ss(Ax - Bx*K1b[0, lat], Bx*K1b[0, lat]*xd[lat, :], Cx, Dx)
Qu1c = (200 ** 2)*np.diag([1, 1])
K1c, X, E = lqr(A, B, Qx1, Qu1c)
H1cx = ss(Ax - Bx*K1c[0, lat], Bx*K1c[0, lat]*xd[lat, :], Cx, Dx)
[Y1, T1] = step(H1ax, T=np.linspace(0, 10, 100))
[Y2, T2] = step(H1bx, T=np.linspace(0, 10, 100))
[Y3, T3] = step(H1cx, T=np.linspace(0, 10, 100))
plt.subplot(222)
plt.title("Effect of input weights")
plt.plot(T1.T, Y1.T, 'b-')
plt.plot(T2.T, Y2.T, 'b-')
plt.plot(T3.T, Y3.T, 'b-')
plt.plot([0, 10], [1, 1], 'k-')
plt.axis([0, 10, -0.1, 1.4])
# arcarrow([1.3, 0.8], [5, 0.45], -6)
plt.text(5.3, 0.4, 'rho')
# Output weighting - change Qx to use outputs
Qx2 = C.T*C
Qu2 = 0.1*np.diag([1, 1])
K, X, E = lqr(A, B, Qx2, Qu2)
K2 = np.matrix(K)
H2x = ss(Ax - Bx*K2[0, lat], Bx*K2[0, lat]*xd[lat, :], Cx, Dx)
H2y = ss(Ay - By*K2[1, alt], By*K2[1, alt]*yd[alt, :], Cy, Dy)
plt.subplot(223)
plt.title("Output weighting")
[Y2x, T2x] = step(H2x, T=np.linspace(0, 10, 100))
[Y2y, T2y] = step(H2y, T=np.linspace(0, 10, 100))
plt.plot(T2x.T, Y2x.T, T2y.T, Y2y.T)
plt.ylabel('position')
plt.xlabel('time')
plt.ylabel('position')
plt.legend(('x', 'y'), loc='lower right')
#
# Physically motivated weighting
#
# Shoot for 1 cm error in x, 10 cm error in y. Try to keep the angle
# less than 5 degrees in making the adjustments. Penalize side forces
# due to loss in efficiency.
#
Qx3 = np.diag([100, 10, 2*np.pi/5, 0, 0, 0])
Qu3 = 0.1*np.diag([1, 10])
(K, X, E) = lqr(A, B, Qx3, Qu3)
K3 = np.matrix(K)
H3x = ss(Ax - Bx*K3[0, lat], Bx*K3[0, lat]*xd[lat, :], Cx, Dx)
H3y = ss(Ay - By*K3[1, alt], By*K3[1, alt]*yd[alt, :], Cy, Dy)
plt.subplot(224)
# step(H3x, H3y, 10)
[Y3x, T3x] = step(H3x, T=np.linspace(0, 10, 100))
[Y3y, T3y] = step(H3y, T=np.linspace(0, 10, 100))
plt.plot(T3x.T, Y3x.T, T3y.T, Y3y.T)
plt.title("Physically motivated weights")
plt.xlabel('time')
plt.legend(('x', 'y'), loc='lower right')
if 'PYCONTROL_TEST_EXAMPLES' not in os.environ:
plt.show()
|
bsd-3-clause
|
anielsen001/scipy
|
scipy/cluster/hierarchy.py
|
2
|
96514
|
"""
========================================================
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
cut_tree
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
References
----------
.. [1] "Statistics toolbox." API Reference Documentation. The MathWorks.
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
Accessed October 1, 2007.
.. [2] "Hierarchical clustering." API Reference Documentation.
The Wolfram Research, Inc.
http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/
HierarchicalClustering.html.
Accessed October 1, 2007.
.. [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
.. [4] Ward Jr, JH. "Hierarchical grouping to optimize an objective
function." Journal of the American Statistical Association. 58(301):
pp. 236--44. 1963.
.. [5] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
32(2): pp. 241--54. 1966.
.. [6] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
855--60. 1962.
.. [7] Batagelj, V. "Comparing resemblance measures." Journal of
Classification. 12: pp. 73--90. 1995.
.. [8] Sokal, RR and Michener, CD. "A statistical method for evaluating
systematic relationships." Scientific Bulletins. 38(22):
pp. 1409--38. 1958.
.. [9] Edelbrock, C. "Mixture model tests of hierarchical clustering
algorithms: the problem of classifying everybody." Multivariate
Behavioral Research. 14: pp. 367--84. 1979.
.. [10] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
Prentice-Hall. Englewood Cliffs, NJ. 1988.
.. [11] Fisher, RA "The use of multiple measurements in taxonomic
problems." Annals of Eugenics, 7(2): 179-188. 1936
* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.
* Mathematica is a registered trademark of The Wolfram Research, Inc.
"""
from __future__ import division, print_function, absolute_import
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import bisect
from collections import deque
import numpy as np
from . import _hierarchy
import scipy.spatial.distance as distance
from scipy._lib.six import string_types
from scipy._lib.six import xrange
_LINKAGE_METHODS = {'single': 0, 'complete': 1, 'average': 2, 'centroid': 3,
'median': 4, 'ward': 5, 'weighted': 6}
_EUCLIDEAN_METHODS = ('centroid', 'median', 'ward')
__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet',
'correspond', 'cut_tree', 'dendrogram', 'fcluster', 'fclusterdata',
'from_mlab_linkage', 'inconsistent', 'is_isomorphic',
'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders',
'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts',
'median', 'num_obs_linkage', 'set_link_color_palette', 'single',
'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance']
class ClusterWarning(UserWarning):
pass
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, ClusterWarning, stacklevel=3)
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _randdm(pnts):
""" Generates a random distance matrix stored in condensed form. A
pnts * (pnts - 1) / 2 sized vector is returned.
"""
if pnts >= 2:
D = np.random.rand(pnts * (pnts - 1) // 2)
else:
raise ValueError("The number of points in the distance matrix "
"must be at least 2.")
return D
def single(y):
"""
Performs single/min/nearest linkage on the condensed distance matrix ``y``
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
The linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='single', metric='euclidean')
def complete(y):
"""
Performs complete/max/farthest point linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Performs average/UPGMA linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Performs weighted/WPGMA linkage on the condensed distance matrix.
See ``linkage`` for more information on the return
structure and algorithm.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Performs centroid/UPGMC linkage.
See ``linkage`` for more information on the input matrix,
return structure, and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See ``linkage``
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Performs median/WPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See linkage
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Performs Ward's linkage on a condensed distance matrix.
See linkage for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
Performs Ward's linkage on the condensed distance matrix ``Z``. See
linkage for more information on the return structure and
algorithm.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric. See linkage for more
information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='ward', metric='euclidean')
def linkage(y, method='single', metric='euclidean'):
"""
Performs hierarchical/agglomerative clustering.
The input y may be either a 1d compressed distance matrix
or a 2d array of observation vectors.
If y is a 1d compressed distance matrix,
then y must be a :math:`{n \\choose 2}` sized
vector where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB linkage function.
A :math:`(n-1)` by 4 matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
cluster :math:`v`. Recall :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \\min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \\max(dist(u[i],v[j]))
for all points :math:`i` in cluster u and :math:`j` in
cluster :math:`v`. This is also known as the Farthest Point
Algorithm or Voor Hees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
and :math:`v`, respectively. This is also called the UPGMA
algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
where cluster u was formed with cluster s and t and v
is a remaining cluster in the forest. (also called WPGMA)
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
* method='median' assigns :math:`d(s,t)` like the ``centroid``
method. When two clusters :math:`s` and :math:`t` are combined
into a new cluster :math:`u`, the average of centroids s and t
gives the new centroid :math:`u`. This is also known as the
WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there
may be two or more pairs with the same minimum distance. This
implementation may choose a different minimum than the MATLAB
version.
Parameters
----------
y : ndarray
A condensed distance matrix. A condensed distance matrix
is a flat array containing the upper triangular of the distance matrix.
This is the form that ``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in n dimensions may be passed as an
:math:`m` by :math:`n` array. All elements of `y` must be finite,
i.e. no NaNs or infs.
method : str, optional
The linkage algorithm to use. See the ``Linkage Methods`` section below
for full descriptions.
metric : str or function, optional
The distance metric to use in the case that y is a collection of
observation vectors; ignored otherwise. See the ``distance.pdist``
function for a list of valid distance metrics. A custom distance
function can also be used. See the ``distance.pdist`` function for
details.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
Notes
-----
1. For method 'single' an optimized algorithm called SLINK is implemented,
which has :math:`O(n^2)` time complexity.
For methods 'complete', 'average', 'weighted' and 'ward' an algorithm
called nearest-neighbors chain is implemented, which too has time
complexity :math:`O(n^2)`.
For other methods a naive algorithm is implemented with :math:`O(n^3)`
time complexity.
All algorithms use :math:`O(n^2)` memory.
Refer to [1]_ for details about the algorithms.
2. Methods 'centroid', 'median' and 'ward' are correctly defined only if
Euclidean pairwise metric is used. If `y` is passed as precomputed
pairwise distances, then it is a user responsibility to assure that
these distances are in fact Euclidean, otherwise the produced result
will be incorrect.
References
----------
.. [1] Daniel Mullner, "Modern hierarchical, agglomerative clustering
algorithms", `arXiv:1109.2378v1 <http://arxiv.org/abs/1109.2378v1>`_
, 2011.
"""
if method not in _LINKAGE_METHODS:
raise ValueError("Invalid method: {0}".format(method))
y = _convert_to_double(np.asarray(y, order='c'))
if y.ndim == 1:
distance.is_valid_y(y, throw=True, name='y')
[y] = _copy_arrays_if_base_present([y])
elif y.ndim == 2:
if method in _EUCLIDEAN_METHODS and metric != 'euclidean':
raise ValueError("Method '{0}' requires the distance metric "
"to be Euclidean".format(method))
if y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0):
if np.all(y >= 0) and np.allclose(y, y.T):
_warning('The symmetric non-negative hollow observation '
'matrix looks suspiciously like an uncondensed '
'distance matrix')
y = distance.pdist(y, metric)
else:
raise ValueError("`y` must be 1 or 2 dimensional.")
if not np.all(np.isfinite(y)):
raise ValueError("`y` must contain only finite values.")
n = int(distance.num_obs_y(y))
method_code = _LINKAGE_METHODS[method]
if method == 'single':
return _hierarchy.slink(y, n)
elif method in ['complete', 'average', 'weighted', 'ward']:
return _hierarchy.nn_chain(y, n, method_code)
else:
return _hierarchy.linkage(y, n, method_code)
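# --- Illustrative usage sketch (not part of the original module) ------------
# A minimal end-to-end example of `linkage`, assuming `X` is an (n, m) array
# of observations; the helper name `_example_ward_linkage` is an assumption.
def _example_ward_linkage(X):
    """Return the (n-1) x 4 Ward linkage matrix for the rows of `X`."""
    y = distance.pdist(X, metric='euclidean')   # condensed distance matrix
    return linkage(y, method='ward')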
class ClusterNode:
"""
A tree node class for representing a cluster.
Leaf nodes correspond to original observations, while non-leaf nodes
correspond to non-singleton clusters.
The to_tree function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
See Also
--------
to_tree : for converting a linkage matrix ``Z`` into a tree object.
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted.'
' This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original '
'observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def __lt__(self, node):
if not isinstance(node, ClusterNode):
raise ValueError("Can't compare ClusterNode "
"to type {}".format(type(node)))
return self.dist < node.dist
def __gt__(self, node):
if not isinstance(node, ClusterNode):
raise ValueError("Can't compare ClusterNode "
"to type {}".format(type(node)))
return self.dist > node.dist
def __eq__(self, node):
if not isinstance(node, ClusterNode):
raise ValueError("Can't compare ClusterNode "
"to type {}".format(type(node)))
return self.dist == node.dist
def get_id(self):
"""
The identifier of the target node.
For ``0 <= i < n``, `i` corresponds to original observation i.
For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
at iteration ``i-n``.
Returns
-------
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
The number of leaf nodes (original observations) belonging to
the cluster node nd. If the target node is a leaf, 1 is
returned.
Returns
-------
get_count : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left
def get_right(self):
"""
Returns a reference to the right child tree object.
Returns
-------
right : ClusterNode
The right child of the target node. If the node is a leaf,
None is returned.
"""
return self.right
def is_leaf(self):
"""
Returns True if the target node is a leaf.
Returns
-------
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Performs pre-order traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement::
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
Parameters
----------
func : function
Applied to each leaf ClusterNode object in the pre-order traversal.
Given the i'th leaf node in the pre-order traversal ``n[i]``, the
result of func(n[i]) is stored in L[i]. If not provided, the index
of the original observation to which the node corresponds is used.
Returns
-------
L : list
The pre-order traversal.
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = set()
rvisited = set()
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if ndid not in lvisited:
curNode[k + 1] = nd.left
lvisited.add(ndid)
k = k + 1
elif ndid not in rvisited:
curNode[k + 1] = nd.right
rvisited.add(ndid)
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def _order_cluster_tree(Z):
"""
Returns clustering nodes in bottom-up order by distance.
Parameters
----------
Z : scipy.cluster.linkage array
The linkage matrix.
Returns
-------
nodes : list
A list of ClusterNode objects.
"""
q = deque()
tree = to_tree(Z)
q.append(tree)
nodes = []
while q:
node = q.popleft()
if not node.is_leaf():
bisect.insort_left(nodes, node)
q.append(node.get_right())
q.append(node.get_left())
return nodes
def cut_tree(Z, n_clusters=None, height=None):
"""
Given a linkage matrix Z, return the cut tree.
Parameters
----------
Z : scipy.cluster.linkage array
The linkage matrix.
n_clusters : array_like, optional
Number of clusters in the tree at the cut point.
height : array_like, optional
The height at which to cut the tree. Only possible for ultrametric
trees.
Returns
-------
cutree : array
An array indicating group membership at each agglomeration step. I.e.,
for a full cut tree, in the first column each data point is in its own
cluster. At the next step, two nodes are merged. Finally all singleton
and non-singleton clusters are in one group. If `n_clusters` or
`height` is given, the columns correspond to the columns of `n_clusters` or
`height`.
Examples
--------
>>> from scipy import cluster
>>> np.random.seed(23)
>>> X = np.random.randn(50, 4)
>>> Z = cluster.hierarchy.ward(X)
>>> cutree = cluster.hierarchy.cut_tree(Z, n_clusters=[5, 10])
>>> cutree[:10]
array([[0, 0],
[1, 1],
[2, 2],
[3, 3],
[3, 4],
[2, 2],
[0, 0],
[1, 5],
[3, 6],
[4, 7]])
"""
nobs = num_obs_linkage(Z)
nodes = _order_cluster_tree(Z)
if height is not None and n_clusters is not None:
raise ValueError("At least one of either height or n_clusters "
"must be None")
elif height is None and n_clusters is None: # return the full cut tree
cols_idx = np.arange(nobs)
elif height is not None:
heights = np.array([x.dist for x in nodes])
cols_idx = np.searchsorted(heights, height)
else:
cols_idx = nobs - np.searchsorted(np.arange(nobs), n_clusters)
try:
n_cols = len(cols_idx)
except TypeError: # scalar
n_cols = 1
cols_idx = np.array([cols_idx])
groups = np.zeros((n_cols, nobs), dtype=int)
last_group = np.arange(nobs)
if 0 in cols_idx:
groups[0] = last_group
for i, node in enumerate(nodes):
idx = node.pre_order()
this_group = last_group.copy()
this_group[idx] = last_group[idx].min()
this_group[this_group > last_group[idx].max()] -= 1
if i + 1 in cols_idx:
groups[np.where(i + 1 == cols_idx)[0]] = this_group
last_group = this_group
return groups.T
def to_tree(Z, rd=False):
"""
Converts a hierarchical clustering encoded in the matrix ``Z`` (by
linkage) into an easy-to-use tree object.
The reference r to the root ClusterNode object is returned.
Each ClusterNode object has a left, right, dist, id, and count
attribute. The left and right attributes point to ClusterNode objects
that were combined to generate the cluster. If both are None then
the ClusterNode object is a leaf node, its count must be 1, and its
distance is meaningless but set to 0.
Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.
Parameters
----------
Z : ndarray
The linkage matrix in proper form (see the ``linkage``
function documentation).
rd : bool, optional
When False, a reference to the root ClusterNode object is
returned. Otherwise, a tuple (r,d) is returned. ``r`` is a
reference to the root node while ``d`` is a dictionary
mapping cluster ids to ClusterNode references. If a cluster id is
less than n, then it corresponds to a singleton cluster
(leaf node). See ``linkage`` for more information on the
assignment of cluster ids to clusters.
Returns
-------
tree : ClusterNode or tuple (ClusterNode, list)
If ``rd`` is False, a reference to the root ClusterNode object is
returned; otherwise the tuple ``(r, d)`` described for the ``rd``
parameter is returned.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# Number of original objects is equal to the number of rows plus 1.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in xrange(0, n):
d[i] = ClusterNode(i)
nd = None
for i in xrange(0, n - 1):
fi = int(Z[i, 0])
fj = int(Z[i, 1])
if fi > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 0') % fi)
if fj > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 1') % fj)
nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
# ^ id ^ left ^ right ^ dist
if Z[i, 3] != nd.count:
raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
'incorrect.') % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
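# Illustrative sketch (not part of the original module): converting a linkage
# matrix to a tree and reading the leaf ids left-to-right with `pre_order`.
# The helper name is an assumption; for a valid linkage matrix the result
# should agree with `leaves_list(Z)`.
def _example_leaf_order(Z):
    """Return the leaf ids of linkage matrix `Z` in left-to-right order."""
    root = to_tree(Z)
    return root.pre_order(lambda node: node.id)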
def _convert_to_bool(X):
if X.dtype != bool:
X = X.astype(bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
"""
Calculates the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``i`` and ``j`` is simply the distance between
clusters ``s`` and ``t``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as an array
(see `linkage` function).
Y : ndarray (optional)
Calculates the cophenetic correlation coefficient ``c`` of a
hierarchical clustering defined by the linkage matrix `Z`
of a set of :math:`n` observations in :math:`m`
dimensions. `Y` is the condensed distance matrix from which
`Z` was generated.
Returns
-------
c : ndarray
The cophenetic correlation coefficient (if ``Y`` is passed).
d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
zz = np.zeros((n * (n-1)) // 2, dtype=np.double)
# Since the C code does not support striding using strides.
# The dimensions are used instead.
Z = _convert_to_double(Z)
_hierarchy.cophenetic_distances(Z, zz, int(n))
if Y is None:
return zz
Y = np.asarray(Y, order='c')
distance.is_valid_y(Y, throw=True, name='Y')
z = zz.mean()
y = Y.mean()
Yy = Y - y
Zz = zz - z
numerator = (Yy * Zz)
denomA = Yy**2
denomB = Zz**2
c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
return (c, zz)
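# Illustrative sketch (not part of the original module): computing the
# cophenetic correlation coefficient of a clustering against the original
# condensed distances. The helper name and the default method are assumptions.
def _example_cophenetic_correlation(X, method='average'):
    """Return the cophenetic correlation coefficient for clustering rows of `X`."""
    y = distance.pdist(X)           # condensed pairwise distances
    Z = linkage(y, method=method)   # hierarchical clustering
    c, _ = cophenet(Z, y)           # coefficient and cophenetic distances
    return c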
def inconsistent(Z, d=2):
r"""
Calculates inconsistency statistics on a linkage.
Note: This function behaves similarly to the MATLAB(TM)
inconsistent function.
Parameters
----------
Z : ndarray
The :math:`(n-1)` by 4 matrix encoding the linkage (hierarchical
clustering). See `linkage` documentation for more information on its
form.
d : int, optional
The number of links up to `d` levels below each non-singleton cluster.
Returns
-------
R : ndarray
A :math:`(n-1)` by 4 matrix where the ``i``'th row contains the link
statistics for the non-singleton cluster ``i``. The link statistics are
computed over the link heights for links :math:`d` levels below the
cluster ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is the number
of links included in the calculation; and ``R[i,3]`` is the
inconsistency coefficient,
.. math:: \frac{\mathtt{Z[i,2]} - \mathtt{R[i,0]}}{\mathtt{R[i,1]}}
"""
Z = np.asarray(Z, order='c')
Zs = Z.shape
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative '
'integer value.')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
n = Zs[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
_hierarchy.inconsistent(Z, R, int(n), int(d))
return R
def from_mlab_linkage(Z):
"""
Converts a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module.
The conversion does two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
* a fourth column Z[:,3] is added where Z[i,3] represents the
number of original observations (leaves) in the non-singleton
cluster i.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
Parameters
----------
Z : ndarray
A linkage matrix generated by MATLAB(TM).
Returns
-------
ZS : ndarray
A linkage matrix compatible with this library.
"""
Z = np.asarray(Z, dtype=np.double, order='c')
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return Z.copy()
Zpart = Z.copy()
if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N')
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
_hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)
return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
def to_mlab_linkage(Z):
"""
Converts a linkage matrix to a MATLAB(TM) compatible one.
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
Parameters
----------
Z : ndarray
A linkage matrix generated by this library.
Returns
-------
to_mlab_linkage : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
The return linkage matrix has the last column removed
and the cluster indices are converted to ``1..N`` indexing.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
is_valid_linkage(Z, throw=True, name='Z')
ZP = Z[:, 0:3].copy()
ZP[:, 0:2] += 1.0
return ZP
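# Illustrative sketch (not part of the original module): for a valid linkage
# matrix, converting to the MATLAB(TM) 1-based convention and back should
# reproduce the original matrix (indices shifted there and back, counts
# recomputed). The helper name is an assumption.
def _example_mlab_roundtrip_ok(Z):
    """Return True if `Z` survives a to_mlab_linkage/from_mlab_linkage round trip."""
    return bool(np.allclose(from_mlab_linkage(to_mlab_linkage(Z)), Z))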
def is_monotonic(Z):
"""
Returns True if the linkage passed is monotonic.
The linkage is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
Parameters
----------
Z : ndarray
The linkage matrix to check for monotonicity.
Returns
-------
b : bool
A boolean indicating whether the linkage is monotonic.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# We expect each value to be no less than its predecessor (non-decreasing).
return (Z[1:, 2] >= Z[:-1, 2]).all()
def is_valid_im(R, warning=False, throw=False, name=None):
"""Returns True if the inconsistency matrix passed is valid.
It must be a :math:`n` by 4 numpy array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the inconsistency
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the inconsistency
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
inconsistency matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
"""
R = np.asarray(R, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(R) != np.ndarray:
raise TypeError('Variable %spassed as inconsistency matrix is not '
'a numpy array.' % name_str)
if R.dtype != np.double:
raise TypeError('Inconsistency matrix %smust contain doubles '
'(double).' % name_str)
if len(R.shape) != 2:
raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. '
'be two-dimensional).' % name_str)
if R.shape[1] != 4:
raise ValueError('Inconsistency matrix %smust have 4 columns.' %
name_str)
if R.shape[0] < 1:
raise ValueError('Inconsistency matrix %smust have at least one '
'row.' % name_str)
if (R[:, 0] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height means.' % name_str)
if (R[:, 1] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height standard deviations.' % name_str)
if (R[:, 2] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'counts.' % name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
"""
Checks the validity of a linkage matrix.
A linkage matrix is valid if it is a two dimensional array (type double)
with :math:`n` rows and 4 columns. The first two columns must contain
indices between 0 and :math:`2n-1`. For a given row ``i``, the following
two expressions have to hold:
.. math::
0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1
0 \\leq \\mathtt{Z[i,1]} \\leq i+n-1
I.e. a cluster cannot join another cluster unless the cluster being joined
has been generated.
Parameters
----------
Z : array_like
Linkage matrix.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the linkage matrix is valid.
"""
Z = np.asarray(Z, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(Z) != np.ndarray:
raise TypeError('Passed linkage argument %sis not a valid array.' %
name_str)
if Z.dtype != np.double:
raise TypeError('Linkage matrix %smust contain doubles.' % name_str)
if len(Z.shape) != 2:
raise ValueError('Linkage matrix %smust have shape=2 (i.e. be '
'two-dimensional).' % name_str)
if Z.shape[1] != 4:
raise ValueError('Linkage matrix %smust have 4 columns.' % name_str)
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two '
'observations.')
n = Z.shape[0]
if n > 1:
if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
raise ValueError('Linkage %scontains negative indices.' %
name_str)
if (Z[:, 2] < 0).any():
raise ValueError('Linkage %scontains negative distances.' %
name_str)
if (Z[:, 3] < 0).any():
raise ValueError('Linkage %scontains negative counts.' %
name_str)
if _check_hierarchy_uses_cluster_before_formed(Z):
raise ValueError('Linkage %suses non-singleton cluster before '
'it is formed.' % name_str)
if _check_hierarchy_uses_cluster_more_than_once(Z):
raise ValueError('Linkage %suses the same cluster more than once.'
% name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in xrange(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Returns the number of original observations of the linkage matrix
passed.
Parameters
----------
Z : ndarray
The linkage matrix on which to perform the operation.
Returns
-------
n : int
The number of original observations in the linkage.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Checks for correspondence between linkage and condensed distance matrices
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Forms flat clusters from the hierarchical clustering defined by
the linkage matrix ``Z``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the `linkage` function.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
The criterion to use in forming flat clusters. This can
be any of the following values:
``inconsistent`` : If a cluster node and all its
descendants have an inconsistent value less than or equal
to `t` then all its leaf descendants belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
``distance`` : Forms flat clusters so that the original
observations in each flat cluster have no greater a
cophenetic distance than `t`.
``maxclust`` : Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than `t` flat clusters are formed.
``monocrit`` : Forms a flat cluster from a cluster node c
with index i when ``monocrit[j] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do::
MR = maxRstat(Z, R, 3)
cluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
``maxclust_monocrit`` : Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do::
MI = maxinconsts(Z, R)
cluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
depth : int, optional
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. Default is 2.
R : ndarray, optional
The inconsistency matrix to use for the 'inconsistent'
criterion. This matrix is computed if not provided.
monocrit : ndarray, optional
An array of length n-1. `monocrit[i]` is the
statistic upon which non-singleton i is thresholded. The
monocrit vector must be monotonic, i.e. given a node c with
index i, for all node indices j corresponding to nodes
below c, ``monocrit[i] >= monocrit[j]``.
Returns
-------
fcluster : ndarray
An array of length n. T[i] is the flat cluster number to
which original observation i belongs.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = np.asarray(R, order='c')
is_valid_im(R, throw=True, name='R')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[R] = _copy_arrays_if_base_present([R])
_hierarchy.cluster_in(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy.cluster_dist(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
elif criterion == 'monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s'
% str(criterion))
return T
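# Illustrative sketch (not part of the original module): forming at most `k`
# flat clusters with the 'maxclust' criterion. The helper name is an assumption.
def _example_k_flat_clusters(Z, k):
    """Return flat-cluster labels (numbered from 1, at most `k` clusters) for `Z`."""
    return fcluster(Z, t=k, criterion='maxclust')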
def fclusterdata(X, t, criterion='inconsistent',
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
Clusters the original observations in the n-by-m data
matrix X (n observations in m dimensions), using the euclidean
distance metric to calculate distances between original observations,
performs hierarchical clustering using the single linkage algorithm,
and forms flat clusters using the inconsistency method with `t` as the
cut-off threshold.
A one-dimensional array T of length n is returned. T[i] is the index
of the flat cluster to which the original observation i belongs.
Parameters
----------
X : (N, M) ndarray
N by M data matrix with N observations in M dimensions.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent' (default), 'distance', or 'maxclust'
cluster formation algorithms. See `fcluster` for descriptions.
metric : str, optional
The distance metric for calculating pairwise distances. See
`distance.pdist` for descriptions and linkage to verify
compatibility with the linkage method.
depth : int, optional
The maximum depth for the inconsistency calculation. See
`inconsistent` for more information.
method : str, optional
The linkage method to use (single, complete, average,
weighted, median, centroid, ward). See `linkage` for more
information. Default is "single".
R : ndarray, optional
The inconsistency matrix. It will be computed if necessary
if it is not passed.
Returns
-------
fclusterdata : ndarray
A vector of length n. T[i] is the flat cluster number to
which original observation i belongs.
Notes
-----
This function is similar to the MATLAB function clusterdata.
"""
X = np.asarray(X, order='c', dtype=np.double)
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError('The observation matrix X must be an n by m numpy '
'array.')
Y = distance.pdist(X, metric=metric)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = np.asarray(R, order='c')
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
def leaves_list(Z):
"""
Returns a list of leaf node ids
The return corresponds to the observation vector index as it appears
in the tree from left to right. Z is a linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. `Z` is
a linkage matrix. See ``linkage`` for more information.
Returns
-------
leaves_list : ndarray
The list of leaf node ids.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.prelist(Z, ML, int(n))
return ML
# Maps number of leaves to text size.
#
# p <= 20, size="12"
# 20 < p <= 30, size="10"
# 30 < p <= 50, size="8"
# 50 < p <= np.inf, size="6"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Removes duplicates AND preserves the original order of the elements.
The set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=None,
leaf_rotation=None, contraction_marks=None,
ax=None, above_threshold_color='b'):
# Import matplotlib here so that it's not imported unless dendrograms
# are plotted. Raise an informative error if importing fails.
try:
# if an axis is provided, don't use pylab at all
if ax is None:
import matplotlib.pylab
import matplotlib.patches
import matplotlib.collections
except ImportError:
raise ImportError("You must install the matplotlib library to plot "
"the dendrogram. Use no_plot=True to calculate the "
"dendrogram without plotting.")
if ax is None:
ax = matplotlib.pylab.gca()
# if we're using pylab, we want to trigger a draw at the end
trigger_redraw = True
else:
trigger_redraw = False
# Independent variable plot width
ivw = len(ivl) * 10
# Dependent variable plot height
dvw = mh + mh * 0.05
iv_ticks = np.arange(5, len(ivl) * 10 + 5, 10)
if orientation in ('top', 'bottom'):
if orientation == 'top':
ax.set_ylim([0, dvw])
ax.set_xlim([0, ivw])
else:
ax.set_ylim([dvw, 0])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(iv_ticks)
if orientation == 'top':
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
leaf_rot = float(_get_tick_rotation(len(ivl))) if (
leaf_rotation is None) else leaf_rotation
leaf_font = float(_get_tick_text_size(len(ivl))) if (
leaf_font_size is None) else leaf_font_size
ax.set_xticklabels(ivl, rotation=leaf_rot, size=leaf_font)
elif orientation in ('left', 'right'):
if orientation == 'left':
ax.set_xlim([dvw, 0])
ax.set_ylim([0, ivw])
else:
ax.set_xlim([0, dvw])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(iv_ticks)
if orientation == 'left':
ax.yaxis.set_ticks_position('right')
else:
ax.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the links
for line in ax.get_yticklines():
line.set_visible(False)
leaf_font = float(_get_tick_text_size(len(ivl))) if (
leaf_font_size is None) else leaf_font_size
if leaf_rotation is not None:
ax.set_yticklabels(ivl, rotation=leaf_rotation, size=leaf_font)
else:
ax.set_yticklabels(ivl, size=leaf_font)
# Let's use collections instead. This way there is a separate legend item
# for each tree grouping, rather than stupidly one for each line segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline, yline, color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(list(zip(xline, yline)))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color],
colors=(color,))
colors_to_collections[color] = coll
# Add all the groupings below the color threshold.
for color in colors_used:
if color != above_threshold_color:
ax.add_collection(colors_to_collections[color])
# If there's a grouping of links above the color threshold, it goes last.
if above_threshold_color in colors_to_collections:
ax.add_collection(colors_to_collections[above_threshold_color])
if contraction_marks is not None:
Ellipse = matplotlib.patches.Ellipse
for (x, y) in contraction_marks:
if orientation in ('left', 'right'):
e = Ellipse((y, x), width=dvw / 100, height=1.0)
else:
e = Ellipse((x, y), width=1.0, height=dvw / 100)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if trigger_redraw:
matplotlib.pylab.draw_if_interactive()
_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']
def set_link_color_palette(palette):
"""
Set list of matplotlib color codes for use by dendrogram.
Note that this palette is global (i.e. setting it once changes the colors
for all subsequent calls to `dendrogram`) and that it affects only the
colors below ``color_threshold``.
Note that `dendrogram` also accepts a custom coloring function through its
``link_color_func`` keyword, which is more flexible and non-global.
Parameters
----------
palette : list of str or None
A list of matplotlib color codes. The order of the color codes is the
order in which the colors are cycled through when color thresholding in
the dendrogram.
If ``None``, resets the palette to its default (which is
``['g', 'r', 'c', 'm', 'y', 'k']``).
Returns
-------
None
See Also
--------
dendrogram
Notes
-----
Ability to reset the palette with ``None`` added in Scipy 0.17.0.
Examples
--------
>>> from scipy.cluster import hierarchy
>>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268., 400.,
... 754., 564., 138., 219., 869., 669.])
>>> Z = hierarchy.linkage(ytdist, 'single')
>>> dn = hierarchy.dendrogram(Z, no_plot=True)
>>> dn['color_list']
['g', 'b', 'b', 'b', 'b']
>>> hierarchy.set_link_color_palette(['c', 'm', 'y', 'k'])
>>> dn = hierarchy.dendrogram(Z, no_plot=True)
>>> dn['color_list']
['c', 'b', 'b', 'b', 'b']
>>> dn = hierarchy.dendrogram(Z, no_plot=True, color_threshold=267,
... above_threshold_color='k')
>>> dn['color_list']
['c', 'm', 'm', 'k', 'k']
Now reset the color palette to its default:
>>> hierarchy.set_link_color_palette(None)
"""
if palette is None:
# reset to its default
palette = ['g', 'r', 'c', 'm', 'y', 'k']
elif type(palette) not in (list, tuple):
raise TypeError("palette must be a list or tuple")
_ptypes = [isinstance(p, string_types) for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
for i in list(_link_line_colors):
_link_line_colors.remove(i)
_link_line_colors.extend(list(palette))
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, leaf_font_size=None,
leaf_rotation=None, leaf_label_func=None,
show_contracted=False, link_color_func=None, ax=None,
above_threshold_color='b'):
"""
Plots the hierarchical clustering as a dendrogram.
The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The height of the top of the U-link is
the distance between its children clusters. It is also the
cophenetic distance between original observations in the two
children clusters. It is expected that the distances in Z[:,2] be
monotonic, otherwise crossings appear in the dendrogram.
Parameters
----------
Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
``None/'none'``
No truncation is performed (Default).
``'lastp'``
          The last ``p`` non-singleton clusters formed in the linkage are
          the only non-leaf nodes in the linkage; they correspond to rows
``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
contracted into leaf nodes.
``'mlab'``
This corresponds to MATLAB(TM) behavior. (not implemented yet)
``'level'/'mtica'``
No more than ``p`` levels of the dendrogram tree are displayed.
This corresponds to Mathematica(TM) behavior.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
blue. If :math:`t` is less than or equal to zero, all nodes
are colored blue. If ``color_threshold`` is None or
'default', corresponding with MATLAB(TM) behavior, the
threshold is set to ``0.7*max(Z[:,2])``.
get_leaves : bool, optional
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j``, cluster node
``j`` appears in position ``i`` in the left-to-right traversal
of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
of the following strings:
``'top'``
Plots the root at the top, and plot descendent links going downwards.
(default).
``'bottom'``
Plots the root at the bottom, and plot descendent links going
upwards.
``'left'``
Plots the root at the left, and plot descendent links going right.
``'right'``
Plots the root at the right, and plot descendent links going left.
labels : ndarray, optional
By default ``labels`` is None so the index of the original observation
is used to label the leaf nodes. Otherwise, this is an :math:`n`
-sized list (or tuple). The ``labels[i]`` value is the text to put
under the :math:`i` th leaf node only if it corresponds to an original
observation and not a non-singleton cluster.
count_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) in which
        n's two descendent links are plotted is determined by this
        parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum number of original objects in its cluster
is plotted first.
        ``'descending'``
The child with the maximum number of original objects in its cluster
is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
distance_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) in which
        n's two descendent links are plotted is determined by this
        parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum distance between its direct descendents is
plotted first.
``'descending'``
The child with the maximum distance between its direct descendents is
plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
show_leaf_counts : bool, optional
When True, leaf nodes representing :math:`k>1` original
        observations are labeled with the number of observations they
contain in parentheses.
no_plot : bool, optional
When True, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
no_labels : bool, optional
When True, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
leaf_rotation : double, optional
Specifies the angle (in degrees) to rotate the leaf
labels. When unspecified, the rotation is based on the number of
nodes in the dendrogram (default is 0).
leaf_font_size : int, optional
Specifies the font size (in points) of the leaf labels. When
        unspecified, the size is based on the number of nodes in the
dendrogram.
leaf_label_func : lambda or function, optional
        When ``leaf_label_func`` is a callable function, it is called for
        each leaf with cluster index :math:`k < 2n-1` and is expected to
        return a string with the label for that leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do::
# First define the leaf label function.
def llf(id):
if id < n:
return str(id)
else:
return '[%d %d %1.2f]' % (id, count, R[n-id,3])
# The text for the leaf nodes is going to be big so force
# a rotation of 90 degrees.
dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
show_contracted : bool, optional
When True the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
link_color_func : callable, optional
        If given, `link_color_func` is called with each non-singleton id
corresponding to each U-shaped link it will paint. The function is
expected to return the color to paint the link, encoded as a matplotlib
color string code. For example::
dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
ax : matplotlib Axes instance, optional
If None and `no_plot` is not True, the dendrogram will be plotted
on the current axes. Otherwise if `no_plot` is not True the
dendrogram will be plotted on the given ``Axes`` instance. This can be
useful if the dendrogram is part of a more complex figure.
above_threshold_color : str, optional
This matplotlib color string sets the color of the links above the
color_threshold. The default is 'b'.
Returns
-------
R : dict
A dictionary of data structures computed to render the
        dendrogram. It has the following keys:
``'color_list'``
A list of color names. The k'th element represents the color of the
k'th link.
``'icoord'`` and ``'dcoord'``
Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
``'ivl'``
A list of labels corresponding to the leaf nodes.
``'leaves'``
For each i, ``H[i] == j``, cluster node ``j`` appears in position
``i`` in the left-to-right traversal of the leaves, where
:math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
``i``-th leaf node corresponds to an original observation.
Otherwise, it corresponds to a non-singleton cluster.
See Also
--------
linkage, set_link_color_palette
Examples
--------
>>> from scipy.cluster import hierarchy
>>> import matplotlib.pyplot as plt
A very basic example:
>>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,
... 400., 754., 564., 138., 219., 869., 669.])
>>> Z = hierarchy.linkage(ytdist, 'single')
>>> plt.figure()
>>> dn = hierarchy.dendrogram(Z)
Now plot in given axes, improve the color scheme and use both vertical and
horizontal orientations:
>>> hierarchy.set_link_color_palette(['m', 'c', 'y', 'k'])
>>> fig, axes = plt.subplots(1, 2, figsize=(8, 3))
>>> dn1 = hierarchy.dendrogram(Z, ax=axes[0], above_threshold_color='y',
... orientation='top')
>>> dn2 = hierarchy.dendrogram(Z, ax=axes[1], above_threshold_color='#bcbddc',
... orientation='right')
>>> hierarchy.set_link_color_palette(None) # reset to default after use
>>> plt.show()
"""
# This feature was thought about but never implemented (still useful?):
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
Z = np.asarray(Z, order='c')
if orientation not in ["top", "left", "bottom", "right"]:
raise ValueError("orientation must be one of 'top', 'left', "
"'bottom', or 'right'")
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (int, float):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp' or truncate_mode == 'mlab':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica' or truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list = []
dcoord_list = []
color_list = []
current_color = [0]
currently_below_threshold = [False]
ivl = [] # list of leaves
if color_threshold is None or (isinstance(color_threshold, string_types) and
color_threshold == 'default'):
color_threshold = max(Z[:, 2]) * 0.7
R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
'leaves': lvs, 'color_list': color_list}
# Empty list will be filled in _dendrogram_calculate_info
contraction_marks = [] if show_contracted else None
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=2*n - 2,
iv=0.0,
ivl=ivl,
n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list,
lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
if not no_plot:
mh = max(Z[:, 2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
no_labels, color_list,
leaf_font_size=leaf_font_size,
leaf_rotation=leaf_rotation,
contraction_marks=contraction_marks,
ax=ax,
above_threshold_color=above_threshold_color)
return R
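# --- Illustrative usage sketch (added for this edit; not part of the original
# SciPy source).  It shows how the dictionary returned by dendrogram() can be
# inspected without plotting, and how truncate_mode='lastp' condenses the tree.
# It assumes linkage() is defined earlier in this module, as the docstring
# above indicates.
def _example_dendrogram_no_plot():
    rng = np.random.RandomState(0)
    Z = linkage(rng.rand(12, 3), 'ward')
    full = dendrogram(Z, no_plot=True)
    truncated = dendrogram(Z, no_plot=True, truncate_mode='lastp', p=4)
    # One U-link per merge in the full tree, but only p - 1 links after
    # truncation, because only the last p non-singleton clusters remain
    # as non-leaf nodes.
    assert len(full['icoord']) == Z.shape[0]
    assert len(truncated['icoord']) == 4 - 1
    return full, truncated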
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i - n)])
else:
                # Otherwise, use the id as the label for the leaf.
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i - n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode,
color_threshold=np.inf, get_leaves=True,
orientation='top', labels=None,
count_sort=False, distance_sort=False,
show_leaf_counts=False, i=-1, iv=0.0,
ivl=[], n=0, icoord_list=[], dcoord_list=[],
lvs=None, mhr=False,
current_color=[], color_list=[],
currently_below_threshold=[],
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None,
above_threshold_color='b'):
"""
Calculates the endpoints of the links as well as the labels for the
    dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns
-------
A tuple (left, w, h, md), where:
* left is the independent variable coordinate of the center of the
        U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
      * md is the ``max(Z[*,2])`` for all nodes ``*`` below and including
the target node.
"""
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
# If the node is a leaf node but corresponds to a non-single cluster,
# its label is either the empty string or the number of original
# observations belonging to cluster i.
if 2 * n - p > i >= n:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mtica', 'level'):
if i > n and level > p:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mlab',):
pass
# Otherwise, only truncate if we have a leaf node.
#
# If the truncate_mode is mlab, the linkage has been modified
# with the truncated tree.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = int(Z[i - n, 0])
ab = int(Z[i - n, 1])
if aa > n:
# The number of singletons below cluster a
na = Z[aa - n, 3]
# The distance between a's two direct children.
da = Z[aa - n, 2]
else:
na = 1
da = 0.0
if ab > n:
nb = Z[ab - n, 3]
db = Z[ab - n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort == True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort == True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ua, iv=iv, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
h = Z[i - n, 2]
if h >= color_threshold or color_threshold <= 0:
c = above_threshold_color
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ub, iv=iv + uwa, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if not isinstance(v, string_types):
raise TypeError("link_color_func must return a matplotlib "
"color string!")
color_list.append(v)
else:
color_list.append(c)
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
"""
Determines if two different cluster assignments are equivalent.
Parameters
----------
T1 : array_like
An assignment of singleton cluster ids to flat cluster ids.
T2 : array_like
An assignment of singleton cluster ids to flat cluster ids.
Returns
-------
b : bool
Whether the flat cluster assignments `T1` and `T2` are
equivalent.
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
if type(T1) != np.ndarray:
raise TypeError('T1 must be a numpy array.')
if type(T2) != np.ndarray:
raise TypeError('T2 must be a numpy array.')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d = {}
for i in xrange(0, n):
if T1[i] in d:
if d[T1[i]] != T2[i]:
return False
else:
d[T1[i]] = T2[i]
return True
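# --- Illustrative usage sketch (added for this edit; not part of the original
# SciPy source).  is_isomorphic() compares only the grouping structure, so
# relabelling the flat cluster ids does not change the result.
def _example_is_isomorphic():
    T1 = [1, 1, 2, 2, 3]
    T2 = [9, 9, 4, 4, 7]          # same partition, different label values
    assert is_isomorphic(T1, T2)
    assert not is_isomorphic([1, 1, 2, 2, 3], [1, 2, 2, 2, 3])
    return True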
def maxdists(Z):
"""
Returns the maximum distance between any non-singleton cluster.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
maxdists : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n - 1,))
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
return MD
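# --- Illustrative usage sketch (added for this edit; not part of the original
# SciPy source).  The last entry of maxdists() covers the whole tree, so it
# equals the largest merge height in the linkage matrix.  Assumes linkage()
# is defined earlier in this module.
def _example_maxdists():
    rng = np.random.RandomState(1)
    Z = linkage(rng.rand(8, 2), 'single')
    MD = maxdists(Z)
    assert MD.shape == (Z.shape[0],)
    assert np.isclose(MD[-1], Z[:, 2].max())
    return MD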
def maxinconsts(Z, R):
"""
Returns the maximum inconsistency coefficient for each
non-singleton cluster and its descendents.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : ndarray
The inconsistency matrix.
Returns
-------
MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
MI = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
return MI
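# --- Illustrative usage sketch (added for this edit; not part of the original
# SciPy source).  It assumes linkage() and inconsistent() are defined earlier
# in this module; column 3 of the inconsistency matrix holds the inconsistency
# coefficients, so MI[-1] (the root's subtree) is their overall maximum.
def _example_maxinconsts():
    rng = np.random.RandomState(2)
    Z = linkage(rng.rand(10, 3), 'average')
    R = inconsistent(Z)
    MI = maxinconsts(Z, R)
    assert MI.shape == (Z.shape[0],)
    assert np.isclose(MI[-1], R[:, 3].max())
    return MI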
def maxRstat(Z, R, i):
"""
Returns the maximum statistic for each non-singleton cluster and
its descendents.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See `linkage` for more
information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where
``Q(j)`` the set of all node ids corresponding to nodes below
and including ``j``.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
return MR
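# --- Illustrative usage sketch (added for this edit; not part of the original
# SciPy source).  maxRstat(Z, R, 3) reduces to maxinconsts(Z, R) because both
# take the per-subtree maximum of column 3 of R.  Assumes linkage() and
# inconsistent() are defined earlier in this module.
def _example_maxRstat():
    rng = np.random.RandomState(3)
    Z = linkage(rng.rand(10, 3), 'complete')
    R = inconsistent(Z)
    assert np.allclose(maxRstat(Z, R, 3), maxinconsts(Z, R))
    # Column 1 holds the standard deviation of link heights in each
    # neighbourhood; its per-subtree maximum is another useful summary.
    return maxRstat(Z, R, 1)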
def leaders(Z, T):
"""
Returns the root nodes in a hierarchical clustering.
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z such that:
* leaf descendents belong only to flat cluster j
(i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
:math:`S(i)` is the set of leaf ids of leaf nodes descendent
with cluster node :math:`i`)
* there does not exist a leaf that is not descendent with
:math:`i` that also belongs to cluster :math:`j`
(i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
T : ndarray
The flat cluster assignment vector.
Returns
-------
L : ndarray
The leader linkage node id's stored as a k-element 1-D array
where ``k`` is the number of flat clusters found in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
M : ndarray
The leader linkage node id's stored as a k-element 1-D array where
``k`` is the number of flat clusters found in ``T``. This allows the
set of flat cluster ids to be any arbitrary set of ``k`` integers.
"""
Z = np.asarray(Z, order='c')
T = np.asarray(T, order='c')
if type(T) != np.ndarray or T.dtype != 'i':
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype='i')
M = np.zeros((kk,), dtype='i')
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError(('T is not a valid assignment vector. Error found '
'when examining linkage node %d (< 2n-1).') % s)
return (L, M)
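# --- Illustrative usage sketch (added for this edit; not part of the original
# SciPy source).  For a cut produced by fcluster(), every flat cluster has
# exactly one leader node in the tree.  Assumes linkage() and fcluster() are
# defined earlier in this module, as the docstring above references.
def _example_leaders():
    rng = np.random.RandomState(4)
    Z = linkage(rng.rand(10, 2), 'ward')
    T = fcluster(Z, t=2, criterion='maxclust')
    L, M = leaders(Z, T)
    assert len(L) == len(np.unique(T))
    assert set(M.tolist()) == set(np.unique(T).tolist())
    return L, M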
|
bsd-3-clause
|